Total coverage: 216857 (12%) of 1885592
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * usbusx2y.c - ALSA USB US-428 Driver
 *
 * 2005-04-14 Karsten Wiese
 *	Version 0.8.7.2:
 *	Call snd_card_free() instead of snd_card_free_in_thread() to prevent
 *	oops with dead keyboard symptom. Tested ok with kernel 2.6.12-rc2.
 *
 * 2004-12-14 Karsten Wiese
 *	Version 0.8.7.1:
 *	snd_pcm_open for rawusb pcm-devices now returns -EBUSY if called
 *	without rawusb's hwdep device being open.
 *
 * 2004-12-02 Karsten Wiese
 *	Version 0.8.7:
 *	Use macro usb_maxpacket() for portability.
 *
 * 2004-10-26 Karsten Wiese
 *	Version 0.8.6:
 *	wake_up() process waiting in usx2y_urbs_start() on error.
 *
 * 2004-10-21 Karsten Wiese
 *	Version 0.8.5:
 *	nrpacks is runtime or compiletime configurable now with tested
 *	values from 1 to 4.
 *
 * 2004-10-03 Karsten Wiese
 *	Version 0.8.2:
 *	Avoid any possible racing while in prepare callback.
 *
 * 2004-09-30 Karsten Wiese
 *	Version 0.8.0:
 *	Simplified things and made ohci work again.
 *
 * 2004-09-20 Karsten Wiese
 *	Version 0.7.3:
 *	Use usb_kill_urb() instead of deprecated (kernel 2.6.9)
 *	usb_unlink_urb().
 *
 * 2004-07-13 Karsten Wiese
 *	Version 0.7.1:
 *	Don't sleep in START/STOP callbacks anymore.
 *	us428 channels C/D not handled just for this version, sorry.
 *
 * 2004-06-21 Karsten Wiese
 *	Version 0.6.4:
 *	Temporarily suspend midi input to sanely call usb_set_interface()
 *	when setting format.
 *
 * 2004-06-12 Karsten Wiese
 *	Version 0.6.3:
 *	Made it thus the following rule is enforced:
 *	"All pcm substreams of one usx2y have to operate at the same
 *	rate & format."
 *
 * 2004-04-06 Karsten Wiese
 *	Version 0.6.0:
 *	Runs on 2.6.5 kernel without any "--with-debug=" things.
 *	us224 reported running.
 *
 * 2004-01-14 Karsten Wiese
 *	Version 0.5.1:
 *	Runs with 2.6.1 kernel.
 *
 * 2003-12-30 Karsten Wiese
 *	Version 0.4.1:
 *	Fix 24Bit 4Channel capturing for the us428.
 *
 * 2003-11-27 Karsten Wiese, Martin Langer
 *	Version 0.4:
 *	us122 support.
 *	us224 could be tested by uncommenting the sections containing
 *	USB_ID_US224.
 *
 * 2003-11-03 Karsten Wiese
 *	Version 0.3:
 *	24Bit support.
 *	"arecord -D hw:1 -c 2 -r 48000 -M -f S24_3LE | aplay -D hw:1 -c 2
 *	-r 48000 -M -f S24_3LE" works.
 *
 * 2003-08-22 Karsten Wiese
 *	Version 0.0.8:
 *	Removed EZUSB Firmware. First stage firmware download is now done
 *	by the tascam-firmware downloader.
 *	See: http://usb-midi-fw.sourceforge.net/tascam-firmware.tar.gz
 *
 * 2003-06-18 Karsten Wiese
 *	Version 0.0.5:
 *	changed to compile with kernel 2.4.21 and alsa 0.9.4
 *
 * 2002-10-16 Karsten Wiese
 *	Version 0.0.4:
 *	compiles again with alsa-current.
 *	USB_ISO_ASAP not used anymore (most of the time), instead
 *	urb->start_frame is calculated here now, some calls inside the
 *	usb-driver don't need to happen anymore.
 *	To get the best out of this:
 *	Disable APM-support in the kernel as APM-BIOS calls (once each
 *	second) hard disable interrupt for many precious milliseconds.
 *	This helped me much on my slowish PII 400 & PIII 500.
 *	ACPI yet untested but might cause the same bad behaviour.
 *	Use a kernel with lowlatency and preemptive patches applied.
 *	To autoload snd-usb-midi append a line
 *		post-install snd-usb-us428 modprobe snd-usb-midi
 *	to /etc/modules.conf.
 *
 *	known problems:
 *	sliders, knobs, lights not yet handled except MASTER Volume slider.
 *	"pcm -c 2" doesn't work. "pcm -c 2 -m direct_interleaved" does.
 *	KDE3: "Enable full duplex operation" deadlocks.
 *
 * 2002-08-31 Karsten Wiese
 *	Version 0.0.3: audio also simplex;
 *	simplifying: iso urbs only 1 packet, melted structs.
 *	ASYNC_UNLINK not used anymore: no more crashes so far.....
 *	for alsa 0.9 rc3.
 *
 * 2002-08-09 Karsten Wiese
 *	Version 0.0.2: midi works with snd-usb-midi, audio (only fullduplex
 *	now) with e.g. bristol. The firmware has been sniffed from the
 *	win2k us-428 driver 3.09.
 *
 * Copyright (c) 2002 - 2004 Karsten Wiese
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/usb.h>
#include <sound/core.h>
#include <sound/initval.h>
#include <sound/pcm.h>
#include <sound/rawmidi.h>

#include "usx2y.h"
#include "usbusx2y.h"
#include "usX2Yhwdep.h"

MODULE_AUTHOR("Karsten Wiese <annabellesgarden@yahoo.de>");
MODULE_DESCRIPTION("TASCAM "NAME_ALLCAPS" Version 0.8.7.2");
MODULE_LICENSE("GPL");

static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;	/* Index 0-max */
static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;	/* Id for this card */
static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; /* Enable this card */

module_param_array(index, int, NULL, 0444);
MODULE_PARM_DESC(index, "Index value for "NAME_ALLCAPS".");
module_param_array(id, charp, NULL, 0444);
MODULE_PARM_DESC(id, "ID string for "NAME_ALLCAPS".");
module_param_array(enable, bool, NULL, 0444);
MODULE_PARM_DESC(enable, "Enable "NAME_ALLCAPS".");

static int snd_usx2y_card_used[SNDRV_CARDS];

static void snd_usx2y_card_private_free(struct snd_card *card);
static void usx2y_unlinkseq(struct snd_usx2y_async_seq *s);

#ifdef USX2Y_NRPACKS_VARIABLE
int nrpacks = USX2Y_NRPACKS; /* number of packets per urb */
module_param(nrpacks, int, 0444);
MODULE_PARM_DESC(nrpacks, "Number of packets per URB.");
#endif

/*
 * pipe 4 is used for switching the lamps, setting samplerate, volumes ....
 */
static void i_usx2y_out04_int(struct urb *urb)
{
#ifdef CONFIG_SND_DEBUG
	if (urb->status) {
		int i;
		struct usx2ydev *usx2y = urb->context;

		for (i = 0; i < 10 && usx2y->as04.urb[i] != urb; i++)
			;
		dev_dbg(&urb->dev->dev, "%s urb %i status=%i\n",
			__func__, i, urb->status);
	}
#endif
}

static void i_usx2y_in04_int(struct urb *urb)
{
	int err = 0;
	struct usx2ydev *usx2y = urb->context;
	struct us428ctls_sharedmem *us428ctls = usx2y->us428ctls_sharedmem;
	struct us428_p4out *p4out;
	int i, j, n, diff, send;

	usx2y->in04_int_calls++;

	if (urb->status) {
		dev_dbg(&urb->dev->dev, "Interrupt Pipe 4 came back with status=%i\n",
			urb->status);
		return;
	}

	if (us428ctls) {
		diff = -1;
		if (us428ctls->ctl_snapshot_last == -2) {
			diff = 0;
			memcpy(usx2y->in04_last, usx2y->in04_buf,
			       sizeof(usx2y->in04_last));
			us428ctls->ctl_snapshot_last = -1;
		} else {
			for (i = 0; i < 21; i++) {
				if (usx2y->in04_last[i] != ((char *)usx2y->in04_buf)[i]) {
					if (diff < 0)
						diff = i;
					usx2y->in04_last[i] = ((char *)usx2y->in04_buf)[i];
				}
			}
		}
		if (diff >= 0) {
			n = us428ctls->ctl_snapshot_last + 1;
			if (n >= N_US428_CTL_BUFS || n < 0)
				n = 0;
			memcpy(us428ctls->ctl_snapshot + n, usx2y->in04_buf,
			       sizeof(us428ctls->ctl_snapshot[0]));
			us428ctls->ctl_snapshot_differs_at[n] = diff;
			us428ctls->ctl_snapshot_last = n;
			wake_up(&usx2y->us428ctls_wait_queue_head);
		}
	}

	if (usx2y->us04) {
		if (!usx2y->us04->submitted) {
			do {
				err = usb_submit_urb(usx2y->us04->urb[usx2y->us04->submitted++],
						     GFP_ATOMIC);
			} while (!err && usx2y->us04->submitted < usx2y->us04->len);
		}
	} else {
		if (us428ctls && us428ctls->p4out_last >= 0 &&
		    us428ctls->p4out_last < N_US428_P4OUT_BUFS) {
			if (us428ctls->p4out_last != us428ctls->p4out_sent) {
				send = us428ctls->p4out_sent + 1;
				if (send >= N_US428_P4OUT_BUFS)
					send = 0;
				for (j = 0; j < URBS_ASYNC_SEQ && !err; ++j) {
					if (!usx2y->as04.urb[j]->status) {
						p4out = us428ctls->p4out + send;
						/* FIXME: if more than 1 p4out is new, 1 gets lost. */
						usb_fill_bulk_urb(usx2y->as04.urb[j], usx2y->dev,
								  usb_sndbulkpipe(usx2y->dev, 0x04),
								  &p4out->val.vol,
								  p4out->type == ELT_LIGHT ?
								  sizeof(struct us428_lights) : 5,
								  i_usx2y_out04_int, usx2y);
						err = usb_submit_urb(usx2y->as04.urb[j], GFP_ATOMIC);
						us428ctls->p4out_sent = send;
						break;
					}
				}
			}
		}
	}

	if (err)
		dev_err(&urb->dev->dev, "in04_int() usb_submit_urb err=%i\n", err);

	urb->dev = usx2y->dev;
	usb_submit_urb(urb, GFP_ATOMIC);
}

/*
 * Prepare some urbs
 */
int usx2y_async_seq04_init(struct usx2ydev *usx2y)
{
	int err = 0, i;

	if (WARN_ON(usx2y->as04.buffer))
		return -EBUSY;

	usx2y->as04.buffer = kmalloc_array(URBS_ASYNC_SEQ,
					   URB_DATA_LEN_ASYNC_SEQ, GFP_KERNEL);
	if (!usx2y->as04.buffer) {
		err = -ENOMEM;
	} else {
		for (i = 0; i < URBS_ASYNC_SEQ; ++i) {
			usx2y->as04.urb[i] = usb_alloc_urb(0, GFP_KERNEL);
			if (!usx2y->as04.urb[i]) {
				err = -ENOMEM;
				break;
			}
			usb_fill_bulk_urb(usx2y->as04.urb[i], usx2y->dev,
					  usb_sndbulkpipe(usx2y->dev, 0x04),
					  usx2y->as04.buffer +
					  URB_DATA_LEN_ASYNC_SEQ * i, 0,
					  i_usx2y_out04_int, usx2y);
			err = usb_urb_ep_type_check(usx2y->as04.urb[i]);
			if (err < 0)
				break;
		}
	}
	if (err)
		usx2y_unlinkseq(&usx2y->as04);
	return err;
}

int usx2y_in04_init(struct usx2ydev *usx2y)
{
	int err;

	if (WARN_ON(usx2y->in04_urb))
		return -EBUSY;

	usx2y->in04_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!usx2y->in04_urb) {
		err = -ENOMEM;
		goto error;
	}

	usx2y->in04_buf = kmalloc(21, GFP_KERNEL);
	if (!usx2y->in04_buf) {
		err = -ENOMEM;
		goto error;
	}

	init_waitqueue_head(&usx2y->in04_wait_queue);
	usb_fill_int_urb(usx2y->in04_urb, usx2y->dev,
			 usb_rcvintpipe(usx2y->dev, 0x4),
			 usx2y->in04_buf, 21,
			 i_usx2y_in04_int, usx2y, 10);
	if (usb_urb_ep_type_check(usx2y->in04_urb)) {
		err = -EINVAL;
		goto error;
	}
	return usb_submit_urb(usx2y->in04_urb, GFP_KERNEL);

 error:
	kfree(usx2y->in04_buf);
	usb_free_urb(usx2y->in04_urb);
	usx2y->in04_buf = NULL;
	usx2y->in04_urb = NULL;
	return err;
}

static void usx2y_unlinkseq(struct snd_usx2y_async_seq *s)
{
	int i;

	for (i = 0; i < URBS_ASYNC_SEQ; ++i) {
		if (!s->urb[i])
			continue;
		usb_kill_urb(s->urb[i]);
		usb_free_urb(s->urb[i]);
		s->urb[i] = NULL;
	}
	kfree(s->buffer);
	s->buffer = NULL;
}

static const struct usb_device_id snd_usx2y_usb_id_table[] = {
	{
		.match_flags = USB_DEVICE_ID_MATCH_DEVICE,
		.idVendor = 0x1604,
		.idProduct = USB_ID_US428
	},
	{
		.match_flags = USB_DEVICE_ID_MATCH_DEVICE,
		.idVendor = 0x1604,
		.idProduct = USB_ID_US122
	},
	{
		.match_flags = USB_DEVICE_ID_MATCH_DEVICE,
		.idVendor = 0x1604,
		.idProduct = USB_ID_US224
	},
	{ /* terminator */ }
};
MODULE_DEVICE_TABLE(usb, snd_usx2y_usb_id_table);

static int usx2y_create_card(struct usb_device *device,
			     struct usb_interface *intf,
			     struct snd_card **cardp)
{
	int dev;
	struct snd_card *card;
	int err;

	for (dev = 0; dev < SNDRV_CARDS; ++dev)
		if (enable[dev] && !snd_usx2y_card_used[dev])
			break;
	if (dev >= SNDRV_CARDS)
		return -ENODEV;
	err = snd_card_new(&intf->dev, index[dev], id[dev], THIS_MODULE,
			   sizeof(struct usx2ydev), &card);
	if (err < 0)
		return err;
	snd_usx2y_card_used[usx2y(card)->card_index = dev] = 1;
	card->private_free = snd_usx2y_card_private_free;
	usx2y(card)->dev = device;
	init_waitqueue_head(&usx2y(card)->prepare_wait_queue);
	init_waitqueue_head(&usx2y(card)->us428ctls_wait_queue_head);
	mutex_init(&usx2y(card)->pcm_mutex);
	INIT_LIST_HEAD(&usx2y(card)->midi_list);
	strscpy(card->driver, "USB "NAME_ALLCAPS"");
	sprintf(card->shortname, "TASCAM "NAME_ALLCAPS"");
	sprintf(card->longname, "%s (%x:%x if %d at %03d/%03d)",
		card->shortname,
		le16_to_cpu(device->descriptor.idVendor),
		le16_to_cpu(device->descriptor.idProduct),
		0, /* us428(card)->usbmidi.ifnum, */
		usx2y(card)->dev->bus->busnum, usx2y(card)->dev->devnum);
	*cardp = card;
	return 0;
}

static void snd_usx2y_card_private_free(struct snd_card *card)
{
	struct usx2ydev *usx2y = usx2y(card);

	kfree(usx2y->in04_buf);
	usb_free_urb(usx2y->in04_urb);
	if (usx2y->us428ctls_sharedmem)
		free_pages_exact(usx2y->us428ctls_sharedmem,
				 US428_SHAREDMEM_PAGES);
	if (usx2y->card_index >= 0 && usx2y->card_index < SNDRV_CARDS)
		snd_usx2y_card_used[usx2y->card_index] = 0;
}

static void snd_usx2y_disconnect(struct usb_interface *intf)
{
	struct snd_card *card;
	struct usx2ydev *usx2y;
	struct list_head *p;

	card = usb_get_intfdata(intf);
	if (!card)
		return;
	usx2y = usx2y(card);
	usx2y->chip_status = USX2Y_STAT_CHIP_HUP;
	usx2y_unlinkseq(&usx2y->as04);
	usb_kill_urb(usx2y->in04_urb);
	snd_card_disconnect(card);

	/* release the midi resources */
	list_for_each(p, &usx2y->midi_list) {
		snd_usbmidi_disconnect(p);
	}

	if (usx2y->us428ctls_sharedmem)
		wake_up(&usx2y->us428ctls_wait_queue_head);
	snd_card_free_when_closed(card);
}

static int snd_usx2y_probe(struct usb_interface *intf,
			   const struct usb_device_id *id)
{
	struct usb_device *device = interface_to_usbdev(intf);
	struct snd_card *card;
	int err;

#ifdef USX2Y_NRPACKS_VARIABLE
	if (nrpacks < 0 || nrpacks > USX2Y_NRPACKS_MAX)
		return -EINVAL;
#endif

	if (le16_to_cpu(device->descriptor.idVendor) != 0x1604 ||
	    (le16_to_cpu(device->descriptor.idProduct) != USB_ID_US122 &&
	     le16_to_cpu(device->descriptor.idProduct) != USB_ID_US224 &&
	     le16_to_cpu(device->descriptor.idProduct) != USB_ID_US428))
		return -EINVAL;

	err = usx2y_create_card(device, intf, &card);
	if (err < 0)
		return err;
	err = usx2y_hwdep_new(card, device);
	if (err < 0)
		goto error;
	err = snd_card_register(card);
	if (err < 0)
		goto error;
	dev_set_drvdata(&intf->dev, card);
	return 0;

 error:
	snd_card_free(card);
	return err;
}

static struct usb_driver snd_usx2y_usb_driver = {
	.name = "snd-usb-usx2y",
	.probe = snd_usx2y_probe,
	.disconnect = snd_usx2y_disconnect,
	.id_table = snd_usx2y_usb_id_table,
};
module_usb_driver(snd_usx2y_usb_driver);
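The in04 interrupt URB above is kept alive by resubmitting it from its own completion handler. The following is a minimal sketch of that idiom, not part of the driver: the "my_*" names are hypothetical and error handling is trimmed to the essentials.

#include <linux/usb.h>
#include <linux/slab.h>

struct my_dev {
	struct usb_device *udev;
	struct urb *int_urb;
	u8 *buf;
};

static void my_int_complete(struct urb *urb)
{
	struct my_dev *d = urb->context;

	if (urb->status)	/* -ENOENT, -ESHUTDOWN, ...: device going away */
		return;		/* do not resubmit */
	/* ... consume urb->transfer_buffer here ... */
	urb->dev = d->udev;			/* refresh before resubmit */
	usb_submit_urb(urb, GFP_ATOMIC);	/* completion runs in atomic context */
}

static int my_int_start(struct my_dev *d)
{
	d->int_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!d->int_urb)
		return -ENOMEM;
	d->buf = kmalloc(21, GFP_KERNEL);
	if (!d->buf)
		return -ENOMEM;
	usb_fill_int_urb(d->int_urb, d->udev, usb_rcvintpipe(d->udev, 0x4),
			 d->buf, 21, my_int_complete, d, 10 /* ms interval */);
	return usb_submit_urb(d->int_urb, GFP_KERNEL);
}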
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <net/net_namespace.h>
#include <linux/if_arp.h>
#include <net/rtnetlink.h>

static netdev_tx_t nlmon_xmit(struct sk_buff *skb, struct net_device *dev)
{
	dev_lstats_add(dev, skb->len);
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

struct nlmon {
	struct netlink_tap nt;
};

static int nlmon_open(struct net_device *dev)
{
	struct nlmon *nlmon = netdev_priv(dev);

	nlmon->nt.dev = dev;
	nlmon->nt.module = THIS_MODULE;
	return netlink_add_tap(&nlmon->nt);
}

static int nlmon_close(struct net_device *dev)
{
	struct nlmon *nlmon = netdev_priv(dev);

	return netlink_remove_tap(&nlmon->nt);
}

static void nlmon_get_stats64(struct net_device *dev,
			      struct rtnl_link_stats64 *stats)
{
	dev_lstats_read(dev, &stats->rx_packets, &stats->rx_bytes);
}

static u32 always_on(struct net_device *dev)
{
	return 1;
}

static const struct ethtool_ops nlmon_ethtool_ops = {
	.get_link = always_on,
};

static const struct net_device_ops nlmon_ops = {
	.ndo_open = nlmon_open,
	.ndo_stop = nlmon_close,
	.ndo_start_xmit = nlmon_xmit,
	.ndo_get_stats64 = nlmon_get_stats64,
};

static void nlmon_setup(struct net_device *dev)
{
	dev->type = ARPHRD_NETLINK;
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->lltx = true;

	dev->netdev_ops = &nlmon_ops;
	dev->ethtool_ops = &nlmon_ethtool_ops;
	dev->needs_free_netdev = true;

	dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA;
	dev->flags = IFF_NOARP;
	dev->pcpu_stat_type = NETDEV_PCPU_STAT_LSTATS;

	/* That's rather a softlimit here, which, of course,
	 * can be altered. Not a real MTU, but what is to be
	 * expected in most cases.
	 */
	dev->mtu = NLMSG_GOODSIZE;
	dev->min_mtu = sizeof(struct nlmsghdr);
}

static int nlmon_validate(struct nlattr *tb[], struct nlattr *data[],
			  struct netlink_ext_ack *extack)
{
	if (tb[IFLA_ADDRESS])
		return -EINVAL;
	return 0;
}

static struct rtnl_link_ops nlmon_link_ops __read_mostly = {
	.kind		= "nlmon",
	.priv_size	= sizeof(struct nlmon),
	.setup		= nlmon_setup,
	.validate	= nlmon_validate,
};

static __init int nlmon_register(void)
{
	return rtnl_link_register(&nlmon_link_ops);
}

static __exit void nlmon_unregister(void)
{
	rtnl_link_unregister(&nlmon_link_ops);
}

module_init(nlmon_register);
module_exit(nlmon_unregister);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
MODULE_AUTHOR("Mathieu Geli <geli@enseirb.fr>");
MODULE_DESCRIPTION("Netlink monitoring device");
MODULE_ALIAS_RTNL_LINK("nlmon");
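nlmon devices are created through rtnetlink using the "nlmon" kind registered above; the iproute2 equivalent is "ip link add nlmon0 type nlmon". Below is a hedged userspace sketch of that RTM_NEWLINK request under the assumption of CAP_NET_ADMIN; the attribute helpers are hand-rolled and error handling is omitted.

#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

static struct rtattr *msg_tail(struct nlmsghdr *n)
{
	return (struct rtattr *)((char *)n + NLMSG_ALIGN(n->nlmsg_len));
}

static void addattr(struct nlmsghdr *n, unsigned short type,
		    const void *data, int len)
{
	struct rtattr *rta = msg_tail(n);

	rta->rta_type = type;
	rta->rta_len = RTA_LENGTH(len);
	if (len)
		memcpy(RTA_DATA(rta), data, len);
	n->nlmsg_len = NLMSG_ALIGN(n->nlmsg_len) + RTA_ALIGN(rta->rta_len);
}

int main(void)
{
	struct {
		struct nlmsghdr nh;
		struct ifinfomsg ifi;
		char buf[256];
	} req;
	struct sockaddr_nl kernel = { .nl_family = AF_NETLINK };
	struct rtattr *linkinfo;
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	memset(&req, 0, sizeof(req));
	req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg));
	req.nh.nlmsg_type = RTM_NEWLINK;
	req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL | NLM_F_ACK;
	addattr(&req.nh, IFLA_IFNAME, "nlmon0", 7);
	linkinfo = msg_tail(&req.nh);
	addattr(&req.nh, IFLA_LINKINFO, NULL, 0);	/* open the nest */
	addattr(&req.nh, IFLA_INFO_KIND, "nlmon", 6);	/* matches .kind above */
	linkinfo->rta_len = (char *)msg_tail(&req.nh) - (char *)linkinfo;
	sendto(fd, &req, req.nh.nlmsg_len, 0,
	       (struct sockaddr *)&kernel, sizeof(kernel));
	close(fd);
	return 0;
}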
/*
 * linux/fs/hfs/part_tbl.c
 *
 * Copyright (C) 1996-1997 Paul H. Hargrove
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 * This file may be distributed under the terms of the GNU General Public
 * License.
 *
 * Original code to handle the new style Mac partition table based on
 * a patch contributed by Holger Schemel (aeglos@valinor.owl.de).
 */

#include "hfs_fs.h"

/*
 * The new style Mac partition map
 *
 * For each partition on the media there is a physical block (512-byte
 * block) containing one of these structures.  These blocks are
 * contiguous starting at block 1.
 */
struct new_pmap {
	__be16	pmSig;		/* signature */
	__be16	pmSigPad;	/* padding */
	__be32	pmMapBlkCnt;	/* partition blocks count */
	__be32	pmPyPartStart;	/* physical block start of partition */
	__be32	pmPartBlkCnt;	/* physical block count of partition */
	u8	pmPartName[32];	/* (null terminated?) string giving the name of
				 * this partition */
	u8	pmPartType[32];	/* (null terminated?) string giving the type of
				 * this partition */
	/* a bunch more stuff we don't need */
} __packed;

/*
 * The old style Mac partition map
 *
 * The partition map consists of a 2-byte signature followed by an
 * array of these structures.  The map is terminated with an all-zero
 * one of these.
 */
struct old_pmap {
	__be16		pdSig;	/* Signature bytes */
	struct old_pmap_entry {
		__be32	pdStart;
		__be32	pdSize;
		__be32	pdFSID;
	}	pdEntry[42];
} __packed;

/*
 * hfs_part_find()
 *
 * Parse the partition map looking for the
 * start and length of the 'part'th HFS partition.
 */
int hfs_part_find(struct super_block *sb,
		  sector_t *part_start, sector_t *part_size)
{
	struct buffer_head *bh;
	__be16 *data;
	int i, size, res;

	res = -ENOENT;
	bh = sb_bread512(sb, *part_start + HFS_PMAP_BLK, data);
	if (!bh)
		return -EIO;

	switch (be16_to_cpu(*data)) {
	case HFS_OLD_PMAP_MAGIC:
	{
		struct old_pmap *pm;
		struct old_pmap_entry *p;

		pm = (struct old_pmap *)bh->b_data;
		p = pm->pdEntry;
		size = 42;
		for (i = 0; i < size; p++, i++) {
			if (p->pdStart && p->pdSize &&
			    p->pdFSID == cpu_to_be32(0x54465331)/*"TFS1"*/ &&
			    (HFS_SB(sb)->part < 0 || HFS_SB(sb)->part == i)) {
				*part_start += be32_to_cpu(p->pdStart);
				*part_size = be32_to_cpu(p->pdSize);
				res = 0;
			}
		}
		break;
	}
	case HFS_NEW_PMAP_MAGIC:
	{
		struct new_pmap *pm;

		pm = (struct new_pmap *)bh->b_data;
		size = be32_to_cpu(pm->pmMapBlkCnt);
		for (i = 0; i < size;) {
			if (!memcmp(pm->pmPartType, "Apple_HFS", 9) &&
			    (HFS_SB(sb)->part < 0 || HFS_SB(sb)->part == i)) {
				*part_start += be32_to_cpu(pm->pmPyPartStart);
				*part_size = be32_to_cpu(pm->pmPartBlkCnt);
				res = 0;
				break;
			}
			brelse(bh);
			bh = sb_bread512(sb, *part_start + HFS_PMAP_BLK + ++i,
					 pm);
			if (!bh)
				return -EIO;
			if (pm->pmSig != cpu_to_be16(HFS_NEW_PMAP_MAGIC))
				break;
		}
		break;
	}
	}
	brelse(bh);

	return res;
}
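For contrast, here is a hypothetical userspace sketch of the same old-style map walk over a raw 512-byte block read from a disk image. The magic value is assumed from hfs_fs.h (HFS_OLD_PMAP_MAGIC, the "TS" signature); only the fields used above are decoded, and the 12-byte packed entry layout is reproduced with memcpy.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>	/* ntohs()/ntohl() for the big-endian on-disk fields */

struct old_pmap_entry { uint32_t pdStart, pdSize, pdFSID; };

static void scan_old_pmap(const unsigned char blk[512])
{
	uint16_t sig;

	memcpy(&sig, blk, sizeof(sig));
	if (ntohs(sig) != 0x5453)	/* assumed HFS_OLD_PMAP_MAGIC, "TS" */
		return;
	for (int i = 0; i < 42; i++) {
		struct old_pmap_entry e;

		memcpy(&e, blk + 2 + i * sizeof(e), sizeof(e));
		if (e.pdStart && e.pdSize &&
		    ntohl(e.pdFSID) == 0x54465331 /* "TFS1" */)
			printf("entry %d: start=%u size=%u\n",
			       i, ntohl(e.pdStart), ntohl(e.pdSize));
	}
}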
// SPDX-License-Identifier: GPL-2.0

#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/xxhash.h>
#include <linux/unaligned.h>

#define XXHASH64_BLOCK_SIZE	32
#define XXHASH64_DIGEST_SIZE	8

struct xxhash64_tfm_ctx {
	u64 seed;
};

struct xxhash64_desc_ctx {
	struct xxh64_state xxhstate;
};

static int xxhash64_setkey(struct crypto_shash *tfm, const u8 *key,
			   unsigned int keylen)
{
	struct xxhash64_tfm_ctx *tctx = crypto_shash_ctx(tfm);

	if (keylen != sizeof(tctx->seed))
		return -EINVAL;
	tctx->seed = get_unaligned_le64(key);
	return 0;
}

static int xxhash64_init(struct shash_desc *desc)
{
	struct xxhash64_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
	struct xxhash64_desc_ctx *dctx = shash_desc_ctx(desc);

	xxh64_reset(&dctx->xxhstate, tctx->seed);

	return 0;
}

static int xxhash64_update(struct shash_desc *desc, const u8 *data,
			   unsigned int length)
{
	struct xxhash64_desc_ctx *dctx = shash_desc_ctx(desc);

	xxh64_update(&dctx->xxhstate, data, length);

	return 0;
}

static int xxhash64_final(struct shash_desc *desc, u8 *out)
{
	struct xxhash64_desc_ctx *dctx = shash_desc_ctx(desc);

	put_unaligned_le64(xxh64_digest(&dctx->xxhstate), out);

	return 0;
}

static int xxhash64_digest(struct shash_desc *desc, const u8 *data,
			   unsigned int length, u8 *out)
{
	struct xxhash64_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);

	put_unaligned_le64(xxh64(data, length, tctx->seed), out);

	return 0;
}

static struct shash_alg alg = {
	.digestsize	= XXHASH64_DIGEST_SIZE,
	.setkey		= xxhash64_setkey,
	.init		= xxhash64_init,
	.update		= xxhash64_update,
	.final		= xxhash64_final,
	.digest		= xxhash64_digest,
	.descsize	= sizeof(struct xxhash64_desc_ctx),
	.base		= {
		.cra_name	 = "xxhash64",
		.cra_driver_name = "xxhash64-generic",
		.cra_priority	 = 100,
		.cra_flags	 = CRYPTO_ALG_OPTIONAL_KEY,
		.cra_blocksize	 = XXHASH64_BLOCK_SIZE,
		.cra_ctxsize	 = sizeof(struct xxhash64_tfm_ctx),
		.cra_module	 = THIS_MODULE,
	}
};

static int __init xxhash_mod_init(void)
{
	return crypto_register_shash(&alg);
}

static void __exit xxhash_mod_fini(void)
{
	crypto_unregister_shash(&alg);
}

module_init(xxhash_mod_init);
module_exit(xxhash_mod_fini);

MODULE_AUTHOR("Nikolay Borisov <nborisov@suse.com>");
MODULE_DESCRIPTION("xxhash calculations wrapper for lib/xxhash.c");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("xxhash64");
MODULE_ALIAS_CRYPTO("xxhash64-generic");
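A caller reaches this algorithm through the generic shash API. The following is a minimal sketch of such a caller, assuming the "my_xxhash64" wrapper name; error handling is abbreviated. Note the 8-byte "key" is the xxhash seed, read little-endian by xxhash64_setkey() above.

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/unaligned.h>

static int my_xxhash64(const u8 *data, unsigned int len, u64 seed, u8 out[8])
{
	struct crypto_shash *tfm;
	u8 key[8];
	int err;

	tfm = crypto_alloc_shash("xxhash64", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	put_unaligned_le64(seed, key);	/* seed travels as a little-endian key */
	err = crypto_shash_setkey(tfm, key, sizeof(key));
	if (!err) {
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		err = crypto_shash_digest(desc, data, len, out);
	}
	crypto_free_shash(tfm);
	return err;
}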
/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM notifier

#if !defined(_TRACE_NOTIFIERS_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_NOTIFIERS_H

#include <linux/tracepoint.h>

DECLARE_EVENT_CLASS(notifier_info,

	TP_PROTO(void *cb),

	TP_ARGS(cb),

	TP_STRUCT__entry(
		__field(void *, cb)
	),

	TP_fast_assign(
		__entry->cb = cb;
	),

	TP_printk("%ps", __entry->cb)
);

/*
 * notifier_register - called upon notifier callback registration
 *
 * @cb: callback pointer
 *
 */
DEFINE_EVENT(notifier_info, notifier_register,

	TP_PROTO(void *cb),

	TP_ARGS(cb)
);

/*
 * notifier_unregister - called upon notifier callback unregistration
 *
 * @cb: callback pointer
 *
 */
DEFINE_EVENT(notifier_info, notifier_unregister,

	TP_PROTO(void *cb),

	TP_ARGS(cb)
);

/*
 * notifier_run - called upon notifier callback execution
 *
 * @cb: callback pointer
 *
 */
DEFINE_EVENT(notifier_info, notifier_run,

	TP_PROTO(void *cb),

	TP_ARGS(cb)
);

#endif /* _TRACE_NOTIFIERS_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
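Each DEFINE_EVENT above generates a trace_<name>() call site. A sketch of how a .c file would emit these events follows; it mirrors what kernel/notifier.c does, but the surrounding function here is illustrative. Exactly one translation unit defines CREATE_TRACE_POINTS before including the header; every other user just includes it.

#include <linux/notifier.h>

#define CREATE_TRACE_POINTS
#include <trace/events/notifier.h>

static int my_chain_register(struct notifier_block **nl,
			     struct notifier_block *n)
{
	/* ... link n into the chain here ... */
	trace_notifier_register((void *)n->notifier_call);
	return 0;
}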
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * NetLabel Kernel API
 *
 * This file defines the kernel API for the NetLabel system.  The NetLabel
 * system manages static and dynamic label mappings for network protocols such
 * as CIPSO and RIPSO.
 *
 * Author: Paul Moore <paul@paul-moore.com>
 */

/*
 * (c) Copyright Hewlett-Packard Development Company, L.P., 2006, 2008
 */

#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/audit.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/netlabel.h>
#include <net/cipso_ipv4.h>
#include <net/calipso.h>
#include <asm/bug.h>
#include <linux/atomic.h>

#include "netlabel_domainhash.h"
#include "netlabel_unlabeled.h"
#include "netlabel_cipso_v4.h"
#include "netlabel_calipso.h"
#include "netlabel_user.h"
#include "netlabel_mgmt.h"
#include "netlabel_addrlist.h"

/*
 * Configuration Functions
 */

/**
 * netlbl_cfg_map_del - Remove a NetLabel/LSM domain mapping
 * @domain: the domain mapping to remove
 * @family: address family
 * @addr: IP address
 * @mask: IP address mask
 * @audit_info: NetLabel audit information
 *
 * Description:
 * Removes a NetLabel/LSM domain mapping.  A @domain value of NULL causes the
 * default domain mapping to be removed.  Returns zero on success, negative
 * values on failure.
 *
 */
int netlbl_cfg_map_del(const char *domain,
		       u16 family,
		       const void *addr,
		       const void *mask,
		       struct netlbl_audit *audit_info)
{
	if (addr == NULL && mask == NULL) {
		return netlbl_domhsh_remove(domain, family, audit_info);
	} else if (addr != NULL && mask != NULL) {
		switch (family) {
		case AF_INET:
			return netlbl_domhsh_remove_af4(domain, addr, mask,
							audit_info);
#if IS_ENABLED(CONFIG_IPV6)
		case AF_INET6:
			return netlbl_domhsh_remove_af6(domain, addr, mask,
							audit_info);
#endif /* IPv6 */
		default:
			return -EPFNOSUPPORT;
		}
	} else
		return -EINVAL;
}

/**
 * netlbl_cfg_unlbl_map_add - Add a new unlabeled mapping
 * @domain: the domain mapping to add
 * @family: address family
 * @addr: IP address
 * @mask: IP address mask
 * @audit_info: NetLabel audit information
 *
 * Description:
 * Adds a new unlabeled NetLabel/LSM domain mapping.  A @domain value of NULL
 * causes a new default domain mapping to be added.  Returns zero on success,
 * negative values on failure.
 *
 */
int netlbl_cfg_unlbl_map_add(const char *domain,
			     u16 family,
			     const void *addr,
			     const void *mask,
			     struct netlbl_audit *audit_info)
{
	int ret_val = -ENOMEM;
	struct netlbl_dom_map *entry;
	struct netlbl_domaddr_map *addrmap = NULL;
	struct netlbl_domaddr4_map *map4 = NULL;
	struct netlbl_domaddr6_map *map6 = NULL;

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (entry == NULL)
		return -ENOMEM;
	if (domain != NULL) {
		entry->domain = kstrdup(domain, GFP_ATOMIC);
		if (entry->domain == NULL)
			goto cfg_unlbl_map_add_failure;
	}
	entry->family = family;

	if (addr == NULL && mask == NULL)
		entry->def.type = NETLBL_NLTYPE_UNLABELED;
	else if (addr != NULL && mask != NULL) {
		addrmap = kzalloc(sizeof(*addrmap), GFP_ATOMIC);
		if (addrmap == NULL)
			goto cfg_unlbl_map_add_failure;
		INIT_LIST_HEAD(&addrmap->list4);
		INIT_LIST_HEAD(&addrmap->list6);

		switch (family) {
		case AF_INET: {
			const struct in_addr *addr4 = addr;
			const struct in_addr *mask4 = mask;

			map4 = kzalloc(sizeof(*map4), GFP_ATOMIC);
			if (map4 == NULL)
				goto cfg_unlbl_map_add_failure;
			map4->def.type = NETLBL_NLTYPE_UNLABELED;
			map4->list.addr = addr4->s_addr & mask4->s_addr;
			map4->list.mask = mask4->s_addr;
			map4->list.valid = 1;
			ret_val = netlbl_af4list_add(&map4->list,
						     &addrmap->list4);
			if (ret_val != 0)
				goto cfg_unlbl_map_add_failure;
			break;
		}
#if IS_ENABLED(CONFIG_IPV6)
		case AF_INET6: {
			const struct in6_addr *addr6 = addr;
			const struct in6_addr *mask6 = mask;

			map6 = kzalloc(sizeof(*map6), GFP_ATOMIC);
			if (map6 == NULL)
				goto cfg_unlbl_map_add_failure;
			map6->def.type = NETLBL_NLTYPE_UNLABELED;
			map6->list.addr = *addr6;
			map6->list.addr.s6_addr32[0] &= mask6->s6_addr32[0];
			map6->list.addr.s6_addr32[1] &= mask6->s6_addr32[1];
			map6->list.addr.s6_addr32[2] &= mask6->s6_addr32[2];
			map6->list.addr.s6_addr32[3] &= mask6->s6_addr32[3];
			map6->list.mask = *mask6;
			map6->list.valid = 1;
			ret_val = netlbl_af6list_add(&map6->list,
						     &addrmap->list6);
			if (ret_val != 0)
				goto cfg_unlbl_map_add_failure;
			break;
		}
#endif /* IPv6 */
		default:
			goto cfg_unlbl_map_add_failure;
		}
		entry->def.addrsel = addrmap;
		entry->def.type = NETLBL_NLTYPE_ADDRSELECT;
	} else {
		ret_val = -EINVAL;
		goto cfg_unlbl_map_add_failure;
	}

	ret_val = netlbl_domhsh_add(entry, audit_info);
	if (ret_val != 0)
		goto cfg_unlbl_map_add_failure;

	return 0;

cfg_unlbl_map_add_failure:
	kfree(entry->domain);
	kfree(entry);
	kfree(addrmap);
	kfree(map4);
	kfree(map6);
	return ret_val;
}

/**
 * netlbl_cfg_unlbl_static_add - Adds a new static label
 * @net: network namespace
 * @dev_name: interface name
 * @addr: IP address in network byte order (struct in[6]_addr)
 * @mask: address mask in network byte order (struct in[6]_addr)
 * @family: address family
 * @secid: LSM secid value for the entry
 * @audit_info: NetLabel audit information
 *
 * Description:
 * Adds a new NetLabel static label to be used when protocol provided labels
 * are not present on incoming traffic.  If @dev_name is NULL then the default
 * interface will be used.  Returns zero on success, negative values on
 * failure.
 *
 */
int netlbl_cfg_unlbl_static_add(struct net *net,
				const char *dev_name,
				const void *addr,
				const void *mask,
				u16 family,
				u32 secid,
				struct netlbl_audit *audit_info)
{
	u32 addr_len;

	switch (family) {
	case AF_INET:
		addr_len = sizeof(struct in_addr);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		addr_len = sizeof(struct in6_addr);
		break;
#endif /* IPv6 */
	default:
		return -EPFNOSUPPORT;
	}

	return netlbl_unlhsh_add(net,
				 dev_name, addr, mask, addr_len,
				 secid, audit_info);
}

/**
 * netlbl_cfg_unlbl_static_del - Removes an existing static label
 * @net: network namespace
 * @dev_name: interface name
 * @addr: IP address in network byte order (struct in[6]_addr)
 * @mask: address mask in network byte order (struct in[6]_addr)
 * @family: address family
 * @audit_info: NetLabel audit information
 *
 * Description:
 * Removes an existing NetLabel static label used when protocol provided
 * labels are not present on incoming traffic.  If @dev_name is NULL then the
 * default interface will be used.  Returns zero on success, negative values
 * on failure.
 *
 */
int netlbl_cfg_unlbl_static_del(struct net *net,
				const char *dev_name,
				const void *addr,
				const void *mask,
				u16 family,
				struct netlbl_audit *audit_info)
{
	u32 addr_len;

	switch (family) {
	case AF_INET:
		addr_len = sizeof(struct in_addr);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		addr_len = sizeof(struct in6_addr);
		break;
#endif /* IPv6 */
	default:
		return -EPFNOSUPPORT;
	}

	return netlbl_unlhsh_remove(net,
				    dev_name, addr, mask, addr_len,
				    audit_info);
}

/**
 * netlbl_cfg_cipsov4_add - Add a new CIPSOv4 DOI definition
 * @doi_def: CIPSO DOI definition
 * @audit_info: NetLabel audit information
 *
 * Description:
 * Add a new CIPSO DOI definition as defined by @doi_def.  Returns zero on
 * success and negative values on failure.
 *
 */
int netlbl_cfg_cipsov4_add(struct cipso_v4_doi *doi_def,
			   struct netlbl_audit *audit_info)
{
	return cipso_v4_doi_add(doi_def, audit_info);
}

/**
 * netlbl_cfg_cipsov4_del - Remove an existing CIPSOv4 DOI definition
 * @doi: CIPSO DOI
 * @audit_info: NetLabel audit information
 *
 * Description:
 * Remove an existing CIPSO DOI definition matching @doi.
 *
 */
void netlbl_cfg_cipsov4_del(u32 doi, struct netlbl_audit *audit_info)
{
	cipso_v4_doi_remove(doi, audit_info);
}

/**
 * netlbl_cfg_cipsov4_map_add - Add a new CIPSOv4 DOI mapping
 * @doi: the CIPSO DOI
 * @domain: the domain mapping to add
 * @addr: IP address
 * @mask: IP address mask
 * @audit_info: NetLabel audit information
 *
 * Description:
 * Add a new NetLabel/LSM domain mapping for the given CIPSO DOI to the
 * NetLabel subsystem.  A @domain value of NULL adds a new default domain
 * mapping.  Returns zero on success, negative values on failure.
 *
 */
int netlbl_cfg_cipsov4_map_add(u32 doi,
			       const char *domain,
			       const struct in_addr *addr,
			       const struct in_addr *mask,
			       struct netlbl_audit *audit_info)
{
	int ret_val = -ENOMEM;
	struct cipso_v4_doi *doi_def;
	struct netlbl_dom_map *entry;
	struct netlbl_domaddr_map *addrmap = NULL;
	struct netlbl_domaddr4_map *addrinfo = NULL;

	doi_def = cipso_v4_doi_getdef(doi);
	if (doi_def == NULL)
		return -ENOENT;

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (entry == NULL)
		goto out_entry;
	entry->family = AF_INET;
	if (domain != NULL) {
		entry->domain = kstrdup(domain, GFP_ATOMIC);
		if (entry->domain == NULL)
			goto out_domain;
	}

	if (addr == NULL && mask == NULL) {
		entry->def.cipso = doi_def;
		entry->def.type = NETLBL_NLTYPE_CIPSOV4;
	} else if (addr != NULL && mask != NULL) {
		addrmap = kzalloc(sizeof(*addrmap), GFP_ATOMIC);
		if (addrmap == NULL)
			goto out_addrmap;
		INIT_LIST_HEAD(&addrmap->list4);
		INIT_LIST_HEAD(&addrmap->list6);

		addrinfo = kzalloc(sizeof(*addrinfo), GFP_ATOMIC);
		if (addrinfo == NULL)
			goto out_addrinfo;
		addrinfo->def.cipso = doi_def;
		addrinfo->def.type = NETLBL_NLTYPE_CIPSOV4;
		addrinfo->list.addr = addr->s_addr & mask->s_addr;
		addrinfo->list.mask = mask->s_addr;
		addrinfo->list.valid = 1;
		ret_val = netlbl_af4list_add(&addrinfo->list, &addrmap->list4);
		if (ret_val != 0)
			goto cfg_cipsov4_map_add_failure;

		entry->def.addrsel = addrmap;
		entry->def.type = NETLBL_NLTYPE_ADDRSELECT;
	} else {
		ret_val = -EINVAL;
		goto out_addrmap;
	}

	ret_val = netlbl_domhsh_add(entry, audit_info);
	if (ret_val != 0)
		goto cfg_cipsov4_map_add_failure;

	return 0;

cfg_cipsov4_map_add_failure:
	kfree(addrinfo);
out_addrinfo:
	kfree(addrmap);
out_addrmap:
	kfree(entry->domain);
out_domain:
	kfree(entry);
out_entry:
	cipso_v4_doi_putdef(doi_def);
	return ret_val;
}

/**
 * netlbl_cfg_calipso_add - Add a new CALIPSO DOI definition
 * @doi_def: CALIPSO DOI definition
 * @audit_info: NetLabel audit information
 *
 * Description:
 * Add a new CALIPSO DOI definition as defined by @doi_def.  Returns zero on
 * success and negative values on failure.
 *
 */
int netlbl_cfg_calipso_add(struct calipso_doi *doi_def,
			   struct netlbl_audit *audit_info)
{
#if IS_ENABLED(CONFIG_IPV6)
	return calipso_doi_add(doi_def, audit_info);
#else /* IPv6 */
	return -ENOSYS;
#endif /* IPv6 */
}

/**
 * netlbl_cfg_calipso_del - Remove an existing CALIPSO DOI definition
 * @doi: CALIPSO DOI
 * @audit_info: NetLabel audit information
 *
 * Description:
 * Remove an existing CALIPSO DOI definition matching @doi.
 *
 */
void netlbl_cfg_calipso_del(u32 doi, struct netlbl_audit *audit_info)
{
#if IS_ENABLED(CONFIG_IPV6)
	calipso_doi_remove(doi, audit_info);
#endif /* IPv6 */
}

/**
 * netlbl_cfg_calipso_map_add - Add a new CALIPSO DOI mapping
 * @doi: the CALIPSO DOI
 * @domain: the domain mapping to add
 * @addr: IP address
 * @mask: IP address mask
 * @audit_info: NetLabel audit information
 *
 * Description:
 * Add a new NetLabel/LSM domain mapping for the given CALIPSO DOI to the
 * NetLabel subsystem.  A @domain value of NULL adds a new default domain
 * mapping.  Returns zero on success, negative values on failure.
 *
 */
int netlbl_cfg_calipso_map_add(u32 doi,
			       const char *domain,
			       const struct in6_addr *addr,
			       const struct in6_addr *mask,
			       struct netlbl_audit *audit_info)
{
#if IS_ENABLED(CONFIG_IPV6)
	int ret_val = -ENOMEM;
	struct calipso_doi *doi_def;
	struct netlbl_dom_map *entry;
	struct netlbl_domaddr_map *addrmap = NULL;
	struct netlbl_domaddr6_map *addrinfo = NULL;

	doi_def = calipso_doi_getdef(doi);
	if (doi_def == NULL)
		return -ENOENT;

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (entry == NULL)
		goto out_entry;
	entry->family = AF_INET6;
	if (domain != NULL) {
		entry->domain = kstrdup(domain, GFP_ATOMIC);
		if (entry->domain == NULL)
			goto out_domain;
	}

	if (addr == NULL && mask == NULL) {
		entry->def.calipso = doi_def;
		entry->def.type = NETLBL_NLTYPE_CALIPSO;
	} else if (addr != NULL && mask != NULL) {
		addrmap = kzalloc(sizeof(*addrmap), GFP_ATOMIC);
		if (addrmap == NULL)
			goto out_addrmap;
		INIT_LIST_HEAD(&addrmap->list4);
		INIT_LIST_HEAD(&addrmap->list6);

		addrinfo = kzalloc(sizeof(*addrinfo), GFP_ATOMIC);
		if (addrinfo == NULL)
			goto out_addrinfo;
		addrinfo->def.calipso = doi_def;
		addrinfo->def.type = NETLBL_NLTYPE_CALIPSO;
		addrinfo->list.addr = *addr;
		addrinfo->list.addr.s6_addr32[0] &= mask->s6_addr32[0];
		addrinfo->list.addr.s6_addr32[1] &= mask->s6_addr32[1];
		addrinfo->list.addr.s6_addr32[2] &= mask->s6_addr32[2];
		addrinfo->list.addr.s6_addr32[3] &= mask->s6_addr32[3];
		addrinfo->list.mask = *mask;
		addrinfo->list.valid = 1;
		ret_val = netlbl_af6list_add(&addrinfo->list, &addrmap->list6);
		if (ret_val != 0)
			goto cfg_calipso_map_add_failure;

		entry->def.addrsel = addrmap;
		entry->def.type = NETLBL_NLTYPE_ADDRSELECT;
	} else {
		ret_val = -EINVAL;
		goto out_addrmap;
	}

	ret_val = netlbl_domhsh_add(entry, audit_info);
	if (ret_val != 0)
		goto cfg_calipso_map_add_failure;

	return 0;

cfg_calipso_map_add_failure:
	kfree(addrinfo);
out_addrinfo:
	kfree(addrmap);
out_addrmap:
	kfree(entry->domain);
out_domain:
	kfree(entry);
out_entry:
	calipso_doi_putdef(doi_def);
	return ret_val;
#else /* IPv6 */
	return -ENOSYS;
#endif /* IPv6 */
}

/*
 * Security Attribute Functions
 */

#define _CM_F_NONE	0x00000000
#define _CM_F_ALLOC	0x00000001
#define _CM_F_WALK	0x00000002

/**
 * _netlbl_catmap_getnode - Get an individual node from a catmap
 * @catmap: pointer to the category bitmap
 * @offset: the requested offset
 * @cm_flags: catmap flags, see _CM_F_*
 * @gfp_flags: memory allocation flags
 *
 * Description:
 * Iterate through the catmap looking for the node associated with @offset.
 * If the _CM_F_ALLOC flag is set in @cm_flags and there is no associated
 * node, one will be created and inserted into the catmap.  If the _CM_F_WALK
 * flag is set in @cm_flags and there is no associated node, the next highest
 * node will be returned.  Returns a pointer to the node on success, NULL on
 * failure.
 *
 */
static struct netlbl_lsm_catmap *_netlbl_catmap_getnode(
					struct netlbl_lsm_catmap **catmap,
					u32 offset,
					unsigned int cm_flags,
					gfp_t gfp_flags)
{
	struct netlbl_lsm_catmap *iter = *catmap;
	struct netlbl_lsm_catmap *prev = NULL;

	if (iter == NULL)
		goto catmap_getnode_alloc;
	if (offset < iter->startbit)
		goto catmap_getnode_walk;
	while (iter && offset >= (iter->startbit + NETLBL_CATMAP_SIZE)) {
		prev = iter;
		iter = iter->next;
	}
	if (iter == NULL || offset < iter->startbit)
		goto catmap_getnode_walk;

	return iter;

catmap_getnode_walk:
	if (cm_flags & _CM_F_WALK)
		return iter;
catmap_getnode_alloc:
	if (!(cm_flags & _CM_F_ALLOC))
		return NULL;

	iter = netlbl_catmap_alloc(gfp_flags);
	if (iter == NULL)
		return NULL;
	iter->startbit = offset & ~(NETLBL_CATMAP_SIZE - 1);

	if (prev == NULL) {
		iter->next = *catmap;
		*catmap = iter;
	} else {
		iter->next = prev->next;
		prev->next = iter;
	}

	return iter;
}

/**
 * netlbl_catmap_walk - Walk a LSM secattr catmap looking for a bit
 * @catmap: the category bitmap
 * @offset: the offset to start searching at, in bits
 *
 * Description:
 * This function walks a LSM secattr category bitmap starting at @offset and
 * returns the spot of the first set bit or -ENOENT if no bits are set.
 *
 */
int netlbl_catmap_walk(struct netlbl_lsm_catmap *catmap, u32 offset)
{
	struct netlbl_lsm_catmap *iter;
	u32 idx;
	u32 bit;
	u64 bitmap;

	iter = _netlbl_catmap_getnode(&catmap, offset, _CM_F_WALK, 0);
	if (iter == NULL)
		return -ENOENT;
	if (offset > iter->startbit) {
		offset -= iter->startbit;
		idx = offset / NETLBL_CATMAP_MAPSIZE;
		bit = offset % NETLBL_CATMAP_MAPSIZE;
	} else {
		idx = 0;
		bit = 0;
	}
	bitmap = iter->bitmap[idx] >> bit;

	for (;;) {
		if (bitmap != 0) {
			while ((bitmap & NETLBL_CATMAP_BIT) == 0) {
				bitmap >>= 1;
				bit++;
			}
			return iter->startbit +
			       (NETLBL_CATMAP_MAPSIZE * idx) + bit;
		}
		if (++idx >= NETLBL_CATMAP_MAPCNT) {
			if (iter->next != NULL) {
				iter = iter->next;
				idx = 0;
			} else
				return -ENOENT;
		}
		bitmap = iter->bitmap[idx];
		bit = 0;
	}

	return -ENOENT;
}
EXPORT_SYMBOL(netlbl_catmap_walk);

/**
 * netlbl_catmap_walkrng - Find the end of a string of set bits
 * @catmap: the category bitmap
 * @offset: the offset to start searching at, in bits
 *
 * Description:
 * This function walks a LSM secattr category bitmap starting at @offset and
 * returns the spot of the first cleared bit or -ENOENT if the offset is past
 * the end of the bitmap.
 *
 */
int netlbl_catmap_walkrng(struct netlbl_lsm_catmap *catmap, u32 offset)
{
	struct netlbl_lsm_catmap *iter;
	struct netlbl_lsm_catmap *prev = NULL;
	u32 idx;
	u32 bit;
	u64 bitmask;
	u64 bitmap;

	iter = _netlbl_catmap_getnode(&catmap, offset, _CM_F_WALK, 0);
	if (iter == NULL)
		return -ENOENT;
	if (offset > iter->startbit) {
		offset -= iter->startbit;
		idx = offset / NETLBL_CATMAP_MAPSIZE;
		bit = offset % NETLBL_CATMAP_MAPSIZE;
	} else {
		idx = 0;
		bit = 0;
	}
	bitmask = NETLBL_CATMAP_BIT << bit;

	for (;;) {
		bitmap = iter->bitmap[idx];
		while (bitmask != 0 && (bitmap & bitmask) != 0) {
			bitmask <<= 1;
			bit++;
		}

		if (prev && idx == 0 && bit == 0)
			return prev->startbit + NETLBL_CATMAP_SIZE - 1;
		else if (bitmask != 0)
			return iter->startbit +
			       (NETLBL_CATMAP_MAPSIZE * idx) + bit - 1;
		else if (++idx >= NETLBL_CATMAP_MAPCNT) {
			if (iter->next == NULL)
				return iter->startbit + NETLBL_CATMAP_SIZE - 1;
			prev = iter;
			iter = iter->next;
			idx = 0;
		}
		bitmask = NETLBL_CATMAP_BIT;
		bit = 0;
	}

	return -ENOENT;
}

/**
 * netlbl_catmap_getlong - Export an unsigned long bitmap
 * @catmap: pointer to the category bitmap
 * @offset: pointer to the requested offset
 * @bitmap: the exported bitmap
 *
 * Description:
 * Export a bitmap with an offset greater than or equal to @offset and return
 * it in @bitmap.  The @offset must be aligned to an unsigned long and will be
 * updated on return if different from what was requested; if the catmap is
 * empty at the requested offset and beyond, the @offset is set to (u32)-1.
 * Returns zero on success, negative values on failure.
 *
 */
int netlbl_catmap_getlong(struct netlbl_lsm_catmap *catmap,
			  u32 *offset,
			  unsigned long *bitmap)
{
	struct netlbl_lsm_catmap *iter;
	u32 off = *offset;
	u32 idx;

	/* only allow aligned offsets */
	if ((off & (BITS_PER_LONG - 1)) != 0)
		return -EINVAL;

	/* a null catmap is equivalent to an empty one */
	if (!catmap) {
		*offset = (u32)-1;
		return 0;
	}

	if (off < catmap->startbit) {
		off = catmap->startbit;
		*offset = off;
	}
	iter = _netlbl_catmap_getnode(&catmap, off, _CM_F_WALK, 0);
	if (iter == NULL) {
		*offset = (u32)-1;
		return 0;
	}

	if (off < iter->startbit) {
		*offset = iter->startbit;
		off = 0;
	} else
		off -= iter->startbit;

	idx = off / NETLBL_CATMAP_MAPSIZE;
	*bitmap = iter->bitmap[idx] >> (off % NETLBL_CATMAP_MAPSIZE);

	return 0;
}

/**
 * netlbl_catmap_setbit - Set a bit in a LSM secattr catmap
 * @catmap: pointer to the category bitmap
 * @bit: the bit to set
 * @flags: memory allocation flags
 *
 * Description:
 * Set the bit specified by @bit in @catmap.  Returns zero on success,
 * negative values on failure.
 *
 */
int netlbl_catmap_setbit(struct netlbl_lsm_catmap **catmap,
			 u32 bit,
			 gfp_t flags)
{
	struct netlbl_lsm_catmap *iter;
	u32 idx;

	iter = _netlbl_catmap_getnode(catmap, bit, _CM_F_ALLOC, flags);
	if (iter == NULL)
		return -ENOMEM;

	bit -= iter->startbit;
	idx = bit / NETLBL_CATMAP_MAPSIZE;
	iter->bitmap[idx] |= NETLBL_CATMAP_BIT << (bit % NETLBL_CATMAP_MAPSIZE);

	return 0;
}
EXPORT_SYMBOL(netlbl_catmap_setbit);

/**
 * netlbl_catmap_setrng - Set a range of bits in a LSM secattr catmap
 * @catmap: pointer to the category bitmap
 * @start: the starting bit
 * @end: the last bit in the string
 * @flags: memory allocation flags
 *
 * Description:
 * Set a range of bits, starting at @start and ending with @end.  Returns zero
 * on success, negative values on failure.
 *
 */
int netlbl_catmap_setrng(struct netlbl_lsm_catmap **catmap,
			 u32 start,
			 u32 end,
			 gfp_t flags)
{
	int rc = 0;
	u32 spot = start;

	while (rc == 0 && spot <= end) {
		if (((spot & (BITS_PER_LONG - 1)) == 0) &&
		    ((end - spot) > BITS_PER_LONG)) {
			rc = netlbl_catmap_setlong(catmap,
						   spot,
						   (unsigned long)-1,
						   flags);
			spot += BITS_PER_LONG;
		} else
			rc = netlbl_catmap_setbit(catmap, spot++, flags);
	}

	return rc;
}

/**
 * netlbl_catmap_setlong - Import an unsigned long bitmap
 * @catmap: pointer to the category bitmap
 * @offset: offset to the start of the imported bitmap
 * @bitmap: the bitmap to import
 * @flags: memory allocation flags
 *
 * Description:
 * Import the bitmap specified in @bitmap into @catmap, using the offset
 * in @offset.  The offset must be aligned to an unsigned long.  Returns zero
 * on success, negative values on failure.
 *
 */
int netlbl_catmap_setlong(struct netlbl_lsm_catmap **catmap,
			  u32 offset,
			  unsigned long bitmap,
			  gfp_t flags)
{
	struct netlbl_lsm_catmap *iter;
	u32 idx;

	/* only allow aligned offsets */
	if ((offset & (BITS_PER_LONG - 1)) != 0)
		return -EINVAL;

	iter = _netlbl_catmap_getnode(catmap, offset, _CM_F_ALLOC, flags);
	if (iter == NULL)
		return -ENOMEM;

	offset -= iter->startbit;
	idx = offset / NETLBL_CATMAP_MAPSIZE;
	iter->bitmap[idx] |= (u64)bitmap << (offset % NETLBL_CATMAP_MAPSIZE);

	return 0;
}

/* Bitmap functions */

/**
 * netlbl_bitmap_walk - Walk a bitmap looking for a bit
 * @bitmap: the bitmap
 * @bitmap_len: length in bits
 * @offset: starting offset
 * @state: if non-zero, look for a set (1) bit else look for a cleared (0) bit
 *
 * Description:
 * Starting at @offset, walk the bitmap from left to right until either the
 * desired bit is found or we reach the end.  Return the bit offset, -1 if
 * not found.
 */
int netlbl_bitmap_walk(const unsigned char *bitmap, u32 bitmap_len,
		       u32 offset, u8 state)
{
	u32 bit_spot;
	u32 byte_offset;
	unsigned char bitmask;
	unsigned char byte;

	if (offset >= bitmap_len)
		return -1;
	byte_offset = offset / 8;
	byte = bitmap[byte_offset];
	bit_spot = offset;
	bitmask = 0x80 >> (offset % 8);

	while (bit_spot < bitmap_len) {
		if ((state && (byte & bitmask) == bitmask) ||
		    (state == 0 && (byte & bitmask) == 0))
			return bit_spot;

		if (++bit_spot >= bitmap_len)
			return -1;
		bitmask >>= 1;
		if (bitmask == 0) {
			byte = bitmap[++byte_offset];
			bitmask = 0x80;
		}
	}

	return -1;
}
EXPORT_SYMBOL(netlbl_bitmap_walk);

/**
 * netlbl_bitmap_setbit - Sets a single bit in a bitmap
 * @bitmap: the bitmap
 * @bit: the bit
 * @state: if non-zero, set the bit (1) else clear the bit (0)
 *
 * Description:
 * Set or clear a single bit in the bitmap.
 */
void netlbl_bitmap_setbit(unsigned char *bitmap, u32 bit, u8 state)
{
	u32 byte_spot;
	u8 bitmask;

	/* gcc always rounds to zero when doing integer division */
	byte_spot = bit / 8;
	bitmask = 0x80 >> (bit % 8);
	if (state)
		bitmap[byte_spot] |= bitmask;
	else
		bitmap[byte_spot] &= ~bitmask;
}
EXPORT_SYMBOL(netlbl_bitmap_setbit);

/*
 * LSM Functions
 */

/**
 * netlbl_enabled - Determine if the NetLabel subsystem is enabled
 *
 * Description:
 * The LSM can use this function to determine if it should use NetLabel
 * security attributes in its enforcement mechanism.  Currently, NetLabel is
 * considered to be enabled when its configuration contains a valid setup for
 * at least one labeled protocol (i.e. NetLabel can understand incoming
 * labeled packets of at least one type); otherwise NetLabel is considered to
 * be disabled.
 *
 */
int netlbl_enabled(void)
{
	/* At some point we probably want to expose this mechanism to the
	 * user as well so that admins can toggle NetLabel regardless of the
	 * configuration */
	return (atomic_read(&netlabel_mgmt_protocount) > 0);
}

/**
 * netlbl_sock_setattr - Label a socket using the correct protocol
 * @sk: the socket to label
 * @family: protocol family
 * @secattr: the security attributes
 * @sk_locked: true if caller holds the socket lock
 *
 * Description:
 * Attach the correct label to the given socket using the security attributes
 * specified in @secattr.  This function requires exclusive access to @sk,
 * which means it either needs to be in the process of being created or
 * locked.  Returns zero on success, -EDESTADDRREQ if the domain is configured
 * to use network address selectors (can't blindly label the socket), and
 * negative values on all other failures.
 *
 */
int netlbl_sock_setattr(struct sock *sk,
			u16 family,
			const struct netlbl_lsm_secattr *secattr,
			bool sk_locked)
{
	int ret_val;
	struct netlbl_dom_map *dom_entry;

	rcu_read_lock();
	dom_entry = netlbl_domhsh_getentry(secattr->domain, family);
	if (dom_entry == NULL) {
		ret_val = -ENOENT;
		goto socket_setattr_return;
	}

	switch (family) {
	case AF_INET:
		switch (dom_entry->def.type) {
		case NETLBL_NLTYPE_ADDRSELECT:
			ret_val = -EDESTADDRREQ;
			break;
		case NETLBL_NLTYPE_CIPSOV4:
			ret_val = cipso_v4_sock_setattr(sk,
							dom_entry->def.cipso,
							secattr, sk_locked);
			break;
		case NETLBL_NLTYPE_UNLABELED:
			ret_val = 0;
			break;
		default:
			ret_val = -ENOENT;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		switch (dom_entry->def.type) {
		case NETLBL_NLTYPE_ADDRSELECT:
			ret_val = -EDESTADDRREQ;
			break;
		case NETLBL_NLTYPE_CALIPSO:
			ret_val = calipso_sock_setattr(sk,
						       dom_entry->def.calipso,
						       secattr);
			break;
		case NETLBL_NLTYPE_UNLABELED:
			ret_val = 0;
			break;
		default:
			ret_val = -ENOENT;
		}
		break;
#endif /* IPv6 */
	default:
		ret_val = -EPROTONOSUPPORT;
	}

socket_setattr_return:
	rcu_read_unlock();
	return ret_val;
}

/**
 * netlbl_sock_delattr - Delete all the NetLabel labels on a socket
 * @sk: the socket
 *
 * Description:
 * Remove all the NetLabel labeling from @sk.  The caller is responsible for
 * ensuring that @sk is locked.
 *
 */
void netlbl_sock_delattr(struct sock *sk)
{
	switch (sk->sk_family) {
	case AF_INET:
		cipso_v4_sock_delattr(sk);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		calipso_sock_delattr(sk);
		break;
#endif /* IPv6 */
	}
}

/**
 * netlbl_sock_getattr - Determine the security attributes of a sock
 * @sk: the sock
 * @secattr: the security attributes
 *
 * Description:
 * Examines the given sock to see if any NetLabel style labeling has been
 * applied to the sock, if so it parses the socket label and returns the
 * security attributes in @secattr.  Returns zero on success, negative values
 * on failure.
 *
 */
int netlbl_sock_getattr(struct sock *sk,
			struct netlbl_lsm_secattr *secattr)
{
	int ret_val;

	switch (sk->sk_family) {
	case AF_INET:
		ret_val = cipso_v4_sock_getattr(sk, secattr);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		ret_val = calipso_sock_getattr(sk, secattr);
		break;
#endif /* IPv6 */
	default:
		ret_val = -EPROTONOSUPPORT;
	}

	return ret_val;
}

/**
 * netlbl_sk_lock_check - Check if the socket lock has been acquired.
 * @sk: the socket to be checked
 *
 * Return: true if socket @sk is locked or if lock debugging is disabled at
 * runtime or compile-time; false otherwise
 *
 */
#ifdef CONFIG_LOCKDEP
bool netlbl_sk_lock_check(struct sock *sk)
{
	if (debug_locks)
		return lockdep_sock_is_held(sk);
	return true;
}
#else
bool netlbl_sk_lock_check(struct sock *sk)
{
	return true;
}
#endif

/**
 * netlbl_conn_setattr - Label a connected socket using the correct protocol
 * @sk: the socket to label
 * @addr: the destination address
 * @secattr: the security attributes
 *
 * Description:
 * Attach the correct label to the given connected socket using the security
 * attributes specified in @secattr.  The caller is responsible for ensuring
 * that @sk is locked.  Returns zero on success, negative values on failure.
 *
 */
int netlbl_conn_setattr(struct sock *sk,
			struct sockaddr *addr,
			const struct netlbl_lsm_secattr *secattr)
{
	int ret_val;
	struct sockaddr_in *addr4;
#if IS_ENABLED(CONFIG_IPV6)
	struct sockaddr_in6 *addr6;
#endif
	struct netlbl_dommap_def *entry;

	rcu_read_lock();
	switch (addr->sa_family) {
	case AF_INET:
		addr4 = (struct sockaddr_in *)addr;
		entry = netlbl_domhsh_getentry_af4(secattr->domain,
						   addr4->sin_addr.s_addr);
		if (entry == NULL) {
			ret_val = -ENOENT;
			goto conn_setattr_return;
		}
		switch (entry->type) {
		case NETLBL_NLTYPE_CIPSOV4:
			ret_val = cipso_v4_sock_setattr(sk,
							entry->cipso, secattr,
							netlbl_sk_lock_check(sk));
			break;
		case NETLBL_NLTYPE_UNLABELED:
			/* just delete the protocols we support for right now
			 * but we could remove other protocols if needed */
			netlbl_sock_delattr(sk);
			ret_val = 0;
			break;
		default:
			ret_val = -ENOENT;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		if (sk->sk_family != AF_INET6) {
			ret_val = -EAFNOSUPPORT;
			goto conn_setattr_return;
		}

		addr6 = (struct sockaddr_in6 *)addr;
		entry = netlbl_domhsh_getentry_af6(secattr->domain,
						   &addr6->sin6_addr);
		if (entry == NULL) {
			ret_val = -ENOENT;
			goto conn_setattr_return;
		}
		switch (entry->type) {
		case NETLBL_NLTYPE_CALIPSO:
			ret_val = calipso_sock_setattr(sk,
						       entry->calipso, secattr);
			break;
		case NETLBL_NLTYPE_UNLABELED:
			/* just delete the protocols we support for right now
			 * but we could remove other protocols if needed */
			netlbl_sock_delattr(sk);
			ret_val = 0;
			break;
		default:
			ret_val = -ENOENT;
		}
		break;
#endif /* IPv6 */
	default:
		ret_val = -EPROTONOSUPPORT;
	}

conn_setattr_return:
	rcu_read_unlock();
	return ret_val;
}

/**
 * netlbl_req_setattr - Label a request socket using the correct protocol
 * @req: the request socket to label
 * @secattr: the security attributes
 *
 * Description:
 * Attach the correct label to the given socket using the security attributes
 * specified in @secattr.  Returns zero on success, negative values on
 * failure.
 *
 */
int netlbl_req_setattr(struct request_sock *req,
		       const struct netlbl_lsm_secattr *secattr)
{
	int ret_val;
	struct netlbl_dommap_def *entry;
	struct inet_request_sock *ireq = inet_rsk(req);

	rcu_read_lock();
	switch (req->rsk_ops->family) {
	case AF_INET:
		entry = netlbl_domhsh_getentry_af4(secattr->domain,
						   ireq->ir_rmt_addr);
		if (entry == NULL) {
			ret_val = -ENOENT;
			goto req_setattr_return;
		}
		switch (entry->type) {
		case NETLBL_NLTYPE_CIPSOV4:
			ret_val = cipso_v4_req_setattr(req,
						       entry->cipso, secattr);
			break;
		case NETLBL_NLTYPE_UNLABELED:
			netlbl_req_delattr(req);
			ret_val = 0;
			break;
		default:
			ret_val = -ENOENT;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		entry = netlbl_domhsh_getentry_af6(secattr->domain,
						   &ireq->ir_v6_rmt_addr);
		if (entry == NULL) {
			ret_val = -ENOENT;
			goto req_setattr_return;
		}
		switch (entry->type) {
		case NETLBL_NLTYPE_CALIPSO:
			ret_val = calipso_req_setattr(req,
						      entry->calipso, secattr);
			break;
		case NETLBL_NLTYPE_UNLABELED:
			netlbl_req_delattr(req);
			ret_val = 0;
			break;
		default:
			ret_val = -ENOENT;
		}
		break;
#endif /* IPv6 */
	default:
		ret_val = -EPROTONOSUPPORT;
	}

req_setattr_return:
	rcu_read_unlock();
	return ret_val;
}

/**
 * netlbl_req_delattr - Delete all the NetLabel labels on a socket
 * @req: the socket
 *
 * Description:
 * Remove all the NetLabel labeling from @req.
 *
 */
void netlbl_req_delattr(struct request_sock *req)
{
	switch (req->rsk_ops->family) {
	case AF_INET:
		cipso_v4_req_delattr(req);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		calipso_req_delattr(req);
		break;
#endif /* IPv6 */
	}
}

/**
 * netlbl_skbuff_setattr - Label a packet using the correct protocol
 * @skb: the packet
 * @family: protocol family
 * @secattr: the security attributes
 *
 * Description:
 * Attach the correct label to the given packet using the security attributes
 * specified in @secattr.  Returns zero on success, negative values on failure.
* */ int netlbl_skbuff_setattr(struct sk_buff *skb, u16 family, const struct netlbl_lsm_secattr *secattr) { int ret_val; struct iphdr *hdr4; #if IS_ENABLED(CONFIG_IPV6) struct ipv6hdr *hdr6; #endif struct netlbl_dommap_def *entry; rcu_read_lock(); switch (family) { case AF_INET: hdr4 = ip_hdr(skb); entry = netlbl_domhsh_getentry_af4(secattr->domain, hdr4->daddr); if (entry == NULL) { ret_val = -ENOENT; goto skbuff_setattr_return; } switch (entry->type) { case NETLBL_NLTYPE_CIPSOV4: ret_val = cipso_v4_skbuff_setattr(skb, entry->cipso, secattr); break; case NETLBL_NLTYPE_UNLABELED: /* just delete the protocols we support for right now * but we could remove other protocols if needed */ ret_val = cipso_v4_skbuff_delattr(skb); break; default: ret_val = -ENOENT; } break; #if IS_ENABLED(CONFIG_IPV6) case AF_INET6: hdr6 = ipv6_hdr(skb); entry = netlbl_domhsh_getentry_af6(secattr->domain, &hdr6->daddr); if (entry == NULL) { ret_val = -ENOENT; goto skbuff_setattr_return; } switch (entry->type) { case NETLBL_NLTYPE_CALIPSO: ret_val = calipso_skbuff_setattr(skb, entry->calipso, secattr); break; case NETLBL_NLTYPE_UNLABELED: /* just delete the protocols we support for right now * but we could remove other protocols if needed */ ret_val = calipso_skbuff_delattr(skb); break; default: ret_val = -ENOENT; } break; #endif /* IPv6 */ default: ret_val = -EPROTONOSUPPORT; } skbuff_setattr_return: rcu_read_unlock(); return ret_val; }
/** * netlbl_skbuff_getattr - Determine the security attributes of a packet * @skb: the packet * @family: protocol family * @secattr: the security attributes * * Description: * Examines the given packet to see if a recognized form of packet labeling * is present, if so it parses the packet label and returns the security * attributes in @secattr. Returns zero on success, negative values on * failure. * */ int netlbl_skbuff_getattr(const struct sk_buff *skb, u16 family, struct netlbl_lsm_secattr *secattr) { unsigned char *ptr; switch (family) { case AF_INET: ptr = cipso_v4_optptr(skb); if (ptr && cipso_v4_getattr(ptr, secattr) == 0) return 0; break; #if IS_ENABLED(CONFIG_IPV6) case AF_INET6: ptr = calipso_optptr(skb); if (ptr && calipso_getattr(ptr, secattr) == 0) return 0; break; #endif /* IPv6 */ } return netlbl_unlabel_getattr(skb, family, secattr); }
/** * netlbl_skbuff_err - Handle an LSM error on a sk_buff * @skb: the packet * @family: the family * @error: the error code * @gateway: true if host is acting as a gateway, false otherwise * * Description: * Deal with an LSM problem when handling the packet in @skb; typically this * is a permission denied problem (-EACCES). The correct action is determined * according to the packet's labeling protocol. * */ void netlbl_skbuff_err(struct sk_buff *skb, u16 family, int error, int gateway) { switch (family) { case AF_INET: if (cipso_v4_optptr(skb)) cipso_v4_error(skb, error, gateway); break; } }
/** * netlbl_cache_invalidate - Invalidate all of the NetLabel protocol caches * * Description: * For all of the NetLabel protocols that support some form of label mapping * cache, invalidate the cache.
* */ void netlbl_cache_invalidate(void) { cipso_v4_cache_invalidate(); #if IS_ENABLED(CONFIG_IPV6) calipso_cache_invalidate(); #endif /* IPv6 */ } /** * netlbl_cache_add - Add an entry to a NetLabel protocol cache * @skb: the packet * @family: the family * @secattr: the packet's security attributes * * Description: * Add the LSM security attributes for the given packet to the underlying * NetLabel protocol's label mapping cache. Returns zero on success, negative * values on error. * */ int netlbl_cache_add(const struct sk_buff *skb, u16 family, const struct netlbl_lsm_secattr *secattr) { unsigned char *ptr; if ((secattr->flags & NETLBL_SECATTR_CACHE) == 0) return -ENOMSG; switch (family) { case AF_INET: ptr = cipso_v4_optptr(skb); if (ptr) return cipso_v4_cache_add(ptr, secattr); break; #if IS_ENABLED(CONFIG_IPV6) case AF_INET6: ptr = calipso_optptr(skb); if (ptr) return calipso_cache_add(ptr, secattr); break; #endif /* IPv6 */ } return -ENOMSG; } /* * Protocol Engine Functions */ /** * netlbl_audit_start - Start an audit message * @type: audit message type * @audit_info: NetLabel audit information * * Description: * Start an audit message using the type specified in @type and fill the audit * message with some fields common to all NetLabel audit messages. This * function should only be used by protocol engines, not LSMs. Returns a * pointer to the audit buffer on success, NULL on failure. * */ struct audit_buffer *netlbl_audit_start(int type, struct netlbl_audit *audit_info) { return netlbl_audit_start_common(type, audit_info); } EXPORT_SYMBOL(netlbl_audit_start); /* * Setup Functions */ /** * netlbl_init - Initialize NetLabel * * Description: * Perform the required NetLabel initialization before first use. * */ static int __init netlbl_init(void) { int ret_val; printk(KERN_INFO "NetLabel: Initializing\n"); printk(KERN_INFO "NetLabel: domain hash size = %u\n", (1 << NETLBL_DOMHSH_BITSIZE)); printk(KERN_INFO "NetLabel: protocols = UNLABELED CIPSOv4 CALIPSO\n"); ret_val = netlbl_domhsh_init(NETLBL_DOMHSH_BITSIZE); if (ret_val != 0) goto init_failure; ret_val = netlbl_unlabel_init(NETLBL_UNLHSH_BITSIZE); if (ret_val != 0) goto init_failure; ret_val = netlbl_netlink_init(); if (ret_val != 0) goto init_failure; ret_val = netlbl_unlabel_defconf(); if (ret_val != 0) goto init_failure; printk(KERN_INFO "NetLabel: unlabeled traffic allowed by default\n"); return 0; init_failure: panic("NetLabel: failed to initialize properly (%d)\n", ret_val); } subsys_initcall(netlbl_init);
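/*
 * Editor's illustration (not part of netlabel_kapi.c above): the NetLabel
 * bitmap helpers index bits MSB-first within each byte, i.e. bit 0 is mask
 * 0x80 of byte 0. The standalone userspace sketch below re-implements that
 * convention to make netlbl_bitmap_setbit()/netlbl_bitmap_walk() concrete;
 * the demo_* names are hypothetical and only mirror the kernel logic.
 */
#include <stdio.h>
#include <string.h>

static void demo_setbit(unsigned char *bitmap, unsigned int bit, int state)
{
	unsigned char mask = 0x80 >> (bit % 8);	/* MSB-first bit numbering */

	if (state)
		bitmap[bit / 8] |= mask;
	else
		bitmap[bit / 8] &= ~mask;
}

static int demo_walk(const unsigned char *bitmap, unsigned int len_bits,
		     unsigned int offset, int state)
{
	unsigned int bit;

	/* scan left to right for the first bit matching @state, as the
	 * kernel helper does; return -1 when no such bit exists */
	for (bit = offset; bit < len_bits; bit++) {
		int set = (bitmap[bit / 8] & (0x80 >> (bit % 8))) != 0;

		if (set == !!state)
			return bit;
	}
	return -1;
}

int main(void)
{
	unsigned char bm[4];

	memset(bm, 0, sizeof(bm));
	demo_setbit(bm, 10, 1);
	/* bit 10 lands in byte 1 as 0x80 >> 2 == 0x20 */
	printf("byte1=0x%02x first_set=%d\n", bm[1], demo_walk(bm, 32, 0, 1));
	return 0;
}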
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/spinlock.h> #include <linux/slab.h> #include <linux/list.h> #include <linux/list_bl.h> #include <linux/module.h> #include <linux/sched.h> #include <linux/workqueue.h> #include <linux/mbcache.h>
/* * Mbcache is a simple key-value store. Keys need not be unique, however * key-value pairs are expected to be unique (we use this fact in * mb_cache_entry_delete_or_get()). * * Ext2 and ext4 use this cache for deduplication of extended attribute blocks. * Ext4 also uses it for deduplication of xattr values stored in inodes. * They use a hash of the data as a key and provide a value that may represent * a block or inode number. That's why keys need not be unique (the hash of * different data may be the same). However, a user-provided value always * uniquely identifies a cache entry. * * We provide functions for creation and removal of entries, search by key, * and a special "delete entry with given key-value pair" operation. A fixed * size hash table is used for fast key lookups.
*/ struct mb_cache { /* Hash table of entries */ struct hlist_bl_head *c_hash; /* log2 of hash table size */ int c_bucket_bits; /* Maximum entries in cache to avoid degrading hash too much */ unsigned long c_max_entries; /* Protects c_list, c_entry_count */ spinlock_t c_list_lock; struct list_head c_list; /* Number of entries in cache */ unsigned long c_entry_count; struct shrinker *c_shrink; /* Work for shrinking when the cache has too many entries */ struct work_struct c_shrink_work; }; static struct kmem_cache *mb_entry_cache; static unsigned long mb_cache_shrink(struct mb_cache *cache, unsigned long nr_to_scan); static inline struct hlist_bl_head *mb_cache_entry_head(struct mb_cache *cache, u32 key) { return &cache->c_hash[hash_32(key, cache->c_bucket_bits)]; } /* * Number of entries to reclaim synchronously when there are too many entries * in cache */ #define SYNC_SHRINK_BATCH 64 /* * mb_cache_entry_create - create entry in cache * @cache - cache where the entry should be created * @mask - gfp mask with which the entry should be allocated * @key - key of the entry * @value - value of the entry * @reusable - is the entry reusable by others? * * Creates entry in @cache with key @key and value @value. The function returns * -EBUSY if entry with the same key and value already exists in cache. * Otherwise 0 is returned. */ int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key, u64 value, bool reusable) { struct mb_cache_entry *entry, *dup; struct hlist_bl_node *dup_node; struct hlist_bl_head *head; /* Schedule background reclaim if there are too many entries */ if (cache->c_entry_count >= cache->c_max_entries) schedule_work(&cache->c_shrink_work); /* Do some sync reclaim if background reclaim cannot keep up */ if (cache->c_entry_count >= 2*cache->c_max_entries) mb_cache_shrink(cache, SYNC_SHRINK_BATCH); entry = kmem_cache_alloc(mb_entry_cache, mask); if (!entry) return -ENOMEM; INIT_LIST_HEAD(&entry->e_list); /* * We create entry with two references. One reference is kept by the * hash table, the other reference is used to protect us from * mb_cache_entry_delete_or_get() until the entry is fully setup. This * avoids nesting of cache->c_list_lock into hash table bit locks which * is problematic for RT. */ atomic_set(&entry->e_refcnt, 2); entry->e_key = key; entry->e_value = value; entry->e_flags = 0; if (reusable) set_bit(MBE_REUSABLE_B, &entry->e_flags); head = mb_cache_entry_head(cache, key); hlist_bl_lock(head); hlist_bl_for_each_entry(dup, dup_node, head, e_hash_list) { if (dup->e_key == key && dup->e_value == value) { hlist_bl_unlock(head); kmem_cache_free(mb_entry_cache, entry); return -EBUSY; } } hlist_bl_add_head(&entry->e_hash_list, head); hlist_bl_unlock(head); spin_lock(&cache->c_list_lock); list_add_tail(&entry->e_list, &cache->c_list); cache->c_entry_count++; spin_unlock(&cache->c_list_lock); mb_cache_entry_put(cache, entry); return 0; } EXPORT_SYMBOL(mb_cache_entry_create); void __mb_cache_entry_free(struct mb_cache *cache, struct mb_cache_entry *entry) { struct hlist_bl_head *head; head = mb_cache_entry_head(cache, entry->e_key); hlist_bl_lock(head); hlist_bl_del(&entry->e_hash_list); hlist_bl_unlock(head); kmem_cache_free(mb_entry_cache, entry); } EXPORT_SYMBOL(__mb_cache_entry_free); /* * mb_cache_entry_wait_unused - wait to be the last user of the entry * * @entry - entry to work on * * Wait to be the last user of the entry. 
*/ void mb_cache_entry_wait_unused(struct mb_cache_entry *entry) { wait_var_event(&entry->e_refcnt, atomic_read(&entry->e_refcnt) <= 2); } EXPORT_SYMBOL(mb_cache_entry_wait_unused);
static struct mb_cache_entry *__entry_find(struct mb_cache *cache, struct mb_cache_entry *entry, u32 key) { struct mb_cache_entry *old_entry = entry; struct hlist_bl_node *node; struct hlist_bl_head *head; head = mb_cache_entry_head(cache, key); hlist_bl_lock(head); if (entry && !hlist_bl_unhashed(&entry->e_hash_list)) node = entry->e_hash_list.next; else node = hlist_bl_first(head); while (node) { entry = hlist_bl_entry(node, struct mb_cache_entry, e_hash_list); if (entry->e_key == key && test_bit(MBE_REUSABLE_B, &entry->e_flags) && atomic_inc_not_zero(&entry->e_refcnt)) goto out; node = node->next; } entry = NULL; out: hlist_bl_unlock(head); if (old_entry) mb_cache_entry_put(cache, old_entry); return entry; }
/* * mb_cache_entry_find_first - find the first reusable entry with the given key * @cache: cache where we should search * @key: key to look for * * Search in @cache for a reusable entry with key @key. Grabs a reference to * the first reusable entry found and returns the entry. */ struct mb_cache_entry *mb_cache_entry_find_first(struct mb_cache *cache, u32 key) { return __entry_find(cache, NULL, key); } EXPORT_SYMBOL(mb_cache_entry_find_first);
/* * mb_cache_entry_find_next - find next reusable entry with the same key * @cache: cache where we should search * @entry: entry to start search from * * Finds next reusable entry in the hash chain which has the same key as @entry. * If @entry is unhashed (which can happen when deletion of entry races with the * search), finds the first reusable entry in the hash chain. The function drops * the reference to @entry and returns with a reference to the found entry. */ struct mb_cache_entry *mb_cache_entry_find_next(struct mb_cache *cache, struct mb_cache_entry *entry) { return __entry_find(cache, entry, entry->e_key); } EXPORT_SYMBOL(mb_cache_entry_find_next);
/* * mb_cache_entry_get - get a cache entry by value (and key) * @cache - cache we work with * @key - key * @value - value */ struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key, u64 value) { struct hlist_bl_node *node; struct hlist_bl_head *head; struct mb_cache_entry *entry; head = mb_cache_entry_head(cache, key); hlist_bl_lock(head); hlist_bl_for_each_entry(entry, node, head, e_hash_list) { if (entry->e_key == key && entry->e_value == value && atomic_inc_not_zero(&entry->e_refcnt)) goto out; } entry = NULL; out: hlist_bl_unlock(head); return entry; } EXPORT_SYMBOL(mb_cache_entry_get);
/* mb_cache_entry_delete_or_get - remove a cache entry if it has no users * @cache - cache we work with * @key - key * @value - value * * Remove entry from cache @cache with key @key and value @value. The removal * happens only if the entry is unused. The function returns NULL in case the * entry was successfully removed or there's no such entry in cache. Otherwise * the function grabs a reference to the entry that we failed to delete because * it still has users and returns it.
*/ struct mb_cache_entry *mb_cache_entry_delete_or_get(struct mb_cache *cache, u32 key, u64 value) { struct mb_cache_entry *entry; entry = mb_cache_entry_get(cache, key, value); if (!entry) return NULL; /* * Drop the ref we got from mb_cache_entry_get() and the initial hash * ref if we are the last user */ if (atomic_cmpxchg(&entry->e_refcnt, 2, 0) != 2) return entry; spin_lock(&cache->c_list_lock); if (!list_empty(&entry->e_list)) list_del_init(&entry->e_list); cache->c_entry_count--; spin_unlock(&cache->c_list_lock); __mb_cache_entry_free(cache, entry); return NULL; } EXPORT_SYMBOL(mb_cache_entry_delete_or_get);
/* mb_cache_entry_touch - cache entry got used * @cache - cache the entry belongs to * @entry - entry that got used * * Marks entry as used to give it higher chances of surviving in the cache. */ void mb_cache_entry_touch(struct mb_cache *cache, struct mb_cache_entry *entry) { set_bit(MBE_REFERENCED_B, &entry->e_flags); } EXPORT_SYMBOL(mb_cache_entry_touch);
static unsigned long mb_cache_count(struct shrinker *shrink, struct shrink_control *sc) { struct mb_cache *cache = shrink->private_data; return cache->c_entry_count; }
/* Shrink number of entries in cache */ static unsigned long mb_cache_shrink(struct mb_cache *cache, unsigned long nr_to_scan) { struct mb_cache_entry *entry; unsigned long shrunk = 0; spin_lock(&cache->c_list_lock); while (nr_to_scan-- && !list_empty(&cache->c_list)) { entry = list_first_entry(&cache->c_list, struct mb_cache_entry, e_list); /* Drop initial hash reference if there is no user */ if (test_bit(MBE_REFERENCED_B, &entry->e_flags) || atomic_cmpxchg(&entry->e_refcnt, 1, 0) != 1) { clear_bit(MBE_REFERENCED_B, &entry->e_flags); list_move_tail(&entry->e_list, &cache->c_list); continue; } list_del_init(&entry->e_list); cache->c_entry_count--; spin_unlock(&cache->c_list_lock); __mb_cache_entry_free(cache, entry); shrunk++; cond_resched(); spin_lock(&cache->c_list_lock); } spin_unlock(&cache->c_list_lock); return shrunk; }
static unsigned long mb_cache_scan(struct shrinker *shrink, struct shrink_control *sc) { struct mb_cache *cache = shrink->private_data; return mb_cache_shrink(cache, sc->nr_to_scan); }
/* We shrink 1/X of the cache when we have too many entries in it */ #define SHRINK_DIVISOR 16
static void mb_cache_shrink_worker(struct work_struct *work) { struct mb_cache *cache = container_of(work, struct mb_cache, c_shrink_work); mb_cache_shrink(cache, cache->c_max_entries / SHRINK_DIVISOR); }
/* * mb_cache_create - create cache * @bucket_bits: log2 of the hash table size * * Create cache for keys with 2^bucket_bits hash entries.
*/ struct mb_cache *mb_cache_create(int bucket_bits) { struct mb_cache *cache; unsigned long bucket_count = 1UL << bucket_bits; unsigned long i; cache = kzalloc(sizeof(struct mb_cache), GFP_KERNEL); if (!cache) goto err_out; cache->c_bucket_bits = bucket_bits; cache->c_max_entries = bucket_count << 4; INIT_LIST_HEAD(&cache->c_list); spin_lock_init(&cache->c_list_lock); cache->c_hash = kmalloc_array(bucket_count, sizeof(struct hlist_bl_head), GFP_KERNEL); if (!cache->c_hash) { kfree(cache); goto err_out; } for (i = 0; i < bucket_count; i++) INIT_HLIST_BL_HEAD(&cache->c_hash[i]); cache->c_shrink = shrinker_alloc(0, "mbcache-shrinker"); if (!cache->c_shrink) { kfree(cache->c_hash); kfree(cache); goto err_out; } cache->c_shrink->count_objects = mb_cache_count; cache->c_shrink->scan_objects = mb_cache_scan; cache->c_shrink->private_data = cache; shrinker_register(cache->c_shrink); INIT_WORK(&cache->c_shrink_work, mb_cache_shrink_worker); return cache; err_out: return NULL; } EXPORT_SYMBOL(mb_cache_create); /* * mb_cache_destroy - destroy cache * @cache: the cache to destroy * * Free all entries in cache and cache itself. Caller must make sure nobody * (except shrinker) can reach @cache when calling this. */ void mb_cache_destroy(struct mb_cache *cache) { struct mb_cache_entry *entry, *next; shrinker_free(cache->c_shrink); /* * We don't bother with any locking. Cache must not be used at this * point. */ list_for_each_entry_safe(entry, next, &cache->c_list, e_list) { list_del(&entry->e_list); WARN_ON(atomic_read(&entry->e_refcnt) != 1); mb_cache_entry_put(cache, entry); } kfree(cache->c_hash); kfree(cache); } EXPORT_SYMBOL(mb_cache_destroy); static int __init mbcache_init(void) { mb_entry_cache = KMEM_CACHE(mb_cache_entry, SLAB_RECLAIM_ACCOUNT); if (!mb_entry_cache) return -ENOMEM; return 0; } static void __exit mbcache_exit(void) { kmem_cache_destroy(mb_entry_cache); } module_init(mbcache_init) module_exit(mbcache_exit) MODULE_AUTHOR("Jan Kara <jack@suse.cz>"); MODULE_DESCRIPTION("Meta block cache (for extended attributes)"); MODULE_LICENSE("GPL");
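/*
 * Editor's illustration (not part of mbcache.c above): a minimal sketch of
 * how a filesystem might drive this API for xattr-block deduplication, in
 * the spirit of ext4's usage. The demo_* helpers and values are
 * hypothetical; error handling is reduced to the reference-counting
 * pattern that matters.
 */
#include <linux/errno.h>
#include <linux/mbcache.h>

static struct mb_cache *demo_cache;	/* created once, e.g. at mount */

static int demo_init(void)
{
	demo_cache = mb_cache_create(10);	/* 2^10 hash buckets */
	return demo_cache ? 0 : -ENOMEM;
}

/* Publish a candidate block: key is a content hash, value a block number.
 * -EBUSY only means the same (hash, block) pair is already cached. */
static int demo_publish(u32 hash, u64 block)
{
	return mb_cache_entry_create(demo_cache, GFP_NOFS, hash, block, true);
}

/* Look up any reusable block whose contents hashed to @hash; the caller
 * would still re-verify the block data, since keys may collide. */
static u64 demo_lookup(u32 hash)
{
	struct mb_cache_entry *ce;
	u64 block;

	ce = mb_cache_entry_find_first(demo_cache, hash);
	if (!ce)
		return 0;
	block = ce->e_value;
	mb_cache_entry_put(demo_cache, ce);	/* drop ref from find_first() */
	return block;
}

/* Remove a (hash, block) pair, waiting out any concurrent users first. */
static void demo_remove(u32 hash, u64 block)
{
	struct mb_cache_entry *ce;

	ce = mb_cache_entry_delete_or_get(demo_cache, hash, block);
	if (ce) {
		mb_cache_entry_wait_unused(ce);
		mb_cache_entry_put(demo_cache, ce);
	}
}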
/* * Copyright (c) 2016 Intel Corporation * * Permission to use, copy, modify, distribute, and sell this software and its * documentation for any purpose is hereby granted without fee, provided that * the above copyright notice appear in all copies and that both that copyright * notice and this permission notice appear in supporting documentation, and * that the name of the copyright holders not be used in advertising or * publicity pertaining to distribution of the software without specific, * written prior permission. The copyright holders make no representations * about the suitability of this software for any purpose. It is provided "as * is" without express or implied warranty. * * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE * OF THIS SOFTWARE. */
#ifndef __DRM_ENCODER_H__ #define __DRM_ENCODER_H__
#include <linux/list.h> #include <linux/ctype.h> #include <drm/drm_crtc.h> #include <drm/drm_mode.h> #include <drm/drm_mode_object.h> #include <drm/drm_util.h>
struct drm_encoder;
/** * struct drm_encoder_funcs - encoder controls * * Encoders sit between CRTCs and connectors. */ struct drm_encoder_funcs { /** * @reset: * * Reset encoder hardware and software state to off. This function isn't * called by the core directly, only through drm_mode_config_reset(). * It's not a helper hook only for historical reasons. */ void (*reset)(struct drm_encoder *encoder); /** * @destroy: * * Clean up encoder resources. This is only called at driver unload time * through drm_mode_config_cleanup() since an encoder cannot be * hotplugged in DRM. */ void (*destroy)(struct drm_encoder *encoder); /** * @late_register: * * This optional hook can be used to register additional userspace * interfaces attached to the encoder. * It is called late in the driver load sequence from drm_dev_register().
* Everything added from this callback should be unregistered in * the early_unregister callback. * * Returns: * * 0 on success, or a negative error code on failure. */ int (*late_register)(struct drm_encoder *encoder); /** * @early_unregister: * * This optional hook should be used to unregister the additional * userspace interfaces attached to the encoder from * @late_register. It is called from drm_dev_unregister(), * early in the driver unload sequence to disable userspace access * before data structures are torn down. */ void (*early_unregister)(struct drm_encoder *encoder); /** * @debugfs_init: * * Allows encoders to create encoder-specific debugfs files. */ void (*debugfs_init)(struct drm_encoder *encoder, struct dentry *root); };
/** * struct drm_encoder - central DRM encoder structure * @dev: parent DRM device * @head: list management * @base: base KMS object * @name: human readable name, can be overwritten by the driver * @funcs: control functions, can be NULL for simple managed encoders * @helper_private: mid-layer private data * * CRTCs drive pixels to encoders, which convert them into signals * appropriate for a given connector or set of connectors. */ struct drm_encoder { struct drm_device *dev; struct list_head head; struct drm_mode_object base; char *name; /** * @encoder_type: * * One of the DRM_MODE_ENCODER_<foo> types in drm_mode.h. The following * encoder types are defined thus far: * * - DRM_MODE_ENCODER_DAC for VGA and analog on DVI-I/DVI-A. * * - DRM_MODE_ENCODER_TMDS for DVI, HDMI and (embedded) DisplayPort. * * - DRM_MODE_ENCODER_LVDS for display panels, or in general any panel * with a proprietary parallel connector. * * - DRM_MODE_ENCODER_TVDAC for TV output (Composite, S-Video, * Component, SCART). * * - DRM_MODE_ENCODER_VIRTUAL for virtual machine displays * * - DRM_MODE_ENCODER_DSI for panels connected using the DSI serial bus. * * - DRM_MODE_ENCODER_DPI for panels connected using the DPI parallel * bus. * * - DRM_MODE_ENCODER_DPMST for special fake encoders used to allow * multiple DP MST streams to share one physical encoder. */ int encoder_type; /** * @index: Position inside the mode_config.list, can be used as an array * index. It is invariant over the lifetime of the encoder. */ unsigned index; /** * @possible_crtcs: Bitmask of potential CRTC bindings, using * drm_crtc_index() as the index into the bitfield. The driver must set * the bits for all &drm_crtc objects this encoder can be connected to * before calling drm_dev_register(). * * You will get a WARN if you get this wrong in the driver. * * Note that since CRTC objects can't be hotplugged the assigned indices * are stable and hence known before registering all objects. */ uint32_t possible_crtcs; /** * @possible_clones: Bitmask of potential sibling encoders for cloning, * using drm_encoder_index() as the index into the bitfield. The driver * must set the bits for all &drm_encoder objects which can clone a * &drm_crtc together with this encoder before calling * drm_dev_register(). Drivers should set the bit representing the * encoder itself, too. Cloning bits should be set such that when two * encoders can be used in a cloned configuration, they should both have * each other's bits set. * * As an exception to the above rule if the driver doesn't implement * any cloning it can leave @possible_clones set to 0. The core will * automagically fix this up by setting the bit for the encoder itself. * * You will get a WARN if you get this wrong in the driver.
* * Note that since encoder objects can't be hotplugged the assigned indices * are stable and hence known before registering all objects. */ uint32_t possible_clones; /** * @crtc: Currently bound CRTC, only really meaningful for non-atomic * drivers. Atomic drivers should instead check * &drm_connector_state.crtc. */ struct drm_crtc *crtc; /** * @bridge_chain: Bridges attached to this encoder. Drivers shall not * access this field directly. */ struct list_head bridge_chain; const struct drm_encoder_funcs *funcs; const struct drm_encoder_helper_funcs *helper_private; /** * @debugfs_entry: * * Debugfs directory for this encoder. */ struct dentry *debugfs_entry; };
#define obj_to_encoder(x) container_of(x, struct drm_encoder, base)
__printf(5, 6) int drm_encoder_init(struct drm_device *dev, struct drm_encoder *encoder, const struct drm_encoder_funcs *funcs, int encoder_type, const char *name, ...);
__printf(5, 6) int drmm_encoder_init(struct drm_device *dev, struct drm_encoder *encoder, const struct drm_encoder_funcs *funcs, int encoder_type, const char *name, ...);
__printf(6, 7) void *__drmm_encoder_alloc(struct drm_device *dev, size_t size, size_t offset, const struct drm_encoder_funcs *funcs, int encoder_type, const char *name, ...);
/** * drmm_encoder_alloc - Allocate and initialize an encoder * @dev: drm device * @type: the type of the struct which contains struct &drm_encoder * @member: the name of the &drm_encoder within @type * @funcs: callbacks for this encoder (optional) * @encoder_type: user visible type of the encoder * @name: printf style format string for the encoder name, or NULL for default name * * Allocates and initializes an encoder. The encoder should be subclassed as * part of the driver's encoder objects. Cleanup is automatically handled * through registering drm_encoder_cleanup() with drmm_add_action(). * * The @drm_encoder_funcs.destroy hook must be NULL. * * Returns: * Pointer to new encoder, or ERR_PTR on failure. */ #define drmm_encoder_alloc(dev, type, member, funcs, encoder_type, name, ...) \ ((type *)__drmm_encoder_alloc(dev, sizeof(type), \ offsetof(type, member), funcs, \ encoder_type, name, ##__VA_ARGS__))
/** * drmm_plain_encoder_alloc - Allocate and initialize an encoder * @dev: drm device * @funcs: callbacks for this encoder (optional) * @encoder_type: user visible type of the encoder * @name: printf style format string for the encoder name, or NULL for default name * * This is a simplified version of drmm_encoder_alloc(), which only allocates * and returns a struct drm_encoder instance, with no subclassing. * * Returns: * Pointer to the new drm_encoder struct, or ERR_PTR on failure. */ #define drmm_plain_encoder_alloc(dev, funcs, encoder_type, name, ...) \ ((struct drm_encoder *) \ __drmm_encoder_alloc(dev, sizeof(struct drm_encoder), \ 0, funcs, encoder_type, name, ##__VA_ARGS__))
/** * drm_encoder_index - find the index of a registered encoder * @encoder: encoder to find index for * * Given a registered encoder, return the index of that encoder within a DRM * device's list of encoders. */ static inline unsigned int drm_encoder_index(const struct drm_encoder *encoder) { return encoder->index; }
/** * drm_encoder_mask - find the mask of a registered encoder * @encoder: encoder to find mask for * * Given a registered encoder, return the mask bit of that encoder for an * encoder's possible_clones field.
*/ static inline u32 drm_encoder_mask(const struct drm_encoder *encoder) { return 1 << drm_encoder_index(encoder); } /** * drm_encoder_crtc_ok - can a given crtc drive a given encoder? * @encoder: encoder to test * @crtc: crtc to test * * Returns false if @encoder can't be driven by @crtc, true otherwise. */ static inline bool drm_encoder_crtc_ok(struct drm_encoder *encoder, struct drm_crtc *crtc) { return !!(encoder->possible_crtcs & drm_crtc_mask(crtc)); } /** * drm_encoder_find - find a &drm_encoder * @dev: DRM device * @file_priv: drm file to check for lease against. * @id: encoder id * * Returns the encoder with @id, NULL if it doesn't exist. Simple wrapper around * drm_mode_object_find(). */ static inline struct drm_encoder *drm_encoder_find(struct drm_device *dev, struct drm_file *file_priv, uint32_t id) { struct drm_mode_object *mo; mo = drm_mode_object_find(dev, file_priv, id, DRM_MODE_OBJECT_ENCODER); return mo ? obj_to_encoder(mo) : NULL; } void drm_encoder_cleanup(struct drm_encoder *encoder); /** * drm_for_each_encoder_mask - iterate over encoders specified by bitmask * @encoder: the loop cursor * @dev: the DRM device * @encoder_mask: bitmask of encoder indices * * Iterate over all encoders specified by bitmask. */ #define drm_for_each_encoder_mask(encoder, dev, encoder_mask) \ list_for_each_entry((encoder), &(dev)->mode_config.encoder_list, head) \ for_each_if ((encoder_mask) & drm_encoder_mask(encoder)) /** * drm_for_each_encoder - iterate over all encoders * @encoder: the loop cursor * @dev: the DRM device * * Iterate over all encoders of @dev. */ #define drm_for_each_encoder(encoder, dev) \ list_for_each_entry(encoder, &(dev)->mode_config.encoder_list, head) #endif
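/*
 * Editor's illustration (not part of drm_encoder.h above): the intended
 * calling pattern for drmm_encoder_alloc() from a driver's modeset init,
 * following the contract documented in the header. struct demo_encoder and
 * demo_encoder_init() are hypothetical names.
 */
#include <linux/err.h>
#include <drm/drm_device.h>
#include <drm/drm_encoder.h>

struct demo_encoder {
	struct drm_encoder base;	/* must be embedded, see @member */
	void *priv;			/* driver-private state */
};

static int demo_encoder_init(struct drm_device *drm, struct drm_crtc *crtc)
{
	struct demo_encoder *enc;

	/* @funcs may be NULL; the destroy hook in particular must be NULL,
	 * since cleanup is handled by the drmm_ action machinery */
	enc = drmm_encoder_alloc(drm, struct demo_encoder, base, NULL,
				 DRM_MODE_ENCODER_TMDS, NULL);
	if (IS_ERR(enc))
		return PTR_ERR(enc);

	/* bitmask of CRTCs this encoder can use; must be filled in before
	 * drm_dev_register() */
	enc->base.possible_crtcs = drm_crtc_mask(crtc);
	return 0;
}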
// SPDX-License-Identifier: GPL-2.0-only
/* DVB USB compliant Linux driver for the * - TwinhanDTV Alpha/MagicBoxII USB2.0 DVB-T receiver * - DigitalNow TinyUSB2 DVB-T receiver * * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de) * * Thanks to Twinhan who kindly provided hardware and information. * * see Documentation/driver-api/media/drivers/dvb-usb.rst for more information */
#include "vp7045.h"
/* debug */ static int dvb_usb_vp7045_debug; module_param_named(debug, dvb_usb_vp7045_debug, int, 0644); MODULE_PARM_DESC(debug, "set debugging level (1=info,xfer=2,rc=4 (or-able))." DVB_USB_DEBUG_STATUS);
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
#define deb_info(args...) dprintk(dvb_usb_vp7045_debug,0x01,args)
#define deb_xfer(args...) dprintk(dvb_usb_vp7045_debug,0x02,args)
#define deb_rc(args...) dprintk(dvb_usb_vp7045_debug,0x04,args)
int vp7045_usb_op(struct dvb_usb_device *d, u8 cmd, u8 *out, int outlen, u8 *in, int inlen, int msec) { int ret = 0; u8 *buf = d->priv; buf[0] = cmd; if (outlen > 19) outlen = 19; if (inlen > 11) inlen = 11; ret = mutex_lock_interruptible(&d->usb_mutex); if (ret) return ret; if (out != NULL && outlen > 0) memcpy(&buf[1], out, outlen); deb_xfer("out buffer: "); debug_dump(buf, outlen+1, deb_xfer); if (usb_control_msg(d->udev, usb_sndctrlpipe(d->udev, 0), TH_COMMAND_OUT, USB_TYPE_VENDOR | USB_DIR_OUT, 0, 0, buf, 20, 2000) != 20) { err("USB control message 'out' went wrong."); ret = -EIO; goto unlock; } msleep(msec); if (usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0), TH_COMMAND_IN, USB_TYPE_VENDOR | USB_DIR_IN, 0, 0, buf, 12, 2000) != 12) { err("USB control message 'in' went wrong."); ret = -EIO; goto unlock; } deb_xfer("in buffer: "); debug_dump(buf, 12, deb_xfer); if (in != NULL && inlen > 0) memcpy(in, &buf[1], inlen); unlock: mutex_unlock(&d->usb_mutex); return ret; }
u8 vp7045_read_reg(struct dvb_usb_device *d, u8 reg) { u8 obuf[2] = { 0 }, v; obuf[1] = reg; vp7045_usb_op(d, TUNER_REG_READ, obuf, 2, &v, 1, 30); return v; }
static int vp7045_power_ctrl(struct dvb_usb_device *d, int onoff) { u8 v = onoff; return vp7045_usb_op(d, SET_TUNER_POWER, &v, 1, NULL, 0, 150); }
static int vp7045_rc_query(struct dvb_usb_device *d) { int ret; u8 key; ret = vp7045_usb_op(d, RC_VAL_READ, NULL, 0, &key, 1, 20); if (ret) return ret; deb_rc("remote query key: %x\n", key); if (key != 0x44) { /* * The 8 bit address isn't available, but since the remote uses * address 0 we'll use that. NEC repeats are ignored too, even * though the remote sends them.
*/ rc_keydown(d->rc_dev, RC_PROTO_NEC, RC_SCANCODE_NEC(0, key), 0); } return 0; } static int vp7045_read_eeprom(struct dvb_usb_device *d,u8 *buf, int len, int offset) { int i, ret; u8 v, br[2]; for (i=0; i < len; i++) { v = offset + i; ret = vp7045_usb_op(d, GET_EE_VALUE, &v, 1, br, 2, 5); if (ret) return ret; buf[i] = br[1]; } deb_info("VP7045 EEPROM read (offs: %d, len: %d) : ", offset, i); debug_dump(buf, i, deb_info); return 0; } static int vp7045_read_mac_addr(struct dvb_usb_device *d,u8 mac[6]) { return vp7045_read_eeprom(d,mac, 6, MAC_0_ADDR); } static int vp7045_frontend_attach(struct dvb_usb_adapter *adap) { u8 buf[255] = { 0 }; vp7045_usb_op(adap->dev,VENDOR_STRING_READ,NULL,0,buf,20,0); buf[10] = '\0'; deb_info("firmware says: %s ",buf); vp7045_usb_op(adap->dev,PRODUCT_STRING_READ,NULL,0,buf,20,0); buf[10] = '\0'; deb_info("%s ",buf); vp7045_usb_op(adap->dev,FW_VERSION_READ,NULL,0,buf,20,0); buf[10] = '\0'; deb_info("v%s\n",buf); /* Dump the EEPROM */ /* vp7045_read_eeprom(d,buf, 255, FX2_ID_ADDR); */ adap->fe_adap[0].fe = vp7045_fe_attach(adap->dev); return 0; } static struct dvb_usb_device_properties vp7045_properties; static int vp7045_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) { return dvb_usb_device_init(intf, &vp7045_properties, THIS_MODULE, NULL, adapter_nr); } enum { VISIONPLUS_VP7045_COLD, VISIONPLUS_VP7045_WARM, VISIONPLUS_TINYUSB2_COLD, VISIONPLUS_TINYUSB2_WARM, }; static const struct usb_device_id vp7045_usb_table[] = { DVB_USB_DEV(VISIONPLUS, VISIONPLUS_VP7045_COLD), DVB_USB_DEV(VISIONPLUS, VISIONPLUS_VP7045_WARM), DVB_USB_DEV(VISIONPLUS, VISIONPLUS_TINYUSB2_COLD), DVB_USB_DEV(VISIONPLUS, VISIONPLUS_TINYUSB2_WARM), { } }; MODULE_DEVICE_TABLE(usb, vp7045_usb_table); static struct dvb_usb_device_properties vp7045_properties = { .usb_ctrl = CYPRESS_FX2, .firmware = "dvb-usb-vp7045-01.fw", .size_of_priv = 20, .num_adapters = 1, .adapter = { { .num_frontends = 1, .fe = {{ .frontend_attach = vp7045_frontend_attach, /* parameter for the MPEG2-data transfer */ .stream = { .type = USB_BULK, .count = 7, .endpoint = 0x02, .u = { .bulk = { .buffersize = 4096, } } }, }}, } }, .power_ctrl = vp7045_power_ctrl, .read_mac_address = vp7045_read_mac_addr, .rc.core = { .rc_interval = 400, .rc_codes = RC_MAP_TWINHAN_VP1027_DVBS, .module_name = KBUILD_MODNAME, .rc_query = vp7045_rc_query, .allowed_protos = RC_PROTO_BIT_NEC, .scancode_mask = 0xff, }, .num_device_descs = 2, .devices = { { .name = "Twinhan USB2.0 DVB-T receiver (TwinhanDTV Alpha/MagicBox II)", .cold_ids = { &vp7045_usb_table[VISIONPLUS_VP7045_COLD], NULL }, .warm_ids = { &vp7045_usb_table[VISIONPLUS_VP7045_WARM], NULL }, }, { .name = "DigitalNow TinyUSB 2 DVB-t Receiver", .cold_ids = { &vp7045_usb_table[VISIONPLUS_TINYUSB2_COLD], NULL }, .warm_ids = { &vp7045_usb_table[VISIONPLUS_TINYUSB2_WARM], NULL }, }, { NULL }, } }; /* usb specific object needed to register this driver with the usb subsystem */ static struct usb_driver vp7045_usb_driver = { .name = "dvb_usb_vp7045", .probe = vp7045_usb_probe, .disconnect = dvb_usb_device_exit, .id_table = vp7045_usb_table, }; module_usb_driver(vp7045_usb_driver); MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>"); MODULE_DESCRIPTION("Driver for Twinhan MagicBox/Alpha and DNTV tinyUSB2 DVB-T USB2.0"); MODULE_VERSION("1.0"); MODULE_LICENSE("GPL");
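/*
 * Editor's illustration (not part of vp7045.c above): vp7045_usb_op()
 * frames every vendor command as a fixed 20-byte OUT control transfer
 * (command byte plus up to 19 payload bytes) followed, after @msec
 * milliseconds, by a fixed 12-byte IN transfer whose payload starts at
 * offset 1. The hypothetical demo_read_tuner_reg() below is
 * vp7045_read_reg() rewritten with the error path made explicit; it
 * assumes the same "vp7045.h" context as the driver.
 */
static int demo_read_tuner_reg(struct dvb_usb_device *d, u8 reg, u8 *val)
{
	u8 obuf[2] = { 0, reg };	/* obuf[1] carries the register index,
					 * obuf[0] stays zero as in
					 * vp7045_read_reg() */

	/* 30 ms gap between the OUT and IN control messages */
	return vp7045_usb_op(d, TUNER_REG_READ, obuf, sizeof(obuf), val, 1, 30);
}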
// SPDX-License-Identifier: GPL-2.0-only
/* * Copyright (c) 2007-2012 Nicira, Inc. */
#include <linux/if_vlan.h> #include <linux/kernel.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/skbuff.h> #include <net/dst.h> #include <net/xfrm.h> #include <net/rtnetlink.h>
#include "datapath.h" #include "vport-internal_dev.h" #include "vport-netdev.h"
struct internal_dev { struct vport *vport; };
static struct vport_ops ovs_internal_vport_ops;
static struct internal_dev *internal_dev_priv(struct net_device *netdev) { return netdev_priv(netdev); }
/* Called with rcu_read_lock_bh. */ static netdev_tx_t internal_dev_xmit(struct sk_buff *skb, struct net_device *netdev) { int len, err; /* store len value because skb can be freed inside ovs_vport_receive() */ len = skb->len; rcu_read_lock(); err = ovs_vport_receive(internal_dev_priv(netdev)->vport, skb, NULL); rcu_read_unlock(); if (likely(!err)) dev_sw_netstats_tx_add(netdev, 1, len); else netdev->stats.tx_errors++; return NETDEV_TX_OK; }
static int internal_dev_open(struct net_device *netdev) { netif_start_queue(netdev); return 0; }
static int internal_dev_stop(struct net_device *netdev) { netif_stop_queue(netdev); return 0; }
static void internal_dev_getinfo(struct net_device *netdev, struct ethtool_drvinfo *info) { strscpy(info->driver, "openvswitch", sizeof(info->driver)); }
static const struct ethtool_ops internal_dev_ethtool_ops = { .get_drvinfo = internal_dev_getinfo, .get_link = ethtool_op_get_link, };
static void internal_dev_destructor(struct net_device *dev) { struct vport *vport = ovs_internal_dev_get_vport(dev); ovs_vport_free(vport); }
static const struct net_device_ops internal_dev_netdev_ops = { .ndo_open = internal_dev_open, .ndo_stop = internal_dev_stop, .ndo_start_xmit = internal_dev_xmit, .ndo_set_mac_address = eth_mac_addr, };
static struct rtnl_link_ops internal_dev_link_ops __read_mostly = { .kind = "openvswitch", };
static void do_setup(struct net_device *netdev) { ether_setup(netdev); netdev->max_mtu = ETH_MAX_MTU; netdev->netdev_ops = &internal_dev_netdev_ops; netdev->priv_flags &= ~IFF_TX_SKB_SHARING; netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_OPENVSWITCH | IFF_NO_QUEUE; netdev->lltx = true; netdev->needs_free_netdev = true; netdev->priv_destructor = NULL; netdev->ethtool_ops = &internal_dev_ethtool_ops; netdev->rtnl_link_ops = &internal_dev_link_ops; netdev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL; netdev->vlan_features = netdev->features; netdev->hw_enc_features = netdev->features;
netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX; netdev->hw_features = netdev->features; eth_hw_addr_random(netdev); } static struct vport *internal_dev_create(const struct vport_parms *parms) { struct vport *vport; struct internal_dev *internal_dev; struct net_device *dev; int err; vport = ovs_vport_alloc(0, &ovs_internal_vport_ops, parms); if (IS_ERR(vport)) { err = PTR_ERR(vport); goto error; } dev = alloc_netdev(sizeof(struct internal_dev), parms->name, NET_NAME_USER, do_setup); vport->dev = dev; if (!vport->dev) { err = -ENOMEM; goto error_free_vport; } dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS; dev_net_set(vport->dev, ovs_dp_get_net(vport->dp)); dev->ifindex = parms->desired_ifindex; internal_dev = internal_dev_priv(vport->dev); internal_dev->vport = vport; /* Restrict bridge port to current netns. */ if (vport->port_no == OVSP_LOCAL) vport->dev->netns_immutable = true; rtnl_lock(); err = register_netdevice(vport->dev); if (err) goto error_unlock; vport->dev->priv_destructor = internal_dev_destructor; dev_set_promiscuity(vport->dev, 1); rtnl_unlock(); netif_start_queue(vport->dev); return vport; error_unlock: rtnl_unlock(); free_netdev(dev); error_free_vport: ovs_vport_free(vport); error: return ERR_PTR(err); } static void internal_dev_destroy(struct vport *vport) { netif_stop_queue(vport->dev); rtnl_lock(); dev_set_promiscuity(vport->dev, -1); /* unregister_netdevice() waits for an RCU grace period. */ unregister_netdevice(vport->dev); rtnl_unlock(); } static int internal_dev_recv(struct sk_buff *skb) { struct net_device *netdev = skb->dev; if (unlikely(!(netdev->flags & IFF_UP))) { kfree_skb(skb); netdev->stats.rx_dropped++; return NETDEV_TX_OK; } skb_dst_drop(skb); nf_reset_ct(skb); skb->pkt_type = PACKET_HOST; skb->protocol = eth_type_trans(skb, netdev); skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); dev_sw_netstats_rx_add(netdev, skb->len); netif_rx(skb); return NETDEV_TX_OK; } static struct vport_ops ovs_internal_vport_ops = { .type = OVS_VPORT_TYPE_INTERNAL, .create = internal_dev_create, .destroy = internal_dev_destroy, .send = internal_dev_recv, }; int ovs_is_internal_dev(const struct net_device *netdev) { return netdev->netdev_ops == &internal_dev_netdev_ops; } struct vport *ovs_internal_dev_get_vport(struct net_device *netdev) { if (!ovs_is_internal_dev(netdev)) return NULL; return internal_dev_priv(netdev)->vport; } int ovs_internal_dev_rtnl_link_register(void) { int err; err = rtnl_link_register(&internal_dev_link_ops); if (err < 0) return err; err = ovs_vport_ops_register(&ovs_internal_vport_ops); if (err < 0) rtnl_link_unregister(&internal_dev_link_ops); return err; } void ovs_internal_dev_rtnl_link_unregister(void) { ovs_vport_ops_unregister(&ovs_internal_vport_ops); rtnl_link_unregister(&internal_dev_link_ops); }
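/*
 * Editor's illustration (not part of vport-internal_dev.c above): mapping
 * an arbitrary net_device back to its openvswitch port. It relies on
 * ovs_internal_dev_get_vport() returning NULL for non-internal devices;
 * demo_dev_to_port_no() is a hypothetical helper assuming the same
 * datapath headers as the file above.
 */
#include <linux/netdevice.h>
#include "vport.h"
#include "vport-internal_dev.h"

static int demo_dev_to_port_no(struct net_device *dev, u16 *port_no)
{
	struct vport *vport = ovs_internal_dev_get_vport(dev);

	if (!vport)
		return -EINVAL;	/* not an openvswitch internal device */
	*port_no = vport->port_no;	/* e.g. OVSP_LOCAL for the bridge port */
	return 0;
}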
// SPDX-License-Identifier: GPL-2.0-only
/* * Copyright 2002-2005, Instant802 Networks, Inc. * Copyright 2005-2006, Devicescape Software, Inc. * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2017 Intel Deutschland GmbH * Copyright (C) 2018-2025 Intel Corporation */
#include <net/mac80211.h> #include <linux/module.h> #include <linux/fips.h> #include <linux/init.h> #include <linux/netdevice.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/skbuff.h> #include <linux/etherdevice.h> #include <linux/if_arp.h> #include <linux/rtnetlink.h> #include <linux/bitmap.h> #include <linux/inetdevice.h> #include <net/net_namespace.h> #include <net/dropreason.h> #include <net/cfg80211.h> #include <net/addrconf.h>
#include "ieee80211_i.h" #include "driver-ops.h" #include "rate.h" #include "mesh.h" #include "wep.h" #include "led.h" #include "debugfs.h"
void ieee80211_configure_filter(struct ieee80211_local *local) { u64 mc; unsigned int changed_flags; unsigned int new_flags = 0; if (atomic_read(&local->iff_allmultis)) new_flags |= FIF_ALLMULTI; if (local->monitors || test_bit(SCAN_SW_SCANNING, &local->scanning) || test_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning)) new_flags |= FIF_BCN_PRBRESP_PROMISC; if (local->fif_probe_req || local->probe_req_reg) new_flags |= FIF_PROBE_REQ; if (local->fif_fcsfail) new_flags |= FIF_FCSFAIL; if (local->fif_plcpfail) new_flags |= FIF_PLCPFAIL; if (local->fif_control) new_flags |= FIF_CONTROL; if (local->fif_other_bss) new_flags |= FIF_OTHER_BSS; if (local->fif_pspoll) new_flags |= FIF_PSPOLL; if (local->rx_mcast_action_reg) new_flags |= FIF_MCAST_ACTION; spin_lock_bh(&local->filter_lock); changed_flags = local->filter_flags ^ new_flags; mc = drv_prepare_multicast(local, &local->mc_list); spin_unlock_bh(&local->filter_lock); /* be a bit nasty */ new_flags |= (1<<31); drv_configure_filter(local, changed_flags, &new_flags, mc); WARN_ON(new_flags & (1<<31)); local->filter_flags = new_flags & ~(1<<31); }
static void ieee80211_reconfig_filter(struct wiphy *wiphy, struct wiphy_work *work) { struct ieee80211_local *local = container_of(work, struct ieee80211_local, reconfig_filter); ieee80211_configure_filter(local); }
static u32 ieee80211_calc_hw_conf_chan(struct ieee80211_local *local, struct ieee80211_chanctx_conf *ctx) { struct ieee80211_sub_if_data *sdata; struct
cfg80211_chan_def chandef = {}; struct cfg80211_chan_def *oper = NULL; enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_STATIC; u32 changed = 0; int power; u32 offchannel_flag; if (!local->emulate_chanctx) return 0; offchannel_flag = local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL; if (ctx && !WARN_ON(!ctx->def.chan)) { oper = &ctx->def; if (ctx->rx_chains_static > 1) smps_mode = IEEE80211_SMPS_OFF; else if (ctx->rx_chains_dynamic > 1) smps_mode = IEEE80211_SMPS_DYNAMIC; else smps_mode = IEEE80211_SMPS_STATIC; } if (local->scan_chandef.chan) { chandef = local->scan_chandef; } else if (local->tmp_channel) { chandef.chan = local->tmp_channel; chandef.width = NL80211_CHAN_WIDTH_20_NOHT; chandef.center_freq1 = chandef.chan->center_freq; chandef.freq1_offset = chandef.chan->freq_offset; } else if (oper) { chandef = *oper; } else { chandef = local->dflt_chandef; } if (WARN(!cfg80211_chandef_valid(&chandef), "control:%d.%03d MHz width:%d center: %d.%03d/%d MHz", chandef.chan ? chandef.chan->center_freq : -1, chandef.chan ? chandef.chan->freq_offset : 0, chandef.width, chandef.center_freq1, chandef.freq1_offset, chandef.center_freq2)) return 0; if (!oper || !cfg80211_chandef_identical(&chandef, oper)) local->hw.conf.flags |= IEEE80211_CONF_OFFCHANNEL; else local->hw.conf.flags &= ~IEEE80211_CONF_OFFCHANNEL; offchannel_flag ^= local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL; /* force it also for scanning, since drivers might config differently */ if (offchannel_flag || local->scanning || local->in_reconfig || !cfg80211_chandef_identical(&local->hw.conf.chandef, &chandef)) { local->hw.conf.chandef = chandef; changed |= IEEE80211_CONF_CHANGE_CHANNEL; } if (!conf_is_ht(&local->hw.conf)) { /* * mac80211.h documents that this is only valid * when the channel is set to an HT type, and * that otherwise STATIC is used. */ local->hw.conf.smps_mode = IEEE80211_SMPS_STATIC; } else if (local->hw.conf.smps_mode != smps_mode) { local->hw.conf.smps_mode = smps_mode; changed |= IEEE80211_CONF_CHANGE_SMPS; } power = ieee80211_chandef_max_power(&chandef); if (local->user_power_level != IEEE80211_UNSET_POWER_LEVEL) power = min(local->user_power_level, power); rcu_read_lock(); list_for_each_entry_rcu(sdata, &local->interfaces, list) { if (!rcu_access_pointer(sdata->vif.bss_conf.chanctx_conf)) continue; if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) continue; if (sdata->vif.bss_conf.txpower == INT_MIN) continue; power = min(power, sdata->vif.bss_conf.txpower); } rcu_read_unlock(); if (local->hw.conf.power_level != power) { changed |= IEEE80211_CONF_CHANGE_POWER; local->hw.conf.power_level = power; } return changed; } int ieee80211_hw_config(struct ieee80211_local *local, int radio_idx, u32 changed) { int ret = 0; might_sleep(); WARN_ON(changed & (IEEE80211_CONF_CHANGE_CHANNEL | IEEE80211_CONF_CHANGE_POWER | IEEE80211_CONF_CHANGE_SMPS)); if (changed && local->open_count) { ret = drv_config(local, radio_idx, changed); /* * Goal: * HW reconfiguration should never fail, the driver has told * us what it can support so it should live up to that promise. 
* * Current status: * rfkill is not integrated with mac80211 and a * configuration command can thus fail if hardware rfkill * is enabled * * FIXME: integrate rfkill with mac80211 and then add this * WARN_ON() back * */ /* WARN_ON(ret); */ } return ret; } /* for scanning, offchannel and chanctx emulation only */ static int _ieee80211_hw_conf_chan(struct ieee80211_local *local, struct ieee80211_chanctx_conf *ctx) { u32 changed; if (!local->open_count) return 0; changed = ieee80211_calc_hw_conf_chan(local, ctx); if (!changed) return 0; return drv_config(local, -1, changed); } int ieee80211_hw_conf_chan(struct ieee80211_local *local) { struct ieee80211_chanctx *ctx; ctx = list_first_entry_or_null(&local->chanctx_list, struct ieee80211_chanctx, list); return _ieee80211_hw_conf_chan(local, ctx ? &ctx->conf : NULL); } void ieee80211_hw_conf_init(struct ieee80211_local *local) { u32 changed = ~(IEEE80211_CONF_CHANGE_CHANNEL | IEEE80211_CONF_CHANGE_POWER | IEEE80211_CONF_CHANGE_SMPS); if (WARN_ON(!local->open_count)) return; if (local->emulate_chanctx) { struct ieee80211_chanctx *ctx; ctx = list_first_entry_or_null(&local->chanctx_list, struct ieee80211_chanctx, list); changed |= ieee80211_calc_hw_conf_chan(local, ctx ? &ctx->conf : NULL); } WARN_ON(drv_config(local, -1, changed)); } int ieee80211_emulate_add_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *ctx) { struct ieee80211_local *local = hw_to_local(hw); local->hw.conf.radar_enabled = ctx->radar_enabled; return _ieee80211_hw_conf_chan(local, ctx); } EXPORT_SYMBOL(ieee80211_emulate_add_chanctx); void ieee80211_emulate_remove_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *ctx) { struct ieee80211_local *local = hw_to_local(hw); local->hw.conf.radar_enabled = false; _ieee80211_hw_conf_chan(local, NULL); } EXPORT_SYMBOL(ieee80211_emulate_remove_chanctx); void ieee80211_emulate_change_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *ctx, u32 changed) { struct ieee80211_local *local = hw_to_local(hw); local->hw.conf.radar_enabled = ctx->radar_enabled; _ieee80211_hw_conf_chan(local, ctx); } EXPORT_SYMBOL(ieee80211_emulate_change_chanctx); int ieee80211_emulate_switch_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif_chanctx_switch *vifs, int n_vifs, enum ieee80211_chanctx_switch_mode mode) { struct ieee80211_local *local = hw_to_local(hw); if (n_vifs <= 0) return -EINVAL; local->hw.conf.radar_enabled = vifs[0].new_ctx->radar_enabled; _ieee80211_hw_conf_chan(local, vifs[0].new_ctx); return 0; } EXPORT_SYMBOL(ieee80211_emulate_switch_vif_chanctx); #define BSS_CHANGED_VIF_CFG_FLAGS (BSS_CHANGED_ASSOC |\ BSS_CHANGED_IDLE |\ BSS_CHANGED_PS |\ BSS_CHANGED_IBSS |\ BSS_CHANGED_ARP_FILTER |\ BSS_CHANGED_SSID |\ BSS_CHANGED_MLD_VALID_LINKS |\ BSS_CHANGED_MLD_TTLM) void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata, u64 changed) { struct ieee80211_local *local = sdata->local; might_sleep(); WARN_ON_ONCE(ieee80211_vif_is_mld(&sdata->vif)); if (!changed || sdata->vif.type == NL80211_IFTYPE_AP_VLAN) return; if (WARN_ON_ONCE(changed & (BSS_CHANGED_BEACON | BSS_CHANGED_BEACON_ENABLED) && sdata->vif.type != NL80211_IFTYPE_AP && sdata->vif.type != NL80211_IFTYPE_ADHOC && sdata->vif.type != NL80211_IFTYPE_MESH_POINT && sdata->vif.type != NL80211_IFTYPE_OCB)) return; if (WARN_ON_ONCE(sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE || sdata->vif.type == NL80211_IFTYPE_NAN || (sdata->vif.type == NL80211_IFTYPE_MONITOR && !sdata->vif.bss_conf.mu_mimo_owner && !(changed & BSS_CHANGED_TXPOWER)))) 
return; if (!check_sdata_in_driver(sdata)) return; if (changed & BSS_CHANGED_VIF_CFG_FLAGS) { u64 ch = changed & BSS_CHANGED_VIF_CFG_FLAGS; trace_drv_vif_cfg_changed(local, sdata, changed); if (local->ops->vif_cfg_changed) local->ops->vif_cfg_changed(&local->hw, &sdata->vif, ch); } if (changed & ~BSS_CHANGED_VIF_CFG_FLAGS) { u64 ch = changed & ~BSS_CHANGED_VIF_CFG_FLAGS; trace_drv_link_info_changed(local, sdata, &sdata->vif.bss_conf, changed); if (local->ops->link_info_changed) local->ops->link_info_changed(&local->hw, &sdata->vif, &sdata->vif.bss_conf, ch); } if (local->ops->bss_info_changed) local->ops->bss_info_changed(&local->hw, &sdata->vif, &sdata->vif.bss_conf, changed); trace_drv_return_void(local); } void ieee80211_vif_cfg_change_notify(struct ieee80211_sub_if_data *sdata, u64 changed) { struct ieee80211_local *local = sdata->local; WARN_ON_ONCE(changed & ~BSS_CHANGED_VIF_CFG_FLAGS); if (!changed || sdata->vif.type == NL80211_IFTYPE_AP_VLAN) return; drv_vif_cfg_changed(local, sdata, changed); } void ieee80211_link_info_change_notify(struct ieee80211_sub_if_data *sdata, struct ieee80211_link_data *link, u64 changed) { struct ieee80211_local *local = sdata->local; WARN_ON_ONCE(changed & BSS_CHANGED_VIF_CFG_FLAGS); if (!changed || sdata->vif.type == NL80211_IFTYPE_AP_VLAN) return; if (!check_sdata_in_driver(sdata)) return; drv_link_info_changed(local, sdata, link->conf, link->link_id, changed); } u64 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata) { sdata->vif.bss_conf.use_cts_prot = false; sdata->vif.bss_conf.use_short_preamble = false; sdata->vif.bss_conf.use_short_slot = false; return BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_ERP_PREAMBLE | BSS_CHANGED_ERP_SLOT; } /* context: requires softirqs disabled */ void ieee80211_handle_queued_frames(struct ieee80211_local *local) { struct sk_buff *skb; while ((skb = skb_dequeue(&local->skb_queue)) || (skb = skb_dequeue(&local->skb_queue_unreliable))) { switch (skb->pkt_type) { case IEEE80211_RX_MSG: /* Clear skb->pkt_type in order to not confuse kernel * netstack. */ skb->pkt_type = 0; ieee80211_rx(&local->hw, skb); break; case IEEE80211_TX_STATUS_MSG: skb->pkt_type = 0; ieee80211_tx_status_skb(&local->hw, skb); break; default: WARN(1, "mac80211: Packet is of unknown type %d\n", skb->pkt_type); dev_kfree_skb(skb); break; } } } static void ieee80211_tasklet_handler(struct tasklet_struct *t) { struct ieee80211_local *local = from_tasklet(local, t, tasklet); ieee80211_handle_queued_frames(local); } static void ieee80211_restart_work(struct work_struct *work) { struct ieee80211_local *local = container_of(work, struct ieee80211_local, restart_work); struct ieee80211_sub_if_data *sdata; int ret; flush_workqueue(local->workqueue); rtnl_lock(); /* we might do interface manipulations, so need both */ wiphy_lock(local->hw.wiphy); wiphy_work_flush(local->hw.wiphy, NULL); WARN(test_bit(SCAN_HW_SCANNING, &local->scanning), "%s called with hardware scan in progress\n", __func__); list_for_each_entry(sdata, &local->interfaces, list) { /* * XXX: there may be more work for other vif types and even * for station mode: a good thing would be to run most of * the iface type's dependent _stop (ieee80211_mg_stop, * ieee80211_ibss_stop) etc... * For now, fix only the specific bug that was seen: race * between csa_connection_drop_work and us. */ if (sdata->vif.type == NL80211_IFTYPE_STATION) { /* * This worker is scheduled from the iface worker that * runs on mac80211's workqueue, so we can't be * scheduling this worker after the cancel right here. 
* The exception is ieee80211_chswitch_done. * Then we can have a race... */ wiphy_work_cancel(local->hw.wiphy, &sdata->u.mgd.csa_connection_drop_work); if (sdata->vif.bss_conf.csa_active) ieee80211_sta_connection_lost(sdata, WLAN_REASON_UNSPECIFIED, false); } wiphy_delayed_work_flush(local->hw.wiphy, &sdata->dec_tailroom_needed_wk); } ieee80211_scan_cancel(local); /* make sure any new ROC will consider local->in_reconfig */ wiphy_delayed_work_flush(local->hw.wiphy, &local->roc_work); wiphy_work_flush(local->hw.wiphy, &local->hw_roc_done); /* wait for all packet processing to be done */ synchronize_net(); ret = ieee80211_reconfig(local); wiphy_unlock(local->hw.wiphy); if (ret) cfg80211_shutdown_all_interfaces(local->hw.wiphy); rtnl_unlock(); } void ieee80211_restart_hw(struct ieee80211_hw *hw) { struct ieee80211_local *local = hw_to_local(hw); trace_api_restart_hw(local); wiphy_info(hw->wiphy, "Hardware restart was requested\n"); /* use this reason, ieee80211_reconfig will unblock it */ ieee80211_stop_queues_by_reason(hw, IEEE80211_MAX_QUEUE_MAP, IEEE80211_QUEUE_STOP_REASON_SUSPEND, false); /* * Stop all Rx during the reconfig. We don't want state changes * or driver callbacks while this is in progress. */ local->in_reconfig = true; barrier(); queue_work(system_freezable_wq, &local->restart_work); } EXPORT_SYMBOL(ieee80211_restart_hw); #ifdef CONFIG_INET static int ieee80211_ifa_changed(struct notifier_block *nb, unsigned long data, void *arg) { struct in_ifaddr *ifa = arg; struct ieee80211_local *local = container_of(nb, struct ieee80211_local, ifa_notifier); struct net_device *ndev = ifa->ifa_dev->dev; struct wireless_dev *wdev = ndev->ieee80211_ptr; struct in_device *idev; struct ieee80211_sub_if_data *sdata; struct ieee80211_vif_cfg *vif_cfg; struct ieee80211_if_managed *ifmgd; int c = 0; /* Make sure it's our interface that got changed */ if (!wdev) return NOTIFY_DONE; if (wdev->wiphy != local->hw.wiphy || !wdev->registered) return NOTIFY_DONE; sdata = IEEE80211_DEV_TO_SUB_IF(ndev); vif_cfg = &sdata->vif.cfg; /* ARP filtering is only supported in managed mode */ if (sdata->vif.type != NL80211_IFTYPE_STATION) return NOTIFY_DONE; idev = __in_dev_get_rtnl(sdata->dev); if (!idev) return NOTIFY_DONE; ifmgd = &sdata->u.mgd; /* * The nested here is needed to convince lockdep that this is * all OK. Yes, we lock the wiphy mutex here while we already * hold the notifier rwsem, that's the normal case. And yes, * we also acquire the notifier rwsem again when unregistering * a netdev while we already hold the wiphy mutex, so it does * look like a typical ABBA deadlock. * * However, both of these things happen with the RTNL held * already. Therefore, they can't actually happen, since the * lock orders really are ABC and ACB, which is fine due to * the RTNL (A). * * We still need to prevent recursion, which is accomplished * by the !wdev->registered check above. 
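	 * Spelled out (with the labels A = RTNL, B = wiphy mutex and
	 * C = notifier rwsem assumed here purely for illustration):
	 * this notifier path takes A -> C -> B, while unregistering a
	 * netdev takes A -> B -> C; since A is always acquired first,
	 * the B/C inversion can never actually deadlock.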
*/ mutex_lock_nested(&local->hw.wiphy->mtx, 1); __acquire(&local->hw.wiphy->mtx); /* Copy the addresses to the vif config list */ ifa = rtnl_dereference(idev->ifa_list); while (ifa) { if (c < IEEE80211_BSS_ARP_ADDR_LIST_LEN) vif_cfg->arp_addr_list[c] = ifa->ifa_address; ifa = rtnl_dereference(ifa->ifa_next); c++; } vif_cfg->arp_addr_cnt = c; /* Configure driver only if associated (which also implies it is up) */ if (ifmgd->associated) ieee80211_vif_cfg_change_notify(sdata, BSS_CHANGED_ARP_FILTER); wiphy_unlock(local->hw.wiphy); return NOTIFY_OK; } #endif #if IS_ENABLED(CONFIG_IPV6) static int ieee80211_ifa6_changed(struct notifier_block *nb, unsigned long data, void *arg) { struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)arg; struct inet6_dev *idev = ifa->idev; struct net_device *ndev = ifa->idev->dev; struct ieee80211_local *local = container_of(nb, struct ieee80211_local, ifa6_notifier); struct wireless_dev *wdev = ndev->ieee80211_ptr; struct ieee80211_sub_if_data *sdata; /* Make sure it's our interface that got changed */ if (!wdev || wdev->wiphy != local->hw.wiphy) return NOTIFY_DONE; sdata = IEEE80211_DEV_TO_SUB_IF(ndev); /* * For now only support station mode. This is mostly because * doing AP would have to handle AP_VLAN in some way ... */ if (sdata->vif.type != NL80211_IFTYPE_STATION) return NOTIFY_DONE; drv_ipv6_addr_change(local, sdata, idev); return NOTIFY_OK; } #endif /* There isn't a lot of sense in it, but you can transmit anything you like */ static const struct ieee80211_txrx_stypes ieee80211_default_mgmt_stypes[NUM_NL80211_IFTYPES] = { [NL80211_IFTYPE_ADHOC] = { .tx = 0xffff, .rx = BIT(IEEE80211_STYPE_ACTION >> 4) | BIT(IEEE80211_STYPE_AUTH >> 4) | BIT(IEEE80211_STYPE_DEAUTH >> 4) | BIT(IEEE80211_STYPE_PROBE_REQ >> 4), }, [NL80211_IFTYPE_STATION] = { .tx = 0xffff, /* * To support Pre Association Security Negotiation (PASN) while * already associated to one AP, allow user space to register to * Rx authentication frames, so that the user space logic would * be able to receive/handle authentication frames from a * different AP as part of PASN. * It is expected that user space would intelligently register * for Rx authentication frames, i.e., only when PASN is used * and configure a match filter only for PASN authentication * algorithm, as otherwise the MLME functionality of mac80211 * would be broken. 
*/ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) | BIT(IEEE80211_STYPE_AUTH >> 4) | BIT(IEEE80211_STYPE_PROBE_REQ >> 4), }, [NL80211_IFTYPE_AP] = { .tx = 0xffff, .rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) | BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) | BIT(IEEE80211_STYPE_PROBE_REQ >> 4) | BIT(IEEE80211_STYPE_DISASSOC >> 4) | BIT(IEEE80211_STYPE_AUTH >> 4) | BIT(IEEE80211_STYPE_DEAUTH >> 4) | BIT(IEEE80211_STYPE_ACTION >> 4), }, [NL80211_IFTYPE_AP_VLAN] = { /* copy AP */ .tx = 0xffff, .rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) | BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) | BIT(IEEE80211_STYPE_PROBE_REQ >> 4) | BIT(IEEE80211_STYPE_DISASSOC >> 4) | BIT(IEEE80211_STYPE_AUTH >> 4) | BIT(IEEE80211_STYPE_DEAUTH >> 4) | BIT(IEEE80211_STYPE_ACTION >> 4), }, [NL80211_IFTYPE_P2P_CLIENT] = { .tx = 0xffff, .rx = BIT(IEEE80211_STYPE_ACTION >> 4) | BIT(IEEE80211_STYPE_PROBE_REQ >> 4), }, [NL80211_IFTYPE_P2P_GO] = { .tx = 0xffff, .rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) | BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) | BIT(IEEE80211_STYPE_PROBE_REQ >> 4) | BIT(IEEE80211_STYPE_DISASSOC >> 4) | BIT(IEEE80211_STYPE_AUTH >> 4) | BIT(IEEE80211_STYPE_DEAUTH >> 4) | BIT(IEEE80211_STYPE_ACTION >> 4), }, [NL80211_IFTYPE_MESH_POINT] = { .tx = 0xffff, .rx = BIT(IEEE80211_STYPE_ACTION >> 4) | BIT(IEEE80211_STYPE_AUTH >> 4) | BIT(IEEE80211_STYPE_DEAUTH >> 4), }, [NL80211_IFTYPE_P2P_DEVICE] = { .tx = 0xffff, /* * To support P2P PASN pairing let user space register to rx * also AUTH frames on P2P device interface. */ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) | BIT(IEEE80211_STYPE_PROBE_REQ >> 4) | BIT(IEEE80211_STYPE_AUTH >> 4), }, }; static const struct ieee80211_ht_cap mac80211_ht_capa_mod_mask = { .ampdu_params_info = IEEE80211_HT_AMPDU_PARM_FACTOR | IEEE80211_HT_AMPDU_PARM_DENSITY, .cap_info = cpu_to_le16(IEEE80211_HT_CAP_SUP_WIDTH_20_40 | IEEE80211_HT_CAP_MAX_AMSDU | IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_TX_STBC | IEEE80211_HT_CAP_RX_STBC | IEEE80211_HT_CAP_LDPC_CODING | IEEE80211_HT_CAP_40MHZ_INTOLERANT), .mcs = { .rx_mask = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, }, }, }; static const struct ieee80211_vht_cap mac80211_vht_capa_mod_mask = { .vht_cap_info = cpu_to_le32(IEEE80211_VHT_CAP_RXLDPC | IEEE80211_VHT_CAP_SHORT_GI_80 | IEEE80211_VHT_CAP_SHORT_GI_160 | IEEE80211_VHT_CAP_RXSTBC_MASK | IEEE80211_VHT_CAP_TXSTBC | IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN | IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN | IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK), .supp_mcs = { .rx_mcs_map = cpu_to_le16(~0), .tx_mcs_map = cpu_to_le16(~0), }, }; struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len, const struct ieee80211_ops *ops, const char *requested_name) { struct ieee80211_local *local; int priv_size, i; struct wiphy *wiphy; bool emulate_chanctx; if (WARN_ON(!ops->tx || !ops->start || !ops->stop || !ops->config || !ops->add_interface || !ops->remove_interface || !ops->configure_filter || !ops->wake_tx_queue)) return NULL; if (WARN_ON(ops->sta_state && (ops->sta_add || ops->sta_remove))) return NULL; if (WARN_ON(!!ops->link_info_changed != !!ops->vif_cfg_changed || (ops->link_info_changed && ops->bss_info_changed))) return NULL; /* check all or no channel context operations exist */ if (ops->add_chanctx == ieee80211_emulate_add_chanctx && ops->remove_chanctx == ieee80211_emulate_remove_chanctx && ops->change_chanctx == ieee80211_emulate_change_chanctx) { if (WARN_ON(ops->assign_vif_chanctx || 
ops->unassign_vif_chanctx)) return NULL; emulate_chanctx = true; } else { if (WARN_ON(ops->add_chanctx == ieee80211_emulate_add_chanctx || ops->remove_chanctx == ieee80211_emulate_remove_chanctx || ops->change_chanctx == ieee80211_emulate_change_chanctx)) return NULL; if (WARN_ON(!ops->add_chanctx || !ops->remove_chanctx || !ops->change_chanctx || !ops->assign_vif_chanctx || !ops->unassign_vif_chanctx)) return NULL; emulate_chanctx = false; } /* Ensure 32-byte alignment of our private data and hw private data. * We use the wiphy priv data for both our ieee80211_local and for * the driver's private data * * In memory it'll be like this: * * +-------------------------+ * | struct wiphy | * +-------------------------+ * | struct ieee80211_local | * +-------------------------+ * | driver's private data | * +-------------------------+ * */ priv_size = ALIGN(sizeof(*local), NETDEV_ALIGN) + priv_data_len; wiphy = wiphy_new_nm(&mac80211_config_ops, priv_size, requested_name); if (!wiphy) return NULL; wiphy->mgmt_stypes = ieee80211_default_mgmt_stypes; wiphy->privid = mac80211_wiphy_privid; wiphy->flags |= WIPHY_FLAG_NETNS_OK | WIPHY_FLAG_4ADDR_AP | WIPHY_FLAG_4ADDR_STATION | WIPHY_FLAG_REPORTS_OBSS | WIPHY_FLAG_OFFCHAN_TX; if (emulate_chanctx || ops->remain_on_channel) wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; wiphy->features |= NL80211_FEATURE_SK_TX_STATUS | NL80211_FEATURE_SAE | NL80211_FEATURE_HT_IBSS | NL80211_FEATURE_VIF_TXPOWER | NL80211_FEATURE_MAC_ON_CREATE | NL80211_FEATURE_USERSPACE_MPM | NL80211_FEATURE_FULL_AP_CLIENT_STATE; wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_FILS_STA); wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211); wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CONTROL_PORT_NO_PREAUTH); wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211_TX_STATUS); wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_SCAN_FREQ_KHZ); wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_POWERED_ADDR_CHANGE); if (!ops->hw_scan) { wiphy->features |= NL80211_FEATURE_LOW_PRIORITY_SCAN | NL80211_FEATURE_AP_SCAN; /* * if the driver behaves correctly using the probe request * (template) from mac80211, then both of these should be * supported even with hw scan - but let drivers opt in. */ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_SCAN_RANDOM_SN); wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_SCAN_MIN_PREQ_CONTENT); } if (!ops->set_key) { wiphy->flags |= WIPHY_FLAG_IBSS_RSN; wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_SPP_AMSDU_SUPPORT); } wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_TXQS); wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_RRM); wiphy->bss_priv_size = sizeof(struct ieee80211_bss); local = wiphy_priv(wiphy); if (sta_info_init(local)) goto err_free; local->hw.wiphy = wiphy; local->hw.priv = (char *)local + ALIGN(sizeof(*local), NETDEV_ALIGN); local->ops = ops; local->emulate_chanctx = emulate_chanctx; if (emulate_chanctx) ieee80211_hw_set(&local->hw, CHANCTX_STA_CSA); /* * We need a bit of data queued to build aggregates properly, so * instruct the TCP stack to allow more than a single ms of data * to be queued in the stack. The value is a bit-shift of 1 * second, so 7 is ~8ms of queued data. Only affects local TCP * sockets. * This is the default, anyhow - drivers may need to override it * for local reasons (longer buffers, longer completion time, or * similar). 
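	 * As a worked example of that bit-shift: a shift of 7 allows
	 * 1 s / 2^7 = 1000 ms / 128, i.e. roughly 8 ms of queued data,
	 * which is the default set just below.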
*/ local->hw.tx_sk_pacing_shift = 7; /* set up some defaults */ local->hw.queues = 1; local->hw.max_rates = 1; local->hw.max_report_rates = 0; local->hw.max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HT; local->hw.max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HT; local->hw.offchannel_tx_hw_queue = IEEE80211_INVAL_HW_QUEUE; local->hw.conf.long_frame_max_tx_count = wiphy->retry_long; local->hw.conf.short_frame_max_tx_count = wiphy->retry_short; local->hw.radiotap_mcs_details = IEEE80211_RADIOTAP_MCS_HAVE_MCS | IEEE80211_RADIOTAP_MCS_HAVE_GI | IEEE80211_RADIOTAP_MCS_HAVE_BW; local->hw.radiotap_vht_details = IEEE80211_RADIOTAP_VHT_KNOWN_GI | IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH; local->hw.uapsd_queues = IEEE80211_DEFAULT_UAPSD_QUEUES; local->hw.uapsd_max_sp_len = IEEE80211_DEFAULT_MAX_SP_LEN; local->hw.max_mtu = IEEE80211_MAX_DATA_LEN; local->user_power_level = IEEE80211_UNSET_POWER_LEVEL; wiphy->ht_capa_mod_mask = &mac80211_ht_capa_mod_mask; wiphy->vht_capa_mod_mask = &mac80211_vht_capa_mod_mask; local->ext_capa[7] = WLAN_EXT_CAPA8_OPMODE_NOTIF; wiphy->extended_capabilities = local->ext_capa; wiphy->extended_capabilities_mask = local->ext_capa; wiphy->extended_capabilities_len = ARRAY_SIZE(local->ext_capa); INIT_LIST_HEAD(&local->interfaces); INIT_LIST_HEAD(&local->mon_list); __hw_addr_init(&local->mc_list); mutex_init(&local->iflist_mtx); spin_lock_init(&local->filter_lock); spin_lock_init(&local->rx_path_lock); spin_lock_init(&local->queue_stop_reason_lock); for (i = 0; i < IEEE80211_NUM_ACS; i++) { INIT_LIST_HEAD(&local->active_txqs[i]); spin_lock_init(&local->active_txq_lock[i]); local->aql_txq_limit_low[i] = IEEE80211_DEFAULT_AQL_TXQ_LIMIT_L; local->aql_txq_limit_high[i] = IEEE80211_DEFAULT_AQL_TXQ_LIMIT_H; atomic_set(&local->aql_ac_pending_airtime[i], 0); } local->airtime_flags = AIRTIME_USE_TX | AIRTIME_USE_RX; local->aql_threshold = IEEE80211_AQL_THRESHOLD; atomic_set(&local->aql_total_pending_airtime, 0); spin_lock_init(&local->handle_wake_tx_queue_lock); INIT_LIST_HEAD(&local->chanctx_list); wiphy_delayed_work_init(&local->scan_work, ieee80211_scan_work); INIT_WORK(&local->restart_work, ieee80211_restart_work); wiphy_work_init(&local->radar_detected_work, ieee80211_dfs_radar_detected_work); wiphy_work_init(&local->reconfig_filter, ieee80211_reconfig_filter); wiphy_work_init(&local->dynamic_ps_enable_work, ieee80211_dynamic_ps_enable_work); wiphy_work_init(&local->dynamic_ps_disable_work, ieee80211_dynamic_ps_disable_work); timer_setup(&local->dynamic_ps_timer, ieee80211_dynamic_ps_timer, 0); wiphy_work_init(&local->sched_scan_stopped_work, ieee80211_sched_scan_stopped_work); spin_lock_init(&local->ack_status_lock); idr_init(&local->ack_status_frames); for (i = 0; i < IEEE80211_MAX_QUEUES; i++) { skb_queue_head_init(&local->pending[i]); atomic_set(&local->agg_queue_stop[i], 0); } tasklet_setup(&local->tx_pending_tasklet, ieee80211_tx_pending); tasklet_setup(&local->wake_txqs_tasklet, ieee80211_wake_txqs); tasklet_setup(&local->tasklet, ieee80211_tasklet_handler); skb_queue_head_init(&local->skb_queue); skb_queue_head_init(&local->skb_queue_unreliable); ieee80211_alloc_led_names(local); ieee80211_roc_setup(local); local->hw.radiotap_timestamp.units_pos = -1; local->hw.radiotap_timestamp.accuracy = -1; return &local->hw; err_free: wiphy_free(wiphy); return NULL; } EXPORT_SYMBOL(ieee80211_alloc_hw_nm); static int ieee80211_init_cipher_suites(struct ieee80211_local *local) { bool have_mfp = ieee80211_hw_check(&local->hw, MFP_CAPABLE); static const u32 cipher_suites[] = 
{ /* keep WEP and TKIP first, they may be removed below */ WLAN_CIPHER_SUITE_WEP40, WLAN_CIPHER_SUITE_WEP104, WLAN_CIPHER_SUITE_TKIP, WLAN_CIPHER_SUITE_CCMP, WLAN_CIPHER_SUITE_CCMP_256, WLAN_CIPHER_SUITE_GCMP, WLAN_CIPHER_SUITE_GCMP_256, /* keep last -- depends on hw flags! */ WLAN_CIPHER_SUITE_AES_CMAC, WLAN_CIPHER_SUITE_BIP_CMAC_256, WLAN_CIPHER_SUITE_BIP_GMAC_128, WLAN_CIPHER_SUITE_BIP_GMAC_256, }; if (ieee80211_hw_check(&local->hw, SW_CRYPTO_CONTROL) && fips_enabled) { dev_err(local->hw.wiphy->dev.parent, "Drivers with SW_CRYPTO_CONTROL cannot work with FIPS\n"); return -EINVAL; } if (WARN_ON(ieee80211_hw_check(&local->hw, SW_CRYPTO_CONTROL) && !local->hw.wiphy->cipher_suites)) return -EINVAL; if (fips_enabled || !local->hw.wiphy->cipher_suites) { /* assign the (software supported and perhaps offloaded) * cipher suites */ local->hw.wiphy->cipher_suites = cipher_suites; local->hw.wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites); if (!have_mfp) local->hw.wiphy->n_cipher_suites -= 4; /* FIPS does not permit the use of RC4 */ if (fips_enabled) { local->hw.wiphy->cipher_suites += 3; local->hw.wiphy->n_cipher_suites -= 3; } } return 0; } static bool ieee80211_ifcomb_check(const struct ieee80211_iface_combination *c, int n_comb) { int i, j; for (i = 0; i < n_comb; i++, c++) { /* DFS is not supported with multi-channel combinations yet */ if (c->radar_detect_widths && c->num_different_channels > 1) return false; /* mac80211 doesn't support more than one IBSS interface */ for (j = 0; j < c->n_limits; j++) if ((c->limits[j].types & BIT(NL80211_IFTYPE_ADHOC)) && c->limits[j].max > 1) return false; } return true; } int ieee80211_register_hw(struct ieee80211_hw *hw) { struct ieee80211_local *local = hw_to_local(hw); int result, i; enum nl80211_band band; int channels, max_bitrates; bool supp_ht, supp_vht, supp_he, supp_eht; struct cfg80211_chan_def dflt_chandef = {}; if (ieee80211_hw_check(hw, QUEUE_CONTROL) && (local->hw.offchannel_tx_hw_queue == IEEE80211_INVAL_HW_QUEUE || local->hw.offchannel_tx_hw_queue >= local->hw.queues)) return -EINVAL; if ((hw->wiphy->features & NL80211_FEATURE_TDLS_CHANNEL_SWITCH) && (!local->ops->tdls_channel_switch || !local->ops->tdls_cancel_channel_switch || !local->ops->tdls_recv_channel_switch)) return -EOPNOTSUPP; if (WARN_ON(ieee80211_hw_check(hw, SUPPORTS_TX_FRAG) && !local->ops->set_frag_threshold)) return -EINVAL; if (WARN_ON(local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_NAN) && (!local->ops->start_nan || !local->ops->stop_nan))) return -EINVAL; if (hw->wiphy->flags & WIPHY_FLAG_SUPPORTS_MLO) { /* * For drivers capable of doing MLO, assume modern driver * or firmware facilities, so software doesn't have to do * as much, e.g. monitoring beacons would be hard if we * might not even know which link is active at which time. 
*/ if (WARN_ON(local->emulate_chanctx)) return -EINVAL; if (WARN_ON(!local->ops->link_info_changed)) return -EINVAL; if (WARN_ON(!ieee80211_hw_check(hw, HAS_RATE_CONTROL))) return -EINVAL; if (WARN_ON(!ieee80211_hw_check(hw, AMPDU_AGGREGATION))) return -EINVAL; if (WARN_ON(ieee80211_hw_check(hw, HOST_BROADCAST_PS_BUFFERING))) return -EINVAL; if (WARN_ON(ieee80211_hw_check(hw, SUPPORTS_PS) && (!ieee80211_hw_check(hw, SUPPORTS_DYNAMIC_PS) || ieee80211_hw_check(hw, PS_NULLFUNC_STACK)))) return -EINVAL; if (WARN_ON(!ieee80211_hw_check(hw, MFP_CAPABLE))) return -EINVAL; if (WARN_ON(!ieee80211_hw_check(hw, CONNECTION_MONITOR))) return -EINVAL; if (WARN_ON(ieee80211_hw_check(hw, NEED_DTIM_BEFORE_ASSOC))) return -EINVAL; if (WARN_ON(ieee80211_hw_check(hw, TIMING_BEACON_ONLY))) return -EINVAL; if (WARN_ON(!ieee80211_hw_check(hw, AP_LINK_PS))) return -EINVAL; } #ifdef CONFIG_PM if (hw->wiphy->wowlan && (!local->ops->suspend || !local->ops->resume)) return -EINVAL; #endif if (local->emulate_chanctx) { for (i = 0; i < local->hw.wiphy->n_iface_combinations; i++) { const struct ieee80211_iface_combination *comb; comb = &local->hw.wiphy->iface_combinations[i]; if (comb->num_different_channels > 1) return -EINVAL; } } if (hw->wiphy->n_radio) { for (i = 0; i < hw->wiphy->n_radio; i++) { const struct wiphy_radio *radio = &hw->wiphy->radio[i]; if (!ieee80211_ifcomb_check(radio->iface_combinations, radio->n_iface_combinations)) return -EINVAL; } } else { if (!ieee80211_ifcomb_check(hw->wiphy->iface_combinations, hw->wiphy->n_iface_combinations)) return -EINVAL; } /* Only HW csum features are currently compatible with mac80211 */ if (WARN_ON(hw->netdev_features & ~MAC80211_SUPPORTED_FEATURES)) return -EINVAL; if (hw->max_report_rates == 0) hw->max_report_rates = hw->max_rates; local->rx_chains = 1; /* * generic code guarantees at least one band, * set this very early because much code assumes * that hw.conf.channel is assigned */ channels = 0; max_bitrates = 0; supp_ht = false; supp_vht = false; supp_he = false; supp_eht = false; for (band = 0; band < NUM_NL80211_BANDS; band++) { const struct ieee80211_sband_iftype_data *iftd; struct ieee80211_supported_band *sband; sband = local->hw.wiphy->bands[band]; if (!sband) continue; if (!dflt_chandef.chan) { /* * Assign the first enabled channel to dflt_chandef * from the list of channels */ for (i = 0; i < sband->n_channels; i++) if (!(sband->channels[i].flags & IEEE80211_CHAN_DISABLED)) break; /* if none found then use the first anyway */ if (i == sband->n_channels) i = 0; cfg80211_chandef_create(&dflt_chandef, &sband->channels[i], NL80211_CHAN_NO_HT); /* init channel we're on */ local->monitor_chanreq.oper = dflt_chandef; if (local->emulate_chanctx) { local->dflt_chandef = dflt_chandef; local->hw.conf.chandef = dflt_chandef; } } channels += sband->n_channels; /* * Due to the way the aggregation code handles this and it * being an HT capability, we can't really support delayed * BA in MLO (yet). 
*/ if (WARN_ON(sband->ht_cap.ht_supported && (sband->ht_cap.cap & IEEE80211_HT_CAP_DELAY_BA) && hw->wiphy->flags & WIPHY_FLAG_SUPPORTS_MLO)) return -EINVAL; if (max_bitrates < sband->n_bitrates) max_bitrates = sband->n_bitrates; supp_ht = supp_ht || sband->ht_cap.ht_supported; supp_vht = supp_vht || sband->vht_cap.vht_supported; for_each_sband_iftype_data(sband, i, iftd) { u8 he_40_mhz_cap; supp_he = supp_he || iftd->he_cap.has_he; supp_eht = supp_eht || iftd->eht_cap.has_eht; if (band == NL80211_BAND_2GHZ) he_40_mhz_cap = IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G; else he_40_mhz_cap = IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G; /* currently no support for HE client where HT has 40 MHz but not HT */ if (iftd->he_cap.has_he && iftd->types_mask & (BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_P2P_CLIENT)) && sband->ht_cap.ht_supported && sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 && !(iftd->he_cap.he_cap_elem.phy_cap_info[0] & he_40_mhz_cap)) return -EINVAL; /* no support for per-band vendor elems with MLO */ if (WARN_ON(iftd->vendor_elems.len && hw->wiphy->flags & WIPHY_FLAG_SUPPORTS_MLO)) return -EINVAL; } /* HT, VHT, HE require QoS, thus >= 4 queues */ if (WARN_ON(local->hw.queues < IEEE80211_NUM_ACS && (supp_ht || supp_vht || supp_he))) return -EINVAL; /* EHT requires HE support */ if (WARN_ON(supp_eht && !supp_he)) return -EINVAL; if (!sband->ht_cap.ht_supported) continue; /* TODO: consider VHT for RX chains, hopefully it's the same */ local->rx_chains = max(ieee80211_mcs_to_chains(&sband->ht_cap.mcs), local->rx_chains); /* no need to mask, SM_PS_DISABLED has all bits set */ sband->ht_cap.cap |= WLAN_HT_CAP_SM_PS_DISABLED << IEEE80211_HT_CAP_SM_PS_SHIFT; } /* if low-level driver supports AP, we also support VLAN. * drivers advertising SW_CRYPTO_CONTROL should enable AP_VLAN * based on their support to transmit SW encrypted packets. 
*/ if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_AP) && !ieee80211_hw_check(&local->hw, SW_CRYPTO_CONTROL)) { hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP_VLAN); hw->wiphy->software_iftypes |= BIT(NL80211_IFTYPE_AP_VLAN); } /* mac80211 always supports monitor */ hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_MONITOR); hw->wiphy->software_iftypes |= BIT(NL80211_IFTYPE_MONITOR); local->int_scan_req = kzalloc(struct_size(local->int_scan_req, channels, channels), GFP_KERNEL); if (!local->int_scan_req) return -ENOMEM; eth_broadcast_addr(local->int_scan_req->bssid); for (band = 0; band < NUM_NL80211_BANDS; band++) { if (!local->hw.wiphy->bands[band]) continue; local->int_scan_req->rates[band] = (u32) -1; } #ifndef CONFIG_MAC80211_MESH /* mesh depends on Kconfig, but drivers should set it if they want */ local->hw.wiphy->interface_modes &= ~BIT(NL80211_IFTYPE_MESH_POINT); #endif /* if the underlying driver supports mesh, mac80211 will (at least) * provide routing of mesh authentication frames to userspace */ if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_MESH_POINT)) local->hw.wiphy->flags |= WIPHY_FLAG_MESH_AUTH; /* mac80211 supports control port protocol changing */ local->hw.wiphy->flags |= WIPHY_FLAG_CONTROL_PORT_PROTOCOL; if (ieee80211_hw_check(&local->hw, SIGNAL_DBM)) { local->hw.wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM; } else if (ieee80211_hw_check(&local->hw, SIGNAL_UNSPEC)) { local->hw.wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC; if (hw->max_signal <= 0) { result = -EINVAL; goto fail_workqueue; } } /* Mac80211 and therefore all drivers using SW crypto only * are able to handle PTK rekeys and Extended Key ID. */ if (!local->ops->set_key) { wiphy_ext_feature_set(local->hw.wiphy, NL80211_EXT_FEATURE_CAN_REPLACE_PTK0); wiphy_ext_feature_set(local->hw.wiphy, NL80211_EXT_FEATURE_EXT_KEY_ID); } if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_ADHOC)) wiphy_ext_feature_set(local->hw.wiphy, NL80211_EXT_FEATURE_DEL_IBSS_STA); /* * Calculate scan IE length -- we need this to alloc * memory and to subtract from the driver limit. It * includes the DS Params, (extended) supported rates, and HT * information -- SSID is the driver's responsibility. */ local->scan_ies_len = 4 + max_bitrates /* (ext) supp rates */ + 3 /* DS Params */; if (supp_ht) local->scan_ies_len += 2 + sizeof(struct ieee80211_ht_cap); if (supp_vht) local->scan_ies_len += 2 + sizeof(struct ieee80211_vht_cap); /* * HE cap element is variable in size - set len to allow max size */ if (supp_he) { local->scan_ies_len += 3 + sizeof(struct ieee80211_he_cap_elem) + sizeof(struct ieee80211_he_mcs_nss_supp) + IEEE80211_HE_PPE_THRES_MAX_LEN; if (supp_eht) local->scan_ies_len += 3 + sizeof(struct ieee80211_eht_cap_elem) + sizeof(struct ieee80211_eht_mcs_nss_supp) + IEEE80211_EHT_PPE_THRES_MAX_LEN; } if (!local->ops->hw_scan) { /* For hw_scan, driver needs to set these up. */ local->hw.wiphy->max_scan_ssids = 4; local->hw.wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN; } /* * If the driver supports any scan IEs, then assume the * limit includes the IEs mac80211 will add, otherwise * leave it at zero and let the driver sort it out; we * still pass our IEs to the driver but userspace will * not be allowed to in that case. 
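	 * (Illustration with an assumed figure: a driver advertising a
	 * 512-byte max_scan_ie_len would end up offering userspace
	 * 512 - scan_ies_len bytes after the subtraction below.)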
*/ if (local->hw.wiphy->max_scan_ie_len) local->hw.wiphy->max_scan_ie_len -= local->scan_ies_len; result = ieee80211_init_cipher_suites(local); if (result < 0) goto fail_workqueue; if (!local->ops->remain_on_channel) local->hw.wiphy->max_remain_on_channel_duration = 5000; /* mac80211 based drivers don't support internal TDLS setup */ if (local->hw.wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS) local->hw.wiphy->flags |= WIPHY_FLAG_TDLS_EXTERNAL_SETUP; /* mac80211 supports eCSA, if the driver supports STA CSA at all */ if (ieee80211_hw_check(&local->hw, CHANCTX_STA_CSA)) local->ext_capa[0] |= WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING; /* mac80211 supports multi BSSID, if the driver supports it */ if (ieee80211_hw_check(&local->hw, SUPPORTS_MULTI_BSSID)) { local->hw.wiphy->support_mbssid = true; if (ieee80211_hw_check(&local->hw, SUPPORTS_ONLY_HE_MULTI_BSSID)) local->hw.wiphy->support_only_he_mbssid = true; else local->ext_capa[2] |= WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT; } local->hw.wiphy->max_num_csa_counters = IEEE80211_MAX_CNTDWN_COUNTERS_NUM; /* * We use the number of queues for feature tests (QoS, HT) internally * so restrict them appropriately. */ if (hw->queues > IEEE80211_MAX_QUEUES) hw->queues = IEEE80211_MAX_QUEUES; local->workqueue = alloc_ordered_workqueue("%s", 0, wiphy_name(local->hw.wiphy)); if (!local->workqueue) { result = -ENOMEM; goto fail_workqueue; } /* * The hardware needs headroom for sending the frame, * and we need some headroom for passing the frame to monitor * interfaces, but never both at the same time. */ local->tx_headroom = max_t(unsigned int , local->hw.extra_tx_headroom, IEEE80211_TX_STATUS_HEADROOM); /* * if the driver doesn't specify a max listen interval we * use 5 which should be a safe default */ if (local->hw.max_listen_interval == 0) local->hw.max_listen_interval = 5; local->hw.conf.listen_interval = local->hw.max_listen_interval; local->dynamic_ps_forced_timeout = -1; if (!local->hw.max_nan_de_entries) local->hw.max_nan_de_entries = IEEE80211_MAX_NAN_INSTANCE_ID; if (!local->hw.weight_multiplier) local->hw.weight_multiplier = 1; ieee80211_wep_init(local); local->hw.conf.flags = IEEE80211_CONF_IDLE; ieee80211_led_init(local); result = ieee80211_txq_setup_flows(local); if (result) goto fail_flows; rtnl_lock(); result = ieee80211_init_rate_ctrl_alg(local, hw->rate_control_algorithm); rtnl_unlock(); if (result < 0) { wiphy_debug(local->hw.wiphy, "Failed to initialize rate control algorithm\n"); goto fail_rate; } if (local->rate_ctrl) { clear_bit(IEEE80211_HW_SUPPORTS_VHT_EXT_NSS_BW, hw->flags); if (local->rate_ctrl->ops->capa & RATE_CTRL_CAPA_VHT_EXT_NSS_BW) ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW); } /* * If the VHT capabilities don't have IEEE80211_VHT_EXT_NSS_BW_CAPABLE, * or have it when we don't, copy the sband structure and set/clear it. * This is necessary because rate scaling algorithms could be switched * and have different support values. * Print a message so that in the common case the reallocation can be * avoided. 
*/ BUILD_BUG_ON(NUM_NL80211_BANDS > 8 * sizeof(local->sband_allocated)); for (band = 0; band < NUM_NL80211_BANDS; band++) { struct ieee80211_supported_band *sband; bool local_cap, ie_cap; local_cap = ieee80211_hw_check(hw, SUPPORTS_VHT_EXT_NSS_BW); sband = local->hw.wiphy->bands[band]; if (!sband || !sband->vht_cap.vht_supported) continue; ie_cap = !!(sband->vht_cap.vht_mcs.tx_highest & cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE)); if (local_cap == ie_cap) continue; sband = kmemdup(sband, sizeof(*sband), GFP_KERNEL); if (!sband) { result = -ENOMEM; goto fail_rate; } wiphy_dbg(hw->wiphy, "copying sband (band %d) due to VHT EXT NSS BW flag\n", band); sband->vht_cap.vht_mcs.tx_highest ^= cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE); local->hw.wiphy->bands[band] = sband; local->sband_allocated |= BIT(band); } result = wiphy_register(local->hw.wiphy); if (result < 0) goto fail_wiphy_register; debugfs_hw_add(local); rate_control_add_debugfs(local); ieee80211_check_wbrf_support(local); rtnl_lock(); wiphy_lock(hw->wiphy); /* add one default STA interface if supported */ if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_STATION) && !ieee80211_hw_check(hw, NO_AUTO_VIF)) { struct vif_params params = {0}; result = ieee80211_if_add(local, "wlan%d", NET_NAME_ENUM, NULL, NL80211_IFTYPE_STATION, &params); if (result) wiphy_warn(local->hw.wiphy, "Failed to add default virtual iface\n"); } wiphy_unlock(hw->wiphy); rtnl_unlock(); #ifdef CONFIG_INET local->ifa_notifier.notifier_call = ieee80211_ifa_changed; result = register_inetaddr_notifier(&local->ifa_notifier); if (result) goto fail_ifa; #endif #if IS_ENABLED(CONFIG_IPV6) local->ifa6_notifier.notifier_call = ieee80211_ifa6_changed; result = register_inet6addr_notifier(&local->ifa6_notifier); if (result) goto fail_ifa6; #endif return 0; #if IS_ENABLED(CONFIG_IPV6) fail_ifa6: #ifdef CONFIG_INET unregister_inetaddr_notifier(&local->ifa_notifier); #endif #endif #if defined(CONFIG_INET) || defined(CONFIG_IPV6) fail_ifa: #endif wiphy_unregister(local->hw.wiphy); fail_wiphy_register: rtnl_lock(); rate_control_deinitialize(local); ieee80211_remove_interfaces(local); rtnl_unlock(); fail_rate: ieee80211_txq_teardown_flows(local); fail_flows: ieee80211_led_exit(local); destroy_workqueue(local->workqueue); fail_workqueue: kfree(local->int_scan_req); return result; } EXPORT_SYMBOL(ieee80211_register_hw); void ieee80211_unregister_hw(struct ieee80211_hw *hw) { struct ieee80211_local *local = hw_to_local(hw); tasklet_kill(&local->tx_pending_tasklet); tasklet_kill(&local->tasklet); #ifdef CONFIG_INET unregister_inetaddr_notifier(&local->ifa_notifier); #endif #if IS_ENABLED(CONFIG_IPV6) unregister_inet6addr_notifier(&local->ifa6_notifier); #endif rtnl_lock(); /* * At this point, interface list manipulations are fine * because the driver cannot be handing us frames any * more and the tasklet is killed. 
*/ ieee80211_remove_interfaces(local); ieee80211_txq_teardown_flows(local); wiphy_lock(local->hw.wiphy); wiphy_delayed_work_cancel(local->hw.wiphy, &local->roc_work); wiphy_work_cancel(local->hw.wiphy, &local->reconfig_filter); wiphy_work_cancel(local->hw.wiphy, &local->sched_scan_stopped_work); wiphy_work_cancel(local->hw.wiphy, &local->radar_detected_work); wiphy_unlock(local->hw.wiphy); rtnl_unlock(); cancel_work_sync(&local->restart_work); ieee80211_clear_tx_pending(local); rate_control_deinitialize(local); if (skb_queue_len(&local->skb_queue) || skb_queue_len(&local->skb_queue_unreliable)) wiphy_warn(local->hw.wiphy, "skb_queue not empty\n"); skb_queue_purge(&local->skb_queue); skb_queue_purge(&local->skb_queue_unreliable); wiphy_unregister(local->hw.wiphy); destroy_workqueue(local->workqueue); ieee80211_led_exit(local); kfree(local->int_scan_req); } EXPORT_SYMBOL(ieee80211_unregister_hw); static int ieee80211_free_ack_frame(int id, void *p, void *data) { WARN_ONCE(1, "Have pending ack frames!\n"); kfree_skb(p); return 0; } void ieee80211_free_hw(struct ieee80211_hw *hw) { struct ieee80211_local *local = hw_to_local(hw); enum nl80211_band band; mutex_destroy(&local->iflist_mtx); idr_for_each(&local->ack_status_frames, ieee80211_free_ack_frame, NULL); idr_destroy(&local->ack_status_frames); sta_info_stop(local); ieee80211_free_led_names(local); for (band = 0; band < NUM_NL80211_BANDS; band++) { if (!(local->sband_allocated & BIT(band))) continue; kfree(local->hw.wiphy->bands[band]); } wiphy_free(local->hw.wiphy); } EXPORT_SYMBOL(ieee80211_free_hw); #define V(x) #x, static const char * const drop_reasons_unusable[] = { [0] = "RX_DROP_UNUSABLE", MAC80211_DROP_REASONS_UNUSABLE(V) #undef V }; static struct drop_reason_list drop_reason_list_unusable = { .reasons = drop_reasons_unusable, .n_reasons = ARRAY_SIZE(drop_reasons_unusable), }; static int __init ieee80211_init(void) { struct sk_buff *skb; int ret; BUILD_BUG_ON(sizeof(struct ieee80211_tx_info) > sizeof(skb->cb)); BUILD_BUG_ON(offsetof(struct ieee80211_tx_info, driver_data) + IEEE80211_TX_INFO_DRIVER_DATA_SIZE > sizeof(skb->cb)); ret = rc80211_minstrel_init(); if (ret) return ret; ret = ieee80211_iface_init(); if (ret) goto err_netdev; drop_reasons_register_subsys(SKB_DROP_REASON_SUBSYS_MAC80211_UNUSABLE, &drop_reason_list_unusable); return 0; err_netdev: rc80211_minstrel_exit(); return ret; } static void __exit ieee80211_exit(void) { rc80211_minstrel_exit(); ieee80211s_stop(); ieee80211_iface_exit(); drop_reasons_unregister_subsys(SKB_DROP_REASON_SUBSYS_MAC80211_UNUSABLE); rcu_barrier(); } subsys_initcall(ieee80211_init); module_exit(ieee80211_exit); MODULE_DESCRIPTION("IEEE 802.11 subsystem"); MODULE_LICENSE("GPL");
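/*
 * A minimal sketch, not part of the original file, of how a low-level
 * driver is expected to consume the alloc/register/unregister/free API
 * exported above; "example_priv", "example_ops" and
 * "example_driver_probe" are hypothetical placeholders.
 */
#if 0	/* illustration only */
struct example_priv {
	int placeholder;
};

static const struct ieee80211_ops example_ops = {
	/* .tx, .start, .stop, .config, .add_interface,
	 * .remove_interface, .configure_filter and .wake_tx_queue
	 * must all be provided, or ieee80211_alloc_hw_nm() above
	 * returns NULL. */
};

static int example_driver_probe(void)
{
	struct ieee80211_hw *hw;
	int err;

	/* wiphy + ieee80211_local + driver private data in one block */
	hw = ieee80211_alloc_hw_nm(sizeof(struct example_priv),
				   &example_ops, NULL);
	if (!hw)
		return -ENOMEM;

	/* bands, queues, hw flags etc. would be configured here */

	err = ieee80211_register_hw(hw);
	if (err) {
		ieee80211_free_hw(hw);
		return err;
	}

	return 0;
}
#endif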
// SPDX-License-Identifier: GPL-2.0
/*
 * usb-serial driver for Quatech USB 2 devices
 *
 * Copyright (C) 2012 Bill Pemberton (wfp5p@virginia.edu)
 *
 * These devices all have only 1 bulk in and 1 bulk out that is shared
 * for all serial ports.
 *
 */

#include <linux/unaligned.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/serial.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include <linux/serial_reg.h>
#include <linux/uaccess.h>

/* default urb timeout for usb operations */
#define QT2_USB_TIMEOUT USB_CTRL_SET_TIMEOUT

#define QT_OPEN_CLOSE_CHANNEL       0xca
#define QT_SET_GET_DEVICE           0xc2
#define QT_SET_GET_REGISTER         0xc0
#define QT_GET_SET_PREBUF_TRIG_LVL  0xcc
#define QT_SET_ATF                  0xcd
#define QT_TRANSFER_IN              0xc0
#define QT_HW_FLOW_CONTROL_MASK     0xc5
#define QT_SW_FLOW_CONTROL_MASK     0xc6

#define QT2_BREAK_CONTROL           0xc8
#define QT2_GET_SET_UART            0xc1
#define QT2_FLUSH_DEVICE            0xc4
#define QT2_GET_SET_QMCR            0xe1

#define QT2_QMCR_RS232              0x40
#define QT2_QMCR_RS422              0x10

#define SERIAL_CRTSCTS ((UART_MCR_RTS << 8) | UART_MSR_CTS)

#define SERIAL_EVEN_PARITY (UART_LCR_PARITY | UART_LCR_EPAR)

/* status bytes for the device */
#define QT2_CONTROL_BYTE    0x1b
#define QT2_LINE_STATUS     0x00 /* following 1 byte is line status */
#define QT2_MODEM_STATUS    0x01 /* following 1 byte is modem status */
#define QT2_XMIT_HOLD       0x02 /* following 2 bytes are ?? */
#define QT2_CHANGE_PORT     0x03 /* following 1 byte is port to change to */
#define QT2_REC_FLUSH       0x04 /* no following info */
#define QT2_XMIT_FLUSH      0x05 /* no following info */
#define QT2_CONTROL_ESCAPE  0xff /* pass through previous 2 control bytes */

#define MAX_BAUD_RATE       921600
#define DEFAULT_BAUD_RATE   9600

#define QT2_READ_BUFFER_SIZE    512 /* size of read buffer */
#define QT2_WRITE_BUFFER_SIZE   512 /* size of write buffer */
#define QT2_WRITE_CONTROL_SIZE  5   /* control bytes used for a write */

#define DRIVER_DESC "Quatech 2nd gen USB to Serial Driver"

#define USB_VENDOR_ID_QUATECH	0x061d
#define QUATECH_SSU2_100	0xC120	/* RS232 single port */
#define QUATECH_DSU2_100	0xC140	/* RS232 dual port */
#define QUATECH_DSU2_400	0xC150	/* RS232/422/485 dual port */
#define QUATECH_QSU2_100	0xC160	/* RS232 four port */
#define QUATECH_QSU2_400	0xC170	/* RS232/422/485 four port */
#define QUATECH_ESU2_100	0xC1A0	/* RS232 eight port */
#define QUATECH_ESU2_400	0xC180	/* RS232/422/485 eight port */

struct qt2_device_detail {
	int product_id;
	int num_ports;
};

#define QT_DETAILS(prod, ports)	\
	.product_id = (prod),	\
	.num_ports = (ports)

static const struct qt2_device_detail qt2_device_details[] = {
	{QT_DETAILS(QUATECH_SSU2_100, 1)},
	{QT_DETAILS(QUATECH_DSU2_400, 2)},
	{QT_DETAILS(QUATECH_DSU2_100, 2)},
	{QT_DETAILS(QUATECH_QSU2_400, 4)},
	{QT_DETAILS(QUATECH_QSU2_100, 4)},
	{QT_DETAILS(QUATECH_ESU2_400, 8)},
	{QT_DETAILS(QUATECH_ESU2_100, 8)},
	{QT_DETAILS(0, 0)}	/* Terminating entry */
};

static const struct usb_device_id id_table[] = {
	{USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_SSU2_100)},
	{USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_DSU2_100)},
	{USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_DSU2_400)},
	{USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_QSU2_100)},
{USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_QSU2_400)}, {USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_ESU2_100)}, {USB_DEVICE(USB_VENDOR_ID_QUATECH, QUATECH_ESU2_400)}, {} /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, id_table); struct qt2_serial_private { unsigned char current_port; /* current port for incoming data */ struct urb *read_urb; /* shared among all ports */ char *read_buffer; }; struct qt2_port_private { u8 device_port; spinlock_t urb_lock; bool urb_in_use; struct urb *write_urb; char *write_buffer; spinlock_t lock; u8 shadowLSR; u8 shadowMSR; struct usb_serial_port *port; }; static void qt2_update_lsr(struct usb_serial_port *port, unsigned char *ch); static void qt2_update_msr(struct usb_serial_port *port, unsigned char *ch); static void qt2_write_bulk_callback(struct urb *urb); static void qt2_read_bulk_callback(struct urb *urb); static void qt2_release(struct usb_serial *serial) { struct qt2_serial_private *serial_priv; serial_priv = usb_get_serial_data(serial); usb_kill_urb(serial_priv->read_urb); usb_free_urb(serial_priv->read_urb); kfree(serial_priv->read_buffer); kfree(serial_priv); } static inline int calc_baud_divisor(int baudrate) { int divisor, rem; divisor = MAX_BAUD_RATE / baudrate; rem = MAX_BAUD_RATE % baudrate; /* Round to nearest divisor */ if (((rem * 2) >= baudrate) && (baudrate != 110)) divisor++; return divisor; } static inline int qt2_set_port_config(struct usb_device *dev, unsigned char port_number, u16 baudrate, u16 lcr) { int divisor = calc_baud_divisor(baudrate); u16 index = ((u16) (lcr << 8) | (u16) (port_number)); return usb_control_msg(dev, usb_sndctrlpipe(dev, 0), QT2_GET_SET_UART, 0x40, divisor, index, NULL, 0, QT2_USB_TIMEOUT); } static inline int qt2_control_msg(struct usb_device *dev, u8 request, u16 data, u16 index) { return usb_control_msg(dev, usb_sndctrlpipe(dev, 0), request, 0x40, data, index, NULL, 0, QT2_USB_TIMEOUT); } static inline int qt2_getregister(struct usb_device *dev, u8 uart, u8 reg, u8 *data) { int ret; ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), QT_SET_GET_REGISTER, 0xc0, reg, uart, data, sizeof(*data), QT2_USB_TIMEOUT); if (ret < (int)sizeof(*data)) { if (ret >= 0) ret = -EIO; } return ret; } static inline int qt2_setregister(struct usb_device *dev, u8 uart, u8 reg, u16 data) { u16 value = (data << 8) | reg; return usb_control_msg(dev, usb_sndctrlpipe(dev, 0), QT_SET_GET_REGISTER, 0x40, value, uart, NULL, 0, QT2_USB_TIMEOUT); } static inline int update_mctrl(struct qt2_port_private *port_priv, unsigned int set, unsigned int clear) { struct usb_serial_port *port = port_priv->port; struct usb_device *dev = port->serial->dev; unsigned urb_value; int status; if (((set | clear) & (TIOCM_DTR | TIOCM_RTS)) == 0) { dev_dbg(&port->dev, "update_mctrl - DTR|RTS not being set|cleared\n"); return 0; /* no change */ } clear &= ~set; /* 'set' takes precedence over 'clear' */ urb_value = 0; if (set & TIOCM_DTR) urb_value |= UART_MCR_DTR; if (set & TIOCM_RTS) urb_value |= UART_MCR_RTS; status = qt2_setregister(dev, port_priv->device_port, UART_MCR, urb_value); if (status < 0) dev_err(&port->dev, "update_mctrl - Error from MODEM_CTRL urb: %i\n", status); return status; } static int qt2_calc_num_ports(struct usb_serial *serial, struct usb_serial_endpoints *epds) { struct qt2_device_detail d; int i; for (i = 0; d = qt2_device_details[i], d.product_id != 0; i++) { if (d.product_id == le16_to_cpu(serial->dev->descriptor.idProduct)) return d.num_ports; } /* we didn't recognize the device */ dev_err(&serial->dev->dev, "don't know the 
number of ports, assuming 1\n"); return 1; } static void qt2_set_termios(struct tty_struct *tty, struct usb_serial_port *port, const struct ktermios *old_termios) { struct usb_device *dev = port->serial->dev; struct qt2_port_private *port_priv; struct ktermios *termios = &tty->termios; u16 baud; unsigned int cflag = termios->c_cflag; u16 new_lcr = 0; int status; port_priv = usb_get_serial_port_data(port); if (cflag & PARENB) { if (cflag & PARODD) new_lcr |= UART_LCR_PARITY; else new_lcr |= SERIAL_EVEN_PARITY; } new_lcr |= UART_LCR_WLEN(tty_get_char_size(cflag)); baud = tty_get_baud_rate(tty); if (!baud) baud = 9600; status = qt2_set_port_config(dev, port_priv->device_port, baud, new_lcr); if (status < 0) dev_err(&port->dev, "%s - qt2_set_port_config failed: %i\n", __func__, status); if (cflag & CRTSCTS) status = qt2_control_msg(dev, QT_HW_FLOW_CONTROL_MASK, SERIAL_CRTSCTS, port_priv->device_port); else status = qt2_control_msg(dev, QT_HW_FLOW_CONTROL_MASK, 0, port_priv->device_port); if (status < 0) dev_err(&port->dev, "%s - set HW flow control failed: %i\n", __func__, status); if (I_IXOFF(tty) || I_IXON(tty)) { u16 x = ((u16) (START_CHAR(tty) << 8) | (u16) (STOP_CHAR(tty))); status = qt2_control_msg(dev, QT_SW_FLOW_CONTROL_MASK, x, port_priv->device_port); } else status = qt2_control_msg(dev, QT_SW_FLOW_CONTROL_MASK, 0, port_priv->device_port); if (status < 0) dev_err(&port->dev, "%s - set SW flow control failed: %i\n", __func__, status); } static int qt2_open(struct tty_struct *tty, struct usb_serial_port *port) { struct usb_serial *serial; struct qt2_port_private *port_priv; u8 *data; u16 device_port; int status; unsigned long flags; device_port = port->port_number; serial = port->serial; port_priv = usb_get_serial_port_data(port); /* set the port to RS232 mode */ status = qt2_control_msg(serial->dev, QT2_GET_SET_QMCR, QT2_QMCR_RS232, device_port); if (status < 0) { dev_err(&port->dev, "%s failed to set RS232 mode for port %i error %i\n", __func__, device_port, status); return status; } data = kzalloc(2, GFP_KERNEL); if (!data) return -ENOMEM; /* open the port */ status = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), QT_OPEN_CLOSE_CHANNEL, 0xc0, 0, device_port, data, 2, QT2_USB_TIMEOUT); if (status < 2) { dev_err(&port->dev, "%s - open port failed %i\n", __func__, status); if (status >= 0) status = -EIO; kfree(data); return status; } spin_lock_irqsave(&port_priv->lock, flags); port_priv->shadowLSR = data[0]; port_priv->shadowMSR = data[1]; spin_unlock_irqrestore(&port_priv->lock, flags); kfree(data); /* set to default speed and 8bit word size */ status = qt2_set_port_config(serial->dev, device_port, DEFAULT_BAUD_RATE, UART_LCR_WLEN8); if (status < 0) { dev_err(&port->dev, "%s - initial setup failed (%i)\n", __func__, device_port); return status; } port_priv->device_port = (u8) device_port; if (tty) qt2_set_termios(tty, port, &tty->termios); return 0; } static void qt2_close(struct usb_serial_port *port) { struct usb_serial *serial; struct qt2_port_private *port_priv; int i; serial = port->serial; port_priv = usb_get_serial_port_data(port); usb_kill_urb(port_priv->write_urb); /* flush the port transmit buffer */ i = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), QT2_FLUSH_DEVICE, 0x40, 1, port_priv->device_port, NULL, 0, QT2_USB_TIMEOUT); if (i < 0) dev_err(&port->dev, "%s - transmit buffer flush failed: %i\n", __func__, i); /* flush the port receive buffer */ i = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), QT2_FLUSH_DEVICE, 0x40, 0, 
port_priv->device_port, NULL, 0, QT2_USB_TIMEOUT); if (i < 0) dev_err(&port->dev, "%s - receive buffer flush failed: %i\n", __func__, i); /* close the port */ i = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), QT_OPEN_CLOSE_CHANNEL, 0x40, 0, port_priv->device_port, NULL, 0, QT2_USB_TIMEOUT); if (i < 0) dev_err(&port->dev, "%s - close port failed %i\n", __func__, i); } static void qt2_disconnect(struct usb_serial *serial) { struct qt2_serial_private *serial_priv = usb_get_serial_data(serial); usb_kill_urb(serial_priv->read_urb); } static void qt2_process_status(struct usb_serial_port *port, unsigned char *ch) { switch (*ch) { case QT2_LINE_STATUS: qt2_update_lsr(port, ch + 1); break; case QT2_MODEM_STATUS: qt2_update_msr(port, ch + 1); break; } } static void qt2_process_read_urb(struct urb *urb) { struct usb_serial *serial; struct qt2_serial_private *serial_priv; struct usb_serial_port *port; bool escapeflag; unsigned char *ch; int i; unsigned char newport; int len = urb->actual_length; if (!len) return; ch = urb->transfer_buffer; serial = urb->context; serial_priv = usb_get_serial_data(serial); port = serial->port[serial_priv->current_port]; for (i = 0; i < urb->actual_length; i++) { ch = (unsigned char *)urb->transfer_buffer + i; if ((i <= (len - 3)) && (*ch == QT2_CONTROL_BYTE) && (*(ch + 1) == QT2_CONTROL_BYTE)) { escapeflag = false; switch (*(ch + 2)) { case QT2_LINE_STATUS: case QT2_MODEM_STATUS: if (i > (len - 4)) { dev_warn(&port->dev, "%s - status message too short\n", __func__); break; } qt2_process_status(port, ch + 2); i += 3; escapeflag = true; break; case QT2_XMIT_HOLD: if (i > (len - 5)) { dev_warn(&port->dev, "%s - xmit_empty message too short\n", __func__); break; } /* bytes_written = (ch[1] << 4) + ch[0]; */ i += 4; escapeflag = true; break; case QT2_CHANGE_PORT: if (i > (len - 4)) { dev_warn(&port->dev, "%s - change_port message too short\n", __func__); break; } tty_flip_buffer_push(&port->port); newport = *(ch + 3); if (newport >= serial->num_ports) { dev_err(&port->dev, "%s - port change to invalid port: %i\n", __func__, newport); break; } serial_priv->current_port = newport; port = serial->port[serial_priv->current_port]; i += 3; escapeflag = true; break; case QT2_REC_FLUSH: case QT2_XMIT_FLUSH: i += 2; escapeflag = true; break; case QT2_CONTROL_ESCAPE: tty_insert_flip_string(&port->port, ch, 2); i += 2; escapeflag = true; break; default: dev_warn(&port->dev, "%s - unsupported command %i\n", __func__, *(ch + 2)); break; } if (escapeflag) continue; } tty_insert_flip_char(&port->port, *ch, TTY_NORMAL); } tty_flip_buffer_push(&port->port); } static void qt2_write_bulk_callback(struct urb *urb) { struct usb_serial_port *port; struct qt2_port_private *port_priv; unsigned long flags; port = urb->context; port_priv = usb_get_serial_port_data(port); spin_lock_irqsave(&port_priv->urb_lock, flags); port_priv->urb_in_use = false; usb_serial_port_softint(port); spin_unlock_irqrestore(&port_priv->urb_lock, flags); } static void qt2_read_bulk_callback(struct urb *urb) { struct usb_serial *serial = urb->context; int status; if (urb->status) { dev_warn(&serial->dev->dev, "%s - non-zero urb status: %i\n", __func__, urb->status); return; } qt2_process_read_urb(urb); status = usb_submit_urb(urb, GFP_ATOMIC); if (status != 0) dev_err(&serial->dev->dev, "%s - resubmit read urb failed: %i\n", __func__, status); } static int qt2_setup_urbs(struct usb_serial *serial) { struct usb_serial_port *port0; struct qt2_serial_private *serial_priv; int status; port0 = serial->port[0]; 
serial_priv = usb_get_serial_data(serial); serial_priv->read_urb = usb_alloc_urb(0, GFP_KERNEL); if (!serial_priv->read_urb) return -ENOMEM; usb_fill_bulk_urb(serial_priv->read_urb, serial->dev, usb_rcvbulkpipe(serial->dev, port0->bulk_in_endpointAddress), serial_priv->read_buffer, QT2_READ_BUFFER_SIZE, qt2_read_bulk_callback, serial); status = usb_submit_urb(serial_priv->read_urb, GFP_KERNEL); if (status != 0) { dev_err(&serial->dev->dev, "%s - submit read urb failed %i\n", __func__, status); usb_free_urb(serial_priv->read_urb); return status; } return 0; } static int qt2_attach(struct usb_serial *serial) { struct qt2_serial_private *serial_priv; int status; /* power on unit */ status = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), 0xc2, 0x40, 0x8000, 0, NULL, 0, QT2_USB_TIMEOUT); if (status < 0) { dev_err(&serial->dev->dev, "%s - failed to power on unit: %i\n", __func__, status); return status; } serial_priv = kzalloc(sizeof(*serial_priv), GFP_KERNEL); if (!serial_priv) return -ENOMEM; serial_priv->read_buffer = kmalloc(QT2_READ_BUFFER_SIZE, GFP_KERNEL); if (!serial_priv->read_buffer) { status = -ENOMEM; goto err_buf; } usb_set_serial_data(serial, serial_priv); status = qt2_setup_urbs(serial); if (status != 0) goto attach_failed; return 0; attach_failed: kfree(serial_priv->read_buffer); err_buf: kfree(serial_priv); return status; } static int qt2_port_probe(struct usb_serial_port *port) { struct usb_serial *serial = port->serial; struct qt2_port_private *port_priv; u8 bEndpointAddress; port_priv = kzalloc(sizeof(*port_priv), GFP_KERNEL); if (!port_priv) return -ENOMEM; spin_lock_init(&port_priv->lock); spin_lock_init(&port_priv->urb_lock); port_priv->port = port; port_priv->write_buffer = kmalloc(QT2_WRITE_BUFFER_SIZE, GFP_KERNEL); if (!port_priv->write_buffer) goto err_buf; port_priv->write_urb = usb_alloc_urb(0, GFP_KERNEL); if (!port_priv->write_urb) goto err_urb; bEndpointAddress = serial->port[0]->bulk_out_endpointAddress; usb_fill_bulk_urb(port_priv->write_urb, serial->dev, usb_sndbulkpipe(serial->dev, bEndpointAddress), port_priv->write_buffer, QT2_WRITE_BUFFER_SIZE, qt2_write_bulk_callback, port); usb_set_serial_port_data(port, port_priv); return 0; err_urb: kfree(port_priv->write_buffer); err_buf: kfree(port_priv); return -ENOMEM; } static void qt2_port_remove(struct usb_serial_port *port) { struct qt2_port_private *port_priv; port_priv = usb_get_serial_port_data(port); usb_free_urb(port_priv->write_urb); kfree(port_priv->write_buffer); kfree(port_priv); } static int qt2_tiocmget(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct usb_device *dev = port->serial->dev; struct qt2_port_private *port_priv = usb_get_serial_port_data(port); u8 *d; int r; d = kzalloc(2, GFP_KERNEL); if (!d) return -ENOMEM; r = qt2_getregister(dev, port_priv->device_port, UART_MCR, d); if (r < 0) goto mget_out; r = qt2_getregister(dev, port_priv->device_port, UART_MSR, d + 1); if (r < 0) goto mget_out; r = (d[0] & UART_MCR_DTR ? TIOCM_DTR : 0) | (d[0] & UART_MCR_RTS ? TIOCM_RTS : 0) | (d[1] & UART_MSR_CTS ? TIOCM_CTS : 0) | (d[1] & UART_MSR_DCD ? TIOCM_CAR : 0) | (d[1] & UART_MSR_RI ? TIOCM_RI : 0) | (d[1] & UART_MSR_DSR ? 
TIOCM_DSR : 0); mget_out: kfree(d); return r; } static int qt2_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear) { struct qt2_port_private *port_priv; port_priv = usb_get_serial_port_data(tty->driver_data); return update_mctrl(port_priv, set, clear); } static int qt2_break_ctl(struct tty_struct *tty, int break_state) { struct usb_serial_port *port = tty->driver_data; struct qt2_port_private *port_priv; int status; u16 val; port_priv = usb_get_serial_port_data(port); val = (break_state == -1) ? 1 : 0; status = qt2_control_msg(port->serial->dev, QT2_BREAK_CONTROL, val, port_priv->device_port); if (status < 0) { dev_warn(&port->dev, "%s - failed to send control message: %i\n", __func__, status); return status; } return 0; } static void qt2_dtr_rts(struct usb_serial_port *port, int on) { struct usb_device *dev = port->serial->dev; struct qt2_port_private *port_priv = usb_get_serial_port_data(port); /* Disable flow control */ if (!on) { if (qt2_setregister(dev, port_priv->device_port, UART_MCR, 0) < 0) dev_warn(&port->dev, "error from flowcontrol urb\n"); } /* drop RTS and DTR */ if (on) update_mctrl(port_priv, TIOCM_DTR | TIOCM_RTS, 0); else update_mctrl(port_priv, 0, TIOCM_DTR | TIOCM_RTS); } static void qt2_update_msr(struct usb_serial_port *port, unsigned char *ch) { struct qt2_port_private *port_priv; u8 newMSR = (u8) *ch; unsigned long flags; /* May be called from qt2_process_read_urb() for an unbound port. */ port_priv = usb_get_serial_port_data(port); if (!port_priv) return; spin_lock_irqsave(&port_priv->lock, flags); port_priv->shadowMSR = newMSR; spin_unlock_irqrestore(&port_priv->lock, flags); if (newMSR & UART_MSR_ANY_DELTA) { /* update input line counters */ if (newMSR & UART_MSR_DCTS) port->icount.cts++; if (newMSR & UART_MSR_DDSR) port->icount.dsr++; if (newMSR & UART_MSR_DDCD) port->icount.dcd++; if (newMSR & UART_MSR_TERI) port->icount.rng++; wake_up_interruptible(&port->port.delta_msr_wait); } } static void qt2_update_lsr(struct usb_serial_port *port, unsigned char *ch) { struct qt2_port_private *port_priv; struct async_icount *icount; unsigned long flags; u8 newLSR = (u8) *ch; /* May be called from qt2_process_read_urb() for an unbound port. 
*/ port_priv = usb_get_serial_port_data(port); if (!port_priv) return; if (newLSR & UART_LSR_BI) newLSR &= (u8) (UART_LSR_OE | UART_LSR_BI); spin_lock_irqsave(&port_priv->lock, flags); port_priv->shadowLSR = newLSR; spin_unlock_irqrestore(&port_priv->lock, flags); icount = &port->icount; if (newLSR & UART_LSR_BRK_ERROR_BITS) { if (newLSR & UART_LSR_BI) icount->brk++; if (newLSR & UART_LSR_OE) icount->overrun++; if (newLSR & UART_LSR_PE) icount->parity++; if (newLSR & UART_LSR_FE) icount->frame++; } } static unsigned int qt2_write_room(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct qt2_port_private *port_priv; unsigned long flags; unsigned int r; port_priv = usb_get_serial_port_data(port); spin_lock_irqsave(&port_priv->urb_lock, flags); if (port_priv->urb_in_use) r = 0; else r = QT2_WRITE_BUFFER_SIZE - QT2_WRITE_CONTROL_SIZE; spin_unlock_irqrestore(&port_priv->urb_lock, flags); return r; } static int qt2_write(struct tty_struct *tty, struct usb_serial_port *port, const unsigned char *buf, int count) { struct qt2_port_private *port_priv; struct urb *write_urb; unsigned char *data; unsigned long flags; int status; int bytes_out = 0; port_priv = usb_get_serial_port_data(port); if (port_priv->write_urb == NULL) { dev_err(&port->dev, "%s - no output urb\n", __func__); return 0; } write_urb = port_priv->write_urb; count = min(count, QT2_WRITE_BUFFER_SIZE - QT2_WRITE_CONTROL_SIZE); data = write_urb->transfer_buffer; spin_lock_irqsave(&port_priv->urb_lock, flags); if (port_priv->urb_in_use) { dev_err(&port->dev, "qt2_write - urb is in use\n"); goto write_out; } *data++ = QT2_CONTROL_BYTE; *data++ = QT2_CONTROL_BYTE; *data++ = port_priv->device_port; put_unaligned_le16(count, data); data += 2; memcpy(data, buf, count); write_urb->transfer_buffer_length = count + QT2_WRITE_CONTROL_SIZE; status = usb_submit_urb(write_urb, GFP_ATOMIC); if (status == 0) { port_priv->urb_in_use = true; bytes_out += count; } write_out: spin_unlock_irqrestore(&port_priv->urb_lock, flags); return bytes_out; } static struct usb_serial_driver qt2_device = { .driver = { .name = "quatech-serial", }, .description = DRIVER_DESC, .id_table = id_table, .open = qt2_open, .close = qt2_close, .write = qt2_write, .write_room = qt2_write_room, .calc_num_ports = qt2_calc_num_ports, .attach = qt2_attach, .release = qt2_release, .disconnect = qt2_disconnect, .port_probe = qt2_port_probe, .port_remove = qt2_port_remove, .dtr_rts = qt2_dtr_rts, .break_ctl = qt2_break_ctl, .tiocmget = qt2_tiocmget, .tiocmset = qt2_tiocmset, .tiocmiwait = usb_serial_generic_tiocmiwait, .get_icount = usb_serial_generic_get_icount, .set_termios = qt2_set_termios, }; static struct usb_serial_driver *const serial_drivers[] = { &qt2_device, NULL }; module_usb_serial_driver(serial_drivers, id_table); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL v2");
// SPDX-License-Identifier: GPL-2.0-or-later /* SCTP kernel implementation * (C) Copyright Red Hat Inc. 2022 * * This file is part of the SCTP kernel implementation * * These functions manipulate sctp stream queue/scheduling. * * Please send any bug reports or fixes you make to the * email address(es): * lksctp developers <linux-sctp@vger.kernel.org> * * Written or modified by: * Xin Long <lucien.xin@gmail.com> */ #include <linux/list.h> #include <net/sctp/sctp.h> #include <net/sctp/sm.h> #include <net/sctp/stream_sched.h> /* Fair Capacity and Weighted Fair Queueing handling * RFC 8260 section 3.5 and 3.6 */ static void sctp_sched_fc_unsched_all(struct sctp_stream *stream); static int sctp_sched_wfq_set(struct sctp_stream *stream, __u16 sid, __u16 weight, gfp_t gfp) { struct sctp_stream_out_ext *soute = SCTP_SO(stream, sid)->ext; if (!weight) return -EINVAL; soute->fc_weight = weight; return 0; } static int sctp_sched_wfq_get(struct sctp_stream *stream, __u16 sid, __u16 *value) { struct sctp_stream_out_ext *soute = SCTP_SO(stream, sid)->ext; *value = soute->fc_weight; return 0; } static int sctp_sched_fc_set(struct sctp_stream *stream, __u16 sid, __u16 weight, gfp_t gfp) { return 0; } static int sctp_sched_fc_get(struct sctp_stream *stream, __u16 sid, __u16 *value) { return 0; } static int sctp_sched_fc_init(struct sctp_stream *stream) { INIT_LIST_HEAD(&stream->fc_list); return 0; } static int sctp_sched_fc_init_sid(struct sctp_stream *stream, __u16 sid, gfp_t gfp) { struct sctp_stream_out_ext *soute = SCTP_SO(stream, sid)->ext; INIT_LIST_HEAD(&soute->fc_list); soute->fc_length = 0; soute->fc_weight = 1; return 0; } static void sctp_sched_fc_free_sid(struct sctp_stream *stream, __u16 sid) { } static void sctp_sched_fc_sched(struct sctp_stream *stream, struct sctp_stream_out_ext *soute) { struct sctp_stream_out_ext *pos; if (!list_empty(&soute->fc_list)) return; list_for_each_entry(pos, &stream->fc_list, fc_list) if ((__u64)pos->fc_length * soute->fc_weight >= (__u64)soute->fc_length * pos->fc_weight) break; list_add_tail(&soute->fc_list, &pos->fc_list); } static void sctp_sched_fc_enqueue(struct sctp_outq *q, struct sctp_datamsg *msg) { struct sctp_stream *stream; struct sctp_chunk *ch; __u16 sid; ch = list_first_entry(&msg->chunks, struct sctp_chunk, frag_list); sid = sctp_chunk_stream_no(ch); stream = &q->asoc->stream; sctp_sched_fc_sched(stream, SCTP_SO(stream, sid)->ext); } static struct sctp_chunk *sctp_sched_fc_dequeue(struct sctp_outq *q) { struct sctp_stream *stream = &q->asoc->stream; struct sctp_stream_out_ext *soute; struct sctp_chunk *ch; /* Bail out quickly if queue is empty */ if (list_empty(&q->out_chunk_list)) return NULL; /* Find
which chunk is next */ if (stream->out_curr) soute = stream->out_curr->ext; else soute = list_entry(stream->fc_list.next, struct sctp_stream_out_ext, fc_list); ch = list_entry(soute->outq.next, struct sctp_chunk, stream_list); sctp_sched_dequeue_common(q, ch); return ch; } static void sctp_sched_fc_dequeue_done(struct sctp_outq *q, struct sctp_chunk *ch) { struct sctp_stream *stream = &q->asoc->stream; struct sctp_stream_out_ext *soute, *pos; __u16 sid, i; sid = sctp_chunk_stream_no(ch); soute = SCTP_SO(stream, sid)->ext; /* reduce all fc_lengths by U32_MAX / 4 if the current fc_length overflows. */ if (soute->fc_length > U32_MAX - ch->skb->len) { for (i = 0; i < stream->outcnt; i++) { pos = SCTP_SO(stream, i)->ext; if (!pos) continue; if (pos->fc_length <= (U32_MAX >> 2)) { pos->fc_length = 0; continue; } pos->fc_length -= (U32_MAX >> 2); } } soute->fc_length += ch->skb->len; if (list_empty(&soute->outq)) { list_del_init(&soute->fc_list); return; } pos = soute; list_for_each_entry_continue(pos, &stream->fc_list, fc_list) if ((__u64)pos->fc_length * soute->fc_weight >= (__u64)soute->fc_length * pos->fc_weight) break; list_move_tail(&soute->fc_list, &pos->fc_list); } static void sctp_sched_fc_sched_all(struct sctp_stream *stream) { struct sctp_association *asoc; struct sctp_chunk *ch; asoc = container_of(stream, struct sctp_association, stream); list_for_each_entry(ch, &asoc->outqueue.out_chunk_list, list) { __u16 sid = sctp_chunk_stream_no(ch); if (SCTP_SO(stream, sid)->ext) sctp_sched_fc_sched(stream, SCTP_SO(stream, sid)->ext); } } static void sctp_sched_fc_unsched_all(struct sctp_stream *stream) { struct sctp_stream_out_ext *soute, *tmp; list_for_each_entry_safe(soute, tmp, &stream->fc_list, fc_list) list_del_init(&soute->fc_list); } static struct sctp_sched_ops sctp_sched_fc = { .set = sctp_sched_fc_set, .get = sctp_sched_fc_get, .init = sctp_sched_fc_init, .init_sid = sctp_sched_fc_init_sid, .free_sid = sctp_sched_fc_free_sid, .enqueue = sctp_sched_fc_enqueue, .dequeue = sctp_sched_fc_dequeue, .dequeue_done = sctp_sched_fc_dequeue_done, .sched_all = sctp_sched_fc_sched_all, .unsched_all = sctp_sched_fc_unsched_all, }; void sctp_sched_ops_fc_init(void) { sctp_sched_ops_register(SCTP_SS_FC, &sctp_sched_fc); } static struct sctp_sched_ops sctp_sched_wfq = { .set = sctp_sched_wfq_set, .get = sctp_sched_wfq_get, .init = sctp_sched_fc_init, .init_sid = sctp_sched_fc_init_sid, .free_sid = sctp_sched_fc_free_sid, .enqueue = sctp_sched_fc_enqueue, .dequeue = sctp_sched_fc_dequeue, .dequeue_done = sctp_sched_fc_dequeue_done, .sched_all = sctp_sched_fc_sched_all, .unsched_all = sctp_sched_fc_unsched_all, }; void sctp_sched_ops_wfq_init(void) { sctp_sched_ops_register(SCTP_SS_WFQ, &sctp_sched_wfq); }
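The ordering used by sctp_sched_fc_sched() and sctp_sched_fc_dequeue_done() above keeps streams sorted by their fc_length/fc_weight ratio without dividing: the two ratios are compared by cross-multiplying in 64 bits, which also avoids overflow for 32-bit lengths. A standalone sketch of that comparison (the demo_* names are illustrative):

/*
 * Illustration only: a stream that has sent fewer bytes per unit of
 * weight sorts earlier, i.e. is scheduled first.
 */
#include <stdio.h>
#include <stdint.h>

struct demo_stream {
	uint32_t len;		/* bytes already sent (fc_length) */
	uint16_t weight;	/* configured weight (fc_weight) */
};

/* nonzero if a should be scheduled ahead of b */
static int demo_before(const struct demo_stream *a, const struct demo_stream *b)
{
	return (uint64_t)a->len * b->weight < (uint64_t)b->len * a->weight;
}

int main(void)
{
	struct demo_stream a = { .len = 1000, .weight = 2 };	/* 500 per weight */
	struct demo_stream b = { .len = 600, .weight = 1 };	/* 600 per weight */

	printf("a before b: %d\n", demo_before(&a, &b));	/* prints 1 */
	return 0;
}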
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2023 Thomas Weißschuh <linux@weissschuh.net> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/completion.h> #include <linux/device.h> #include <linux/hwmon.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/types.h> #include <linux/usb.h> #define DRIVER_NAME "powerz" #define POWERZ_EP_CMD_OUT 0x01 #define POWERZ_EP_DATA_IN 0x81 struct powerz_sensor_data { u8 _unknown_1[8]; __le32 V_bus; __le32 I_bus; __le32 V_bus_avg; __le32 I_bus_avg; u8 _unknown_2[8]; u8 temp[2]; __le16 V_cc1; __le16 V_cc2; __le16 V_dp; __le16 V_dm; __le16 V_dd; u8 _unknown_3[4]; } __packed; struct powerz_priv { char transfer_buffer[64]; /* first member to satisfy DMA alignment */ struct mutex mutex; struct completion completion; struct urb *urb; int status; }; static const struct hwmon_channel_info *const powerz_info[] = { HWMON_CHANNEL_INFO(in, HWMON_I_INPUT | HWMON_I_LABEL | HWMON_I_AVERAGE, HWMON_I_INPUT | HWMON_I_LABEL, HWMON_I_INPUT | HWMON_I_LABEL, HWMON_I_INPUT | HWMON_I_LABEL, HWMON_I_INPUT | HWMON_I_LABEL, HWMON_I_INPUT | HWMON_I_LABEL), HWMON_CHANNEL_INFO(curr, HWMON_C_INPUT | HWMON_C_LABEL | HWMON_C_AVERAGE), HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_LABEL), NULL }; static int powerz_read_string(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel, const char **str) { if (type == hwmon_curr && attr == hwmon_curr_label) { *str = "IBUS"; } else if (type == hwmon_in && attr == hwmon_in_label) { if (channel == 0) *str = "VBUS"; else if (channel == 1) *str = "VCC1"; else if (channel == 2) *str = "VCC2"; else if (channel == 3) *str = "VDP"; else if (channel == 4) *str = "VDM"; else if (channel == 5) *str = "VDD"; else return -EOPNOTSUPP; } else if (type == hwmon_temp && attr == hwmon_temp_label) { *str = "TEMP"; } else { return -EOPNOTSUPP; } return 0; } static void powerz_usb_data_complete(struct urb *urb) { struct powerz_priv *priv = urb->context; complete(&priv->completion); } static void powerz_usb_cmd_complete(struct urb *urb) { struct powerz_priv *priv = urb->context; usb_fill_bulk_urb(urb, urb->dev, usb_rcvbulkpipe(urb->dev, POWERZ_EP_DATA_IN), priv->transfer_buffer, sizeof(priv->transfer_buffer), powerz_usb_data_complete, priv); priv->status = usb_submit_urb(urb, GFP_ATOMIC); if (priv->status) complete(&priv->completion); } static int powerz_read_data(struct usb_device *udev, struct powerz_priv *priv) { int ret; priv->status = -ETIMEDOUT; reinit_completion(&priv->completion); priv->transfer_buffer[0] = 0x0c; priv->transfer_buffer[1] =
0x00; priv->transfer_buffer[2] = 0x02; priv->transfer_buffer[3] = 0x00; usb_fill_bulk_urb(priv->urb, udev, usb_sndbulkpipe(udev, POWERZ_EP_CMD_OUT), priv->transfer_buffer, 4, powerz_usb_cmd_complete, priv); ret = usb_submit_urb(priv->urb, GFP_KERNEL); if (ret) return ret; if (!wait_for_completion_interruptible_timeout (&priv->completion, msecs_to_jiffies(5))) { usb_kill_urb(priv->urb); return -EIO; } if (priv->urb->actual_length < sizeof(struct powerz_sensor_data)) return -EIO; return priv->status; } static int powerz_read(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel, long *val) { struct usb_interface *intf = to_usb_interface(dev->parent); struct usb_device *udev = interface_to_usbdev(intf); struct powerz_priv *priv = usb_get_intfdata(intf); struct powerz_sensor_data *data; int ret; if (!priv) return -EIO; /* disconnected */ mutex_lock(&priv->mutex); ret = powerz_read_data(udev, priv); if (ret) goto out; data = (struct powerz_sensor_data *)priv->transfer_buffer; if (type == hwmon_curr) { if (attr == hwmon_curr_input) *val = ((s32)le32_to_cpu(data->I_bus)) / 1000; else if (attr == hwmon_curr_average) *val = ((s32)le32_to_cpu(data->I_bus_avg)) / 1000; else ret = -EOPNOTSUPP; } else if (type == hwmon_in) { if (attr == hwmon_in_input) { if (channel == 0) *val = le32_to_cpu(data->V_bus) / 1000; else if (channel == 1) *val = le16_to_cpu(data->V_cc1) / 10; else if (channel == 2) *val = le16_to_cpu(data->V_cc2) / 10; else if (channel == 3) *val = le16_to_cpu(data->V_dp) / 10; else if (channel == 4) *val = le16_to_cpu(data->V_dm) / 10; else if (channel == 5) *val = le16_to_cpu(data->V_dd) / 10; else ret = -EOPNOTSUPP; } else if (attr == hwmon_in_average && channel == 0) { *val = le32_to_cpu(data->V_bus_avg) / 1000; } else { ret = -EOPNOTSUPP; } } else if (type == hwmon_temp && attr == hwmon_temp_input) { *val = data->temp[1] * 2000 + data->temp[0] * 1000 / 128; } else { ret = -EOPNOTSUPP; } out: mutex_unlock(&priv->mutex); return ret; } static const struct hwmon_ops powerz_hwmon_ops = { .visible = 0444, .read = powerz_read, .read_string = powerz_read_string, }; static const struct hwmon_chip_info powerz_chip_info = { .ops = &powerz_hwmon_ops, .info = powerz_info, }; static int powerz_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct powerz_priv *priv; struct device *hwmon_dev; struct device *parent; parent = &intf->dev; priv = devm_kzalloc(parent, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; priv->urb = usb_alloc_urb(0, GFP_KERNEL); if (!priv->urb) return -ENOMEM; mutex_init(&priv->mutex); init_completion(&priv->completion); hwmon_dev = devm_hwmon_device_register_with_info(parent, DRIVER_NAME, priv, &powerz_chip_info, NULL); if (IS_ERR(hwmon_dev)) { usb_free_urb(priv->urb); return PTR_ERR(hwmon_dev); } usb_set_intfdata(intf, priv); return 0; } static void powerz_disconnect(struct usb_interface *intf) { struct powerz_priv *priv = usb_get_intfdata(intf); mutex_lock(&priv->mutex); usb_kill_urb(priv->urb); usb_free_urb(priv->urb); mutex_unlock(&priv->mutex); } static const struct usb_device_id powerz_id_table[] = { { USB_DEVICE_INTERFACE_NUMBER(0x5FC9, 0x0061, 0x00) }, /* ChargerLAB POWER-Z KM002C */ { USB_DEVICE_INTERFACE_NUMBER(0x5FC9, 0x0063, 0x00) }, /* ChargerLAB POWER-Z KM003C */ { } }; MODULE_DEVICE_TABLE(usb, powerz_id_table); static struct usb_driver powerz_driver = { .name = DRIVER_NAME, .id_table = powerz_id_table, .probe = powerz_probe, .disconnect = powerz_disconnect, }; module_usb_driver(powerz_driver); MODULE_LICENSE("GPL"); 
MODULE_AUTHOR("Thomas Weißschuh <linux@weissschuh.net>"); MODULE_DESCRIPTION("ChargerLAB POWER-Z USB-C tester");
// SPDX-License-Identifier: GPL-2.0-only /* * IPv6 library code, needed by static components when full IPv6 support is * not configured or static. */ #include <linux/export.h> #include <net/ipv6.h> #include <net/ipv6_stubs.h> #include <net/addrconf.h> #include <net/ip.h> /* if the ipv6 module is registered, this function is used by xfrm to force all * sockets to relookup their nodes - this is fairly expensive, be * careful */ void (*__fib6_flush_trees)(struct net *); EXPORT_SYMBOL(__fib6_flush_trees); #define IPV6_ADDR_SCOPE_TYPE(scope) ((scope) << 16) static inline unsigned int ipv6_addr_scope2type(unsigned int scope) { switch (scope) { case IPV6_ADDR_SCOPE_NODELOCAL: return (IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_NODELOCAL) | IPV6_ADDR_LOOPBACK); case IPV6_ADDR_SCOPE_LINKLOCAL: return (IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_LINKLOCAL) | IPV6_ADDR_LINKLOCAL); case IPV6_ADDR_SCOPE_SITELOCAL: return (IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_SITELOCAL) | IPV6_ADDR_SITELOCAL); } return IPV6_ADDR_SCOPE_TYPE(scope); } int __ipv6_addr_type(const struct in6_addr *addr) { __be32 st; st = addr->s6_addr32[0]; /* Consider all addresses with the first three bits different from 000 and 111 as unicasts.
*/ if ((st & htonl(0xE0000000)) != htonl(0x00000000) && (st & htonl(0xE0000000)) != htonl(0xE0000000)) return (IPV6_ADDR_UNICAST | IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_GLOBAL)); if ((st & htonl(0xFF000000)) == htonl(0xFF000000)) { /* multicast */ /* addr-select 3.1 */ return (IPV6_ADDR_MULTICAST | ipv6_addr_scope2type(IPV6_ADDR_MC_SCOPE(addr))); } if ((st & htonl(0xFFC00000)) == htonl(0xFE800000)) return (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_UNICAST | IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_LINKLOCAL)); /* addr-select 3.1 */ if ((st & htonl(0xFFC00000)) == htonl(0xFEC00000)) return (IPV6_ADDR_SITELOCAL | IPV6_ADDR_UNICAST | IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_SITELOCAL)); /* addr-select 3.1 */ if ((st & htonl(0xFE000000)) == htonl(0xFC000000)) return (IPV6_ADDR_UNICAST | IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_GLOBAL)); /* RFC 4193 */ if ((addr->s6_addr32[0] | addr->s6_addr32[1]) == 0) { if (addr->s6_addr32[2] == 0) { if (addr->s6_addr32[3] == 0) return IPV6_ADDR_ANY; if (addr->s6_addr32[3] == htonl(0x00000001)) return (IPV6_ADDR_LOOPBACK | IPV6_ADDR_UNICAST | IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_LINKLOCAL)); /* addr-select 3.4 */ return (IPV6_ADDR_COMPATv4 | IPV6_ADDR_UNICAST | IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_GLOBAL)); /* addr-select 3.3 */ } if (addr->s6_addr32[2] == htonl(0x0000ffff)) return (IPV6_ADDR_MAPPED | IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_GLOBAL)); /* addr-select 3.3 */ } return (IPV6_ADDR_UNICAST | IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_GLOBAL)); /* addr-select 3.4 */ } EXPORT_SYMBOL(__ipv6_addr_type); static ATOMIC_NOTIFIER_HEAD(inet6addr_chain); static BLOCKING_NOTIFIER_HEAD(inet6addr_validator_chain); int register_inet6addr_notifier(struct notifier_block *nb) { return atomic_notifier_chain_register(&inet6addr_chain, nb); } EXPORT_SYMBOL(register_inet6addr_notifier); int unregister_inet6addr_notifier(struct notifier_block *nb) { return atomic_notifier_chain_unregister(&inet6addr_chain, nb); } EXPORT_SYMBOL(unregister_inet6addr_notifier); int inet6addr_notifier_call_chain(unsigned long val, void *v) { return atomic_notifier_call_chain(&inet6addr_chain, val, v); } EXPORT_SYMBOL(inet6addr_notifier_call_chain); int register_inet6addr_validator_notifier(struct notifier_block *nb) { return blocking_notifier_chain_register(&inet6addr_validator_chain, nb); } EXPORT_SYMBOL(register_inet6addr_validator_notifier); int unregister_inet6addr_validator_notifier(struct notifier_block *nb) { return blocking_notifier_chain_unregister(&inet6addr_validator_chain, nb); } EXPORT_SYMBOL(unregister_inet6addr_validator_notifier); int inet6addr_validator_notifier_call_chain(unsigned long val, void *v) { return blocking_notifier_call_chain(&inet6addr_validator_chain, val, v); } EXPORT_SYMBOL(inet6addr_validator_notifier_call_chain); static struct dst_entry *eafnosupport_ipv6_dst_lookup_flow(struct net *net, const struct sock *sk, struct flowi6 *fl6, const struct in6_addr *final_dst) { return ERR_PTR(-EAFNOSUPPORT); } static int eafnosupport_ipv6_route_input(struct sk_buff *skb) { return -EAFNOSUPPORT; } static struct fib6_table *eafnosupport_fib6_get_table(struct net *net, u32 id) { return NULL; } static int eafnosupport_fib6_table_lookup(struct net *net, struct fib6_table *table, int oif, struct flowi6 *fl6, struct fib6_result *res, int flags) { return -EAFNOSUPPORT; } static int eafnosupport_fib6_lookup(struct net *net, int oif, struct flowi6 *fl6, struct fib6_result *res, int flags) { return -EAFNOSUPPORT; } static void eafnosupport_fib6_select_path(const struct net *net, struct fib6_result *res, struct 
flowi6 *fl6, int oif, bool have_oif_match, const struct sk_buff *skb, int strict) { } static u32 eafnosupport_ip6_mtu_from_fib6(const struct fib6_result *res, const struct in6_addr *daddr, const struct in6_addr *saddr) { return 0; } static int eafnosupport_fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh, struct fib6_config *cfg, gfp_t gfp_flags, struct netlink_ext_ack *extack) { NL_SET_ERR_MSG(extack, "IPv6 support not enabled in kernel"); return -EAFNOSUPPORT; } static int eafnosupport_ip6_del_rt(struct net *net, struct fib6_info *rt, bool skip_notify) { return -EAFNOSUPPORT; } static int eafnosupport_ipv6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, int (*output)(struct net *, struct sock *, struct sk_buff *)) { kfree_skb(skb); return -EAFNOSUPPORT; } static struct net_device *eafnosupport_ipv6_dev_find(struct net *net, const struct in6_addr *addr, struct net_device *dev) { return ERR_PTR(-EAFNOSUPPORT); } const struct ipv6_stub *ipv6_stub __read_mostly = &(struct ipv6_stub) { .ipv6_dst_lookup_flow = eafnosupport_ipv6_dst_lookup_flow, .ipv6_route_input = eafnosupport_ipv6_route_input, .fib6_get_table = eafnosupport_fib6_get_table, .fib6_table_lookup = eafnosupport_fib6_table_lookup, .fib6_lookup = eafnosupport_fib6_lookup, .fib6_select_path = eafnosupport_fib6_select_path, .ip6_mtu_from_fib6 = eafnosupport_ip6_mtu_from_fib6, .fib6_nh_init = eafnosupport_fib6_nh_init, .ip6_del_rt = eafnosupport_ip6_del_rt, .ipv6_fragment = eafnosupport_ipv6_fragment, .ipv6_dev_find = eafnosupport_ipv6_dev_find, }; EXPORT_SYMBOL_GPL(ipv6_stub); /* IPv6 Wildcard Address and Loopback Address defined by RFC2553 */ const struct in6_addr in6addr_loopback __aligned(BITS_PER_LONG/8) = IN6ADDR_LOOPBACK_INIT; EXPORT_SYMBOL(in6addr_loopback); const struct in6_addr in6addr_any __aligned(BITS_PER_LONG/8) = IN6ADDR_ANY_INIT; EXPORT_SYMBOL(in6addr_any); const struct in6_addr in6addr_linklocal_allnodes __aligned(BITS_PER_LONG/8) = IN6ADDR_LINKLOCAL_ALLNODES_INIT; EXPORT_SYMBOL(in6addr_linklocal_allnodes); const struct in6_addr in6addr_linklocal_allrouters __aligned(BITS_PER_LONG/8) = IN6ADDR_LINKLOCAL_ALLROUTERS_INIT; EXPORT_SYMBOL(in6addr_linklocal_allrouters); const struct in6_addr in6addr_interfacelocal_allnodes __aligned(BITS_PER_LONG/8) = IN6ADDR_INTERFACELOCAL_ALLNODES_INIT; EXPORT_SYMBOL(in6addr_interfacelocal_allnodes); const struct in6_addr in6addr_interfacelocal_allrouters __aligned(BITS_PER_LONG/8) = IN6ADDR_INTERFACELOCAL_ALLROUTERS_INIT; EXPORT_SYMBOL(in6addr_interfacelocal_allrouters); const struct in6_addr in6addr_sitelocal_allrouters __aligned(BITS_PER_LONG/8) = IN6ADDR_SITELOCAL_ALLROUTERS_INIT; EXPORT_SYMBOL(in6addr_sitelocal_allrouters); static void snmp6_free_dev(struct inet6_dev *idev) { kfree(idev->stats.icmpv6msgdev); kfree(idev->stats.icmpv6dev); free_percpu(idev->stats.ipv6); } static void in6_dev_finish_destroy_rcu(struct rcu_head *head) { struct inet6_dev *idev = container_of(head, struct inet6_dev, rcu); snmp6_free_dev(idev); kfree(idev); } /* Nobody refers to this device, we may destroy it. */ void in6_dev_finish_destroy(struct inet6_dev *idev) { struct net_device *dev = idev->dev; WARN_ON(!list_empty(&idev->addr_list)); WARN_ON(rcu_access_pointer(idev->mc_list)); WARN_ON(timer_pending(&idev->rs_timer)); #ifdef NET_REFCNT_DEBUG pr_debug("%s: %s\n", __func__, dev ? 
dev->name : "NIL"); #endif netdev_put(dev, &idev->dev_tracker); if (!idev->dead) { pr_warn("Freeing alive inet6 device %p\n", idev); return; } call_rcu(&idev->rcu, in6_dev_finish_destroy_rcu); } EXPORT_SYMBOL(in6_dev_finish_destroy);
// SPDX-License-Identifier: GPL-2.0-or-later /* * ip6_flowlabel.c IPv6 flowlabel manager. * * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> */ #include <linux/capability.h> #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/net.h> #include <linux/netdevice.h> #include <linux/in6.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <linux/export.h> #include <linux/pid_namespace.h> #include <linux/jump_label_ratelimit.h> #include <net/net_namespace.h> #include <net/sock.h> #include <net/ipv6.h> #include <net/rawv6.h> #include <net/transp_v6.h> #include <linux/uaccess.h> #define FL_MIN_LINGER 6 /* Minimal linger. It is set to 6sec specified in old IPv6 RFC. Well, it was a reasonable value. */ #define FL_MAX_LINGER 150 /* Maximal linger timeout */ /* FL hash table */ #define FL_MAX_PER_SOCK 32 #define FL_MAX_SIZE 4096 #define FL_HASH_MASK 255 #define FL_HASH(l) (ntohl(l)&FL_HASH_MASK) static atomic_t fl_size = ATOMIC_INIT(0); static struct ip6_flowlabel __rcu *fl_ht[FL_HASH_MASK+1]; static void ip6_fl_gc(struct timer_list *unused); static DEFINE_TIMER(ip6_fl_gc_timer, ip6_fl_gc); /* FL hash table lock: it protects only the GC */ static DEFINE_SPINLOCK(ip6_fl_lock); /* Big socket lock */ static DEFINE_SPINLOCK(ip6_sk_fl_lock); DEFINE_STATIC_KEY_DEFERRED_FALSE(ipv6_flowlabel_exclusive, HZ); EXPORT_SYMBOL(ipv6_flowlabel_exclusive); #define for_each_fl_rcu(hash, fl) \ for (fl = rcu_dereference(fl_ht[(hash)]); \ fl != NULL; \ fl = rcu_dereference(fl->next)) #define for_each_fl_continue_rcu(fl) \ for (fl = rcu_dereference(fl->next); \ fl != NULL; \ fl = rcu_dereference(fl->next)) #define for_each_sk_fl_rcu(np, sfl) \ for (sfl = rcu_dereference(np->ipv6_fl_list); \ sfl != NULL; \ sfl = rcu_dereference(sfl->next)) static inline struct ip6_flowlabel *__fl_lookup(struct net *net, __be32 label) { struct ip6_flowlabel *fl; for_each_fl_rcu(FL_HASH(label), fl) { if (fl->label == label && net_eq(fl->fl_net, net)) return fl; } return NULL; } static struct ip6_flowlabel *fl_lookup(struct net *net, __be32 label) { struct ip6_flowlabel *fl; rcu_read_lock(); fl = __fl_lookup(net, label); if (fl && !atomic_inc_not_zero(&fl->users)) fl = NULL; rcu_read_unlock(); return fl; } static bool fl_shared_exclusive(struct ip6_flowlabel *fl) { return fl->share == IPV6_FL_S_EXCL || fl->share == IPV6_FL_S_PROCESS || fl->share == IPV6_FL_S_USER; } static void fl_free_rcu(struct rcu_head *head) { struct ip6_flowlabel *fl = container_of(head, struct ip6_flowlabel, rcu); if (fl->share == IPV6_FL_S_PROCESS) put_pid(fl->owner.pid); kfree(fl->opt); kfree(fl); } static void fl_free(struct ip6_flowlabel *fl) { if (!fl) return; if (fl_shared_exclusive(fl) || fl->opt) static_branch_slow_dec_deferred(&ipv6_flowlabel_exclusive); call_rcu(&fl->rcu, fl_free_rcu); } static void fl_release(struct ip6_flowlabel *fl) { spin_lock_bh(&ip6_fl_lock); fl->lastuse = jiffies; if (atomic_dec_and_test(&fl->users)) { unsigned long ttd = fl->lastuse + fl->linger; if (time_after(ttd, fl->expires)) fl->expires = ttd; ttd = fl->expires; if (fl->opt && fl->share == IPV6_FL_S_EXCL) { struct ipv6_txoptions *opt = fl->opt; fl->opt = NULL; kfree(opt); } if
(!timer_pending(&ip6_fl_gc_timer) || time_after(ip6_fl_gc_timer.expires, ttd)) mod_timer(&ip6_fl_gc_timer, ttd); } spin_unlock_bh(&ip6_fl_lock); } static void ip6_fl_gc(struct timer_list *unused) { int i; unsigned long now = jiffies; unsigned long sched = 0; spin_lock(&ip6_fl_lock); for (i = 0; i <= FL_HASH_MASK; i++) { struct ip6_flowlabel *fl; struct ip6_flowlabel __rcu **flp; flp = &fl_ht[i]; while ((fl = rcu_dereference_protected(*flp, lockdep_is_held(&ip6_fl_lock))) != NULL) { if (atomic_read(&fl->users) == 0) { unsigned long ttd = fl->lastuse + fl->linger; if (time_after(ttd, fl->expires)) fl->expires = ttd; ttd = fl->expires; if (time_after_eq(now, ttd)) { *flp = fl->next; fl_free(fl); atomic_dec(&fl_size); continue; } if (!sched || time_before(ttd, sched)) sched = ttd; } flp = &fl->next; } } if (!sched && atomic_read(&fl_size)) sched = now + FL_MAX_LINGER; if (sched) { mod_timer(&ip6_fl_gc_timer, sched); } spin_unlock(&ip6_fl_lock); } static void __net_exit ip6_fl_purge(struct net *net) { int i; spin_lock_bh(&ip6_fl_lock); for (i = 0; i <= FL_HASH_MASK; i++) { struct ip6_flowlabel *fl; struct ip6_flowlabel __rcu **flp; flp = &fl_ht[i]; while ((fl = rcu_dereference_protected(*flp, lockdep_is_held(&ip6_fl_lock))) != NULL) { if (net_eq(fl->fl_net, net) && atomic_read(&fl->users) == 0) { *flp = fl->next; fl_free(fl); atomic_dec(&fl_size); continue; } flp = &fl->next; } } spin_unlock_bh(&ip6_fl_lock); } static struct ip6_flowlabel *fl_intern(struct net *net, struct ip6_flowlabel *fl, __be32 label) { struct ip6_flowlabel *lfl; fl->label = label & IPV6_FLOWLABEL_MASK; rcu_read_lock(); spin_lock_bh(&ip6_fl_lock); if (label == 0) { for (;;) { fl->label = htonl(get_random_u32())&IPV6_FLOWLABEL_MASK; if (fl->label) { lfl = __fl_lookup(net, fl->label); if (!lfl) break; } } } else { /* * we dropped the ip6_fl_lock, so this entry could reappear * and we need to recheck with it. * * OTOH no need to search the active socket first, like it is * done in ipv6_flowlabel_opt - sock is locked, so new entry * with the same label can only appear on another sock */ lfl = __fl_lookup(net, fl->label); if (lfl) { atomic_inc(&lfl->users); spin_unlock_bh(&ip6_fl_lock); rcu_read_unlock(); return lfl; } } fl->lastuse = jiffies; fl->next = fl_ht[FL_HASH(fl->label)]; rcu_assign_pointer(fl_ht[FL_HASH(fl->label)], fl); atomic_inc(&fl_size); spin_unlock_bh(&ip6_fl_lock); rcu_read_unlock(); return NULL; } /* Socket flowlabel lists */ struct ip6_flowlabel *__fl6_sock_lookup(struct sock *sk, __be32 label) { struct ipv6_fl_socklist *sfl; struct ipv6_pinfo *np = inet6_sk(sk); label &= IPV6_FLOWLABEL_MASK; rcu_read_lock(); for_each_sk_fl_rcu(np, sfl) { struct ip6_flowlabel *fl = sfl->fl; if (fl->label == label && atomic_inc_not_zero(&fl->users)) { fl->lastuse = jiffies; rcu_read_unlock(); return fl; } } rcu_read_unlock(); return NULL; } EXPORT_SYMBOL_GPL(__fl6_sock_lookup); void fl6_free_socklist(struct sock *sk) { struct ipv6_pinfo *np = inet6_sk(sk); struct ipv6_fl_socklist *sfl; if (!rcu_access_pointer(np->ipv6_fl_list)) return; spin_lock_bh(&ip6_sk_fl_lock); while ((sfl = rcu_dereference_protected(np->ipv6_fl_list, lockdep_is_held(&ip6_sk_fl_lock))) != NULL) { np->ipv6_fl_list = sfl->next; spin_unlock_bh(&ip6_sk_fl_lock); fl_release(sfl->fl); kfree_rcu(sfl, rcu); spin_lock_bh(&ip6_sk_fl_lock); } spin_unlock_bh(&ip6_sk_fl_lock); } /* Service routines */ /* This is the only difficult place: the flowlabel enforces equal headers up to and including the routing header, but the user may supply options following the rthdr.
*/ struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space, struct ip6_flowlabel *fl, struct ipv6_txoptions *fopt) { struct ipv6_txoptions *fl_opt = fl->opt; if (!fopt || fopt->opt_flen == 0) return fl_opt; if (fl_opt) { opt_space->hopopt = fl_opt->hopopt; opt_space->dst0opt = fl_opt->dst0opt; opt_space->srcrt = fl_opt->srcrt; opt_space->opt_nflen = fl_opt->opt_nflen; } else { if (fopt->opt_nflen == 0) return fopt; opt_space->hopopt = NULL; opt_space->dst0opt = NULL; opt_space->srcrt = NULL; opt_space->opt_nflen = 0; } opt_space->dst1opt = fopt->dst1opt; opt_space->opt_flen = fopt->opt_flen; opt_space->tot_len = fopt->tot_len; return opt_space; } EXPORT_SYMBOL_GPL(fl6_merge_options); static unsigned long check_linger(unsigned long ttl) { if (ttl < FL_MIN_LINGER) return FL_MIN_LINGER*HZ; if (ttl > FL_MAX_LINGER && !capable(CAP_NET_ADMIN)) return 0; return ttl*HZ; } static int fl6_renew(struct ip6_flowlabel *fl, unsigned long linger, unsigned long expires) { linger = check_linger(linger); if (!linger) return -EPERM; expires = check_linger(expires); if (!expires) return -EPERM; spin_lock_bh(&ip6_fl_lock); fl->lastuse = jiffies; if (time_before(fl->linger, linger)) fl->linger = linger; if (time_before(expires, fl->linger)) expires = fl->linger; if (time_before(fl->expires, fl->lastuse + expires)) fl->expires = fl->lastuse + expires; spin_unlock_bh(&ip6_fl_lock); return 0; } static struct ip6_flowlabel * fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq, sockptr_t optval, int optlen, int *err_p) { struct ip6_flowlabel *fl = NULL; int olen; int addr_type; int err; olen = optlen - CMSG_ALIGN(sizeof(*freq)); err = -EINVAL; if (olen > 64 * 1024) goto done; err = -ENOMEM; fl = kzalloc(sizeof(*fl), GFP_KERNEL); if (!fl) goto done; if (olen > 0) { struct msghdr msg; struct flowi6 flowi6; struct ipcm6_cookie ipc6; err = -ENOMEM; fl->opt = kmalloc(sizeof(*fl->opt) + olen, GFP_KERNEL); if (!fl->opt) goto done; memset(fl->opt, 0, sizeof(*fl->opt)); fl->opt->tot_len = sizeof(*fl->opt) + olen; err = -EFAULT; if (copy_from_sockptr_offset(fl->opt + 1, optval, CMSG_ALIGN(sizeof(*freq)), olen)) goto done; msg.msg_controllen = olen; msg.msg_control = (void *)(fl->opt+1); memset(&flowi6, 0, sizeof(flowi6)); ipc6.opt = fl->opt; err = ip6_datagram_send_ctl(net, sk, &msg, &flowi6, &ipc6); if (err) goto done; err = -EINVAL; if (fl->opt->opt_flen) goto done; if (fl->opt->opt_nflen == 0) { kfree(fl->opt); fl->opt = NULL; } } fl->fl_net = net; fl->expires = jiffies; err = fl6_renew(fl, freq->flr_linger, freq->flr_expires); if (err) goto done; fl->share = freq->flr_share; addr_type = ipv6_addr_type(&freq->flr_dst); if ((addr_type & IPV6_ADDR_MAPPED) || addr_type == IPV6_ADDR_ANY) { err = -EINVAL; goto done; } fl->dst = freq->flr_dst; atomic_set(&fl->users, 1); switch (fl->share) { case IPV6_FL_S_EXCL: case IPV6_FL_S_ANY: break; case IPV6_FL_S_PROCESS: fl->owner.pid = get_task_pid(current, PIDTYPE_PID); break; case IPV6_FL_S_USER: fl->owner.uid = current_euid(); break; default: err = -EINVAL; goto done; } if (fl_shared_exclusive(fl) || fl->opt) { WRITE_ONCE(sock_net(sk)->ipv6.flowlabel_has_excl, 1); static_branch_deferred_inc(&ipv6_flowlabel_exclusive); } return fl; done: if (fl) { kfree(fl->opt); kfree(fl); } *err_p = err; return NULL; } static int mem_check(struct sock *sk) { struct ipv6_pinfo *np = inet6_sk(sk); struct ipv6_fl_socklist *sfl; int room = FL_MAX_SIZE - atomic_read(&fl_size); int count = 0; if (room > FL_MAX_SIZE - FL_MAX_PER_SOCK) return 0; rcu_read_lock(); 
for_each_sk_fl_rcu(np, sfl) count++; rcu_read_unlock(); if (room <= 0 || ((count >= FL_MAX_PER_SOCK || (count > 0 && room < FL_MAX_SIZE/2) || room < FL_MAX_SIZE/4) && !capable(CAP_NET_ADMIN))) return -ENOBUFS; return 0; } static inline void fl_link(struct ipv6_pinfo *np, struct ipv6_fl_socklist *sfl, struct ip6_flowlabel *fl) { spin_lock_bh(&ip6_sk_fl_lock); sfl->fl = fl; sfl->next = np->ipv6_fl_list; rcu_assign_pointer(np->ipv6_fl_list, sfl); spin_unlock_bh(&ip6_sk_fl_lock); } int ipv6_flowlabel_opt_get(struct sock *sk, struct in6_flowlabel_req *freq, int flags) { struct ipv6_pinfo *np = inet6_sk(sk); struct ipv6_fl_socklist *sfl; if (flags & IPV6_FL_F_REMOTE) { freq->flr_label = np->rcv_flowinfo & IPV6_FLOWLABEL_MASK; return 0; } if (inet6_test_bit(REPFLOW, sk)) { freq->flr_label = np->flow_label; return 0; } rcu_read_lock(); for_each_sk_fl_rcu(np, sfl) { if (sfl->fl->label == (np->flow_label & IPV6_FLOWLABEL_MASK)) { spin_lock_bh(&ip6_fl_lock); freq->flr_label = sfl->fl->label; freq->flr_dst = sfl->fl->dst; freq->flr_share = sfl->fl->share; freq->flr_expires = (sfl->fl->expires - jiffies) / HZ; freq->flr_linger = sfl->fl->linger / HZ; spin_unlock_bh(&ip6_fl_lock); rcu_read_unlock(); return 0; } } rcu_read_unlock(); return -ENOENT; } #define socklist_dereference(__sflp) \ rcu_dereference_protected(__sflp, lockdep_is_held(&ip6_sk_fl_lock)) static int ipv6_flowlabel_put(struct sock *sk, struct in6_flowlabel_req *freq) { struct ipv6_pinfo *np = inet6_sk(sk); struct ipv6_fl_socklist __rcu **sflp; struct ipv6_fl_socklist *sfl; if (freq->flr_flags & IPV6_FL_F_REFLECT) { if (sk->sk_protocol != IPPROTO_TCP) return -ENOPROTOOPT; if (!inet6_test_bit(REPFLOW, sk)) return -ESRCH; np->flow_label = 0; inet6_clear_bit(REPFLOW, sk); return 0; } spin_lock_bh(&ip6_sk_fl_lock); for (sflp = &np->ipv6_fl_list; (sfl = socklist_dereference(*sflp)) != NULL; sflp = &sfl->next) { if (sfl->fl->label == freq->flr_label) goto found; } spin_unlock_bh(&ip6_sk_fl_lock); return -ESRCH; found: if (freq->flr_label == (np->flow_label & IPV6_FLOWLABEL_MASK)) np->flow_label &= ~IPV6_FLOWLABEL_MASK; *sflp = sfl->next; spin_unlock_bh(&ip6_sk_fl_lock); fl_release(sfl->fl); kfree_rcu(sfl, rcu); return 0; } static int ipv6_flowlabel_renew(struct sock *sk, struct in6_flowlabel_req *freq) { struct ipv6_pinfo *np = inet6_sk(sk); struct net *net = sock_net(sk); struct ipv6_fl_socklist *sfl; int err; rcu_read_lock(); for_each_sk_fl_rcu(np, sfl) { if (sfl->fl->label == freq->flr_label) { err = fl6_renew(sfl->fl, freq->flr_linger, freq->flr_expires); rcu_read_unlock(); return err; } } rcu_read_unlock(); if (freq->flr_share == IPV6_FL_S_NONE && ns_capable(net->user_ns, CAP_NET_ADMIN)) { struct ip6_flowlabel *fl = fl_lookup(net, freq->flr_label); if (fl) { err = fl6_renew(fl, freq->flr_linger, freq->flr_expires); fl_release(fl); return err; } } return -ESRCH; } static int ipv6_flowlabel_get(struct sock *sk, struct in6_flowlabel_req *freq, sockptr_t optval, int optlen) { struct ipv6_fl_socklist *sfl, *sfl1 = NULL; struct ip6_flowlabel *fl, *fl1 = NULL; struct ipv6_pinfo *np = inet6_sk(sk); struct net *net = sock_net(sk); int err; if (freq->flr_flags & IPV6_FL_F_REFLECT) { if (net->ipv6.sysctl.flowlabel_consistency) { net_info_ratelimited("Cannot set IPV6_FL_F_REFLECT if the flowlabel_consistency sysctl is enabled\n"); return -EPERM; } if (sk->sk_protocol != IPPROTO_TCP) return -ENOPROTOOPT; inet6_set_bit(REPFLOW, sk); return 0; } if (freq->flr_label & ~IPV6_FLOWLABEL_MASK) return -EINVAL; if (net->ipv6.sysctl.flowlabel_state_ranges &&
(freq->flr_label & IPV6_FLOWLABEL_STATELESS_FLAG)) return -ERANGE; fl = fl_create(net, sk, freq, optval, optlen, &err); if (!fl) return err; sfl1 = kmalloc(sizeof(*sfl1), GFP_KERNEL); if (freq->flr_label) { err = -EEXIST; rcu_read_lock(); for_each_sk_fl_rcu(np, sfl) { if (sfl->fl->label == freq->flr_label) { if (freq->flr_flags & IPV6_FL_F_EXCL) { rcu_read_unlock(); goto done; } fl1 = sfl->fl; if (!atomic_inc_not_zero(&fl1->users)) fl1 = NULL; break; } } rcu_read_unlock(); if (!fl1) fl1 = fl_lookup(net, freq->flr_label); if (fl1) { recheck: err = -EEXIST; if (freq->flr_flags&IPV6_FL_F_EXCL) goto release; err = -EPERM; if (fl1->share == IPV6_FL_S_EXCL || fl1->share != fl->share || ((fl1->share == IPV6_FL_S_PROCESS) && (fl1->owner.pid != fl->owner.pid)) || ((fl1->share == IPV6_FL_S_USER) && !uid_eq(fl1->owner.uid, fl->owner.uid))) goto release; err = -ENOMEM; if (!sfl1) goto release; if (fl->linger > fl1->linger) fl1->linger = fl->linger; if ((long)(fl->expires - fl1->expires) > 0) fl1->expires = fl->expires; fl_link(np, sfl1, fl1); fl_free(fl); return 0; release: fl_release(fl1); goto done; } } err = -ENOENT; if (!(freq->flr_flags & IPV6_FL_F_CREATE)) goto done; err = -ENOMEM; if (!sfl1) goto done; err = mem_check(sk); if (err != 0) goto done; fl1 = fl_intern(net, fl, freq->flr_label); if (fl1) goto recheck; if (!freq->flr_label) { size_t offset = offsetof(struct in6_flowlabel_req, flr_label); if (copy_to_sockptr_offset(optval, offset, &fl->label, sizeof(fl->label))) { /* Intentionally ignore fault. */ } } fl_link(np, sfl1, fl); return 0; done: fl_free(fl); kfree(sfl1); return err; } int ipv6_flowlabel_opt(struct sock *sk, sockptr_t optval, int optlen) { struct in6_flowlabel_req freq; if (optlen < sizeof(freq)) return -EINVAL; if (copy_from_sockptr(&freq, optval, sizeof(freq))) return -EFAULT; switch (freq.flr_action) { case IPV6_FL_A_PUT: return ipv6_flowlabel_put(sk, &freq); case IPV6_FL_A_RENEW: return ipv6_flowlabel_renew(sk, &freq); case IPV6_FL_A_GET: return ipv6_flowlabel_get(sk, &freq, optval, optlen); default: return -EINVAL; } } #ifdef CONFIG_PROC_FS struct ip6fl_iter_state { struct seq_net_private p; struct pid_namespace *pid_ns; int bucket; }; #define ip6fl_seq_private(seq) ((struct ip6fl_iter_state *)(seq)->private) static struct ip6_flowlabel *ip6fl_get_first(struct seq_file *seq) { struct ip6_flowlabel *fl = NULL; struct ip6fl_iter_state *state = ip6fl_seq_private(seq); struct net *net = seq_file_net(seq); for (state->bucket = 0; state->bucket <= FL_HASH_MASK; ++state->bucket) { for_each_fl_rcu(state->bucket, fl) { if (net_eq(fl->fl_net, net)) goto out; } } fl = NULL; out: return fl; } static struct ip6_flowlabel *ip6fl_get_next(struct seq_file *seq, struct ip6_flowlabel *fl) { struct ip6fl_iter_state *state = ip6fl_seq_private(seq); struct net *net = seq_file_net(seq); for_each_fl_continue_rcu(fl) { if (net_eq(fl->fl_net, net)) goto out; } try_again: if (++state->bucket <= FL_HASH_MASK) { for_each_fl_rcu(state->bucket, fl) { if (net_eq(fl->fl_net, net)) goto out; } goto try_again; } fl = NULL; out: return fl; } static struct ip6_flowlabel *ip6fl_get_idx(struct seq_file *seq, loff_t pos) { struct ip6_flowlabel *fl = ip6fl_get_first(seq); if (fl) while (pos && (fl = ip6fl_get_next(seq, fl)) != NULL) --pos; return pos ? NULL : fl; } static void *ip6fl_seq_start(struct seq_file *seq, loff_t *pos) __acquires(RCU) { struct ip6fl_iter_state *state = ip6fl_seq_private(seq); state->pid_ns = proc_pid_ns(file_inode(seq->file)->i_sb); rcu_read_lock(); return *pos ? 
ip6fl_get_idx(seq, *pos - 1) : SEQ_START_TOKEN; } static void *ip6fl_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct ip6_flowlabel *fl; if (v == SEQ_START_TOKEN) fl = ip6fl_get_first(seq); else fl = ip6fl_get_next(seq, v); ++*pos; return fl; } static void ip6fl_seq_stop(struct seq_file *seq, void *v) __releases(RCU) { rcu_read_unlock(); } static int ip6fl_seq_show(struct seq_file *seq, void *v) { struct ip6fl_iter_state *state = ip6fl_seq_private(seq); if (v == SEQ_START_TOKEN) { seq_puts(seq, "Label S Owner Users Linger Expires Dst Opt\n"); } else { struct ip6_flowlabel *fl = v; seq_printf(seq, "%05X %-1d %-6d %-6d %-6ld %-8ld %pi6 %-4d\n", (unsigned int)ntohl(fl->label), fl->share, ((fl->share == IPV6_FL_S_PROCESS) ? pid_nr_ns(fl->owner.pid, state->pid_ns) : ((fl->share == IPV6_FL_S_USER) ? from_kuid_munged(seq_user_ns(seq), fl->owner.uid) : 0)), atomic_read(&fl->users), fl->linger/HZ, (long)(fl->expires - jiffies)/HZ, &fl->dst, fl->opt ? fl->opt->opt_nflen : 0); } return 0; } static const struct seq_operations ip6fl_seq_ops = { .start = ip6fl_seq_start, .next = ip6fl_seq_next, .stop = ip6fl_seq_stop, .show = ip6fl_seq_show, }; static int __net_init ip6_flowlabel_proc_init(struct net *net) { if (!proc_create_net("ip6_flowlabel", 0444, net->proc_net, &ip6fl_seq_ops, sizeof(struct ip6fl_iter_state))) return -ENOMEM; return 0; } static void __net_exit ip6_flowlabel_proc_fini(struct net *net) { remove_proc_entry("ip6_flowlabel", net->proc_net); } #else static inline int ip6_flowlabel_proc_init(struct net *net) { return 0; } static inline void ip6_flowlabel_proc_fini(struct net *net) { } #endif static void __net_exit ip6_flowlabel_net_exit(struct net *net) { ip6_fl_purge(net); ip6_flowlabel_proc_fini(net); } static struct pernet_operations ip6_flowlabel_net_ops = { .init = ip6_flowlabel_proc_init, .exit = ip6_flowlabel_net_exit, }; int ip6_flowlabel_init(void) { return register_pernet_subsys(&ip6_flowlabel_net_ops); } void ip6_flowlabel_cleanup(void) { static_key_deferred_flush(&ipv6_flowlabel_exclusive); timer_delete(&ip6_fl_gc_timer); unregister_pernet_subsys(&ip6_flowlabel_net_ops); }
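Flow labels above are bucketed with FL_HASH(): the label is converted to host order and masked to its low 8 bits, indexing the 256-entry fl_ht[] table. A stand-alone sketch of that hash (the DEMO_* names are illustrative):

/*
 * Illustration only: mirrors #define FL_HASH(l) (ntohl(l)&FL_HASH_MASK)
 * from the file above.
 */
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

#define DEMO_FL_HASH_MASK 255
#define DEMO_FL_HASH(l) (ntohl(l) & DEMO_FL_HASH_MASK)

int main(void)
{
	uint32_t label = htonl(0x12345);	/* 20-bit label, network order */

	printf("bucket %u\n", DEMO_FL_HASH(label));	/* 0x45 == 69 */
	return 0;
}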
/* SPDX-License-Identifier: GPL-2.0-only */ /* * AppArmor security module * * This file contains AppArmor policy definitions. * * Copyright (C) 1998-2008 Novell/SUSE * Copyright 2009-2010 Canonical Ltd. */ #ifndef __AA_POLICY_H #define __AA_POLICY_H #include <linux/capability.h> #include <linux/cred.h> #include <linux/kref.h> #include <linux/rhashtable.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/socket.h> #include "apparmor.h" #include "audit.h" #include "capability.h" #include "domain.h" #include "file.h" #include "lib.h" #include "label.h" #include "net.h" #include "perms.h" #include "resource.h" struct aa_ns; extern int unprivileged_userns_apparmor_policy; extern int aa_unprivileged_unconfined_restricted; extern const char *const aa_profile_mode_names[]; #define APPARMOR_MODE_NAMES_MAX_INDEX 4 #define PROFILE_MODE(_profile, _mode) \ ((aa_g_profile_mode == (_mode)) || \ ((_profile)->mode == (_mode))) #define COMPLAIN_MODE(_profile) PROFILE_MODE((_profile), APPARMOR_COMPLAIN) #define USER_MODE(_profile) PROFILE_MODE((_profile), APPARMOR_USER) #define KILL_MODE(_profile) PROFILE_MODE((_profile), APPARMOR_KILL) #define PROFILE_IS_HAT(_profile) ((_profile)->label.flags & FLAG_HAT) #define CHECK_DEBUG1(_profile) ((_profile)->label.flags & FLAG_DEBUG1) #define CHECK_DEBUG2(_profile) ((_profile)->label.flags & FLAG_DEBUG2) #define profile_is_stale(_profile) (label_is_stale(&(_profile)->label)) #define on_list_rcu(X) (!list_empty(X) && (X)->prev != LIST_POISON2) /* flags in the dfa accept2 table */ enum dfa_accept_flags { ACCEPT_FLAG_OWNER = 1, }; /* * FIXME: currently need a clean way to replace and remove profiles as a * set. It should be done at the namespace level. * Either, with a set of profiles loaded at the namespace level or via * a mark and remove marked interface.
*/ enum profile_mode { APPARMOR_ENFORCE, /* enforce access rules */ APPARMOR_COMPLAIN, /* allow and log access violations */ APPARMOR_KILL, /* kill task on access violation */ APPARMOR_UNCONFINED, /* profile set to unconfined */ APPARMOR_USER, /* modified complain mode to userspace */ }; /* struct aa_policydb - match engine for a policy * @count: refcount for the pdb * @dfa: dfa pattern match * @perms: table of permissions * @trans: table of strings, indexed by x * @start: set of start states for the different classes of data */ struct aa_policydb { struct kref count; struct aa_dfa *dfa; struct { struct aa_perms *perms; u32 size; }; struct aa_str_table trans; aa_state_t start[AA_CLASS_LAST + 1]; }; extern struct aa_policydb *nullpdb; struct aa_policydb *aa_alloc_pdb(gfp_t gfp); void aa_pdb_free_kref(struct kref *kref); /** * aa_get_pdb - increment refcount on @pdb * @pdb: policydb (MAYBE NULL) * * Returns: pointer to @pdb, or NULL if @pdb is NULL * Requires: @pdb must be held with valid refcount when called */ static inline struct aa_policydb *aa_get_pdb(struct aa_policydb *pdb) { if (pdb) kref_get(&(pdb->count)); return pdb; } /** * aa_put_pdb - put a pdb refcount * @pdb: pdb to put refcount (MAYBE NULL) * * Requires: if @pdb != NULL that a valid refcount be held */ static inline void aa_put_pdb(struct aa_policydb *pdb) { if (pdb) kref_put(&pdb->count, aa_pdb_free_kref); } /* lookup perm that doesn't have an object conditional */ static inline struct aa_perms *aa_lookup_perms(struct aa_policydb *policy, aa_state_t state) { unsigned int index = ACCEPT_TABLE(policy->dfa)[state]; if (!(policy->perms)) return &default_perms; return &(policy->perms[index]); } /* struct aa_data - generic data structure * @key: name for retrieving this data * @size: size of data in bytes * @data: binary data * @head: reserved for rhashtable */ struct aa_data { char *key; u32 size; char *data; struct rhash_head head; }; /* struct aa_ruleset - data covering mediation rules * @size: the memory consumed by this ruleset * @policy: general match rules governing policy * @file: The set of rules governing basic file access and domain transitions * @caps: capabilities for the profile * @rlimits: rlimits for the profile * @secmark_count: number of secmark entries * @secmark: secmark label match info */ struct aa_ruleset { int size; /* TODO: merge policy and file */ struct aa_policydb *policy; struct aa_policydb *file; struct aa_caps caps; struct aa_rlimit rlimits; int secmark_count; struct aa_secmark *secmark; }; /* struct aa_attachment - data and rules for a profile's attachment * @xmatch_str: human readable attachment string * @xmatch: optional extended matching for unconfined executable names * @xmatch_len: xmatch prefix len, used to determine xmatch priority * @xattr_count: number of xattrs in table * @xattrs: table of xattrs */ struct aa_attachment { const char *xmatch_str; struct aa_policydb *xmatch; unsigned int xmatch_len; int xattr_count; char **xattrs; }; /* struct aa_profile - basic confinement data * @base: base components of the profile (name, refcount, lists, lock ...)
* @parent: parent of profile * @ns: namespace the profile is in * @rename: optional profile name that this profile renamed * * @audit: the auditing mode of the profile * @mode: the enforcement mode of the profile * @path_flags: flags controlling path generation behavior * @signal: the signal that should be used when kill is used * @disconnected: what to prepend if attach_disconnected is specified * @attach: attachment rules for the profile * @rules: rules to be enforced * * @learning_cache: the accesses learned in complain mode * @rawdata: rawdata of the loaded profile policy * @hash: cryptographic hash of the profile * @dirname: name of the profile dir in apparmorfs * @dents: set of dentries for the profile's file entries in apparmorfs * @data: hashtable for free-form policy aa_data * @label: label this profile is an extension of * @rules: label with the rule vec on its end * * The AppArmor profile contains the basic confinement data. Each profile * has a name, and exists in a namespace. The @name and @exec_match are * used to determine profile attachment against unconfined tasks. All other * attachments are determined by profile X transition rules. * * Profiles have a hierarchy where hats and child profiles keep * a reference to their parent. * * Profile names can not begin with a : and can not contain the \0 * character. If a profile name begins with / it will be considered when * determining profile attachment on "unconfined" tasks. */ struct aa_profile { struct aa_policy base; struct aa_profile __rcu *parent; struct aa_ns *ns; const char *rename; enum audit_mode audit; long mode; u32 path_flags; int signal; const char *disconnected; struct aa_attachment attach; struct aa_loaddata *rawdata; unsigned char *hash; char *dirname; struct dentry *dents[AAFS_PROF_SIZEOF]; struct rhashtable *data; int n_rules; /* special - variable length must be last entry in profile */ struct aa_label label; }; extern enum profile_mode aa_g_profile_mode; #define AA_MAY_LOAD_POLICY AA_MAY_APPEND #define AA_MAY_REPLACE_POLICY AA_MAY_WRITE #define AA_MAY_REMOVE_POLICY AA_MAY_DELETE #define profiles_ns(P) ((P)->ns) #define name_is_shared(A, B) ((A)->hname && (A)->hname == (B)->hname) struct aa_ruleset *aa_alloc_ruleset(gfp_t gfp); struct aa_profile *aa_alloc_profile(const char *name, struct aa_proxy *proxy, gfp_t gfp); struct aa_profile *aa_alloc_null(struct aa_profile *parent, const char *name, gfp_t gfp); struct aa_profile *aa_new_learning_profile(struct aa_profile *parent, bool hat, const char *base, gfp_t gfp); void aa_free_profile(struct aa_profile *profile); struct aa_profile *aa_find_child(struct aa_profile *parent, const char *name); struct aa_profile *aa_lookupn_profile(struct aa_ns *ns, const char *hname, size_t n); struct aa_profile *aa_fqlookupn_profile(struct aa_label *base, const char *fqname, size_t n); ssize_t aa_replace_profiles(struct aa_ns *view, struct aa_label *label, u32 mask, struct aa_loaddata *udata); ssize_t aa_remove_profiles(struct aa_ns *view, struct aa_label *label, char *name, size_t size); void __aa_profile_list_release(struct list_head *head); #define profile_unconfined(X) ((X)->mode == APPARMOR_UNCONFINED) /** * aa_get_newest_profile - simple wrapper fn to wrap the label version * @p: profile (NOT NULL) * * Returns: refcount to the newest version of the profile (maybe @p) * * Requires: @p must be held with a valid refcount */ static inline struct aa_profile *aa_get_newest_profile(struct aa_profile *p) { return
labels_profile(aa_get_newest_label(&p->label)); } static inline aa_state_t RULE_MEDIATES(struct aa_ruleset *rules, unsigned char class) { if (class <= AA_CLASS_LAST) return rules->policy->start[class]; else return aa_dfa_match_len(rules->policy->dfa, rules->policy->start[0], &class, 1); } static inline aa_state_t RULE_MEDIATES_v9NET(struct aa_ruleset *rules) { return RULE_MEDIATES(rules, AA_CLASS_NETV9); } static inline aa_state_t RULE_MEDIATES_NET(struct aa_ruleset *rules) { /* can not use RULE_MEDIATE_v9AF here, because an AF match failure * can not be distinguished from a class match failure, and we only * fall back to checking the older class on a class match failure */ aa_state_t state = RULE_MEDIATES(rules, AA_CLASS_NETV9); /* fall back and check v7/8 if v9 is NOT mediated */ if (!state) state = RULE_MEDIATES(rules, AA_CLASS_NET); return state; } void aa_compute_profile_mediates(struct aa_profile *profile); static inline bool profile_mediates(struct aa_profile *profile, unsigned char class) { return label_mediates(&profile->label, class); } static inline bool profile_mediates_safe(struct aa_profile *profile, unsigned char class) { return label_mediates_safe(&profile->label, class); } /** * aa_get_profile - increment refcount on profile @p * @p: profile (MAYBE NULL) * * Returns: pointer to @p, or NULL if @p is NULL * Requires: @p must be held with valid refcount when called */ static inline struct aa_profile *aa_get_profile(struct aa_profile *p) { if (p) kref_get(&(p->label.count)); return p; } /** * aa_get_profile_not0 - increment refcount on profile @p found via lookup * @p: profile (MAYBE NULL) * * Returns: pointer to @p, or NULL if @p is NULL or its refcount is 0 * Requires: @p must be held with valid refcount when called */ static inline struct aa_profile *aa_get_profile_not0(struct aa_profile *p) { if (p && kref_get_unless_zero(&p->label.count)) return p; return NULL; } /** * aa_get_profile_rcu - increment the refcount of a profile that can be replaced * @p: pointer to profile that can be replaced (NOT NULL) * * Returns: pointer to a refcounted profile, * else NULL if no profile */ static inline struct aa_profile *aa_get_profile_rcu(struct aa_profile __rcu **p) { struct aa_profile *c; rcu_read_lock(); do { c = rcu_dereference(*p); } while (c && !kref_get_unless_zero(&c->label.count)); rcu_read_unlock(); return c; } /** * aa_put_profile - decrement refcount on profile @p * @p: profile (MAYBE NULL) */ static inline void aa_put_profile(struct aa_profile *p) { if (p) kref_put(&p->label.count, aa_label_kref); } static inline int AUDIT_MODE(struct aa_profile *profile) { if (aa_g_audit != AUDIT_NORMAL) return aa_g_audit; return profile->audit; } bool aa_policy_view_capable(const struct cred *subj_cred, struct aa_label *label, struct aa_ns *ns); bool aa_policy_admin_capable(const struct cred *subj_cred, struct aa_label *label, struct aa_ns *ns); int aa_may_manage_policy(const struct cred *subj_cred, struct aa_label *label, struct aa_ns *ns, u32 mask); bool aa_current_policy_view_capable(struct aa_ns *ns); bool aa_current_policy_admin_capable(struct aa_ns *ns); #endif /* __AA_POLICY_H */
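Since the get/put discipline above is easy to misuse, here is a minimal hypothetical sketch (example_walk_parent() is not a real kernel function) of how aa_get_profile_rcu() and aa_put_profile() are meant to be paired when reading a replaceable __rcu profile pointer:

/* Hypothetical usage sketch (not part of this header): taking a stable
 * reference to a profile's parent, which may be replaced concurrently. */
static void example_walk_parent(struct aa_profile *p)
{
	struct aa_profile *parent;

	/* aa_get_profile_rcu() rereads the __rcu pointer until
	 * kref_get_unless_zero() succeeds, so a profile that is mid-release
	 * can never be returned with a zero refcount. */
	parent = aa_get_profile_rcu(&p->parent);
	if (!parent)
		return;

	/* ... use parent; it cannot be freed while the ref is held ... */

	aa_put_profile(parent);	/* drops label.count, may free via aa_label_kref */
}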
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (c) 2016 Mellanox Technologies. All rights reserved. * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com> */ #include <net/genetlink.h> #include <net/sock.h> #include <trace/events/devlink.h> #include "devl_internal.h" struct devlink_fmsg_item { struct list_head list; int attrtype; u8 nla_type; u16 len; int value[]; }; struct devlink_fmsg { struct list_head item_list; int err; /* first error encountered on some devlink_fmsg_XXX() call */ bool putting_binary; /* This flag forces enclosing of binary data * in array brackets.
It forces using * of designated API: * devlink_fmsg_binary_pair_nest_start() * devlink_fmsg_binary_pair_nest_end() */ }; static struct devlink_fmsg *devlink_fmsg_alloc(void) { struct devlink_fmsg *fmsg; fmsg = kzalloc(sizeof(*fmsg), GFP_KERNEL); if (!fmsg) return NULL; INIT_LIST_HEAD(&fmsg->item_list); return fmsg; } static void devlink_fmsg_free(struct devlink_fmsg *fmsg) { struct devlink_fmsg_item *item, *tmp; list_for_each_entry_safe(item, tmp, &fmsg->item_list, list) { list_del(&item->list); kfree(item); } kfree(fmsg); } struct devlink_health_reporter { struct list_head list; void *priv; const struct devlink_health_reporter_ops *ops; struct devlink *devlink; struct devlink_port *devlink_port; struct devlink_fmsg *dump_fmsg; u64 graceful_period; bool auto_recover; bool auto_dump; u8 health_state; u64 dump_ts; u64 dump_real_ts; u64 error_count; u64 recovery_count; u64 last_recovery_ts; }; void * devlink_health_reporter_priv(struct devlink_health_reporter *reporter) { return reporter->priv; } EXPORT_SYMBOL_GPL(devlink_health_reporter_priv); static struct devlink_health_reporter * __devlink_health_reporter_find_by_name(struct list_head *reporter_list, const char *reporter_name) { struct devlink_health_reporter *reporter; list_for_each_entry(reporter, reporter_list, list) if (!strcmp(reporter->ops->name, reporter_name)) return reporter; return NULL; } static struct devlink_health_reporter * devlink_health_reporter_find_by_name(struct devlink *devlink, const char *reporter_name) { return __devlink_health_reporter_find_by_name(&devlink->reporter_list, reporter_name); } static struct devlink_health_reporter * devlink_port_health_reporter_find_by_name(struct devlink_port *devlink_port, const char *reporter_name) { return __devlink_health_reporter_find_by_name(&devlink_port->reporter_list, reporter_name); } static struct devlink_health_reporter * __devlink_health_reporter_create(struct devlink *devlink, const struct devlink_health_reporter_ops *ops, u64 graceful_period, void *priv) { struct devlink_health_reporter *reporter; if (WARN_ON(graceful_period && !ops->recover)) return ERR_PTR(-EINVAL); reporter = kzalloc(sizeof(*reporter), GFP_KERNEL); if (!reporter) return ERR_PTR(-ENOMEM); reporter->priv = priv; reporter->ops = ops; reporter->devlink = devlink; reporter->graceful_period = graceful_period; reporter->auto_recover = !!ops->recover; reporter->auto_dump = !!ops->dump; return reporter; } /** * devl_port_health_reporter_create() - create devlink health reporter for * specified port instance * * @port: devlink_port to which health reports will relate * @ops: devlink health reporter ops * @graceful_period: min time (in msec) between recovery attempts * @priv: driver priv pointer */ struct devlink_health_reporter * devl_port_health_reporter_create(struct devlink_port *port, const struct devlink_health_reporter_ops *ops, u64 graceful_period, void *priv) { struct devlink_health_reporter *reporter; devl_assert_locked(port->devlink); if (__devlink_health_reporter_find_by_name(&port->reporter_list, ops->name)) return ERR_PTR(-EEXIST); reporter = __devlink_health_reporter_create(port->devlink, ops, graceful_period, priv); if (IS_ERR(reporter)) return reporter; reporter->devlink_port = port; list_add_tail(&reporter->list, &port->reporter_list); return reporter; } EXPORT_SYMBOL_GPL(devl_port_health_reporter_create); struct devlink_health_reporter * devlink_port_health_reporter_create(struct devlink_port *port, const struct devlink_health_reporter_ops *ops, u64 graceful_period, void *priv) { struct 
devlink_health_reporter *reporter; struct devlink *devlink = port->devlink; devl_lock(devlink); reporter = devl_port_health_reporter_create(port, ops, graceful_period, priv); devl_unlock(devlink); return reporter; } EXPORT_SYMBOL_GPL(devlink_port_health_reporter_create); /** * devl_health_reporter_create - create devlink health reporter * * @devlink: devlink instance which the health reports will relate * @ops: devlink health reporter ops * @graceful_period: min time (in msec) between recovery attempts * @priv: driver priv pointer */ struct devlink_health_reporter * devl_health_reporter_create(struct devlink *devlink, const struct devlink_health_reporter_ops *ops, u64 graceful_period, void *priv) { struct devlink_health_reporter *reporter; devl_assert_locked(devlink); if (devlink_health_reporter_find_by_name(devlink, ops->name)) return ERR_PTR(-EEXIST); reporter = __devlink_health_reporter_create(devlink, ops, graceful_period, priv); if (IS_ERR(reporter)) return reporter; list_add_tail(&reporter->list, &devlink->reporter_list); return reporter; } EXPORT_SYMBOL_GPL(devl_health_reporter_create); struct devlink_health_reporter * devlink_health_reporter_create(struct devlink *devlink, const struct devlink_health_reporter_ops *ops, u64 graceful_period, void *priv) { struct devlink_health_reporter *reporter; devl_lock(devlink); reporter = devl_health_reporter_create(devlink, ops, graceful_period, priv); devl_unlock(devlink); return reporter; } EXPORT_SYMBOL_GPL(devlink_health_reporter_create); static void devlink_health_reporter_free(struct devlink_health_reporter *reporter) { if (reporter->dump_fmsg) devlink_fmsg_free(reporter->dump_fmsg); kfree(reporter); } /** * devl_health_reporter_destroy() - destroy devlink health reporter * * @reporter: devlink health reporter to destroy */ void devl_health_reporter_destroy(struct devlink_health_reporter *reporter) { devl_assert_locked(reporter->devlink); list_del(&reporter->list); devlink_health_reporter_free(reporter); } EXPORT_SYMBOL_GPL(devl_health_reporter_destroy); void devlink_health_reporter_destroy(struct devlink_health_reporter *reporter) { struct devlink *devlink = reporter->devlink; devl_lock(devlink); devl_health_reporter_destroy(reporter); devl_unlock(devlink); } EXPORT_SYMBOL_GPL(devlink_health_reporter_destroy); static int devlink_nl_health_reporter_fill(struct sk_buff *msg, struct devlink_health_reporter *reporter, enum devlink_command cmd, u32 portid, u32 seq, int flags) { struct devlink *devlink = reporter->devlink; struct nlattr *reporter_attr; void *hdr; hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd); if (!hdr) return -EMSGSIZE; if (devlink_nl_put_handle(msg, devlink)) goto genlmsg_cancel; if (reporter->devlink_port) { if (nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX, reporter->devlink_port->index)) goto genlmsg_cancel; } reporter_attr = nla_nest_start_noflag(msg, DEVLINK_ATTR_HEALTH_REPORTER); if (!reporter_attr) goto genlmsg_cancel; if (nla_put_string(msg, DEVLINK_ATTR_HEALTH_REPORTER_NAME, reporter->ops->name)) goto reporter_nest_cancel; if (nla_put_u8(msg, DEVLINK_ATTR_HEALTH_REPORTER_STATE, reporter->health_state)) goto reporter_nest_cancel; if (devlink_nl_put_u64(msg, DEVLINK_ATTR_HEALTH_REPORTER_ERR_COUNT, reporter->error_count)) goto reporter_nest_cancel; if (devlink_nl_put_u64(msg, DEVLINK_ATTR_HEALTH_REPORTER_RECOVER_COUNT, reporter->recovery_count)) goto reporter_nest_cancel; if (reporter->ops->recover && devlink_nl_put_u64(msg, DEVLINK_ATTR_HEALTH_REPORTER_GRACEFUL_PERIOD, reporter->graceful_period)) goto 
reporter_nest_cancel; if (reporter->ops->recover && nla_put_u8(msg, DEVLINK_ATTR_HEALTH_REPORTER_AUTO_RECOVER, reporter->auto_recover)) goto reporter_nest_cancel; if (reporter->dump_fmsg && devlink_nl_put_u64(msg, DEVLINK_ATTR_HEALTH_REPORTER_DUMP_TS, jiffies_to_msecs(reporter->dump_ts))) goto reporter_nest_cancel; if (reporter->dump_fmsg && devlink_nl_put_u64(msg, DEVLINK_ATTR_HEALTH_REPORTER_DUMP_TS_NS, reporter->dump_real_ts)) goto reporter_nest_cancel; if (reporter->ops->dump && nla_put_u8(msg, DEVLINK_ATTR_HEALTH_REPORTER_AUTO_DUMP, reporter->auto_dump)) goto reporter_nest_cancel; nla_nest_end(msg, reporter_attr); genlmsg_end(msg, hdr); return 0; reporter_nest_cancel: nla_nest_cancel(msg, reporter_attr); genlmsg_cancel: genlmsg_cancel(msg, hdr); return -EMSGSIZE; } static struct devlink_health_reporter * devlink_health_reporter_get_from_attrs(struct devlink *devlink, struct nlattr **attrs) { struct devlink_port *devlink_port; char *reporter_name; if (!attrs[DEVLINK_ATTR_HEALTH_REPORTER_NAME]) return NULL; reporter_name = nla_data(attrs[DEVLINK_ATTR_HEALTH_REPORTER_NAME]); devlink_port = devlink_port_get_from_attrs(devlink, attrs); if (IS_ERR(devlink_port)) return devlink_health_reporter_find_by_name(devlink, reporter_name); else return devlink_port_health_reporter_find_by_name(devlink_port, reporter_name); } static struct devlink_health_reporter * devlink_health_reporter_get_from_info(struct devlink *devlink, struct genl_info *info) { return devlink_health_reporter_get_from_attrs(devlink, info->attrs); } int devlink_nl_health_reporter_get_doit(struct sk_buff *skb, struct genl_info *info) { struct devlink *devlink = info->user_ptr[0]; struct devlink_health_reporter *reporter; struct sk_buff *msg; int err; reporter = devlink_health_reporter_get_from_info(devlink, info); if (!reporter) return -EINVAL; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; err = devlink_nl_health_reporter_fill(msg, reporter, DEVLINK_CMD_HEALTH_REPORTER_GET, info->snd_portid, info->snd_seq, 0); if (err) { nlmsg_free(msg); return err; } return genlmsg_reply(msg, info); } static int devlink_nl_health_reporter_get_dump_one(struct sk_buff *msg, struct devlink *devlink, struct netlink_callback *cb, int flags) { struct devlink_nl_dump_state *state = devlink_dump_state(cb); const struct genl_info *info = genl_info_dump(cb); struct devlink_health_reporter *reporter; unsigned long port_index_end = ULONG_MAX; struct nlattr **attrs = info->attrs; unsigned long port_index_start = 0; struct devlink_port *port; unsigned long port_index; int idx = 0; int err; if (attrs && attrs[DEVLINK_ATTR_PORT_INDEX]) { port_index_start = nla_get_u32(attrs[DEVLINK_ATTR_PORT_INDEX]); port_index_end = port_index_start; flags |= NLM_F_DUMP_FILTERED; goto per_port_dump; } list_for_each_entry(reporter, &devlink->reporter_list, list) { if (idx < state->idx) { idx++; continue; } err = devlink_nl_health_reporter_fill(msg, reporter, DEVLINK_CMD_HEALTH_REPORTER_GET, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, flags); if (err) { state->idx = idx; return err; } idx++; } per_port_dump: xa_for_each_range(&devlink->ports, port_index, port, port_index_start, port_index_end) { list_for_each_entry(reporter, &port->reporter_list, list) { if (idx < state->idx) { idx++; continue; } err = devlink_nl_health_reporter_fill(msg, reporter, DEVLINK_CMD_HEALTH_REPORTER_GET, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, flags); if (err) { state->idx = idx; return err; } idx++; } } return 0; } int 
devlink_nl_health_reporter_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb) { return devlink_nl_dumpit(skb, cb, devlink_nl_health_reporter_get_dump_one); } int devlink_nl_health_reporter_set_doit(struct sk_buff *skb, struct genl_info *info) { struct devlink *devlink = info->user_ptr[0]; struct devlink_health_reporter *reporter; reporter = devlink_health_reporter_get_from_info(devlink, info); if (!reporter) return -EINVAL; if (!reporter->ops->recover && (info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_GRACEFUL_PERIOD] || info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_AUTO_RECOVER])) return -EOPNOTSUPP; if (!reporter->ops->dump && info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_AUTO_DUMP]) return -EOPNOTSUPP; if (info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_GRACEFUL_PERIOD]) reporter->graceful_period = nla_get_u64(info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_GRACEFUL_PERIOD]); if (info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_AUTO_RECOVER]) reporter->auto_recover = nla_get_u8(info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_AUTO_RECOVER]); if (info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_AUTO_DUMP]) reporter->auto_dump = nla_get_u8(info->attrs[DEVLINK_ATTR_HEALTH_REPORTER_AUTO_DUMP]); return 0; } static void devlink_recover_notify(struct devlink_health_reporter *reporter, enum devlink_command cmd) { struct devlink *devlink = reporter->devlink; struct devlink_obj_desc desc; struct sk_buff *msg; int err; WARN_ON(cmd != DEVLINK_CMD_HEALTH_REPORTER_RECOVER); ASSERT_DEVLINK_REGISTERED(devlink); if (!devlink_nl_notify_need(devlink)) return; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return; err = devlink_nl_health_reporter_fill(msg, reporter, cmd, 0, 0, 0); if (err) { nlmsg_free(msg); return; } devlink_nl_obj_desc_init(&desc, devlink); if (reporter->devlink_port) devlink_nl_obj_desc_port_set(&desc, reporter->devlink_port); devlink_nl_notify_send_desc(devlink, msg, &desc); } void devlink_health_reporter_recovery_done(struct devlink_health_reporter *reporter) { reporter->recovery_count++; reporter->last_recovery_ts = jiffies; } EXPORT_SYMBOL_GPL(devlink_health_reporter_recovery_done); static int devlink_health_reporter_recover(struct devlink_health_reporter *reporter, void *priv_ctx, struct netlink_ext_ack *extack) { int err; if (reporter->health_state == DEVLINK_HEALTH_REPORTER_STATE_HEALTHY) return 0; if (!reporter->ops->recover) return -EOPNOTSUPP; err = reporter->ops->recover(reporter, priv_ctx, extack); if (err) return err; devlink_health_reporter_recovery_done(reporter); reporter->health_state = DEVLINK_HEALTH_REPORTER_STATE_HEALTHY; devlink_recover_notify(reporter, DEVLINK_CMD_HEALTH_REPORTER_RECOVER); return 0; } static void devlink_health_dump_clear(struct devlink_health_reporter *reporter) { if (!reporter->dump_fmsg) return; devlink_fmsg_free(reporter->dump_fmsg); reporter->dump_fmsg = NULL; } static int devlink_health_do_dump(struct devlink_health_reporter *reporter, void *priv_ctx, struct netlink_ext_ack *extack) { int err; if (!reporter->ops->dump) return 0; if (reporter->dump_fmsg) return 0; reporter->dump_fmsg = devlink_fmsg_alloc(); if (!reporter->dump_fmsg) return -ENOMEM; devlink_fmsg_obj_nest_start(reporter->dump_fmsg); err = reporter->ops->dump(reporter, reporter->dump_fmsg, priv_ctx, extack); if (err) goto dump_err; devlink_fmsg_obj_nest_end(reporter->dump_fmsg); err = reporter->dump_fmsg->err; if (err) goto dump_err; reporter->dump_ts = jiffies; reporter->dump_real_ts = ktime_get_real_ns(); return 0; dump_err: devlink_health_dump_clear(reporter); return err; } int devlink_health_report(struct 
devlink_health_reporter *reporter, const char *msg, void *priv_ctx) { enum devlink_health_reporter_state prev_health_state; struct devlink *devlink = reporter->devlink; unsigned long recover_ts_threshold; int ret; /* write a log message of the current error */ WARN_ON(!msg); trace_devlink_health_report(devlink, reporter->ops->name, msg); reporter->error_count++; prev_health_state = reporter->health_state; reporter->health_state = DEVLINK_HEALTH_REPORTER_STATE_ERROR; devlink_recover_notify(reporter, DEVLINK_CMD_HEALTH_REPORTER_RECOVER); /* abort if the previous error wasn't recovered */ recover_ts_threshold = reporter->last_recovery_ts + msecs_to_jiffies(reporter->graceful_period); if (reporter->auto_recover && (prev_health_state != DEVLINK_HEALTH_REPORTER_STATE_HEALTHY || (reporter->last_recovery_ts && reporter->recovery_count && time_is_after_jiffies(recover_ts_threshold)))) { trace_devlink_health_recover_aborted(devlink, reporter->ops->name, reporter->health_state, jiffies - reporter->last_recovery_ts); return -ECANCELED; } if (reporter->auto_dump) { devl_lock(devlink); /* store current dump of current error, for later analysis */ devlink_health_do_dump(reporter, priv_ctx, NULL); devl_unlock(devlink); } if (!reporter->auto_recover) return 0; devl_lock(devlink); ret = devlink_health_reporter_recover(reporter, priv_ctx, NULL); devl_unlock(devlink); return ret; } EXPORT_SYMBOL_GPL(devlink_health_report); void devlink_health_reporter_state_update(struct devlink_health_reporter *reporter, enum devlink_health_reporter_state state) { if (WARN_ON(state != DEVLINK_HEALTH_REPORTER_STATE_HEALTHY && state != DEVLINK_HEALTH_REPORTER_STATE_ERROR)) return; if (reporter->health_state == state) return; reporter->health_state = state; trace_devlink_health_reporter_state_update(reporter->devlink, reporter->ops->name, state); devlink_recover_notify(reporter, DEVLINK_CMD_HEALTH_REPORTER_RECOVER); } EXPORT_SYMBOL_GPL(devlink_health_reporter_state_update); int devlink_nl_health_reporter_recover_doit(struct sk_buff *skb, struct genl_info *info) { struct devlink *devlink = info->user_ptr[0]; struct devlink_health_reporter *reporter; reporter = devlink_health_reporter_get_from_info(devlink, info); if (!reporter) return -EINVAL; return devlink_health_reporter_recover(reporter, NULL, info->extack); } static void devlink_fmsg_err_if_binary(struct devlink_fmsg *fmsg) { if (!fmsg->err && fmsg->putting_binary) fmsg->err = -EINVAL; } static void devlink_fmsg_nest_common(struct devlink_fmsg *fmsg, int attrtype) { struct devlink_fmsg_item *item; if (fmsg->err) return; item = kzalloc(sizeof(*item), GFP_KERNEL); if (!item) { fmsg->err = -ENOMEM; return; } item->attrtype = attrtype; list_add_tail(&item->list, &fmsg->item_list); } void devlink_fmsg_obj_nest_start(struct devlink_fmsg *fmsg) { devlink_fmsg_err_if_binary(fmsg); devlink_fmsg_nest_common(fmsg, DEVLINK_ATTR_FMSG_OBJ_NEST_START); } EXPORT_SYMBOL_GPL(devlink_fmsg_obj_nest_start); static void devlink_fmsg_nest_end(struct devlink_fmsg *fmsg) { devlink_fmsg_err_if_binary(fmsg); devlink_fmsg_nest_common(fmsg, DEVLINK_ATTR_FMSG_NEST_END); } void devlink_fmsg_obj_nest_end(struct devlink_fmsg *fmsg) { devlink_fmsg_nest_end(fmsg); } EXPORT_SYMBOL_GPL(devlink_fmsg_obj_nest_end); #define DEVLINK_FMSG_MAX_SIZE (GENLMSG_DEFAULT_SIZE - GENL_HDRLEN - NLA_HDRLEN) static void devlink_fmsg_put_name(struct devlink_fmsg *fmsg, const char *name) { struct devlink_fmsg_item *item; devlink_fmsg_err_if_binary(fmsg); if (fmsg->err) return; if (strlen(name) + 1 > DEVLINK_FMSG_MAX_SIZE) { 
fmsg->err = -EMSGSIZE; return; } item = kzalloc(sizeof(*item) + strlen(name) + 1, GFP_KERNEL); if (!item) { fmsg->err = -ENOMEM; return; } item->nla_type = DEVLINK_VAR_ATTR_TYPE_NUL_STRING; item->len = strlen(name) + 1; item->attrtype = DEVLINK_ATTR_FMSG_OBJ_NAME; memcpy(&item->value, name, item->len); list_add_tail(&item->list, &fmsg->item_list); } void devlink_fmsg_pair_nest_start(struct devlink_fmsg *fmsg, const char *name) { devlink_fmsg_err_if_binary(fmsg); devlink_fmsg_nest_common(fmsg, DEVLINK_ATTR_FMSG_PAIR_NEST_START); devlink_fmsg_put_name(fmsg, name); } EXPORT_SYMBOL_GPL(devlink_fmsg_pair_nest_start); void devlink_fmsg_pair_nest_end(struct devlink_fmsg *fmsg) { devlink_fmsg_nest_end(fmsg); } EXPORT_SYMBOL_GPL(devlink_fmsg_pair_nest_end); void devlink_fmsg_arr_pair_nest_start(struct devlink_fmsg *fmsg, const char *name) { devlink_fmsg_pair_nest_start(fmsg, name); devlink_fmsg_nest_common(fmsg, DEVLINK_ATTR_FMSG_ARR_NEST_START); } EXPORT_SYMBOL_GPL(devlink_fmsg_arr_pair_nest_start); void devlink_fmsg_arr_pair_nest_end(struct devlink_fmsg *fmsg) { devlink_fmsg_nest_end(fmsg); devlink_fmsg_nest_end(fmsg); } EXPORT_SYMBOL_GPL(devlink_fmsg_arr_pair_nest_end); void devlink_fmsg_binary_pair_nest_start(struct devlink_fmsg *fmsg, const char *name) { devlink_fmsg_arr_pair_nest_start(fmsg, name); fmsg->putting_binary = true; } EXPORT_SYMBOL_GPL(devlink_fmsg_binary_pair_nest_start); void devlink_fmsg_binary_pair_nest_end(struct devlink_fmsg *fmsg) { if (fmsg->err) return; if (!fmsg->putting_binary) fmsg->err = -EINVAL; fmsg->putting_binary = false; devlink_fmsg_arr_pair_nest_end(fmsg); } EXPORT_SYMBOL_GPL(devlink_fmsg_binary_pair_nest_end); static void devlink_fmsg_put_value(struct devlink_fmsg *fmsg, const void *value, u16 value_len, u8 value_nla_type) { struct devlink_fmsg_item *item; if (fmsg->err) return; if (value_len > DEVLINK_FMSG_MAX_SIZE) { fmsg->err = -EMSGSIZE; return; } item = kzalloc(sizeof(*item) + value_len, GFP_KERNEL); if (!item) { fmsg->err = -ENOMEM; return; } item->nla_type = value_nla_type; item->len = value_len; item->attrtype = DEVLINK_ATTR_FMSG_OBJ_VALUE_DATA; memcpy(&item->value, value, item->len); list_add_tail(&item->list, &fmsg->item_list); } static void devlink_fmsg_bool_put(struct devlink_fmsg *fmsg, bool value) { devlink_fmsg_err_if_binary(fmsg); devlink_fmsg_put_value(fmsg, &value, sizeof(value), DEVLINK_VAR_ATTR_TYPE_FLAG); } static void devlink_fmsg_u8_put(struct devlink_fmsg *fmsg, u8 value) { devlink_fmsg_err_if_binary(fmsg); devlink_fmsg_put_value(fmsg, &value, sizeof(value), DEVLINK_VAR_ATTR_TYPE_U8); } void devlink_fmsg_u32_put(struct devlink_fmsg *fmsg, u32 value) { devlink_fmsg_err_if_binary(fmsg); devlink_fmsg_put_value(fmsg, &value, sizeof(value), DEVLINK_VAR_ATTR_TYPE_U32); } EXPORT_SYMBOL_GPL(devlink_fmsg_u32_put); static void devlink_fmsg_u64_put(struct devlink_fmsg *fmsg, u64 value) { devlink_fmsg_err_if_binary(fmsg); devlink_fmsg_put_value(fmsg, &value, sizeof(value), DEVLINK_VAR_ATTR_TYPE_U64); } void devlink_fmsg_string_put(struct devlink_fmsg *fmsg, const char *value) { devlink_fmsg_err_if_binary(fmsg); devlink_fmsg_put_value(fmsg, value, strlen(value) + 1, DEVLINK_VAR_ATTR_TYPE_NUL_STRING); } EXPORT_SYMBOL_GPL(devlink_fmsg_string_put); void devlink_fmsg_binary_put(struct devlink_fmsg *fmsg, const void *value, u16 value_len) { if (!fmsg->err && !fmsg->putting_binary) fmsg->err = -EINVAL; devlink_fmsg_put_value(fmsg, value, value_len, DEVLINK_VAR_ATTR_TYPE_BINARY); } EXPORT_SYMBOL_GPL(devlink_fmsg_binary_put); void 
devlink_fmsg_bool_pair_put(struct devlink_fmsg *fmsg, const char *name, bool value) { devlink_fmsg_pair_nest_start(fmsg, name); devlink_fmsg_bool_put(fmsg, value); devlink_fmsg_pair_nest_end(fmsg); } EXPORT_SYMBOL_GPL(devlink_fmsg_bool_pair_put); void devlink_fmsg_u8_pair_put(struct devlink_fmsg *fmsg, const char *name, u8 value) { devlink_fmsg_pair_nest_start(fmsg, name); devlink_fmsg_u8_put(fmsg, value); devlink_fmsg_pair_nest_end(fmsg); } EXPORT_SYMBOL_GPL(devlink_fmsg_u8_pair_put); void devlink_fmsg_u32_pair_put(struct devlink_fmsg *fmsg, const char *name, u32 value) { devlink_fmsg_pair_nest_start(fmsg, name); devlink_fmsg_u32_put(fmsg, value); devlink_fmsg_pair_nest_end(fmsg); } EXPORT_SYMBOL_GPL(devlink_fmsg_u32_pair_put); void devlink_fmsg_u64_pair_put(struct devlink_fmsg *fmsg, const char *name, u64 value) { devlink_fmsg_pair_nest_start(fmsg, name); devlink_fmsg_u64_put(fmsg, value); devlink_fmsg_pair_nest_end(fmsg); } EXPORT_SYMBOL_GPL(devlink_fmsg_u64_pair_put); void devlink_fmsg_string_pair_put(struct devlink_fmsg *fmsg, const char *name, const char *value) { devlink_fmsg_pair_nest_start(fmsg, name); devlink_fmsg_string_put(fmsg, value); devlink_fmsg_pair_nest_end(fmsg); } EXPORT_SYMBOL_GPL(devlink_fmsg_string_pair_put); void devlink_fmsg_binary_pair_put(struct devlink_fmsg *fmsg, const char *name, const void *value, u32 value_len) { u32 data_size; u32 offset; devlink_fmsg_binary_pair_nest_start(fmsg, name); for (offset = 0; offset < value_len; offset += data_size) { data_size = value_len - offset; if (data_size > DEVLINK_FMSG_MAX_SIZE) data_size = DEVLINK_FMSG_MAX_SIZE; devlink_fmsg_binary_put(fmsg, value + offset, data_size); } devlink_fmsg_binary_pair_nest_end(fmsg); fmsg->putting_binary = false; } EXPORT_SYMBOL_GPL(devlink_fmsg_binary_pair_put); static int devlink_fmsg_item_fill_data(struct devlink_fmsg_item *msg, struct sk_buff *skb) { int attrtype = DEVLINK_ATTR_FMSG_OBJ_VALUE_DATA; u8 tmp; switch (msg->nla_type) { case DEVLINK_VAR_ATTR_TYPE_FLAG: /* Always provide flag data, regardless of its value */ tmp = *(bool *)msg->value; return nla_put_u8(skb, attrtype, tmp); case DEVLINK_VAR_ATTR_TYPE_U8: return nla_put_u8(skb, attrtype, *(u8 *)msg->value); case DEVLINK_VAR_ATTR_TYPE_U32: return nla_put_u32(skb, attrtype, *(u32 *)msg->value); case DEVLINK_VAR_ATTR_TYPE_U64: return devlink_nl_put_u64(skb, attrtype, *(u64 *)msg->value); case DEVLINK_VAR_ATTR_TYPE_NUL_STRING: return nla_put_string(skb, attrtype, (char *)&msg->value); case DEVLINK_VAR_ATTR_TYPE_BINARY: return nla_put(skb, attrtype, msg->len, (void *)&msg->value); default: return -EINVAL; } } static int devlink_fmsg_prepare_skb(struct devlink_fmsg *fmsg, struct sk_buff *skb, int *start) { struct devlink_fmsg_item *item; struct nlattr *fmsg_nlattr; int err = 0; int i = 0; fmsg_nlattr = nla_nest_start_noflag(skb, DEVLINK_ATTR_FMSG); if (!fmsg_nlattr) return -EMSGSIZE; list_for_each_entry(item, &fmsg->item_list, list) { if (i < *start) { i++; continue; } switch (item->attrtype) { case DEVLINK_ATTR_FMSG_OBJ_NEST_START: case DEVLINK_ATTR_FMSG_PAIR_NEST_START: case DEVLINK_ATTR_FMSG_ARR_NEST_START: case DEVLINK_ATTR_FMSG_NEST_END: err = nla_put_flag(skb, item->attrtype); break; case DEVLINK_ATTR_FMSG_OBJ_VALUE_DATA: err = nla_put_u8(skb, DEVLINK_ATTR_FMSG_OBJ_VALUE_TYPE, item->nla_type); if (err) break; err = devlink_fmsg_item_fill_data(item, skb); break; case DEVLINK_ATTR_FMSG_OBJ_NAME: err = nla_put_string(skb, item->attrtype, (char *)&item->value); break; default: err = -EINVAL; break; } if (!err) *start = ++i; else 
break; } nla_nest_end(skb, fmsg_nlattr); return err; } static int devlink_fmsg_snd(struct devlink_fmsg *fmsg, struct genl_info *info, enum devlink_command cmd, int flags) { struct nlmsghdr *nlh; struct sk_buff *skb; bool last = false; int index = 0; void *hdr; int err; if (fmsg->err) return fmsg->err; while (!last) { int tmp_index = index; skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!skb) return -ENOMEM; hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq, &devlink_nl_family, flags | NLM_F_MULTI, cmd); if (!hdr) { err = -EMSGSIZE; goto nla_put_failure; } err = devlink_fmsg_prepare_skb(fmsg, skb, &index); if (!err) last = true; else if (err != -EMSGSIZE || tmp_index == index) goto nla_put_failure; genlmsg_end(skb, hdr); err = genlmsg_reply(skb, info); if (err) return err; } skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!skb) return -ENOMEM; nlh = nlmsg_put(skb, info->snd_portid, info->snd_seq, NLMSG_DONE, 0, flags | NLM_F_MULTI); if (!nlh) { err = -EMSGSIZE; goto nla_put_failure; } return genlmsg_reply(skb, info); nla_put_failure: nlmsg_free(skb); return err; } static int devlink_fmsg_dumpit(struct devlink_fmsg *fmsg, struct sk_buff *skb, struct netlink_callback *cb, enum devlink_command cmd) { struct devlink_nl_dump_state *state = devlink_dump_state(cb); int index = state->idx; int tmp_index = index; void *hdr; int err; if (fmsg->err) return fmsg->err; hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, &devlink_nl_family, NLM_F_ACK | NLM_F_MULTI, cmd); if (!hdr) { err = -EMSGSIZE; goto nla_put_failure; } err = devlink_fmsg_prepare_skb(fmsg, skb, &index); if ((err && err != -EMSGSIZE) || tmp_index == index) goto nla_put_failure; state->idx = index; genlmsg_end(skb, hdr); return skb->len; nla_put_failure: genlmsg_cancel(skb, hdr); return err; } int devlink_nl_health_reporter_diagnose_doit(struct sk_buff *skb, struct genl_info *info) { struct devlink *devlink = info->user_ptr[0]; struct devlink_health_reporter *reporter; struct devlink_fmsg *fmsg; int err; reporter = devlink_health_reporter_get_from_info(devlink, info); if (!reporter) return -EINVAL; if (!reporter->ops->diagnose) return -EOPNOTSUPP; fmsg = devlink_fmsg_alloc(); if (!fmsg) return -ENOMEM; devlink_fmsg_obj_nest_start(fmsg); err = reporter->ops->diagnose(reporter, fmsg, info->extack); if (err) goto out; devlink_fmsg_obj_nest_end(fmsg); err = devlink_fmsg_snd(fmsg, info, DEVLINK_CMD_HEALTH_REPORTER_DIAGNOSE, 0); out: devlink_fmsg_free(fmsg); return err; } static struct devlink_health_reporter * devlink_health_reporter_get_from_cb_lock(struct netlink_callback *cb) { const struct genl_info *info = genl_info_dump(cb); struct devlink_health_reporter *reporter; struct nlattr **attrs = info->attrs; struct devlink *devlink; devlink = devlink_get_from_attrs_lock(sock_net(cb->skb->sk), attrs, false); if (IS_ERR(devlink)) return NULL; reporter = devlink_health_reporter_get_from_attrs(devlink, attrs); if (!reporter) { devl_unlock(devlink); devlink_put(devlink); } return reporter; } int devlink_nl_health_reporter_dump_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb) { struct devlink_nl_dump_state *state = devlink_dump_state(cb); struct devlink_health_reporter *reporter; struct devlink *devlink; int err; reporter = devlink_health_reporter_get_from_cb_lock(cb); if (!reporter) return -EINVAL; devlink = reporter->devlink; if (!reporter->ops->dump) { devl_unlock(devlink); devlink_put(devlink); return -EOPNOTSUPP; } if (!state->idx) { err = devlink_health_do_dump(reporter, NULL, 
cb->extack); if (err) goto unlock; state->dump_ts = reporter->dump_ts; } if (!reporter->dump_fmsg || state->dump_ts != reporter->dump_ts) { NL_SET_ERR_MSG(cb->extack, "Dump trampled, please retry"); err = -EAGAIN; goto unlock; } err = devlink_fmsg_dumpit(reporter->dump_fmsg, skb, cb, DEVLINK_CMD_HEALTH_REPORTER_DUMP_GET); unlock: devl_unlock(devlink); devlink_put(devlink); return err; } int devlink_nl_health_reporter_dump_clear_doit(struct sk_buff *skb, struct genl_info *info) { struct devlink *devlink = info->user_ptr[0]; struct devlink_health_reporter *reporter; reporter = devlink_health_reporter_get_from_info(devlink, info); if (!reporter) return -EINVAL; if (!reporter->ops->dump) return -EOPNOTSUPP; devlink_health_dump_clear(reporter); return 0; } int devlink_nl_health_reporter_test_doit(struct sk_buff *skb, struct genl_info *info) { struct devlink *devlink = info->user_ptr[0]; struct devlink_health_reporter *reporter; reporter = devlink_health_reporter_get_from_info(devlink, info); if (!reporter) return -EINVAL; if (!reporter->ops->test) return -EOPNOTSUPP; return reporter->ops->test(reporter, info->extack); } /** * devlink_fmsg_dump_skb - Dump sk_buffer structure * @fmsg: devlink formatted message pointer * @skb: pointer to skb * * Dump diagnostic information about sk_buff structure, like headroom, length, * tailroom, MAC, etc. */ void devlink_fmsg_dump_skb(struct devlink_fmsg *fmsg, const struct sk_buff *skb) { struct skb_shared_info *sh = skb_shinfo(skb); struct sock *sk = skb->sk; bool has_mac, has_trans; has_mac = skb_mac_header_was_set(skb); has_trans = skb_transport_header_was_set(skb); devlink_fmsg_pair_nest_start(fmsg, "skb"); devlink_fmsg_obj_nest_start(fmsg); devlink_fmsg_put(fmsg, "actual len", skb->len); devlink_fmsg_put(fmsg, "head len", skb_headlen(skb)); devlink_fmsg_put(fmsg, "data len", skb->data_len); devlink_fmsg_put(fmsg, "tail len", skb_tailroom(skb)); devlink_fmsg_put(fmsg, "MAC", has_mac ? skb->mac_header : -1); devlink_fmsg_put(fmsg, "MAC len", has_mac ? skb_mac_header_len(skb) : -1); devlink_fmsg_put(fmsg, "network hdr", skb->network_header); devlink_fmsg_put(fmsg, "network hdr len", has_trans ? skb_network_header_len(skb) : -1); devlink_fmsg_put(fmsg, "transport hdr", has_trans ? 
skb->transport_header : -1); devlink_fmsg_put(fmsg, "csum", (__force u32)skb->csum); devlink_fmsg_put(fmsg, "csum_ip_summed", (u8)skb->ip_summed); devlink_fmsg_put(fmsg, "csum_complete_sw", !!skb->csum_complete_sw); devlink_fmsg_put(fmsg, "csum_valid", !!skb->csum_valid); devlink_fmsg_put(fmsg, "csum_level", (u8)skb->csum_level); devlink_fmsg_put(fmsg, "sw_hash", !!skb->sw_hash); devlink_fmsg_put(fmsg, "l4_hash", !!skb->l4_hash); devlink_fmsg_put(fmsg, "proto", ntohs(skb->protocol)); devlink_fmsg_put(fmsg, "pkt_type", (u8)skb->pkt_type); devlink_fmsg_put(fmsg, "iif", skb->skb_iif); if (sk) { devlink_fmsg_pair_nest_start(fmsg, "sk"); devlink_fmsg_obj_nest_start(fmsg); devlink_fmsg_put(fmsg, "family", sk->sk_family); devlink_fmsg_put(fmsg, "type", sk->sk_type); devlink_fmsg_put(fmsg, "proto", sk->sk_protocol); devlink_fmsg_obj_nest_end(fmsg); devlink_fmsg_pair_nest_end(fmsg); } devlink_fmsg_obj_nest_end(fmsg); devlink_fmsg_pair_nest_end(fmsg); devlink_fmsg_pair_nest_start(fmsg, "shinfo"); devlink_fmsg_obj_nest_start(fmsg); devlink_fmsg_put(fmsg, "tx_flags", sh->tx_flags); devlink_fmsg_put(fmsg, "nr_frags", sh->nr_frags); devlink_fmsg_put(fmsg, "gso_size", sh->gso_size); devlink_fmsg_put(fmsg, "gso_type", sh->gso_type); devlink_fmsg_put(fmsg, "gso_segs", sh->gso_segs); devlink_fmsg_obj_nest_end(fmsg); devlink_fmsg_pair_nest_end(fmsg); } EXPORT_SYMBOL_GPL(devlink_fmsg_dump_skb);
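To show how the pieces above fit together from a driver's perspective, here is a hedged sketch of registering and driving a reporter; every foo_* identifier is hypothetical, while the ops signatures mirror the calls made by devlink_health_reporter_recover() and devlink_health_do_dump() above:

/* Hedged driver-side sketch (not part of this file). */
#include <net/devlink.h>

static struct devlink_health_reporter *foo_fw_reporter;

static int foo_fw_recover(struct devlink_health_reporter *reporter,
			  void *priv_ctx, struct netlink_ext_ack *extack)
{
	/* reset the device; returning 0 lets the core mark the reporter
	 * HEALTHY via devlink_health_reporter_recovery_done() */
	return 0;
}

static int foo_fw_dump(struct devlink_health_reporter *reporter,
		       struct devlink_fmsg *fmsg, void *priv_ctx,
		       struct netlink_ext_ack *extack)
{
	/* fmsg helpers latch errors in fmsg->err; no per-call checks needed */
	devlink_fmsg_u32_pair_put(fmsg, "irisc_id", 0);
	devlink_fmsg_string_pair_put(fmsg, "state", "stalled");
	return 0;
}

static const struct devlink_health_reporter_ops foo_fw_ops = {
	.name = "fw",
	.recover = foo_fw_recover,
	.dump = foo_fw_dump,
};

/* At probe time, with the devlink instance locked by the caller: */
static int foo_health_init(struct devlink *devlink)
{
	foo_fw_reporter = devl_health_reporter_create(devlink, &foo_fw_ops,
						      5000 /* msec grace */,
						      NULL);
	return PTR_ERR_OR_ZERO(foo_fw_reporter);
}

/* On a fatal event; auto-dump and auto-recover then run as configured: */
static void foo_on_fw_fatal(void *event_ctx)
{
	devlink_health_report(foo_fw_reporter, "fw fatal event", event_ctx);
}

The grace period passed to devl_health_reporter_create() feeds the recover_ts_threshold check in devlink_health_report() above, which is what collapses a burst of errors into a single recovery attempt.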
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_X86_UACCESS_64_H #define _ASM_X86_UACCESS_64_H /* * User space memory access functions */ #include <linux/compiler.h> #include <linux/lockdep.h> #include <linux/kasan-checks.h> #include <asm/alternative.h> #include <asm/cpufeatures.h> #include <asm/page.h> #include <asm/percpu.h> #include <asm/runtime-const.h> /* * Virtual variable: there's no actual backing store for this, * it can purely be used as 'runtime_const_ptr(USER_PTR_MAX)' */ extern unsigned long USER_PTR_MAX; #ifdef CONFIG_ADDRESS_MASKING /* * Mask out tag bits from the address. */ static inline unsigned long __untagged_addr(unsigned long addr) { asm_inline (ALTERNATIVE("", "and " __percpu_arg([mask]) ", %[addr]", X86_FEATURE_LAM) : [addr] "+r" (addr) : [mask] "m" (__my_cpu_var(tlbstate_untag_mask))); return addr; } #define untagged_addr(addr) ({ \ unsigned long __addr = (__force unsigned long)(addr); \ (__force __typeof__(addr))__untagged_addr(__addr); \ }) static inline unsigned long __untagged_addr_remote(struct mm_struct *mm, unsigned long addr) { mmap_assert_locked(mm); return addr & (mm)->context.untag_mask; } #define untagged_addr_remote(mm, addr) ({ \ unsigned long __addr = (__force unsigned long)(addr); \ (__force __typeof__(addr))__untagged_addr_remote(mm, __addr); \ }) #endif #define valid_user_address(x) \ likely((__force unsigned long)(x) <= runtime_const_ptr(USER_PTR_MAX)) /* * Masking the user address is an alternative to a conditional * user_access_begin that can avoid the fencing. This only works * for dense accesses starting at the address. */ static inline void __user *mask_user_address(const void __user *ptr) { void __user *ret; asm("cmp %1,%0\n\t" "cmova %1,%0" :"=r" (ret) :"r" (runtime_const_ptr(USER_PTR_MAX)), "0" (ptr)); return ret; } #define masked_user_access_begin(x) ({ \ __auto_type __masked_ptr = (x); \ __masked_ptr = mask_user_address(__masked_ptr); \ __uaccess_begin(); __masked_ptr; }) /* * User pointers can have tag bits on x86-64. This scheme tolerates * arbitrary values in those bits rather than masking them off. * * Enforce two rules: * 1. 'ptr' must be in the user part of the address space * 2. 'ptr+size' must not overflow into kernel addresses * * Note that we always have at least one guard page between the * max user address and the non-canonical gap, allowing us to * ignore small sizes entirely. * * In fact, we could probably remove the size check entirely, since * any kernel accesses will be in increasing address order starting * at 'ptr'. * * That's a separate optimization, for now just handle the small * constant case.
*/ static inline bool __access_ok(const void __user *ptr, unsigned long size) { if (__builtin_constant_p(size <= PAGE_SIZE) && size <= PAGE_SIZE) { return valid_user_address(ptr); } else { unsigned long sum = size + (__force unsigned long)ptr; return valid_user_address(sum) && sum >= (__force unsigned long)ptr; } } #define __access_ok __access_ok /* * Copy To/From Userspace */ /* Handles exceptions in both to and from, but doesn't do access_ok */ __must_check unsigned long rep_movs_alternative(void *to, const void *from, unsigned len); static __always_inline __must_check unsigned long copy_user_generic(void *to, const void *from, unsigned long len) { stac(); /* * If CPU has FSRM feature, use 'rep movs'. * Otherwise, use rep_movs_alternative. */ asm volatile( "1:\n\t" ALTERNATIVE("rep movsb", "call rep_movs_alternative", ALT_NOT(X86_FEATURE_FSRM)) "2:\n" _ASM_EXTABLE_UA(1b, 2b) :"+c" (len), "+D" (to), "+S" (from), ASM_CALL_CONSTRAINT : : "memory", "rax"); clac(); return len; } static __always_inline __must_check unsigned long raw_copy_from_user(void *dst, const void __user *src, unsigned long size) { return copy_user_generic(dst, (__force void *)src, size); } static __always_inline __must_check unsigned long raw_copy_to_user(void __user *dst, const void *src, unsigned long size) { return copy_user_generic((__force void *)dst, src, size); } extern long __copy_user_nocache(void *dst, const void __user *src, unsigned size); extern long __copy_user_flushcache(void *dst, const void __user *src, unsigned size); static inline int __copy_from_user_inatomic_nocache(void *dst, const void __user *src, unsigned size) { long ret; kasan_check_write(dst, size); stac(); ret = __copy_user_nocache(dst, src, size); clac(); return ret; } static inline int __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size) { kasan_check_write(dst, size); return __copy_user_flushcache(dst, src, size); } /* * Zero Userspace. */ __must_check unsigned long rep_stos_alternative(void __user *addr, unsigned long len); static __always_inline __must_check unsigned long __clear_user(void __user *addr, unsigned long size) { might_fault(); stac(); /* * No memory constraint because it doesn't change any memory gcc * knows about. */ asm volatile( "1:\n\t" ALTERNATIVE("rep stosb", "call rep_stos_alternative", ALT_NOT(X86_FEATURE_FSRS)) "2:\n" _ASM_EXTABLE_UA(1b, 2b) : "+c" (size), "+D" (addr), ASM_CALL_CONSTRAINT : "a" (0)); clac(); return size; } static __always_inline unsigned long clear_user(void __user *to, unsigned long n) { if (__access_ok(to, n)) return __clear_user(to, n); return n; } #endif /* _ASM_X86_UACCESS_64_H */
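For context, a hedged sketch of the caller-side pattern that masked_user_access_begin() is designed for; example_read_u32() is hypothetical, and can_do_masked_user_access(), user_read_access_begin()/user_read_access_end() and unsafe_get_user() come from the generic <linux/uaccess.h> layer rather than this header:

/* Hedged caller sketch (not part of this header). */
#include <linux/uaccess.h>
#include <linux/types.h>

static int example_read_u32(u32 *dst, const u32 __user *uptr)
{
	if (can_do_masked_user_access()) {
		/* No access_ok()/speculation fence needed: an out-of-range
		 * pointer is clamped to USER_PTR_MAX by the cmova above and
		 * the access then faults in the guard page. */
		uptr = masked_user_access_begin(uptr);
	} else if (!user_read_access_begin(uptr, sizeof(*uptr))) {
		return -EFAULT;
	}

	unsafe_get_user(*dst, uptr, Efault);	/* jumps to Efault on fault */
	user_read_access_end();
	return 0;

Efault:
	user_read_access_end();
	return -EFAULT;
}

This only works for "dense accesses starting at the address", as the mask_user_address() comment above notes, since only the first out-of-range byte is guaranteed to land in the guard page.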
// SPDX-License-Identifier: GPL-2.0-only
/*
 * vDPA bus.
 *
 * Copyright (c) 2020, Red Hat. All rights reserved.
 *     Author: Jason Wang <jasowang@redhat.com>
 *
 */

#include <linux/module.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/vdpa.h>
#include <uapi/linux/vdpa.h>
#include <net/genetlink.h>
#include <linux/mod_devicetable.h>
#include <linux/virtio_ids.h>

static LIST_HEAD(mdev_head);
/* A global rwsem that protects vdpa management device and device level operations. */
static DECLARE_RWSEM(vdpa_dev_lock);
static DEFINE_IDA(vdpa_index_ida);

void vdpa_set_status(struct vdpa_device *vdev, u8 status)
{
	down_write(&vdev->cf_lock);
	vdev->config->set_status(vdev, status);
	up_write(&vdev->cf_lock);
}
EXPORT_SYMBOL(vdpa_set_status);

static struct genl_family vdpa_nl_family;

static int vdpa_dev_probe(struct device *d)
{
	struct vdpa_device *vdev = dev_to_vdpa(d);
	struct vdpa_driver *drv = drv_to_vdpa(vdev->dev.driver);
	const struct vdpa_config_ops *ops = vdev->config;
	u32 max_num, min_num = 1;
	int ret = 0;

	d->dma_mask = &d->coherent_dma_mask;
	ret = dma_set_mask_and_coherent(d, DMA_BIT_MASK(64));
	if (ret)
		return ret;

	max_num = ops->get_vq_num_max(vdev);
	if (ops->get_vq_num_min)
		min_num = ops->get_vq_num_min(vdev);
	if (max_num < min_num)
		return -EINVAL;

	if (drv && drv->probe)
		ret = drv->probe(vdev);

	return ret;
}

static void vdpa_dev_remove(struct device *d)
{
	struct vdpa_device *vdev = dev_to_vdpa(d);
	struct vdpa_driver *drv = drv_to_vdpa(vdev->dev.driver);

	if (drv && drv->remove)
		drv->remove(vdev);
}

static int vdpa_dev_match(struct device *dev, const struct device_driver *drv)
{
	struct vdpa_device *vdev = dev_to_vdpa(dev);

	/* Check override first, and if set, only use the named driver */
	if (vdev->driver_override)
		return strcmp(vdev->driver_override, drv->name) == 0;

	/* Currently devices must be supported by all vDPA bus drivers */
	return 1;
}

static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct vdpa_device *vdev = dev_to_vdpa(dev);
	int ret;

	ret = driver_set_override(dev, &vdev->driver_override, buf, count);
	if (ret)
		return ret;

	return count;
}

static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct vdpa_device *vdev = dev_to_vdpa(dev);
	ssize_t len;

	device_lock(dev);
	len = sysfs_emit(buf, "%s\n", vdev->driver_override);
	device_unlock(dev);

	return len;
}
static DEVICE_ATTR_RW(driver_override);

static struct attribute *vdpa_dev_attrs[] = {
	&dev_attr_driver_override.attr,
	NULL,
};

static const struct attribute_group vdpa_dev_group = {
	.attrs = vdpa_dev_attrs,
};
__ATTRIBUTE_GROUPS(vdpa_dev);

static const struct bus_type vdpa_bus = {
	.name  = "vdpa",
	.dev_groups = vdpa_dev_groups,
	.match = vdpa_dev_match,
	.probe = vdpa_dev_probe,
	.remove = vdpa_dev_remove,
};

static void vdpa_release_dev(struct device *d)
{
	struct vdpa_device *vdev = dev_to_vdpa(d);
	const struct vdpa_config_ops *ops = vdev->config;

	if (ops->free)
		ops->free(vdev);

	ida_free(&vdpa_index_ida, vdev->index);
	kfree(vdev->driver_override);
	kfree(vdev);
}

/**
 * __vdpa_alloc_device - allocate and initialize a vDPA device
 * This allows the driver to do some preparation after the device is
 * initialized but before it is registered.
 * @parent: the parent device
 * @config: the bus operations that are supported by this device
 * @ngroups: number of groups supported by this device
 * @nas: number of address spaces supported by this device
 * @size: size of the parent structure that contains private data
 * @name: name of the vdpa device; optional.
 * @use_va: indicate whether virtual address must be used by this device
 *
 * Driver should use vdpa_alloc_device() wrapper macro instead of
 * using this directly.
 *
 * Return: Returns an error when parent/config/dma_dev is not set or when it
 * fails to get an ida entry.
 */
struct vdpa_device *__vdpa_alloc_device(struct device *parent,
					const struct vdpa_config_ops *config,
					unsigned int ngroups, unsigned int nas,
					size_t size, const char *name,
					bool use_va)
{
	struct vdpa_device *vdev;
	int err = -EINVAL;

	if (!config)
		goto err;

	if (!!config->dma_map != !!config->dma_unmap)
		goto err;

	/* It should only work for devices that use an on-chip IOMMU */
	if (use_va && !(config->dma_map || config->set_map))
		goto err;

	err = -ENOMEM;
	vdev = kzalloc(size, GFP_KERNEL);
	if (!vdev)
		goto err;

	err = ida_alloc(&vdpa_index_ida, GFP_KERNEL);
	if (err < 0)
		goto err_ida;

	vdev->dev.bus = &vdpa_bus;
	vdev->dev.parent = parent;
	vdev->dev.release = vdpa_release_dev;
	vdev->index = err;
	vdev->config = config;
	vdev->features_valid = false;
	vdev->use_va = use_va;
	vdev->ngroups = ngroups;
	vdev->nas = nas;

	if (name)
		err = dev_set_name(&vdev->dev, "%s", name);
	else
		err = dev_set_name(&vdev->dev, "vdpa%u", vdev->index);
	if (err)
		goto err_name;

	init_rwsem(&vdev->cf_lock);
	device_initialize(&vdev->dev);

	return vdev;

err_name:
	ida_free(&vdpa_index_ida, vdev->index);
err_ida:
	kfree(vdev);
err:
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(__vdpa_alloc_device);

static int vdpa_name_match(struct device *dev, const void *data)
{
	struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);

	return (strcmp(dev_name(&vdev->dev), data) == 0);
}

static int __vdpa_register_device(struct vdpa_device *vdev, u32 nvqs)
{
	struct device *dev;

	vdev->nvqs = nvqs;

	lockdep_assert_held(&vdpa_dev_lock);
	dev = bus_find_device(&vdpa_bus, NULL, dev_name(&vdev->dev), vdpa_name_match);
	if (dev) {
		put_device(dev);
		return -EEXIST;
	}
	return device_add(&vdev->dev);
}

/**
 * _vdpa_register_device - register a vDPA device with the vdpa lock held
 * Caller must have made a successful call to vdpa_alloc_device() before.
 * Caller must invoke this routine in the management device dev_add()
 * callback after setting up a valid mgmtdev for this vdpa device.
 * @vdev: the vdpa device to be registered to the vDPA bus
 * @nvqs: number of virtqueues supported by this device
 *
 * Return: Returns an error when it fails to add the device to the vDPA bus.
 */
int _vdpa_register_device(struct vdpa_device *vdev, u32 nvqs)
{
	if (!vdev->mdev)
		return -EINVAL;

	return __vdpa_register_device(vdev, nvqs);
}
EXPORT_SYMBOL_GPL(_vdpa_register_device);

/**
 * vdpa_register_device - register a vDPA device
 * Caller must have made a successful call to vdpa_alloc_device() before.
 * @vdev: the vdpa device to be registered to the vDPA bus
 * @nvqs: number of virtqueues supported by this device
 *
 * Return: Returns an error when it fails to add the device to the vDPA bus.
 */
int vdpa_register_device(struct vdpa_device *vdev, u32 nvqs)
{
	int err;

	down_write(&vdpa_dev_lock);
	err = __vdpa_register_device(vdev, nvqs);
	up_write(&vdpa_dev_lock);
	return err;
}
EXPORT_SYMBOL_GPL(vdpa_register_device);

/**
 * _vdpa_unregister_device - unregister a vDPA device
 * Caller must invoke this routine as part of the management device dev_del()
 * callback.
 * @vdev: the vdpa device to be unregistered from the vDPA bus
 */
void _vdpa_unregister_device(struct vdpa_device *vdev)
{
	lockdep_assert_held(&vdpa_dev_lock);
	WARN_ON(!vdev->mdev);
	device_unregister(&vdev->dev);
}
EXPORT_SYMBOL_GPL(_vdpa_unregister_device);

/**
 * vdpa_unregister_device - unregister a vDPA device
 * @vdev: the vdpa device to be unregistered from the vDPA bus
 */
void vdpa_unregister_device(struct vdpa_device *vdev)
{
	down_write(&vdpa_dev_lock);
	device_unregister(&vdev->dev);
	up_write(&vdpa_dev_lock);
}
EXPORT_SYMBOL_GPL(vdpa_unregister_device);

/**
 * __vdpa_register_driver - register a vDPA device driver
 * @drv: the vdpa device driver to be registered
 * @owner: module owner of the driver
 *
 * Return: Returns an error when it fails to do the registration.
 */
int __vdpa_register_driver(struct vdpa_driver *drv, struct module *owner)
{
	drv->driver.bus = &vdpa_bus;
	drv->driver.owner = owner;

	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(__vdpa_register_driver);

/**
 * vdpa_unregister_driver - unregister a vDPA device driver
 * @drv: the vdpa device driver to be unregistered
 */
void vdpa_unregister_driver(struct vdpa_driver *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(vdpa_unregister_driver);

/**
 * vdpa_mgmtdev_register - register a vdpa management device
 *
 * @mdev: Pointer to vdpa management device
 * vdpa_mgmtdev_register() registers a vdpa management device which supports
 * vdpa device management.
 * Return: Returns 0 on success, or an error when the required callback ops
 * are not initialized.
 */
int vdpa_mgmtdev_register(struct vdpa_mgmt_dev *mdev)
{
	if (!mdev->device || !mdev->ops || !mdev->ops->dev_add || !mdev->ops->dev_del)
		return -EINVAL;

	INIT_LIST_HEAD(&mdev->list);
	down_write(&vdpa_dev_lock);
	list_add_tail(&mdev->list, &mdev_head);
	up_write(&vdpa_dev_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(vdpa_mgmtdev_register);

static int vdpa_match_remove(struct device *dev, void *data)
{
	struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);
	struct vdpa_mgmt_dev *mdev = vdev->mdev;

	if (mdev == data)
		mdev->ops->dev_del(mdev, vdev);
	return 0;
}

void vdpa_mgmtdev_unregister(struct vdpa_mgmt_dev *mdev)
{
	down_write(&vdpa_dev_lock);

	list_del(&mdev->list);

	/* Filter out all the entries that belong to this management device and delete them. */
	bus_for_each_dev(&vdpa_bus, NULL, mdev, vdpa_match_remove);

	up_write(&vdpa_dev_lock);
}
EXPORT_SYMBOL_GPL(vdpa_mgmtdev_unregister);

static void vdpa_get_config_unlocked(struct vdpa_device *vdev,
				     unsigned int offset,
				     void *buf, unsigned int len)
{
	const struct vdpa_config_ops *ops = vdev->config;

	/*
	 * Config accesses aren't supposed to trigger before features are set.
	 * If it does happen we assume a legacy guest.
	 */
	if (!vdev->features_valid)
		vdpa_set_features_unlocked(vdev, 0);
	ops->get_config(vdev, offset, buf, len);
}

/**
 * vdpa_get_config - Get one or more device configuration fields.
 * @vdev: vdpa device to operate on
 * @offset: starting byte offset of the field
 * @buf: buffer pointer to read to
 * @len: length of the configuration fields in bytes
 */
void vdpa_get_config(struct vdpa_device *vdev, unsigned int offset,
		     void *buf, unsigned int len)
{
	down_read(&vdev->cf_lock);
	vdpa_get_config_unlocked(vdev, offset, buf, len);
	up_read(&vdev->cf_lock);
}
EXPORT_SYMBOL_GPL(vdpa_get_config);

/**
 * vdpa_set_config - Set one or more device configuration fields.
* @vdev: vdpa device to operate on * @offset: starting byte offset of the field * @buf: buffer pointer to read from * @length: length of the configuration fields in bytes */ void vdpa_set_config(struct vdpa_device *vdev, unsigned int offset, const void *buf, unsigned int length) { down_write(&vdev->cf_lock); vdev->config->set_config(vdev, offset, buf, length); up_write(&vdev->cf_lock); } EXPORT_SYMBOL_GPL(vdpa_set_config); static bool mgmtdev_handle_match(const struct vdpa_mgmt_dev *mdev, const char *busname, const char *devname) { /* Bus name is optional for simulated management device, so ignore the * device with bus if bus attribute is provided. */ if ((busname && !mdev->device->bus) || (!busname && mdev->device->bus)) return false; if (!busname && strcmp(dev_name(mdev->device), devname) == 0) return true; if (busname && (strcmp(mdev->device->bus->name, busname) == 0) && (strcmp(dev_name(mdev->device), devname) == 0)) return true; return false; } static struct vdpa_mgmt_dev *vdpa_mgmtdev_get_from_attr(struct nlattr **attrs) { struct vdpa_mgmt_dev *mdev; const char *busname = NULL; const char *devname; if (!attrs[VDPA_ATTR_MGMTDEV_DEV_NAME]) return ERR_PTR(-EINVAL); devname = nla_data(attrs[VDPA_ATTR_MGMTDEV_DEV_NAME]); if (attrs[VDPA_ATTR_MGMTDEV_BUS_NAME]) busname = nla_data(attrs[VDPA_ATTR_MGMTDEV_BUS_NAME]); list_for_each_entry(mdev, &mdev_head, list) { if (mgmtdev_handle_match(mdev, busname, devname)) return mdev; } return ERR_PTR(-ENODEV); } static int vdpa_nl_mgmtdev_handle_fill(struct sk_buff *msg, const struct vdpa_mgmt_dev *mdev) { if (mdev->device->bus && nla_put_string(msg, VDPA_ATTR_MGMTDEV_BUS_NAME, mdev->device->bus->name)) return -EMSGSIZE; if (nla_put_string(msg, VDPA_ATTR_MGMTDEV_DEV_NAME, dev_name(mdev->device))) return -EMSGSIZE; return 0; } static u64 vdpa_mgmtdev_get_classes(const struct vdpa_mgmt_dev *mdev, unsigned int *nclasses) { u64 supported_classes = 0; unsigned int n = 0; for (int i = 0; mdev->id_table[i].device; i++) { if (mdev->id_table[i].device > 63) continue; supported_classes |= BIT_ULL(mdev->id_table[i].device); n++; } if (nclasses) *nclasses = n; return supported_classes; } static int vdpa_mgmtdev_fill(const struct vdpa_mgmt_dev *mdev, struct sk_buff *msg, u32 portid, u32 seq, int flags) { void *hdr; int err; hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags, VDPA_CMD_MGMTDEV_NEW); if (!hdr) return -EMSGSIZE; err = vdpa_nl_mgmtdev_handle_fill(msg, mdev); if (err) goto msg_err; if (nla_put_u64_64bit(msg, VDPA_ATTR_MGMTDEV_SUPPORTED_CLASSES, vdpa_mgmtdev_get_classes(mdev, NULL), VDPA_ATTR_UNSPEC)) { err = -EMSGSIZE; goto msg_err; } if (nla_put_u32(msg, VDPA_ATTR_DEV_MGMTDEV_MAX_VQS, mdev->max_supported_vqs)) { err = -EMSGSIZE; goto msg_err; } if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_SUPPORTED_FEATURES, mdev->supported_features, VDPA_ATTR_PAD)) { err = -EMSGSIZE; goto msg_err; } genlmsg_end(msg, hdr); return 0; msg_err: genlmsg_cancel(msg, hdr); return err; } static int vdpa_nl_cmd_mgmtdev_get_doit(struct sk_buff *skb, struct genl_info *info) { struct vdpa_mgmt_dev *mdev; struct sk_buff *msg; int err; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; down_read(&vdpa_dev_lock); mdev = vdpa_mgmtdev_get_from_attr(info->attrs); if (IS_ERR(mdev)) { up_read(&vdpa_dev_lock); NL_SET_ERR_MSG_MOD(info->extack, "Fail to find the specified mgmt device"); err = PTR_ERR(mdev); goto out; } err = vdpa_mgmtdev_fill(mdev, msg, info->snd_portid, info->snd_seq, 0); up_read(&vdpa_dev_lock); if (err) goto out; err = genlmsg_reply(msg, info); 
return err; out: nlmsg_free(msg); return err; } static int vdpa_nl_cmd_mgmtdev_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb) { struct vdpa_mgmt_dev *mdev; int start = cb->args[0]; int idx = 0; int err; down_read(&vdpa_dev_lock); list_for_each_entry(mdev, &mdev_head, list) { if (idx < start) { idx++; continue; } err = vdpa_mgmtdev_fill(mdev, msg, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NLM_F_MULTI); if (err) goto out; idx++; } out: up_read(&vdpa_dev_lock); cb->args[0] = idx; return msg->len; } #define VDPA_DEV_NET_ATTRS_MASK (BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR) | \ BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MTU) | \ BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP)) /* * Bitmask for all per-device features: feature bits VIRTIO_TRANSPORT_F_START * through VIRTIO_TRANSPORT_F_END are unset, i.e. 0xfffffc000fffffff for * all 64bit features. If the features are extended beyond 64 bits, or new * "holes" are reserved for other type of features than per-device, this * macro would have to be updated. */ #define VIRTIO_DEVICE_F_MASK (~0ULL << (VIRTIO_TRANSPORT_F_END + 1) | \ ((1ULL << VIRTIO_TRANSPORT_F_START) - 1)) static int vdpa_nl_cmd_dev_add_set_doit(struct sk_buff *skb, struct genl_info *info) { struct vdpa_dev_set_config config = {}; struct nlattr **nl_attrs = info->attrs; struct vdpa_mgmt_dev *mdev; unsigned int ncls = 0; const u8 *macaddr; const char *name; u64 classes; int err = 0; if (!info->attrs[VDPA_ATTR_DEV_NAME]) return -EINVAL; name = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]); if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MACADDR]) { macaddr = nla_data(nl_attrs[VDPA_ATTR_DEV_NET_CFG_MACADDR]); memcpy(config.net.mac, macaddr, sizeof(config.net.mac)); config.mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR); } if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MTU]) { config.net.mtu = nla_get_u16(nl_attrs[VDPA_ATTR_DEV_NET_CFG_MTU]); config.mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MTU); } if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MAX_VQP]) { config.net.max_vq_pairs = nla_get_u16(nl_attrs[VDPA_ATTR_DEV_NET_CFG_MAX_VQP]); if (!config.net.max_vq_pairs) { NL_SET_ERR_MSG_MOD(info->extack, "At least one pair of VQs is required"); return -EINVAL; } config.mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP); } if (nl_attrs[VDPA_ATTR_DEV_FEATURES]) { u64 missing = 0x0ULL; config.device_features = nla_get_u64(nl_attrs[VDPA_ATTR_DEV_FEATURES]); if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MACADDR] && !(config.device_features & BIT_ULL(VIRTIO_NET_F_MAC))) missing |= BIT_ULL(VIRTIO_NET_F_MAC); if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MTU] && !(config.device_features & BIT_ULL(VIRTIO_NET_F_MTU))) missing |= BIT_ULL(VIRTIO_NET_F_MTU); if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MAX_VQP] && config.net.max_vq_pairs > 1 && !(config.device_features & BIT_ULL(VIRTIO_NET_F_MQ))) missing |= BIT_ULL(VIRTIO_NET_F_MQ); if (missing) { NL_SET_ERR_MSG_FMT_MOD(info->extack, "Missing features 0x%llx for provided attributes", missing); return -EINVAL; } config.mask |= BIT_ULL(VDPA_ATTR_DEV_FEATURES); } /* Skip checking capability if user didn't prefer to configure any * device networking attributes. It is likely that user might have used * a device specific method to configure such attributes or using device * default attributes. 
*/ if ((config.mask & VDPA_DEV_NET_ATTRS_MASK) && !netlink_capable(skb, CAP_NET_ADMIN)) return -EPERM; down_write(&vdpa_dev_lock); mdev = vdpa_mgmtdev_get_from_attr(info->attrs); if (IS_ERR(mdev)) { NL_SET_ERR_MSG_MOD(info->extack, "Fail to find the specified management device"); err = PTR_ERR(mdev); goto err; } if ((config.mask & mdev->config_attr_mask) != config.mask) { NL_SET_ERR_MSG_FMT_MOD(info->extack, "Some provided attributes are not supported: 0x%llx", config.mask & ~mdev->config_attr_mask); err = -EOPNOTSUPP; goto err; } classes = vdpa_mgmtdev_get_classes(mdev, &ncls); if (config.mask & VDPA_DEV_NET_ATTRS_MASK && !(classes & BIT_ULL(VIRTIO_ID_NET))) { NL_SET_ERR_MSG_MOD(info->extack, "Network class attributes provided on unsupported management device"); err = -EINVAL; goto err; } if (!(config.mask & VDPA_DEV_NET_ATTRS_MASK) && config.mask & BIT_ULL(VDPA_ATTR_DEV_FEATURES) && classes & BIT_ULL(VIRTIO_ID_NET) && ncls > 1 && config.device_features & VIRTIO_DEVICE_F_MASK) { NL_SET_ERR_MSG_MOD(info->extack, "Management device supports multi-class while device features specified are ambiguous"); err = -EINVAL; goto err; } err = mdev->ops->dev_add(mdev, name, &config); err: up_write(&vdpa_dev_lock); return err; } static int vdpa_nl_cmd_dev_del_set_doit(struct sk_buff *skb, struct genl_info *info) { struct vdpa_mgmt_dev *mdev; struct vdpa_device *vdev; struct device *dev; const char *name; int err = 0; if (!info->attrs[VDPA_ATTR_DEV_NAME]) return -EINVAL; name = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]); down_write(&vdpa_dev_lock); dev = bus_find_device(&vdpa_bus, NULL, name, vdpa_name_match); if (!dev) { NL_SET_ERR_MSG_MOD(info->extack, "device not found"); err = -ENODEV; goto dev_err; } vdev = container_of(dev, struct vdpa_device, dev); if (!vdev->mdev) { NL_SET_ERR_MSG_MOD(info->extack, "Only user created device can be deleted by user"); err = -EINVAL; goto mdev_err; } mdev = vdev->mdev; mdev->ops->dev_del(mdev, vdev); mdev_err: put_device(dev); dev_err: up_write(&vdpa_dev_lock); return err; } static int vdpa_dev_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid, u32 seq, int flags, struct netlink_ext_ack *extack) { u16 max_vq_size; u16 min_vq_size = 1; u32 device_id; u32 vendor_id; void *hdr; int err; hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags, VDPA_CMD_DEV_NEW); if (!hdr) return -EMSGSIZE; err = vdpa_nl_mgmtdev_handle_fill(msg, vdev->mdev); if (err) goto msg_err; device_id = vdev->config->get_device_id(vdev); vendor_id = vdev->config->get_vendor_id(vdev); max_vq_size = vdev->config->get_vq_num_max(vdev); if (vdev->config->get_vq_num_min) min_vq_size = vdev->config->get_vq_num_min(vdev); err = -EMSGSIZE; if (nla_put_string(msg, VDPA_ATTR_DEV_NAME, dev_name(&vdev->dev))) goto msg_err; if (nla_put_u32(msg, VDPA_ATTR_DEV_ID, device_id)) goto msg_err; if (nla_put_u32(msg, VDPA_ATTR_DEV_VENDOR_ID, vendor_id)) goto msg_err; if (nla_put_u32(msg, VDPA_ATTR_DEV_MAX_VQS, vdev->nvqs)) goto msg_err; if (nla_put_u16(msg, VDPA_ATTR_DEV_MAX_VQ_SIZE, max_vq_size)) goto msg_err; if (nla_put_u16(msg, VDPA_ATTR_DEV_MIN_VQ_SIZE, min_vq_size)) goto msg_err; genlmsg_end(msg, hdr); return 0; msg_err: genlmsg_cancel(msg, hdr); return err; } static int vdpa_nl_cmd_dev_get_doit(struct sk_buff *skb, struct genl_info *info) { struct vdpa_device *vdev; struct sk_buff *msg; const char *devname; struct device *dev; int err; if (!info->attrs[VDPA_ATTR_DEV_NAME]) return -EINVAL; devname = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]); msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if 
(!msg) return -ENOMEM; down_read(&vdpa_dev_lock); dev = bus_find_device(&vdpa_bus, NULL, devname, vdpa_name_match); if (!dev) { NL_SET_ERR_MSG_MOD(info->extack, "device not found"); err = -ENODEV; goto err; } vdev = container_of(dev, struct vdpa_device, dev); if (!vdev->mdev) { err = -EINVAL; goto mdev_err; } err = vdpa_dev_fill(vdev, msg, info->snd_portid, info->snd_seq, 0, info->extack); if (err) goto mdev_err; err = genlmsg_reply(msg, info); put_device(dev); up_read(&vdpa_dev_lock); return err; mdev_err: put_device(dev); err: up_read(&vdpa_dev_lock); nlmsg_free(msg); return err; } struct vdpa_dev_dump_info { struct sk_buff *msg; struct netlink_callback *cb; int start_idx; int idx; }; static int vdpa_dev_dump(struct device *dev, void *data) { struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev); struct vdpa_dev_dump_info *info = data; int err; if (!vdev->mdev) return 0; if (info->idx < info->start_idx) { info->idx++; return 0; } err = vdpa_dev_fill(vdev, info->msg, NETLINK_CB(info->cb->skb).portid, info->cb->nlh->nlmsg_seq, NLM_F_MULTI, info->cb->extack); if (err) return err; info->idx++; return 0; } static int vdpa_nl_cmd_dev_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb) { struct vdpa_dev_dump_info info; info.msg = msg; info.cb = cb; info.start_idx = cb->args[0]; info.idx = 0; down_read(&vdpa_dev_lock); bus_for_each_dev(&vdpa_bus, NULL, &info, vdpa_dev_dump); up_read(&vdpa_dev_lock); cb->args[0] = info.idx; return msg->len; } static int vdpa_dev_net_mq_config_fill(struct sk_buff *msg, u64 features, const struct virtio_net_config *config) { u16 val_u16; if ((features & BIT_ULL(VIRTIO_NET_F_MQ)) == 0 && (features & BIT_ULL(VIRTIO_NET_F_RSS)) == 0) return 0; val_u16 = __virtio16_to_cpu(true, config->max_virtqueue_pairs); return nla_put_u16(msg, VDPA_ATTR_DEV_NET_CFG_MAX_VQP, val_u16); } static int vdpa_dev_net_mtu_config_fill(struct sk_buff *msg, u64 features, const struct virtio_net_config *config) { u16 val_u16; if ((features & BIT_ULL(VIRTIO_NET_F_MTU)) == 0) return 0; val_u16 = __virtio16_to_cpu(true, config->mtu); return nla_put_u16(msg, VDPA_ATTR_DEV_NET_CFG_MTU, val_u16); } static int vdpa_dev_net_mac_config_fill(struct sk_buff *msg, u64 features, const struct virtio_net_config *config) { if ((features & BIT_ULL(VIRTIO_NET_F_MAC)) == 0) return 0; return nla_put(msg, VDPA_ATTR_DEV_NET_CFG_MACADDR, sizeof(config->mac), config->mac); } static int vdpa_dev_net_status_config_fill(struct sk_buff *msg, u64 features, const struct virtio_net_config *config) { u16 val_u16; if ((features & BIT_ULL(VIRTIO_NET_F_STATUS)) == 0) return 0; val_u16 = __virtio16_to_cpu(true, config->status); return nla_put_u16(msg, VDPA_ATTR_DEV_NET_STATUS, val_u16); } static int vdpa_dev_net_config_fill(struct vdpa_device *vdev, struct sk_buff *msg) { struct virtio_net_config config = {}; u64 features_device; vdev->config->get_config(vdev, 0, &config, sizeof(config)); features_device = vdev->config->get_device_features(vdev); if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_FEATURES, features_device, VDPA_ATTR_PAD)) return -EMSGSIZE; if (vdpa_dev_net_mtu_config_fill(msg, features_device, &config)) return -EMSGSIZE; if (vdpa_dev_net_mac_config_fill(msg, features_device, &config)) return -EMSGSIZE; if (vdpa_dev_net_status_config_fill(msg, features_device, &config)) return -EMSGSIZE; return vdpa_dev_net_mq_config_fill(msg, features_device, &config); } static int vdpa_dev_blk_capacity_config_fill(struct sk_buff *msg, const struct virtio_blk_config *config) { u64 val_u64; val_u64 = 
__virtio64_to_cpu(true, config->capacity); return nla_put_u64_64bit(msg, VDPA_ATTR_DEV_BLK_CFG_CAPACITY, val_u64, VDPA_ATTR_PAD); } static int vdpa_dev_blk_seg_size_config_fill(struct sk_buff *msg, u64 features, const struct virtio_blk_config *config) { u32 val_u32; if ((features & BIT_ULL(VIRTIO_BLK_F_SIZE_MAX)) == 0) return 0; val_u32 = __virtio32_to_cpu(true, config->size_max); return nla_put_u32(msg, VDPA_ATTR_DEV_BLK_CFG_SIZE_MAX, val_u32); } /* fill the block size*/ static int vdpa_dev_blk_block_size_config_fill(struct sk_buff *msg, u64 features, const struct virtio_blk_config *config) { u32 val_u32; if ((features & BIT_ULL(VIRTIO_BLK_F_BLK_SIZE)) == 0) return 0; val_u32 = __virtio32_to_cpu(true, config->blk_size); return nla_put_u32(msg, VDPA_ATTR_DEV_BLK_CFG_BLK_SIZE, val_u32); } static int vdpa_dev_blk_seg_max_config_fill(struct sk_buff *msg, u64 features, const struct virtio_blk_config *config) { u32 val_u32; if ((features & BIT_ULL(VIRTIO_BLK_F_SEG_MAX)) == 0) return 0; val_u32 = __virtio32_to_cpu(true, config->seg_max); return nla_put_u32(msg, VDPA_ATTR_DEV_BLK_CFG_SEG_MAX, val_u32); } static int vdpa_dev_blk_mq_config_fill(struct sk_buff *msg, u64 features, const struct virtio_blk_config *config) { u16 val_u16; if ((features & BIT_ULL(VIRTIO_BLK_F_MQ)) == 0) return 0; val_u16 = __virtio16_to_cpu(true, config->num_queues); return nla_put_u16(msg, VDPA_ATTR_DEV_BLK_CFG_NUM_QUEUES, val_u16); } static int vdpa_dev_blk_topology_config_fill(struct sk_buff *msg, u64 features, const struct virtio_blk_config *config) { u16 min_io_size; u32 opt_io_size; if ((features & BIT_ULL(VIRTIO_BLK_F_TOPOLOGY)) == 0) return 0; min_io_size = __virtio16_to_cpu(true, config->min_io_size); opt_io_size = __virtio32_to_cpu(true, config->opt_io_size); if (nla_put_u8(msg, VDPA_ATTR_DEV_BLK_CFG_PHY_BLK_EXP, config->physical_block_exp)) return -EMSGSIZE; if (nla_put_u8(msg, VDPA_ATTR_DEV_BLK_CFG_ALIGN_OFFSET, config->alignment_offset)) return -EMSGSIZE; if (nla_put_u16(msg, VDPA_ATTR_DEV_BLK_CFG_MIN_IO_SIZE, min_io_size)) return -EMSGSIZE; if (nla_put_u32(msg, VDPA_ATTR_DEV_BLK_CFG_OPT_IO_SIZE, opt_io_size)) return -EMSGSIZE; return 0; } static int vdpa_dev_blk_discard_config_fill(struct sk_buff *msg, u64 features, const struct virtio_blk_config *config) { u32 val_u32; if ((features & BIT_ULL(VIRTIO_BLK_F_DISCARD)) == 0) return 0; val_u32 = __virtio32_to_cpu(true, config->max_discard_sectors); if (nla_put_u32(msg, VDPA_ATTR_DEV_BLK_CFG_MAX_DISCARD_SEC, val_u32)) return -EMSGSIZE; val_u32 = __virtio32_to_cpu(true, config->max_discard_seg); if (nla_put_u32(msg, VDPA_ATTR_DEV_BLK_CFG_MAX_DISCARD_SEG, val_u32)) return -EMSGSIZE; val_u32 = __virtio32_to_cpu(true, config->discard_sector_alignment); if (nla_put_u32(msg, VDPA_ATTR_DEV_BLK_CFG_DISCARD_SEC_ALIGN, val_u32)) return -EMSGSIZE; return 0; } static int vdpa_dev_blk_write_zeroes_config_fill(struct sk_buff *msg, u64 features, const struct virtio_blk_config *config) { u32 val_u32; if ((features & BIT_ULL(VIRTIO_BLK_F_WRITE_ZEROES)) == 0) return 0; val_u32 = __virtio32_to_cpu(true, config->max_write_zeroes_sectors); if (nla_put_u32(msg, VDPA_ATTR_DEV_BLK_CFG_MAX_WRITE_ZEROES_SEC, val_u32)) return -EMSGSIZE; val_u32 = __virtio32_to_cpu(true, config->max_write_zeroes_seg); if (nla_put_u32(msg, VDPA_ATTR_DEV_BLK_CFG_MAX_WRITE_ZEROES_SEG, val_u32)) return -EMSGSIZE; return 0; } static int vdpa_dev_blk_ro_config_fill(struct sk_buff *msg, u64 features) { u8 ro; ro = ((features & BIT_ULL(VIRTIO_BLK_F_RO)) == 0) ? 
0 : 1; if (nla_put_u8(msg, VDPA_ATTR_DEV_BLK_READ_ONLY, ro)) return -EMSGSIZE; return 0; } static int vdpa_dev_blk_flush_config_fill(struct sk_buff *msg, u64 features) { u8 flush; flush = ((features & BIT_ULL(VIRTIO_BLK_F_FLUSH)) == 0) ? 0 : 1; if (nla_put_u8(msg, VDPA_ATTR_DEV_BLK_FLUSH, flush)) return -EMSGSIZE; return 0; } static int vdpa_dev_blk_config_fill(struct vdpa_device *vdev, struct sk_buff *msg) { struct virtio_blk_config config = {}; u64 features_device; vdev->config->get_config(vdev, 0, &config, sizeof(config)); features_device = vdev->config->get_device_features(vdev); if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_FEATURES, features_device, VDPA_ATTR_PAD)) return -EMSGSIZE; if (vdpa_dev_blk_capacity_config_fill(msg, &config)) return -EMSGSIZE; if (vdpa_dev_blk_seg_size_config_fill(msg, features_device, &config)) return -EMSGSIZE; if (vdpa_dev_blk_block_size_config_fill(msg, features_device, &config)) return -EMSGSIZE; if (vdpa_dev_blk_seg_max_config_fill(msg, features_device, &config)) return -EMSGSIZE; if (vdpa_dev_blk_mq_config_fill(msg, features_device, &config)) return -EMSGSIZE; if (vdpa_dev_blk_topology_config_fill(msg, features_device, &config)) return -EMSGSIZE; if (vdpa_dev_blk_discard_config_fill(msg, features_device, &config)) return -EMSGSIZE; if (vdpa_dev_blk_write_zeroes_config_fill(msg, features_device, &config)) return -EMSGSIZE; if (vdpa_dev_blk_ro_config_fill(msg, features_device)) return -EMSGSIZE; if (vdpa_dev_blk_flush_config_fill(msg, features_device)) return -EMSGSIZE; return 0; } static int vdpa_dev_config_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid, u32 seq, int flags, struct netlink_ext_ack *extack) { u64 features_driver; u8 status = 0; u32 device_id; void *hdr; int err; down_read(&vdev->cf_lock); hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags, VDPA_CMD_DEV_CONFIG_GET); if (!hdr) { err = -EMSGSIZE; goto out; } if (nla_put_string(msg, VDPA_ATTR_DEV_NAME, dev_name(&vdev->dev))) { err = -EMSGSIZE; goto msg_err; } device_id = vdev->config->get_device_id(vdev); if (nla_put_u32(msg, VDPA_ATTR_DEV_ID, device_id)) { err = -EMSGSIZE; goto msg_err; } /* only read driver features after the feature negotiation is done */ status = vdev->config->get_status(vdev); if (status & VIRTIO_CONFIG_S_FEATURES_OK) { features_driver = vdev->config->get_driver_features(vdev); if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_NEGOTIATED_FEATURES, features_driver, VDPA_ATTR_PAD)) { err = -EMSGSIZE; goto msg_err; } } switch (device_id) { case VIRTIO_ID_NET: err = vdpa_dev_net_config_fill(vdev, msg); break; case VIRTIO_ID_BLOCK: err = vdpa_dev_blk_config_fill(vdev, msg); break; default: err = -EOPNOTSUPP; break; } if (err) goto msg_err; up_read(&vdev->cf_lock); genlmsg_end(msg, hdr); return 0; msg_err: genlmsg_cancel(msg, hdr); out: up_read(&vdev->cf_lock); return err; } static int vdpa_fill_stats_rec(struct vdpa_device *vdev, struct sk_buff *msg, struct genl_info *info, u32 index) { struct virtio_net_config config = {}; u64 features; u8 status; int err; status = vdev->config->get_status(vdev); if (!(status & VIRTIO_CONFIG_S_FEATURES_OK)) { NL_SET_ERR_MSG_MOD(info->extack, "feature negotiation not complete"); return -EAGAIN; } vdpa_get_config_unlocked(vdev, 0, &config, sizeof(config)); features = vdev->config->get_driver_features(vdev); if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_NEGOTIATED_FEATURES, features, VDPA_ATTR_PAD)) return -EMSGSIZE; err = vdpa_dev_net_mq_config_fill(msg, features, &config); if (err) return err; if (nla_put_u32(msg, 
VDPA_ATTR_DEV_QUEUE_INDEX, index)) return -EMSGSIZE; err = vdev->config->get_vendor_vq_stats(vdev, index, msg, info->extack); if (err) return err; return 0; } static int vendor_stats_fill(struct vdpa_device *vdev, struct sk_buff *msg, struct genl_info *info, u32 index) { int err; down_read(&vdev->cf_lock); if (!vdev->config->get_vendor_vq_stats) { err = -EOPNOTSUPP; goto out; } err = vdpa_fill_stats_rec(vdev, msg, info, index); out: up_read(&vdev->cf_lock); return err; } static int vdpa_dev_vendor_stats_fill(struct vdpa_device *vdev, struct sk_buff *msg, struct genl_info *info, u32 index) { u32 device_id; void *hdr; int err; u32 portid = info->snd_portid; u32 seq = info->snd_seq; u32 flags = 0; hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags, VDPA_CMD_DEV_VSTATS_GET); if (!hdr) return -EMSGSIZE; if (nla_put_string(msg, VDPA_ATTR_DEV_NAME, dev_name(&vdev->dev))) { err = -EMSGSIZE; goto undo_msg; } device_id = vdev->config->get_device_id(vdev); if (nla_put_u32(msg, VDPA_ATTR_DEV_ID, device_id)) { err = -EMSGSIZE; goto undo_msg; } switch (device_id) { case VIRTIO_ID_NET: if (index > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX) { NL_SET_ERR_MSG_MOD(info->extack, "queue index exceeds max value"); err = -ERANGE; break; } err = vendor_stats_fill(vdev, msg, info, index); break; default: err = -EOPNOTSUPP; break; } genlmsg_end(msg, hdr); return err; undo_msg: genlmsg_cancel(msg, hdr); return err; } static int vdpa_nl_cmd_dev_config_get_doit(struct sk_buff *skb, struct genl_info *info) { struct vdpa_device *vdev; struct sk_buff *msg; const char *devname; struct device *dev; int err; if (!info->attrs[VDPA_ATTR_DEV_NAME]) return -EINVAL; devname = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]); msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; down_read(&vdpa_dev_lock); dev = bus_find_device(&vdpa_bus, NULL, devname, vdpa_name_match); if (!dev) { NL_SET_ERR_MSG_MOD(info->extack, "device not found"); err = -ENODEV; goto dev_err; } vdev = container_of(dev, struct vdpa_device, dev); if (!vdev->mdev) { NL_SET_ERR_MSG_MOD(info->extack, "unmanaged vdpa device"); err = -EINVAL; goto mdev_err; } err = vdpa_dev_config_fill(vdev, msg, info->snd_portid, info->snd_seq, 0, info->extack); if (!err) err = genlmsg_reply(msg, info); mdev_err: put_device(dev); dev_err: up_read(&vdpa_dev_lock); if (err) nlmsg_free(msg); return err; } static int vdpa_dev_net_device_attr_set(struct vdpa_device *vdev, struct genl_info *info) { struct vdpa_dev_set_config set_config = {}; struct vdpa_mgmt_dev *mdev = vdev->mdev; struct nlattr **nl_attrs = info->attrs; const u8 *macaddr; int err = -EOPNOTSUPP; down_write(&vdev->cf_lock); if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MACADDR]) { set_config.mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR); macaddr = nla_data(nl_attrs[VDPA_ATTR_DEV_NET_CFG_MACADDR]); if (is_valid_ether_addr(macaddr)) { ether_addr_copy(set_config.net.mac, macaddr); if (mdev->ops->dev_set_attr) { err = mdev->ops->dev_set_attr(mdev, vdev, &set_config); } else { NL_SET_ERR_MSG_FMT_MOD(info->extack, "Operation not supported by the device."); } } else { NL_SET_ERR_MSG_FMT_MOD(info->extack, "Invalid MAC address"); } } up_write(&vdev->cf_lock); return err; } static int vdpa_nl_cmd_dev_attr_set_doit(struct sk_buff *skb, struct genl_info *info) { struct vdpa_device *vdev; struct device *dev; const char *name; u64 classes; int err = 0; if (!info->attrs[VDPA_ATTR_DEV_NAME]) return -EINVAL; name = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]); down_write(&vdpa_dev_lock); dev = bus_find_device(&vdpa_bus, NULL, name, 
vdpa_name_match); if (!dev) { NL_SET_ERR_MSG_MOD(info->extack, "device not found"); err = -ENODEV; goto dev_err; } vdev = container_of(dev, struct vdpa_device, dev); if (!vdev->mdev) { NL_SET_ERR_MSG_MOD(info->extack, "unmanaged vdpa device"); err = -EINVAL; goto mdev_err; } classes = vdpa_mgmtdev_get_classes(vdev->mdev, NULL); if (classes & BIT_ULL(VIRTIO_ID_NET)) { err = vdpa_dev_net_device_attr_set(vdev, info); } else { NL_SET_ERR_MSG_FMT_MOD(info->extack, "%s device not supported", name); } mdev_err: put_device(dev); dev_err: up_write(&vdpa_dev_lock); return err; } static int vdpa_dev_config_dump(struct device *dev, void *data) { struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev); struct vdpa_dev_dump_info *info = data; int err; if (!vdev->mdev) return 0; if (info->idx < info->start_idx) { info->idx++; return 0; } err = vdpa_dev_config_fill(vdev, info->msg, NETLINK_CB(info->cb->skb).portid, info->cb->nlh->nlmsg_seq, NLM_F_MULTI, info->cb->extack); if (err) return err; info->idx++; return 0; } static int vdpa_nl_cmd_dev_config_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb) { struct vdpa_dev_dump_info info; info.msg = msg; info.cb = cb; info.start_idx = cb->args[0]; info.idx = 0; down_read(&vdpa_dev_lock); bus_for_each_dev(&vdpa_bus, NULL, &info, vdpa_dev_config_dump); up_read(&vdpa_dev_lock); cb->args[0] = info.idx; return msg->len; } static int vdpa_nl_cmd_dev_stats_get_doit(struct sk_buff *skb, struct genl_info *info) { struct vdpa_device *vdev; struct sk_buff *msg; const char *devname; struct device *dev; u32 index; int err; if (!info->attrs[VDPA_ATTR_DEV_NAME]) return -EINVAL; if (!info->attrs[VDPA_ATTR_DEV_QUEUE_INDEX]) return -EINVAL; devname = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]); msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; index = nla_get_u32(info->attrs[VDPA_ATTR_DEV_QUEUE_INDEX]); down_read(&vdpa_dev_lock); dev = bus_find_device(&vdpa_bus, NULL, devname, vdpa_name_match); if (!dev) { NL_SET_ERR_MSG_MOD(info->extack, "device not found"); err = -ENODEV; goto dev_err; } vdev = container_of(dev, struct vdpa_device, dev); if (!vdev->mdev) { NL_SET_ERR_MSG_MOD(info->extack, "unmanaged vdpa device"); err = -EINVAL; goto mdev_err; } err = vdpa_dev_vendor_stats_fill(vdev, msg, info, index); if (err) goto mdev_err; err = genlmsg_reply(msg, info); put_device(dev); up_read(&vdpa_dev_lock); return err; mdev_err: put_device(dev); dev_err: nlmsg_free(msg); up_read(&vdpa_dev_lock); return err; } static const struct nla_policy vdpa_nl_policy[VDPA_ATTR_MAX + 1] = { [VDPA_ATTR_MGMTDEV_BUS_NAME] = { .type = NLA_NUL_STRING }, [VDPA_ATTR_MGMTDEV_DEV_NAME] = { .type = NLA_STRING }, [VDPA_ATTR_DEV_NAME] = { .type = NLA_STRING }, [VDPA_ATTR_DEV_NET_CFG_MACADDR] = NLA_POLICY_ETH_ADDR, [VDPA_ATTR_DEV_NET_CFG_MAX_VQP] = { .type = NLA_U16 }, /* virtio spec 1.1 section 5.1.4.1 for valid MTU range */ [VDPA_ATTR_DEV_NET_CFG_MTU] = NLA_POLICY_MIN(NLA_U16, 68), [VDPA_ATTR_DEV_QUEUE_INDEX] = { .type = NLA_U32 }, [VDPA_ATTR_DEV_FEATURES] = { .type = NLA_U64 }, }; static const struct genl_ops vdpa_nl_ops[] = { { .cmd = VDPA_CMD_MGMTDEV_GET, .doit = vdpa_nl_cmd_mgmtdev_get_doit, .dumpit = vdpa_nl_cmd_mgmtdev_get_dumpit, }, { .cmd = VDPA_CMD_DEV_NEW, .doit = vdpa_nl_cmd_dev_add_set_doit, .flags = GENL_ADMIN_PERM, }, { .cmd = VDPA_CMD_DEV_DEL, .doit = vdpa_nl_cmd_dev_del_set_doit, .flags = GENL_ADMIN_PERM, }, { .cmd = VDPA_CMD_DEV_GET, .doit = vdpa_nl_cmd_dev_get_doit, .dumpit = vdpa_nl_cmd_dev_get_dumpit, }, { .cmd = VDPA_CMD_DEV_CONFIG_GET, .doit 
= vdpa_nl_cmd_dev_config_get_doit, .dumpit = vdpa_nl_cmd_dev_config_get_dumpit, }, { .cmd = VDPA_CMD_DEV_VSTATS_GET, .doit = vdpa_nl_cmd_dev_stats_get_doit, .flags = GENL_ADMIN_PERM, }, { .cmd = VDPA_CMD_DEV_ATTR_SET, .doit = vdpa_nl_cmd_dev_attr_set_doit, .flags = GENL_ADMIN_PERM, }, }; static struct genl_family vdpa_nl_family __ro_after_init = { .name = VDPA_GENL_NAME, .version = VDPA_GENL_VERSION, .maxattr = VDPA_ATTR_MAX, .policy = vdpa_nl_policy, .netnsok = false, .module = THIS_MODULE, .ops = vdpa_nl_ops, .n_ops = ARRAY_SIZE(vdpa_nl_ops), .resv_start_op = VDPA_CMD_DEV_VSTATS_GET + 1, }; static int vdpa_init(void) { int err; err = bus_register(&vdpa_bus); if (err) return err; err = genl_register_family(&vdpa_nl_family); if (err) goto err; return 0; err: bus_unregister(&vdpa_bus); return err; } static void __exit vdpa_exit(void) { genl_unregister_family(&vdpa_nl_family); bus_unregister(&vdpa_bus); ida_destroy(&vdpa_index_ida); } core_initcall(vdpa_init); module_exit(vdpa_exit); MODULE_AUTHOR("Jason Wang <jasowang@redhat.com>"); MODULE_DESCRIPTION("vDPA bus"); MODULE_LICENSE("GPL v2");
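/*
 * Editor's sketch (not part of the file above): the minimal shape of a bus
 * driver that binds to devices registered on the vdpa_bus via
 * vdpa_register_device(). my_vdpa_probe(), my_vdpa_remove() and
 * my_vdpa_driver are hypothetical names for illustration only; struct
 * vdpa_driver and the module_vdpa_driver() helper come from <linux/vdpa.h>.
 */
static int my_vdpa_probe(struct vdpa_device *vdev)
{
	/*
	 * By the time this callback runs, vdpa_dev_probe() has already
	 * validated the vq size limits and set the 64-bit DMA mask.
	 */
	dev_info(&vdev->dev, "bound to %s\n", dev_name(&vdev->dev));
	return 0;
}

static void my_vdpa_remove(struct vdpa_device *vdev)
{
	dev_info(&vdev->dev, "unbound\n");
}

static struct vdpa_driver my_vdpa_driver = {
	.driver = {
		.name = "my_vdpa",
	},
	.probe = my_vdpa_probe,
	.remove = my_vdpa_remove,
};
module_vdpa_driver(my_vdpa_driver);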
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Handle incoming frames
 *	Linux ethernet bridge
 *
 *	Authors:
 *	Lennert Buytenhek		<buytenh@gnu.org>
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/netfilter_bridge.h>
#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
#include <net/netfilter/nf_queue.h>
#endif
#include <linux/neighbour.h>
#include <net/arp.h>
#include <net/dsa.h>
#include <linux/export.h>
#include <linux/rculist.h>
#include "br_private.h"
#include "br_private_tunnel.h"

static int
br_netif_receive_skb(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	br_drop_fake_rtable(skb);
	return netif_receive_skb(skb);
}

static int br_pass_frame_up(struct sk_buff *skb, bool promisc)
{
	struct net_device *indev, *brdev = BR_INPUT_SKB_CB(skb)->brdev;
	struct net_bridge *br = netdev_priv(brdev);
	struct net_bridge_vlan_group *vg;

	dev_sw_netstats_rx_add(brdev, skb->len);

	vg = br_vlan_group_rcu(br);

	/* Reset the offload_fwd_mark because there could be a stacked
	 * bridge above, and it should not think this bridge is doing
	 * that bridge's work forwarding out its ports.
	 */
	br_switchdev_frame_unmark(skb);

	/* Bridge is just like any other port.  Make sure the
	 * packet is allowed except in promisc mode when someone
	 * may be running packet capture.
*/ if (!(brdev->flags & IFF_PROMISC) && !br_allowed_egress(vg, skb)) { kfree_skb(skb); return NET_RX_DROP; } indev = skb->dev; skb->dev = brdev; skb = br_handle_vlan(br, NULL, vg, skb); if (!skb) return NET_RX_DROP; /* update the multicast stats if the packet is IGMP/MLD */ br_multicast_count(br, NULL, skb, br_multicast_igmp_type(skb), BR_MCAST_DIR_TX); BR_INPUT_SKB_CB(skb)->promisc = promisc; return NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, dev_net(indev), NULL, skb, indev, NULL, br_netif_receive_skb); } /* note: already called with rcu_read_lock */ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb) { enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED; struct net_bridge_port *p = br_port_get_rcu(skb->dev); enum br_pkt_type pkt_type = BR_PKT_UNICAST; struct net_bridge_fdb_entry *dst = NULL; struct net_bridge_mcast_port *pmctx; struct net_bridge_mdb_entry *mdst; bool local_rcv, mcast_hit = false; struct net_bridge_mcast *brmctx; struct net_bridge_vlan *vlan; struct net_bridge *br; bool promisc; u16 vid = 0; u8 state; if (!p) goto drop; br = p->br; if (br_mst_is_enabled(br)) { state = BR_STATE_FORWARDING; } else { if (p->state == BR_STATE_DISABLED) { reason = SKB_DROP_REASON_BRIDGE_INGRESS_STP_STATE; goto drop; } state = p->state; } brmctx = &p->br->multicast_ctx; pmctx = &p->multicast_ctx; if (!br_allowed_ingress(p->br, nbp_vlan_group_rcu(p), skb, &vid, &state, &vlan)) goto out; if (p->flags & BR_PORT_LOCKED) { struct net_bridge_fdb_entry *fdb_src = br_fdb_find_rcu(br, eth_hdr(skb)->h_source, vid); if (!fdb_src) { /* FDB miss. Create locked FDB entry if MAB is enabled * and drop the packet. */ if (p->flags & BR_PORT_MAB) br_fdb_update(br, p, eth_hdr(skb)->h_source, vid, BIT(BR_FDB_LOCKED)); goto drop; } else if (READ_ONCE(fdb_src->dst) != p || test_bit(BR_FDB_LOCAL, &fdb_src->flags)) { /* FDB mismatch. Drop the packet without roaming. */ goto drop; } else if (test_bit(BR_FDB_LOCKED, &fdb_src->flags)) { /* FDB match, but entry is locked. Refresh it and drop * the packet. 
	 */
			br_fdb_update(br, p, eth_hdr(skb)->h_source, vid,
				      BIT(BR_FDB_LOCKED));
			goto drop;
		}
	}

	nbp_switchdev_frame_mark(p, skb);

	/* insert into forwarding database after filtering to avoid spoofing */
	if (p->flags & BR_LEARNING)
		br_fdb_update(br, p, eth_hdr(skb)->h_source, vid, 0);

	promisc = !!(br->dev->flags & IFF_PROMISC);
	local_rcv = promisc;

	if (is_multicast_ether_addr(eth_hdr(skb)->h_dest)) {
		/* by definition the broadcast is also a multicast address */
		if (is_broadcast_ether_addr(eth_hdr(skb)->h_dest)) {
			pkt_type = BR_PKT_BROADCAST;
			local_rcv = true;
		} else {
			pkt_type = BR_PKT_MULTICAST;
			if (br_multicast_rcv(&brmctx, &pmctx, vlan, skb, vid))
				goto drop;
		}
	}

	if (state == BR_STATE_LEARNING) {
		reason = SKB_DROP_REASON_BRIDGE_INGRESS_STP_STATE;
		goto drop;
	}

	BR_INPUT_SKB_CB(skb)->brdev = br->dev;
	BR_INPUT_SKB_CB(skb)->src_port_isolated = !!(p->flags & BR_ISOLATED);

	if (IS_ENABLED(CONFIG_INET) &&
	    (skb->protocol == htons(ETH_P_ARP) ||
	     skb->protocol == htons(ETH_P_RARP))) {
		br_do_proxy_suppress_arp(skb, br, vid, p);
	} else if (IS_ENABLED(CONFIG_IPV6) &&
		   skb->protocol == htons(ETH_P_IPV6) &&
		   br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED) &&
		   pskb_may_pull(skb, sizeof(struct ipv6hdr) +
				 sizeof(struct nd_msg)) &&
		   ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) {
		struct nd_msg *msg, _msg;

		msg = br_is_nd_neigh_msg(skb, &_msg);
		if (msg)
			br_do_suppress_nd(skb, br, vid, p, msg);
	}

	switch (pkt_type) {
	case BR_PKT_MULTICAST:
		mdst = br_mdb_entry_skb_get(brmctx, skb, vid);
		if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) &&
		    br_multicast_querier_exists(brmctx, eth_hdr(skb), mdst)) {
			if ((mdst && mdst->host_joined) ||
			    br_multicast_is_router(brmctx, skb) ||
			    br->dev->flags & IFF_ALLMULTI) {
				local_rcv = true;
				DEV_STATS_INC(br->dev, multicast);
			}
			mcast_hit = true;
		} else {
			local_rcv = true;
			DEV_STATS_INC(br->dev, multicast);
		}
		break;
	case BR_PKT_UNICAST:
		dst = br_fdb_find_rcu(br, eth_hdr(skb)->h_dest, vid);
		break;
	default:
		break;
	}

	if (dst) {
		unsigned long now = jiffies;

		if (test_bit(BR_FDB_LOCAL, &dst->flags))
			return br_pass_frame_up(skb, false);

		if (now != dst->used)
			dst->used = now;
		br_forward(dst->dst, skb, local_rcv, false);
	} else {
		if (!mcast_hit)
			br_flood(br, skb, pkt_type, local_rcv, false, vid);
		else
			br_multicast_flood(mdst, skb, brmctx, local_rcv, false);
	}

	if (local_rcv)
		return br_pass_frame_up(skb, promisc);

out:
	return 0;
drop:
	kfree_skb_reason(skb, reason);
	goto out;
}
EXPORT_SYMBOL_GPL(br_handle_frame_finish);

static void __br_handle_local_finish(struct sk_buff *skb)
{
	struct net_bridge_port *p = br_port_get_rcu(skb->dev);
	u16 vid = 0;

	/* check if vlan is allowed, to avoid spoofing */
	if ((p->flags & BR_LEARNING) &&
	    nbp_state_should_learn(p) &&
	    !br_opt_get(p->br, BROPT_NO_LL_LEARN) &&
	    br_should_learn(p, skb, &vid))
		br_fdb_update(p->br, p, eth_hdr(skb)->h_source, vid, 0);
}

/* note: already called with rcu_read_lock */
static int br_handle_local_finish(struct net *net, struct sock *sk,
				  struct sk_buff *skb)
{
	__br_handle_local_finish(skb);

	/* return 1 to signal the okfn() was called so it's ok to use the skb */
	return 1;
}

static int nf_hook_bridge_pre(struct sk_buff *skb, struct sk_buff **pskb)
{
#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
	struct nf_hook_entries *e = NULL;
	struct nf_hook_state state;
	unsigned int verdict, i;
	struct net *net;
	int ret;

	net = dev_net(skb->dev);
#ifdef HAVE_JUMP_LABEL
	if (!static_key_false(&nf_hooks_needed[NFPROTO_BRIDGE][NF_BR_PRE_ROUTING]))
		goto frame_finish;
#endif

	e = rcu_dereference(net->nf.hooks_bridge[NF_BR_PRE_ROUTING]);
	if (!e)
		goto frame_finish;

	nf_hook_state_init(&state, NF_BR_PRE_ROUTING,
			   NFPROTO_BRIDGE, skb->dev, NULL, NULL,
			   net, br_handle_frame_finish);

	for (i = 0; i < e->num_hook_entries; i++) {
		verdict = nf_hook_entry_hookfn(&e->hooks[i], skb, &state);
		switch (verdict & NF_VERDICT_MASK) {
		case NF_ACCEPT:
			if (BR_INPUT_SKB_CB(skb)->br_netfilter_broute) {
				*pskb = skb;
				return RX_HANDLER_PASS;
			}
			break;
		case NF_DROP:
			kfree_skb(skb);
			return RX_HANDLER_CONSUMED;
		case NF_QUEUE:
			ret = nf_queue(skb, &state, i, verdict);
			if (ret == 1)
				continue;
			return RX_HANDLER_CONSUMED;
		default: /* STOLEN */
			return RX_HANDLER_CONSUMED;
		}
	}
frame_finish:
	net = dev_net(skb->dev);
	br_handle_frame_finish(net, NULL, skb);
#else
	br_handle_frame_finish(dev_net(skb->dev), NULL, skb);
#endif
	return RX_HANDLER_CONSUMED;
}

/* Return 0 if the frame was not processed otherwise 1
 * note: already called with rcu_read_lock
 */
static int br_process_frame_type(struct net_bridge_port *p,
				 struct sk_buff *skb)
{
	struct br_frame_type *tmp;

	hlist_for_each_entry_rcu(tmp, &p->br->frame_type_list, list)
		if (unlikely(tmp->type == skb->protocol))
			return tmp->frame_handler(p, skb);

	return 0;
}

/*
 * Return NULL if skb is handled
 * note: already called with rcu_read_lock
 */
static rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
{
	enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED;
	struct net_bridge_port *p;
	struct sk_buff *skb = *pskb;
	const unsigned char *dest = eth_hdr(skb)->h_dest;

	if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
		return RX_HANDLER_PASS;

	if (!is_valid_ether_addr(eth_hdr(skb)->h_source)) {
		reason = SKB_DROP_REASON_MAC_INVALID_SOURCE;
		goto drop;
	}

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return RX_HANDLER_CONSUMED;

	memset(skb->cb, 0, sizeof(struct br_input_skb_cb));
	br_tc_skb_miss_set(skb, false);

	p = br_port_get_rcu(skb->dev);
	if (p->flags & BR_VLAN_TUNNEL)
		br_handle_ingress_vlan_tunnel(skb, p, nbp_vlan_group_rcu(p));

	if (unlikely(is_link_local_ether_addr(dest))) {
		u16 fwd_mask = p->br->group_fwd_mask_required;

		/*
		 * See IEEE 802.1D Table 7-10 Reserved addresses
		 *
		 * Assignment			Value
		 * Bridge Group Address		01-80-C2-00-00-00
		 * (MAC Control) 802.3		01-80-C2-00-00-01
		 * (Link Aggregation) 802.3	01-80-C2-00-00-02
		 * 802.1X PAE address		01-80-C2-00-00-03
		 *
		 * 802.1AB LLDP			01-80-C2-00-00-0E
		 *
		 * Others reserved for future standardization
		 */
		fwd_mask |= p->group_fwd_mask;
		switch (dest[5]) {
		case 0x00:	/* Bridge Group Address */
			/* If STP is turned off, then must forward
			 * to keep loop detection
			 */
			if (p->br->stp_enabled == BR_NO_STP ||
			    fwd_mask & (1u << dest[5]))
				goto forward;
			*pskb = skb;
			__br_handle_local_finish(skb);
			return RX_HANDLER_PASS;

		case 0x01:	/* IEEE MAC (Pause) */
			reason = SKB_DROP_REASON_MAC_IEEE_MAC_CONTROL;
			goto drop;

		case 0x0E:	/* 802.1AB LLDP */
			fwd_mask |= p->br->group_fwd_mask;
			if (fwd_mask & (1u << dest[5]))
				goto forward;
			*pskb = skb;
			__br_handle_local_finish(skb);
			return RX_HANDLER_PASS;

		default:
			/* Allow selective forwarding for most other protocols */
			fwd_mask |= p->br->group_fwd_mask;
			if (fwd_mask & (1u << dest[5]))
				goto forward;
		}

		BR_INPUT_SKB_CB(skb)->promisc = false;

		/* The else clause should be hit when nf_hook():
		 *   - returns < 0 (drop/error)
		 *   - returns = 0 (stolen/nf_queue)
		 * Thus return 1 from the okfn() to signal the skb is ok to pass
		 */
		if (NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN,
			    dev_net(skb->dev), NULL, skb, skb->dev, NULL,
			    br_handle_local_finish) == 1) {
			return RX_HANDLER_PASS;
		} else {
			return RX_HANDLER_CONSUMED;
		}
	}

	if (unlikely(br_process_frame_type(p, skb)))
		return RX_HANDLER_PASS;

forward:
	if (br_mst_is_enabled(p->br))
		goto defer_stp_filtering;

	switch (p->state) {
	case BR_STATE_FORWARDING:
	case BR_STATE_LEARNING:
defer_stp_filtering:
		if (ether_addr_equal(p->br->dev->dev_addr, dest))
			skb->pkt_type = PACKET_HOST;

		return nf_hook_bridge_pre(skb, pskb);
	default:
		reason = SKB_DROP_REASON_BRIDGE_INGRESS_STP_STATE;
drop:
		kfree_skb_reason(skb, reason);
	}
	return RX_HANDLER_CONSUMED;
}

/* This function has no purpose other than to appease the br_port_get_rcu/rtnl
 * helpers which identify bridged ports according to the rx_handler installed
 * on them (so there _needs_ to be a bridge rx_handler even if we don't need it
 * to do anything useful). This bridge won't support traffic to/from the stack,
 * but only hardware bridging. So return RX_HANDLER_PASS so we don't steal
 * frames from the ETH_P_XDSA packet_type handler.
 */
static rx_handler_result_t br_handle_frame_dummy(struct sk_buff **pskb)
{
	return RX_HANDLER_PASS;
}

rx_handler_func_t *br_get_rx_handler(const struct net_device *dev)
{
	if (netdev_uses_dsa(dev))
		return br_handle_frame_dummy;

	return br_handle_frame;
}

void br_add_frame(struct net_bridge *br, struct br_frame_type *ft)
{
	hlist_add_head_rcu(&ft->list, &br->frame_type_list);
}

void br_del_frame(struct net_bridge *br, struct br_frame_type *ft)
{
	struct br_frame_type *tmp;

	hlist_for_each_entry(tmp, &br->frame_type_list, list)
		if (ft == tmp) {
			hlist_del_rcu(&ft->list);
			return;
		}
}
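/*
 * Usage sketch (illustrative, not part of the original file): how a protocol
 * handler could hook into the br_process_frame_type() dispatch above.
 * Everything named "example_*" is hypothetical; only br_add_frame(),
 * br_del_frame() and the .type/.frame_handler members read by
 * br_process_frame_type() come from the code above.
 */
#if 0
static int example_frame_handler(struct net_bridge_port *p,
				 struct sk_buff *skb)
{
	/* Inspect the frame here.  Returning nonzero makes br_handle_frame()
	 * return RX_HANDLER_PASS (the skb goes back to the stack instead of
	 * being bridged), so don't free the skb in that case; returning 0
	 * resumes normal bridge processing.
	 */
	return 1;
}

static struct br_frame_type example_frame_type = {
	.type		= cpu_to_be16(0x88E3),	/* example EtherType (MRP) */
	.frame_handler	= example_frame_handler,
};

	/* registration/unregistration, e.g. at bridge setup/teardown: */
	br_add_frame(br, &example_frame_type);
	br_del_frame(br, &example_frame_type);
#endif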
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Squashfs - a compressed read only filesystem for Linux
 *
 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
 * Phillip Lougher <phillip@squashfs.org.uk>
 *
 * super.c
 */

/*
 * This file implements code to read the superblock, read and initialise
 * in-memory structures at mount time, and all the VFS glue code to register
 * the filesystem.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/blkdev.h>
#include <linux/fs.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/vfs.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/seq_file.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/magic.h>
#include <linux/xattr.h>

#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
#include "squashfs_fs_i.h"
#include "squashfs.h"
#include "decompressor.h"
#include "xattr.h"

static struct file_system_type squashfs_fs_type;
static const struct super_operations squashfs_super_ops;

enum Opt_errors {
	Opt_errors_continue,
	Opt_errors_panic,
};

enum squashfs_param {
	Opt_errors,
	Opt_threads,
};

struct squashfs_mount_opts {
	enum Opt_errors errors;
	const struct squashfs_decompressor_thread_ops *thread_ops;
	int thread_num;
};

static const struct constant_table squashfs_param_errors[] = {
	{"continue",	Opt_errors_continue },
	{"panic",	Opt_errors_panic },
	{}
};

static const struct fs_parameter_spec squashfs_fs_parameters[] = {
	fsparam_enum("errors", Opt_errors, squashfs_param_errors),
	fsparam_string("threads", Opt_threads),
	{}
};

static int squashfs_parse_param_threads_str(const char *str,
					    struct squashfs_mount_opts *opts)
{
#ifdef CONFIG_SQUASHFS_CHOICE_DECOMP_BY_MOUNT
	if (strcmp(str, "single") == 0) {
		opts->thread_ops = &squashfs_decompressor_single;
		return 0;
	}
	if (strcmp(str, "multi") == 0) {
		opts->thread_ops = &squashfs_decompressor_multi;
		return 0;
	}
	if (strcmp(str, "percpu") == 0) {
		opts->thread_ops = &squashfs_decompressor_percpu;
		return 0;
	}
#endif
	return -EINVAL;
}

static int squashfs_parse_param_threads_num(const char *str,
					    struct squashfs_mount_opts *opts)
{
#ifdef CONFIG_SQUASHFS_MOUNT_DECOMP_THREADS
	int ret;
	unsigned long num;

	ret = kstrtoul(str, 0, &num);
	if (ret != 0)
		return -EINVAL;
	if (num > 1) {
		opts->thread_ops = &squashfs_decompressor_multi;
		if (num > opts->thread_ops->max_decompressors())
			return -EINVAL;
		opts->thread_num = (int)num;
		return 0;
	}
#ifdef CONFIG_SQUASHFS_DECOMP_SINGLE
	if (num == 1) {
		opts->thread_ops = &squashfs_decompressor_single;
		opts->thread_num = 1;
		return 0;
	}
#endif
#endif /* !CONFIG_SQUASHFS_MOUNT_DECOMP_THREADS */
	return -EINVAL;
}

static int squashfs_parse_param_threads(const char *str,
					struct squashfs_mount_opts *opts)
{
	int ret = squashfs_parse_param_threads_str(str, opts);

	if (ret == 0)
		return ret;
	return squashfs_parse_param_threads_num(str, opts);
}

static int squashfs_parse_param(struct fs_context *fc,
				struct fs_parameter *param)
{
	struct squashfs_mount_opts *opts = fc->fs_private;
	struct fs_parse_result result;
	int opt;

	opt = fs_parse(fc, squashfs_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_errors:
		opts->errors = result.uint_32;
		break;
	case Opt_threads:
		if (squashfs_parse_param_threads(param->string, opts) != 0)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static const struct squashfs_decompressor *supported_squashfs_filesystem(
	struct fs_context *fc,
	short major, short minor, short id)
{
	const struct squashfs_decompressor *decompressor;

	if (major < SQUASHFS_MAJOR) {
		errorf(fc, "Major/Minor mismatch, older Squashfs %d.%d "
		       "filesystems are unsupported", major, minor);
		return NULL;
	} else if (major > SQUASHFS_MAJOR || minor > SQUASHFS_MINOR) {
		errorf(fc, "Major/Minor mismatch, trying to mount newer "
		       "%d.%d filesystem", major, minor);
		errorf(fc, "Please update your kernel");
		return NULL;
	}

	decompressor = squashfs_lookup_decompressor(id);
	if (!decompressor->supported) {
		errorf(fc, "Filesystem uses \"%s\" compression. This is not supported",
		       decompressor->name);
		return NULL;
	}

	return decompressor;
}

static int squashfs_fill_super(struct super_block *sb, struct fs_context *fc)
{
	struct squashfs_mount_opts *opts = fc->fs_private;
	struct squashfs_sb_info *msblk;
	struct squashfs_super_block *sblk = NULL;
	struct inode *root;
	long long root_inode;
	unsigned short flags;
	unsigned int fragments;
	u64 lookup_table_start, xattr_id_table_start, next_table;
	int err;

	TRACE("Entered squashfs_fill_superblock\n");

	sb->s_fs_info = kzalloc(sizeof(*msblk), GFP_KERNEL);
	if (sb->s_fs_info == NULL) {
		ERROR("Failed to allocate squashfs_sb_info\n");
		return -ENOMEM;
	}
	msblk = sb->s_fs_info;
	msblk->thread_ops = opts->thread_ops;

	msblk->panic_on_errors = (opts->errors == Opt_errors_panic);

	msblk->devblksize = sb_min_blocksize(sb, SQUASHFS_DEVBLK_SIZE);
	if (!msblk->devblksize) {
		errorf(fc, "squashfs: unable to set blocksize\n");
		return -EINVAL;
	}
	msblk->devblksize_log2 = ffz(~msblk->devblksize);

	mutex_init(&msblk->meta_index_mutex);

	/*
	 * msblk->bytes_used is checked in squashfs_read_table to ensure reads
	 * are not beyond filesystem end.  But as we're using
	 * squashfs_read_table here to read the superblock (including the value
	 * of bytes_used) we need to set it to an initial sensible dummy value
	 */
	msblk->bytes_used = sizeof(*sblk);
	sblk = squashfs_read_table(sb, SQUASHFS_START, sizeof(*sblk));

	if (IS_ERR(sblk)) {
		errorf(fc, "unable to read squashfs_super_block");
		err = PTR_ERR(sblk);
		sblk = NULL;
		goto failed_mount;
	}

	err = -EINVAL;

	/* Check it is a SQUASHFS superblock */
	sb->s_magic = le32_to_cpu(sblk->s_magic);
	if (sb->s_magic != SQUASHFS_MAGIC) {
		if (!(fc->sb_flags & SB_SILENT))
			errorf(fc, "Can't find a SQUASHFS superblock on %pg",
			       sb->s_bdev);
		goto failed_mount;
	}

	if (opts->thread_num == 0) {
		msblk->max_thread_num = msblk->thread_ops->max_decompressors();
	} else {
		msblk->max_thread_num = opts->thread_num;
	}

	/* Check the MAJOR & MINOR versions and lookup compression type */
	msblk->decompressor = supported_squashfs_filesystem(
			fc,
			le16_to_cpu(sblk->s_major),
			le16_to_cpu(sblk->s_minor),
			le16_to_cpu(sblk->compression));
	if (msblk->decompressor == NULL)
		goto failed_mount;

	/* Check the filesystem does not extend beyond the end of the
	 * block device
	 */
	msblk->bytes_used = le64_to_cpu(sblk->bytes_used);
	if (msblk->bytes_used < 0 ||
	    msblk->bytes_used > bdev_nr_bytes(sb->s_bdev))
		goto failed_mount;

	/* Check block size for sanity */
	msblk->block_size = le32_to_cpu(sblk->block_size);
	if (msblk->block_size > SQUASHFS_FILE_MAX_SIZE)
		goto insanity;

	/*
	 * Check the system page size is not larger than the filesystem
	 * block size (by default 128K).  This is currently not supported.
	 */
	if (PAGE_SIZE > msblk->block_size) {
		errorf(fc, "Page size > filesystem block size (%d).  This is "
		       "currently not supported!", msblk->block_size);
		goto failed_mount;
	}

	/* Check block log for sanity */
	msblk->block_log = le16_to_cpu(sblk->block_log);
	if (msblk->block_log > SQUASHFS_FILE_MAX_LOG)
		goto failed_mount;

	/* Check that block_size and block_log match */
	if (msblk->block_size != (1 << msblk->block_log))
		goto insanity;

	/* Check the root inode for sanity */
	root_inode = le64_to_cpu(sblk->root_inode);
	if (SQUASHFS_INODE_OFFSET(root_inode) > SQUASHFS_METADATA_SIZE)
		goto insanity;

	msblk->inode_table = le64_to_cpu(sblk->inode_table_start);
	msblk->directory_table = le64_to_cpu(sblk->directory_table_start);
	msblk->inodes = le32_to_cpu(sblk->inodes);
	msblk->fragments = le32_to_cpu(sblk->fragments);
	msblk->ids = le16_to_cpu(sblk->no_ids);
	flags = le16_to_cpu(sblk->flags);

	TRACE("Found valid superblock on %pg\n", sb->s_bdev);
	TRACE("Inodes are %scompressed\n", SQUASHFS_UNCOMPRESSED_INODES(flags)
				? "un" : "");
	TRACE("Data is %scompressed\n", SQUASHFS_UNCOMPRESSED_DATA(flags)
				? "un" : "");
	TRACE("Filesystem size %lld bytes\n", msblk->bytes_used);
	TRACE("Block size %d\n", msblk->block_size);
	TRACE("Number of inodes %d\n", msblk->inodes);
	TRACE("Number of fragments %d\n", msblk->fragments);
	TRACE("Number of ids %d\n", msblk->ids);
	TRACE("sblk->inode_table_start %llx\n", msblk->inode_table);
	TRACE("sblk->directory_table_start %llx\n", msblk->directory_table);
	TRACE("sblk->fragment_table_start %llx\n",
		(u64) le64_to_cpu(sblk->fragment_table_start));
	TRACE("sblk->id_table_start %llx\n",
		(u64) le64_to_cpu(sblk->id_table_start));

	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_time_min = 0;
	sb->s_time_max = U32_MAX;
	sb->s_flags |= SB_RDONLY;
	sb->s_op = &squashfs_super_ops;

	msblk->block_cache = squashfs_cache_init("metadata",
			SQUASHFS_CACHED_BLKS, SQUASHFS_METADATA_SIZE);
	if (IS_ERR(msblk->block_cache)) {
		err = PTR_ERR(msblk->block_cache);
		goto failed_mount;
	}

	/* Allocate read_page block */
	msblk->read_page = squashfs_cache_init("data",
		SQUASHFS_READ_PAGES, msblk->block_size);
	if (IS_ERR(msblk->read_page)) {
		errorf(fc, "Failed to allocate read_page block");
		err = PTR_ERR(msblk->read_page);
		goto failed_mount;
	}

	if (msblk->devblksize == PAGE_SIZE) {
		struct inode *cache = new_inode(sb);

		if (cache == NULL) {
			err = -ENOMEM;
			goto failed_mount;
		}

		set_nlink(cache, 1);
		cache->i_size = OFFSET_MAX;
		mapping_set_gfp_mask(cache->i_mapping, GFP_NOFS);

		msblk->cache_mapping = cache->i_mapping;
	}

	msblk->stream = squashfs_decompressor_setup(sb, flags);
	if (IS_ERR(msblk->stream)) {
		err = PTR_ERR(msblk->stream);
		msblk->stream = NULL;
		goto insanity;
	}

	/* Handle xattrs */
	sb->s_xattr = squashfs_xattr_handlers;
	xattr_id_table_start = le64_to_cpu(sblk->xattr_id_table_start);
	if (xattr_id_table_start == SQUASHFS_INVALID_BLK) {
		next_table = msblk->bytes_used;
		goto allocate_id_index_table;
	}

	/* Allocate and read xattr id lookup table */
	msblk->xattr_id_table = squashfs_read_xattr_id_table(sb,
		xattr_id_table_start, &msblk->xattr_table, &msblk->xattr_ids);
	if (IS_ERR(msblk->xattr_id_table)) {
		errorf(fc, "unable to read xattr id index table");
		err = PTR_ERR(msblk->xattr_id_table);
		msblk->xattr_id_table = NULL;
		if (err != -ENOTSUPP)
			goto failed_mount;
	}
	next_table = msblk->xattr_table;

allocate_id_index_table:
	/* Allocate and read id index table */
	msblk->id_table = squashfs_read_id_index_table(sb,
		le64_to_cpu(sblk->id_table_start), next_table, msblk->ids);
	if (IS_ERR(msblk->id_table)) {
		errorf(fc, "unable to read id index table");
		err = PTR_ERR(msblk->id_table);
		msblk->id_table = NULL;
		goto failed_mount;
	}
	next_table = le64_to_cpu(msblk->id_table[0]);

	/* Handle inode lookup table */
	lookup_table_start = le64_to_cpu(sblk->lookup_table_start);
	if (lookup_table_start == SQUASHFS_INVALID_BLK)
		goto handle_fragments;

	/* Allocate and read inode lookup table */
	msblk->inode_lookup_table = squashfs_read_inode_lookup_table(sb,
		lookup_table_start, next_table, msblk->inodes);
	if (IS_ERR(msblk->inode_lookup_table)) {
		errorf(fc, "unable to read inode lookup table");
		err = PTR_ERR(msblk->inode_lookup_table);
		msblk->inode_lookup_table = NULL;
		goto failed_mount;
	}
	next_table = le64_to_cpu(msblk->inode_lookup_table[0]);

	sb->s_export_op = &squashfs_export_ops;

handle_fragments:
	fragments = msblk->fragments;
	if (fragments == 0)
		goto check_directory_table;

	msblk->fragment_cache = squashfs_cache_init("fragment",
		min(SQUASHFS_CACHED_FRAGMENTS, fragments), msblk->block_size);
	if (IS_ERR(msblk->fragment_cache)) {
		err = PTR_ERR(msblk->fragment_cache);
		goto failed_mount;
	}

	/* Allocate and read fragment index table */
	msblk->fragment_index = squashfs_read_fragment_index_table(sb,
		le64_to_cpu(sblk->fragment_table_start), next_table, fragments);
	if (IS_ERR(msblk->fragment_index)) {
		errorf(fc, "unable to read fragment index table");
		err = PTR_ERR(msblk->fragment_index);
		msblk->fragment_index = NULL;
		goto failed_mount;
	}
	next_table = le64_to_cpu(msblk->fragment_index[0]);

check_directory_table:
	/* Sanity check directory_table */
	if (msblk->directory_table > next_table) {
		err = -EINVAL;
		goto insanity;
	}

	/* Sanity check inode_table */
	if (msblk->inode_table >= msblk->directory_table) {
		err = -EINVAL;
		goto insanity;
	}

	/* allocate root */
	root = new_inode(sb);
	if (!root) {
		err = -ENOMEM;
		goto failed_mount;
	}

	err = squashfs_read_inode(root, root_inode);
	if (err) {
		make_bad_inode(root);
		iput(root);
		goto failed_mount;
	}
	insert_inode_hash(root);

	sb->s_root = d_make_root(root);
	if (sb->s_root == NULL) {
		ERROR("Root inode create failed\n");
		err = -ENOMEM;
		goto failed_mount;
	}

	TRACE("Leaving squashfs_fill_super\n");
	kfree(sblk);
	return 0;

insanity:
	errorf(fc, "squashfs image failed sanity check");
failed_mount:
	squashfs_cache_delete(msblk->block_cache);
	squashfs_cache_delete(msblk->fragment_cache);
	squashfs_cache_delete(msblk->read_page);
	if (msblk->cache_mapping)
		iput(msblk->cache_mapping->host);
	msblk->thread_ops->destroy(msblk);
	kfree(msblk->inode_lookup_table);
	kfree(msblk->fragment_index);
	kfree(msblk->id_table);
	kfree(msblk->xattr_id_table);
	kfree(sb->s_fs_info);
	sb->s_fs_info = NULL;
	kfree(sblk);
	return err;
}

static int squashfs_get_tree(struct fs_context *fc)
{
	return get_tree_bdev(fc, squashfs_fill_super);
}

static int squashfs_reconfigure(struct fs_context *fc)
{
	struct super_block *sb = fc->root->d_sb;
	struct squashfs_sb_info *msblk = sb->s_fs_info;
	struct squashfs_mount_opts *opts = fc->fs_private;

	sync_filesystem(fc->root->d_sb);
	fc->sb_flags |= SB_RDONLY;

	msblk->panic_on_errors = (opts->errors == Opt_errors_panic);

	return 0;
}

static void squashfs_free_fs_context(struct fs_context *fc)
{
	kfree(fc->fs_private);
}

static const struct fs_context_operations squashfs_context_ops = {
	.get_tree	= squashfs_get_tree,
	.free		= squashfs_free_fs_context,
	.parse_param	= squashfs_parse_param,
	.reconfigure	= squashfs_reconfigure,
};

static int squashfs_show_options(struct seq_file *s, struct dentry *root)
{
	struct super_block *sb = root->d_sb;
	struct squashfs_sb_info *msblk = sb->s_fs_info;

	if (msblk->panic_on_errors)
		seq_puts(s, ",errors=panic");
	else
		seq_puts(s, ",errors=continue");

#ifdef CONFIG_SQUASHFS_CHOICE_DECOMP_BY_MOUNT
	if (msblk->thread_ops == &squashfs_decompressor_single) {
		seq_puts(s, ",threads=single");
		return 0;
	}
	if (msblk->thread_ops == &squashfs_decompressor_percpu) {
		seq_puts(s, ",threads=percpu");
		return 0;
	}
#endif
#ifdef CONFIG_SQUASHFS_MOUNT_DECOMP_THREADS
	seq_printf(s, ",threads=%d", msblk->max_thread_num);
#endif
	return 0;
}

static int squashfs_init_fs_context(struct fs_context *fc)
{
	struct squashfs_mount_opts *opts;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

#ifdef CONFIG_SQUASHFS_DECOMP_SINGLE
	opts->thread_ops = &squashfs_decompressor_single;
#elif defined(CONFIG_SQUASHFS_DECOMP_MULTI)
	opts->thread_ops = &squashfs_decompressor_multi;
#elif defined(CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU)
	opts->thread_ops = &squashfs_decompressor_percpu;
#else
#error "fail: unknown squashfs decompression thread mode?"
#endif
	opts->thread_num = 0;
	fc->fs_private = opts;
	fc->ops = &squashfs_context_ops;
	return 0;
}

static int squashfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct squashfs_sb_info *msblk = dentry->d_sb->s_fs_info;
	u64 id = huge_encode_dev(dentry->d_sb->s_bdev->bd_dev);

	TRACE("Entered squashfs_statfs\n");

	buf->f_type = SQUASHFS_MAGIC;
	buf->f_bsize = msblk->block_size;
	buf->f_blocks = ((msblk->bytes_used - 1) >> msblk->block_log) + 1;
	buf->f_bfree = buf->f_bavail = 0;
	buf->f_files = msblk->inodes;
	buf->f_ffree = 0;
	buf->f_namelen = SQUASHFS_NAME_LEN;
	buf->f_fsid = u64_to_fsid(id);

	return 0;
}

static void squashfs_put_super(struct super_block *sb)
{
	if (sb->s_fs_info) {
		struct squashfs_sb_info *sbi = sb->s_fs_info;

		squashfs_cache_delete(sbi->block_cache);
		squashfs_cache_delete(sbi->fragment_cache);
		squashfs_cache_delete(sbi->read_page);
		if (sbi->cache_mapping)
			iput(sbi->cache_mapping->host);
		sbi->thread_ops->destroy(sbi);
		kfree(sbi->id_table);
		kfree(sbi->fragment_index);
		kfree(sbi->meta_index);
		kfree(sbi->inode_lookup_table);
		kfree(sbi->xattr_id_table);
		kfree(sb->s_fs_info);
		sb->s_fs_info = NULL;
	}
}

static struct kmem_cache *squashfs_inode_cachep;

static void init_once(void *foo)
{
	struct squashfs_inode_info *ei = foo;

	inode_init_once(&ei->vfs_inode);
}

static int __init init_inodecache(void)
{
	squashfs_inode_cachep = kmem_cache_create("squashfs_inode_cache",
		sizeof(struct squashfs_inode_info), 0,
		SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|SLAB_ACCOUNT,
		init_once);

	return squashfs_inode_cachep ? 0 : -ENOMEM;
}

static void destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(squashfs_inode_cachep);
}

static int __init init_squashfs_fs(void)
{
	int err = init_inodecache();

	if (err)
		return err;

	err = register_filesystem(&squashfs_fs_type);
	if (err) {
		destroy_inodecache();
		return err;
	}

	pr_info("version 4.0 (2009/01/31) Phillip Lougher\n");

	return 0;
}

static void __exit exit_squashfs_fs(void)
{
	unregister_filesystem(&squashfs_fs_type);
	destroy_inodecache();
}

static struct inode *squashfs_alloc_inode(struct super_block *sb)
{
	struct squashfs_inode_info *ei =
		alloc_inode_sb(sb, squashfs_inode_cachep, GFP_KERNEL);

	return ei ? &ei->vfs_inode : NULL;
}

static void squashfs_free_inode(struct inode *inode)
{
	kmem_cache_free(squashfs_inode_cachep, squashfs_i(inode));
}

static struct file_system_type squashfs_fs_type = {
	.owner = THIS_MODULE,
	.name = "squashfs",
	.init_fs_context = squashfs_init_fs_context,
	.parameters = squashfs_fs_parameters,
	.kill_sb = kill_block_super,
	.fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
};
MODULE_ALIAS_FS("squashfs");

static const struct super_operations squashfs_super_ops = {
	.alloc_inode = squashfs_alloc_inode,
	.free_inode = squashfs_free_inode,
	.statfs = squashfs_statfs,
	.put_super = squashfs_put_super,
	.show_options = squashfs_show_options,
};

module_init(init_squashfs_fs);
module_exit(exit_squashfs_fs);
MODULE_DESCRIPTION("squashfs 4.0, a compressed read-only filesystem");
MODULE_AUTHOR("Phillip Lougher <phillip@squashfs.org.uk>");
MODULE_LICENSE("GPL");
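/*
 * Usage sketch (illustrative, not part of the original file): the two mount
 * parameters accepted by squashfs_parse_param() above.  "errors" takes
 * "continue" (the kzalloc'd default) or "panic"; "threads" takes
 * "single"/"multi"/"percpu" (with CONFIG_SQUASHFS_CHOICE_DECOMP_BY_MOUNT)
 * or a decompressor count (with CONFIG_SQUASHFS_MOUNT_DECOMP_THREADS).
 * The device and mount point below are made up.
 */
#if 0	/* userspace */
#include <sys/mount.h>

int main(void)
{
	/* squashfs is read-only, hence MS_RDONLY; the option string is fed
	 * to squashfs_parse_param() one parameter at a time.
	 */
	return mount("/dev/loop0", "/mnt/squash", "squashfs", MS_RDONLY,
		     "errors=panic,threads=4");
}
#endif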
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 */

#include "queueing.h"
#include <linux/skb_array.h>

struct multicore_worker __percpu *
wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr)
{
	int cpu;
	struct multicore_worker __percpu *worker =
		alloc_percpu(struct multicore_worker);

	if (!worker)
		return NULL;

	for_each_possible_cpu(cpu) {
		per_cpu_ptr(worker, cpu)->ptr = ptr;
		INIT_WORK(&per_cpu_ptr(worker, cpu)->work, function);
	}
	return worker;
}

int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
			 unsigned int len)
{
	int ret;

	memset(queue, 0, sizeof(*queue));
	queue->last_cpu = -1;
	ret = ptr_ring_init(&queue->ring, len, GFP_KERNEL);
	if (ret)
		return ret;
	queue->worker = wg_packet_percpu_multicore_worker_alloc(function, queue);
	if (!queue->worker) {
		ptr_ring_cleanup(&queue->ring, NULL);
		return -ENOMEM;
	}
	return 0;
}

void wg_packet_queue_free(struct crypt_queue *queue, bool purge)
{
	free_percpu(queue->worker);
	WARN_ON(!purge && !__ptr_ring_empty(&queue->ring));
	ptr_ring_cleanup(&queue->ring, purge ? __skb_array_destroy_skb : NULL);
}

#define NEXT(skb) ((skb)->prev)
#define STUB(queue) ((struct sk_buff *)&queue->empty)

void wg_prev_queue_init(struct prev_queue *queue)
{
	NEXT(STUB(queue)) = NULL;
	queue->head = queue->tail = STUB(queue);
	queue->peeked = NULL;
	atomic_set(&queue->count, 0);
	BUILD_BUG_ON(
		offsetof(struct sk_buff, next) != offsetof(struct prev_queue, empty.next) -
							offsetof(struct prev_queue, empty) ||
		offsetof(struct sk_buff, prev) != offsetof(struct prev_queue, empty.prev) -
							 offsetof(struct prev_queue, empty));
}

static void __wg_prev_queue_enqueue(struct prev_queue *queue,
				    struct sk_buff *skb)
{
	WRITE_ONCE(NEXT(skb), NULL);
	WRITE_ONCE(NEXT(xchg_release(&queue->head, skb)), skb);
}

bool wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb)
{
	if (!atomic_add_unless(&queue->count, 1, MAX_QUEUED_PACKETS))
		return false;
	__wg_prev_queue_enqueue(queue, skb);
	return true;
}

struct sk_buff *wg_prev_queue_dequeue(struct prev_queue *queue)
{
	struct sk_buff *tail = queue->tail, *next = smp_load_acquire(&NEXT(tail));

	if (tail == STUB(queue)) {
		if (!next)
			return NULL;
		queue->tail = next;
		tail = next;
		next = smp_load_acquire(&NEXT(next));
	}
	if (next) {
		queue->tail = next;
		atomic_dec(&queue->count);
		return tail;
	}
	if (tail != READ_ONCE(queue->head))
		return NULL;
	__wg_prev_queue_enqueue(queue, STUB(queue));
	next = smp_load_acquire(&NEXT(tail));
	if (next) {
		queue->tail = next;
		atomic_dec(&queue->count);
		return tail;
	}
	return NULL;
}

#undef NEXT
#undef STUB
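/*
 * Usage sketch (illustrative, not part of the original file): wg_prev_queue
 * is an intrusive multi-producer, single-consumer queue in the style of
 * Dmitry Vyukov's MPSC design.  The stub node is embedded in the queue
 * itself (STUB()), the link pointer lives in skb->prev (NEXT()), producers
 * only touch queue->head via xchg_release(), and the lone consumer owns
 * queue->tail.  The "peer" and process() names below are placeholders.
 */
#if 0
	/* producers, any context: */
	if (!wg_prev_queue_enqueue(&peer->rx_queue, skb))
		kfree_skb(skb);	/* queue already holds MAX_QUEUED_PACKETS */

	/* the single consumer: */
	while ((skb = wg_prev_queue_dequeue(&peer->rx_queue)) != NULL)
		process(skb);

	/* wg_prev_queue_dequeue() may transiently return NULL while a
	 * producer sits between its xchg_release() and the NEXT() store;
	 * the consumer simply retries on its next pass.
	 */
#endif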
/* BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#ifndef __SMP_H
#define __SMP_H

struct smp_command_hdr {
	__u8	code;
} __packed;

#define SMP_CMD_PAIRING_REQ	0x01
#define SMP_CMD_PAIRING_RSP	0x02
struct smp_cmd_pairing {
	__u8	io_capability;
	__u8	oob_flag;
	__u8	auth_req;
	__u8	max_key_size;
	__u8	init_key_dist;
	__u8	resp_key_dist;
} __packed;

#define SMP_IO_DISPLAY_ONLY	0x00
#define SMP_IO_DISPLAY_YESNO	0x01
#define SMP_IO_KEYBOARD_ONLY	0x02
#define SMP_IO_NO_INPUT_OUTPUT	0x03
#define SMP_IO_KEYBOARD_DISPLAY	0x04

#define SMP_OOB_NOT_PRESENT	0x00
#define SMP_OOB_PRESENT		0x01

#define SMP_DIST_ENC_KEY	0x01
#define SMP_DIST_ID_KEY		0x02
#define SMP_DIST_SIGN		0x04
#define SMP_DIST_LINK_KEY	0x08

#define SMP_AUTH_NONE		0x00
#define SMP_AUTH_BONDING	0x01
#define SMP_AUTH_MITM		0x04
#define SMP_AUTH_SC		0x08
#define SMP_AUTH_KEYPRESS	0x10
#define SMP_AUTH_CT2		0x20

#define SMP_CMD_PAIRING_CONFIRM	0x03
struct smp_cmd_pairing_confirm {
	__u8	confirm_val[16];
} __packed;

#define SMP_CMD_PAIRING_RANDOM	0x04
struct smp_cmd_pairing_random {
	__u8	rand_val[16];
} __packed;

#define SMP_CMD_PAIRING_FAIL	0x05
struct smp_cmd_pairing_fail {
	__u8	reason;
} __packed;

#define SMP_CMD_ENCRYPT_INFO	0x06
struct smp_cmd_encrypt_info {
	__u8	ltk[16];
} __packed;

#define SMP_CMD_INITIATOR_IDENT	0x07
struct smp_cmd_initiator_ident {
	__le16	ediv;
	__le64	rand;
} __packed;

#define SMP_CMD_IDENT_INFO	0x08
struct smp_cmd_ident_info {
	__u8	irk[16];
} __packed;

#define SMP_CMD_IDENT_ADDR_INFO	0x09
struct smp_cmd_ident_addr_info {
	__u8	addr_type;
	bdaddr_t bdaddr;
} __packed;

#define SMP_CMD_SIGN_INFO	0x0a
struct smp_cmd_sign_info {
	__u8	csrk[16];
} __packed;

#define SMP_CMD_SECURITY_REQ	0x0b
struct smp_cmd_security_req {
	__u8	auth_req;
} __packed;

#define SMP_CMD_PUBLIC_KEY	0x0c
struct smp_cmd_public_key {
	__u8	x[32];
	__u8	y[32];
} __packed;

#define SMP_CMD_DHKEY_CHECK	0x0d
struct smp_cmd_dhkey_check {
	__u8	e[16];
} __packed;

#define SMP_CMD_KEYPRESS_NOTIFY	0x0e
struct smp_cmd_keypress_notify {
	__u8	value;
} __packed;

#define SMP_CMD_MAX		0x0e

#define SMP_PASSKEY_ENTRY_FAILED	0x01
#define SMP_OOB_NOT_AVAIL		0x02
#define SMP_AUTH_REQUIREMENTS		0x03
#define SMP_CONFIRM_FAILED		0x04
#define SMP_PAIRING_NOTSUPP		0x05
#define SMP_ENC_KEY_SIZE		0x06
#define SMP_CMD_NOTSUPP			0x07
#define SMP_UNSPECIFIED			0x08
#define SMP_REPEATED_ATTEMPTS		0x09
#define SMP_INVALID_PARAMS		0x0a
#define SMP_DHKEY_CHECK_FAILED		0x0b
#define SMP_NUMERIC_COMP_FAILED		0x0c
#define SMP_BREDR_PAIRING_IN_PROGRESS	0x0d
#define SMP_CROSS_TRANSP_NOT_ALLOWED	0x0e
#define SMP_KEY_REJECTED		0x0f

#define SMP_MIN_ENC_KEY_SIZE		7
#define SMP_MAX_ENC_KEY_SIZE		16

/* LTK types used in internal storage (struct smp_ltk) */
enum {
	SMP_STK,
	SMP_LTK,
	SMP_LTK_RESPONDER,
	SMP_LTK_P256,
	SMP_LTK_P256_DEBUG,
};

static inline bool smp_ltk_is_sc(struct smp_ltk *key)
{
	switch (key->type) {
	case SMP_LTK_P256:
	case SMP_LTK_P256_DEBUG:
		return true;
	}

	return false;
}

static inline u8 smp_ltk_sec_level(struct smp_ltk *key)
{
	if (key->authenticated) {
		if (smp_ltk_is_sc(key))
			return BT_SECURITY_FIPS;
		else
			return BT_SECURITY_HIGH;
	}

	return BT_SECURITY_MEDIUM;
}

/* Key preferences for smp_sufficient security */
enum smp_key_pref {
	SMP_ALLOW_STK,
	SMP_USE_LTK,
};

/* SMP Commands */
int smp_cancel_and_remove_pairing(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 addr_type);
bool smp_sufficient_security(struct hci_conn *hcon, u8 sec_level,
			     enum smp_key_pref key_pref);
int smp_conn_security(struct hci_conn *hcon, __u8 sec_level);
int smp_user_confirm_reply(struct hci_conn *conn, u16 mgmt_op, __le32 passkey);

bool smp_irk_matches(struct hci_dev *hdev, const u8 irk[16],
		     const bdaddr_t *bdaddr);
int smp_generate_rpa(struct hci_dev *hdev, const u8 irk[16], bdaddr_t *rpa);
int smp_generate_oob(struct hci_dev *hdev, u8 hash[16], u8 rand[16]);

int smp_force_bredr(struct hci_dev *hdev, bool enable);

int smp_register(struct hci_dev *hdev);
void smp_unregister(struct hci_dev *hdev);

#if IS_ENABLED(CONFIG_BT_SELFTEST_SMP)
int bt_selftest_smp(void);
#else
static inline int bt_selftest_smp(void)
{
	return 0;
}
#endif

#endif /* __SMP_H */
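/*
 * Worked example (illustrative, not part of the original header): how the
 * inline helpers above classify a stored LTK.  struct smp_ltk is defined
 * elsewhere; only the .type and .authenticated members read by
 * smp_ltk_is_sc()/smp_ltk_sec_level() are assumed here.
 *
 *	struct smp_ltk key = { .type = SMP_LTK_P256, .authenticated = 1 };
 *
 *	smp_ltk_is_sc(&key);	  // true: P-256 LTKs come from LE Secure Connections
 *	smp_ltk_sec_level(&key);  // BT_SECURITY_FIPS (authenticated + SC)
 *
 * An authenticated legacy key (SMP_STK/SMP_LTK/SMP_LTK_RESPONDER) maps to
 * BT_SECURITY_HIGH instead, and any unauthenticated key to
 * BT_SECURITY_MEDIUM.
 */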
// SPDX-License-Identifier: GPL-2.0-or-later

#include <net/genetlink.h>

#include <uapi/linux/mrp_bridge.h>
#include "br_private.h"
#include "br_private_mrp.h"

static const struct nla_policy br_mrp_policy[IFLA_BRIDGE_MRP_MAX + 1] = {
	[IFLA_BRIDGE_MRP_UNSPEC]	= { .type = NLA_REJECT },
	[IFLA_BRIDGE_MRP_INSTANCE]	= { .type = NLA_NESTED },
	[IFLA_BRIDGE_MRP_PORT_STATE]	= { .type = NLA_NESTED },
	[IFLA_BRIDGE_MRP_PORT_ROLE]	= { .type = NLA_NESTED },
	[IFLA_BRIDGE_MRP_RING_STATE]	= { .type = NLA_NESTED },
	[IFLA_BRIDGE_MRP_RING_ROLE]	= { .type = NLA_NESTED },
	[IFLA_BRIDGE_MRP_START_TEST]	= { .type = NLA_NESTED },
	[IFLA_BRIDGE_MRP_IN_ROLE]	= { .type = NLA_NESTED },
	[IFLA_BRIDGE_MRP_IN_STATE]	= { .type = NLA_NESTED },
	[IFLA_BRIDGE_MRP_START_IN_TEST]	= { .type = NLA_NESTED },
};

static const struct nla_policy
br_mrp_instance_policy[IFLA_BRIDGE_MRP_INSTANCE_MAX + 1] = {
	[IFLA_BRIDGE_MRP_INSTANCE_UNSPEC]	= { .type = NLA_REJECT },
	[IFLA_BRIDGE_MRP_INSTANCE_RING_ID]	= { .type = NLA_U32 },
	[IFLA_BRIDGE_MRP_INSTANCE_P_IFINDEX]	= { .type = NLA_U32 },
	[IFLA_BRIDGE_MRP_INSTANCE_S_IFINDEX]	= { .type = NLA_U32 },
	[IFLA_BRIDGE_MRP_INSTANCE_PRIO]		= { .type = NLA_U16 },
};

static int br_mrp_instance_parse(struct net_bridge *br, struct nlattr *attr,
				 int cmd, struct netlink_ext_ack *extack)
{
	struct nlattr *tb[IFLA_BRIDGE_MRP_INSTANCE_MAX + 1];
	struct br_mrp_instance inst;
	int err;

	err = nla_parse_nested(tb, IFLA_BRIDGE_MRP_INSTANCE_MAX, attr,
			       br_mrp_instance_policy, extack);
	if (err)
		return err;

	if (!tb[IFLA_BRIDGE_MRP_INSTANCE_RING_ID] ||
	    !tb[IFLA_BRIDGE_MRP_INSTANCE_P_IFINDEX] ||
	    !tb[IFLA_BRIDGE_MRP_INSTANCE_S_IFINDEX]) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Missing attribute: RING_ID or P_IFINDEX or S_IFINDEX");
		return -EINVAL;
	}

	memset(&inst, 0, sizeof(inst));

	inst.ring_id = nla_get_u32(tb[IFLA_BRIDGE_MRP_INSTANCE_RING_ID]);
	inst.p_ifindex = nla_get_u32(tb[IFLA_BRIDGE_MRP_INSTANCE_P_IFINDEX]);
	inst.s_ifindex = nla_get_u32(tb[IFLA_BRIDGE_MRP_INSTANCE_S_IFINDEX]);
	inst.prio = MRP_DEFAULT_PRIO;

	if (tb[IFLA_BRIDGE_MRP_INSTANCE_PRIO])
		inst.prio = nla_get_u16(tb[IFLA_BRIDGE_MRP_INSTANCE_PRIO]);

	if (cmd == RTM_SETLINK)
		return br_mrp_add(br, &inst);
	else
		return br_mrp_del(br, &inst);
}

static const struct nla_policy
br_mrp_port_state_policy[IFLA_BRIDGE_MRP_PORT_STATE_MAX + 1] = {
	[IFLA_BRIDGE_MRP_PORT_STATE_UNSPEC]	= { .type = NLA_REJECT },
	[IFLA_BRIDGE_MRP_PORT_STATE_STATE]	= { .type = NLA_U32 },
};

static int br_mrp_port_state_parse(struct net_bridge_port *p,
				   struct nlattr *attr,
				   struct netlink_ext_ack *extack)
{
	struct nlattr *tb[IFLA_BRIDGE_MRP_PORT_STATE_MAX + 1];
	enum br_mrp_port_state_type state;
	int err;

	err = nla_parse_nested(tb, IFLA_BRIDGE_MRP_PORT_STATE_MAX, attr,
			       br_mrp_port_state_policy, extack);
	if (err)
		return err;

	if (!tb[IFLA_BRIDGE_MRP_PORT_STATE_STATE]) {
		NL_SET_ERR_MSG_MOD(extack, "Missing attribute: STATE");
		return -EINVAL;
	}

	state = nla_get_u32(tb[IFLA_BRIDGE_MRP_PORT_STATE_STATE]);

	return br_mrp_set_port_state(p, state);
}

static const struct nla_policy
br_mrp_port_role_policy[IFLA_BRIDGE_MRP_PORT_ROLE_MAX + 1] = {
	[IFLA_BRIDGE_MRP_PORT_ROLE_UNSPEC]	= { .type = NLA_REJECT },
	[IFLA_BRIDGE_MRP_PORT_ROLE_ROLE]	= { .type = NLA_U32 },
};

static int br_mrp_port_role_parse(struct net_bridge_port *p,
				  struct nlattr *attr,
				  struct netlink_ext_ack *extack)
{
	struct nlattr *tb[IFLA_BRIDGE_MRP_PORT_ROLE_MAX + 1];
	enum br_mrp_port_role_type role;
	int err;

	err = nla_parse_nested(tb, IFLA_BRIDGE_MRP_PORT_ROLE_MAX, attr,
			       br_mrp_port_role_policy, extack);
	if (err)
		return err;

	if (!tb[IFLA_BRIDGE_MRP_PORT_ROLE_ROLE]) {
		NL_SET_ERR_MSG_MOD(extack, "Missing attribute: ROLE");
		return -EINVAL;
	}

	role = nla_get_u32(tb[IFLA_BRIDGE_MRP_PORT_ROLE_ROLE]);

	return br_mrp_set_port_role(p, role);
}

static const struct nla_policy
br_mrp_ring_state_policy[IFLA_BRIDGE_MRP_RING_STATE_MAX + 1] = {
	[IFLA_BRIDGE_MRP_RING_STATE_UNSPEC]	= { .type = NLA_REJECT },
	[IFLA_BRIDGE_MRP_RING_STATE_RING_ID]	= { .type = NLA_U32 },
	[IFLA_BRIDGE_MRP_RING_STATE_STATE]	= { .type = NLA_U32 },
};

static int br_mrp_ring_state_parse(struct net_bridge *br, struct nlattr *attr,
				   struct netlink_ext_ack *extack)
{
	struct nlattr *tb[IFLA_BRIDGE_MRP_RING_STATE_MAX + 1];
	struct br_mrp_ring_state state;
	int err;

	err = nla_parse_nested(tb, IFLA_BRIDGE_MRP_RING_STATE_MAX, attr,
			       br_mrp_ring_state_policy, extack);
	if (err)
		return err;

	if (!tb[IFLA_BRIDGE_MRP_RING_STATE_RING_ID] ||
	    !tb[IFLA_BRIDGE_MRP_RING_STATE_STATE]) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Missing attribute: RING_ID or STATE");
		return -EINVAL;
	}

	memset(&state, 0x0, sizeof(state));

	state.ring_id = nla_get_u32(tb[IFLA_BRIDGE_MRP_RING_STATE_RING_ID]);
	state.ring_state = nla_get_u32(tb[IFLA_BRIDGE_MRP_RING_STATE_STATE]);

	return br_mrp_set_ring_state(br, &state);
}

static const struct nla_policy
br_mrp_ring_role_policy[IFLA_BRIDGE_MRP_RING_ROLE_MAX + 1] = {
	[IFLA_BRIDGE_MRP_RING_ROLE_UNSPEC]	= { .type = NLA_REJECT },
	[IFLA_BRIDGE_MRP_RING_ROLE_RING_ID]	= { .type = NLA_U32 },
	[IFLA_BRIDGE_MRP_RING_ROLE_ROLE]	= { .type = NLA_U32 },
};

static int br_mrp_ring_role_parse(struct net_bridge *br, struct nlattr *attr,
				  struct netlink_ext_ack *extack)
{
	struct nlattr *tb[IFLA_BRIDGE_MRP_RING_ROLE_MAX + 1];
	struct br_mrp_ring_role role;
	int err;

	err = nla_parse_nested(tb, IFLA_BRIDGE_MRP_RING_ROLE_MAX, attr,
			       br_mrp_ring_role_policy, extack);
	if (err)
		return err;

	if (!tb[IFLA_BRIDGE_MRP_RING_ROLE_RING_ID] ||
	    !tb[IFLA_BRIDGE_MRP_RING_ROLE_ROLE]) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Missing attribute: RING_ID or ROLE");
		return -EINVAL;
	}

	memset(&role, 0x0, sizeof(role));

	role.ring_id = nla_get_u32(tb[IFLA_BRIDGE_MRP_RING_ROLE_RING_ID]);
	role.ring_role = nla_get_u32(tb[IFLA_BRIDGE_MRP_RING_ROLE_ROLE]);

	return br_mrp_set_ring_role(br, &role);
}

static const struct nla_policy
br_mrp_start_test_policy[IFLA_BRIDGE_MRP_START_TEST_MAX + 1] = {
	[IFLA_BRIDGE_MRP_START_TEST_UNSPEC]	= { .type = NLA_REJECT },
	[IFLA_BRIDGE_MRP_START_TEST_RING_ID]	= { .type = NLA_U32 },
	[IFLA_BRIDGE_MRP_START_TEST_INTERVAL]	= { .type = NLA_U32 },
	[IFLA_BRIDGE_MRP_START_TEST_MAX_MISS]	= { .type = NLA_U32 },
	[IFLA_BRIDGE_MRP_START_TEST_PERIOD]	= { .type = NLA_U32 },
	[IFLA_BRIDGE_MRP_START_TEST_MONITOR]	= { .type = NLA_U32 },
};

static int br_mrp_start_test_parse(struct net_bridge *br, struct nlattr *attr,
				   struct netlink_ext_ack *extack)
{
	struct nlattr *tb[IFLA_BRIDGE_MRP_START_TEST_MAX + 1];
	struct br_mrp_start_test test;
	int err;

	err = nla_parse_nested(tb, IFLA_BRIDGE_MRP_START_TEST_MAX, attr,
			       br_mrp_start_test_policy, extack);
	if (err)
		return err;

	if (!tb[IFLA_BRIDGE_MRP_START_TEST_RING_ID] ||
	    !tb[IFLA_BRIDGE_MRP_START_TEST_INTERVAL] ||
	    !tb[IFLA_BRIDGE_MRP_START_TEST_MAX_MISS] ||
	    !tb[IFLA_BRIDGE_MRP_START_TEST_PERIOD]) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Missing attribute: RING_ID or INTERVAL or MAX_MISS or PERIOD");
		return -EINVAL;
	}

	memset(&test, 0x0, sizeof(test));

	test.ring_id = nla_get_u32(tb[IFLA_BRIDGE_MRP_START_TEST_RING_ID]);
	test.interval = nla_get_u32(tb[IFLA_BRIDGE_MRP_START_TEST_INTERVAL]);
	test.max_miss = nla_get_u32(tb[IFLA_BRIDGE_MRP_START_TEST_MAX_MISS]);
	test.period = nla_get_u32(tb[IFLA_BRIDGE_MRP_START_TEST_PERIOD]);
	test.monitor = false;
	if (tb[IFLA_BRIDGE_MRP_START_TEST_MONITOR])
		test.monitor =
			nla_get_u32(tb[IFLA_BRIDGE_MRP_START_TEST_MONITOR]);

	return br_mrp_start_test(br, &test);
}

static const struct nla_policy
br_mrp_in_state_policy[IFLA_BRIDGE_MRP_IN_STATE_MAX + 1] = {
	[IFLA_BRIDGE_MRP_IN_STATE_UNSPEC]	= { .type = NLA_REJECT },
	[IFLA_BRIDGE_MRP_IN_STATE_IN_ID]	= { .type = NLA_U32 },
	[IFLA_BRIDGE_MRP_IN_STATE_STATE]	= { .type = NLA_U32 },
};

static int br_mrp_in_state_parse(struct net_bridge *br, struct nlattr *attr,
				 struct netlink_ext_ack *extack)
{
	struct nlattr *tb[IFLA_BRIDGE_MRP_IN_STATE_MAX + 1];
	struct br_mrp_in_state state;
	int err;

	err = nla_parse_nested(tb, IFLA_BRIDGE_MRP_IN_STATE_MAX, attr,
			       br_mrp_in_state_policy, extack);
	if (err)
		return err;

	if (!tb[IFLA_BRIDGE_MRP_IN_STATE_IN_ID] ||
	    !tb[IFLA_BRIDGE_MRP_IN_STATE_STATE]) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Missing attribute: IN_ID or STATE");
		return -EINVAL;
	}

	memset(&state, 0x0, sizeof(state));

	state.in_id = nla_get_u32(tb[IFLA_BRIDGE_MRP_IN_STATE_IN_ID]);
	state.in_state = nla_get_u32(tb[IFLA_BRIDGE_MRP_IN_STATE_STATE]);

	return br_mrp_set_in_state(br, &state);
}

static const struct nla_policy
br_mrp_in_role_policy[IFLA_BRIDGE_MRP_IN_ROLE_MAX + 1] = {
	[IFLA_BRIDGE_MRP_IN_ROLE_UNSPEC]	= { .type = NLA_REJECT },
	[IFLA_BRIDGE_MRP_IN_ROLE_RING_ID]	= { .type = NLA_U32 },
	[IFLA_BRIDGE_MRP_IN_ROLE_IN_ID]		= { .type = NLA_U16 },
	[IFLA_BRIDGE_MRP_IN_ROLE_ROLE]		= { .type = NLA_U32 },
	[IFLA_BRIDGE_MRP_IN_ROLE_I_IFINDEX]	= { .type = NLA_U32 },
};

static int br_mrp_in_role_parse(struct net_bridge *br, struct nlattr *attr,
				struct netlink_ext_ack *extack)
{
	struct nlattr *tb[IFLA_BRIDGE_MRP_IN_ROLE_MAX + 1];
	struct br_mrp_in_role role;
	int err;

	err = nla_parse_nested(tb, IFLA_BRIDGE_MRP_IN_ROLE_MAX, attr,
			       br_mrp_in_role_policy, extack);
	if (err)
		return err;

	if (!tb[IFLA_BRIDGE_MRP_IN_ROLE_RING_ID] ||
	    !tb[IFLA_BRIDGE_MRP_IN_ROLE_IN_ID] ||
	    !tb[IFLA_BRIDGE_MRP_IN_ROLE_I_IFINDEX] ||
	    !tb[IFLA_BRIDGE_MRP_IN_ROLE_ROLE]) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Missing attribute: RING_ID or ROLE or IN_ID or I_IFINDEX");
		return -EINVAL;
	}

	memset(&role, 0x0, sizeof(role));

	role.ring_id = nla_get_u32(tb[IFLA_BRIDGE_MRP_IN_ROLE_RING_ID]);
	role.in_id = nla_get_u16(tb[IFLA_BRIDGE_MRP_IN_ROLE_IN_ID]);
	role.i_ifindex = nla_get_u32(tb[IFLA_BRIDGE_MRP_IN_ROLE_I_IFINDEX]);
	role.in_role = nla_get_u32(tb[IFLA_BRIDGE_MRP_IN_ROLE_ROLE]);

	return br_mrp_set_in_role(br, &role);
}

static const struct nla_policy
br_mrp_start_in_test_policy[IFLA_BRIDGE_MRP_START_IN_TEST_MAX + 1] = {
	[IFLA_BRIDGE_MRP_START_IN_TEST_UNSPEC]	 = { .type = NLA_REJECT },
	[IFLA_BRIDGE_MRP_START_IN_TEST_IN_ID]	 = { .type = NLA_U32 },
	[IFLA_BRIDGE_MRP_START_IN_TEST_INTERVAL] = { .type = NLA_U32 },
	[IFLA_BRIDGE_MRP_START_IN_TEST_MAX_MISS] = { .type = NLA_U32 },
	[IFLA_BRIDGE_MRP_START_IN_TEST_PERIOD]	 = { .type = NLA_U32 },
};

static int br_mrp_start_in_test_parse(struct net_bridge *br,
				      struct nlattr *attr,
				      struct netlink_ext_ack *extack)
{
	struct nlattr *tb[IFLA_BRIDGE_MRP_START_IN_TEST_MAX + 1];
	struct br_mrp_start_in_test test;
	int err;

	err = nla_parse_nested(tb, IFLA_BRIDGE_MRP_START_IN_TEST_MAX, attr,
			       br_mrp_start_in_test_policy, extack);
	if (err)
		return err;

	if (!tb[IFLA_BRIDGE_MRP_START_IN_TEST_IN_ID] ||
	    !tb[IFLA_BRIDGE_MRP_START_IN_TEST_INTERVAL] ||
	    !tb[IFLA_BRIDGE_MRP_START_IN_TEST_MAX_MISS] ||
	    !tb[IFLA_BRIDGE_MRP_START_IN_TEST_PERIOD]) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Missing attribute: IN_ID or INTERVAL or MAX_MISS or PERIOD");
		return -EINVAL;
	}

	memset(&test, 0x0, sizeof(test));

	test.in_id = nla_get_u32(tb[IFLA_BRIDGE_MRP_START_IN_TEST_IN_ID]);
	test.interval = nla_get_u32(tb[IFLA_BRIDGE_MRP_START_IN_TEST_INTERVAL]);
	test.max_miss = nla_get_u32(tb[IFLA_BRIDGE_MRP_START_IN_TEST_MAX_MISS]);
	test.period = nla_get_u32(tb[IFLA_BRIDGE_MRP_START_IN_TEST_PERIOD]);

	return br_mrp_start_in_test(br, &test);
}

int br_mrp_parse(struct net_bridge *br, struct net_bridge_port *p,
		 struct nlattr *attr, int cmd, struct netlink_ext_ack *extack)
{
	struct nlattr *tb[IFLA_BRIDGE_MRP_MAX + 1];
	int err;

	/* When this function is called for a port then the br pointer is
	 * invalid, therefore set the br to point correctly
	 */
	if (p)
		br = p->br;

	if (br->stp_enabled != BR_NO_STP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "MRP can't be enabled if STP is already enabled");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, IFLA_BRIDGE_MRP_MAX, attr,
			       br_mrp_policy, extack);
	if (err)
		return err;

	if (tb[IFLA_BRIDGE_MRP_INSTANCE]) {
		err = br_mrp_instance_parse(br, tb[IFLA_BRIDGE_MRP_INSTANCE],
					    cmd, extack);
		if (err)
			return err;
	}

	if (tb[IFLA_BRIDGE_MRP_PORT_STATE]) {
		err = br_mrp_port_state_parse(p, tb[IFLA_BRIDGE_MRP_PORT_STATE],
					      extack);
		if (err)
			return err;
	}

	if (tb[IFLA_BRIDGE_MRP_PORT_ROLE]) {
		err = br_mrp_port_role_parse(p, tb[IFLA_BRIDGE_MRP_PORT_ROLE],
					     extack);
		if (err)
			return err;
	}

	if (tb[IFLA_BRIDGE_MRP_RING_STATE]) {
		err = br_mrp_ring_state_parse(br,
					      tb[IFLA_BRIDGE_MRP_RING_STATE],
					      extack);
		if (err)
			return err;
	}

	if (tb[IFLA_BRIDGE_MRP_RING_ROLE]) {
		err = br_mrp_ring_role_parse(br, tb[IFLA_BRIDGE_MRP_RING_ROLE],
					     extack);
		if (err)
			return err;
	}

	if (tb[IFLA_BRIDGE_MRP_START_TEST]) {
		err = br_mrp_start_test_parse(br,
					      tb[IFLA_BRIDGE_MRP_START_TEST],
					      extack);
		if (err)
			return err;
	}

	if (tb[IFLA_BRIDGE_MRP_IN_STATE]) {
		err = br_mrp_in_state_parse(br, tb[IFLA_BRIDGE_MRP_IN_STATE],
					    extack);
		if (err)
			return err;
	}

	if (tb[IFLA_BRIDGE_MRP_IN_ROLE]) {
		err = br_mrp_in_role_parse(br, tb[IFLA_BRIDGE_MRP_IN_ROLE],
					   extack);
		if (err)
			return err;
	}

	if (tb[IFLA_BRIDGE_MRP_START_IN_TEST]) {
		err = br_mrp_start_in_test_parse(br,
						 tb[IFLA_BRIDGE_MRP_START_IN_TEST],
						 extack);
		if (err)
			return err;
	}

	return 0;
}

int br_mrp_fill_info(struct sk_buff *skb, struct net_bridge *br)
{
	struct nlattr *tb, *mrp_tb;
	struct br_mrp *mrp;

	mrp_tb = nla_nest_start_noflag(skb, IFLA_BRIDGE_MRP);
	if (!mrp_tb)
		return -EMSGSIZE;

	hlist_for_each_entry_rcu(mrp, &br->mrp_list, list) {
		struct net_bridge_port *p;

		tb = nla_nest_start_noflag(skb, IFLA_BRIDGE_MRP_INFO);
		if (!tb)
			goto nla_info_failure;

		if (nla_put_u32(skb, IFLA_BRIDGE_MRP_INFO_RING_ID,
				mrp->ring_id))
			goto nla_put_failure;

		p = rcu_dereference(mrp->p_port);
		if (p && nla_put_u32(skb, IFLA_BRIDGE_MRP_INFO_P_IFINDEX,
				     p->dev->ifindex))
			goto nla_put_failure;

		p = rcu_dereference(mrp->s_port);
		if (p && nla_put_u32(skb, IFLA_BRIDGE_MRP_INFO_S_IFINDEX,
				     p->dev->ifindex))
			goto nla_put_failure;

		p = rcu_dereference(mrp->i_port);
		if (p && nla_put_u32(skb, IFLA_BRIDGE_MRP_INFO_I_IFINDEX,
				     p->dev->ifindex))
			goto nla_put_failure;

		if (nla_put_u16(skb, IFLA_BRIDGE_MRP_INFO_PRIO,
				mrp->prio))
			goto nla_put_failure;
		if (nla_put_u32(skb, IFLA_BRIDGE_MRP_INFO_RING_STATE,
				mrp->ring_state))
			goto nla_put_failure;
		if (nla_put_u32(skb, IFLA_BRIDGE_MRP_INFO_RING_ROLE,
				mrp->ring_role))
			goto nla_put_failure;
		if (nla_put_u32(skb, IFLA_BRIDGE_MRP_INFO_TEST_INTERVAL,
				mrp->test_interval))
			goto nla_put_failure;
		if (nla_put_u32(skb, IFLA_BRIDGE_MRP_INFO_TEST_MAX_MISS,
				mrp->test_max_miss))
			goto nla_put_failure;
		if (nla_put_u32(skb, IFLA_BRIDGE_MRP_INFO_TEST_MONITOR,
				mrp->test_monitor))
			goto nla_put_failure;

		if (nla_put_u32(skb, IFLA_BRIDGE_MRP_INFO_IN_STATE,
				mrp->in_state))
			goto nla_put_failure;
		if (nla_put_u32(skb, IFLA_BRIDGE_MRP_INFO_IN_ROLE,
				mrp->in_role))
			goto nla_put_failure;
		if (nla_put_u32(skb, IFLA_BRIDGE_MRP_INFO_IN_TEST_INTERVAL,
				mrp->in_test_interval))
			goto nla_put_failure;
		if (nla_put_u32(skb, IFLA_BRIDGE_MRP_INFO_IN_TEST_MAX_MISS,
				mrp->in_test_max_miss))
			goto nla_put_failure;

		nla_nest_end(skb, tb);
	}
	nla_nest_end(skb, mrp_tb);

	return 0;

nla_put_failure:
	nla_nest_cancel(skb, tb);

nla_info_failure:
	nla_nest_cancel(skb, mrp_tb);

	return -EMSGSIZE;
}

int br_mrp_ring_port_open(struct net_device *dev, u8 loc)
{
	struct net_bridge_port *p;
	int err = 0;

	p = br_port_get_rcu(dev);
	if (!p) {
		err = -EINVAL;
		goto out;
	}

	if (loc)
		p->flags |= BR_MRP_LOST_CONT;
	else
		p->flags &= ~BR_MRP_LOST_CONT;

	br_ifinfo_notify(RTM_NEWLINK, NULL, p);

out:
	return err;
}

int br_mrp_in_port_open(struct net_device *dev, u8 loc)
{
	struct net_bridge_port *p;
	int err = 0;

	p = br_port_get_rcu(dev);
	if (!p) {
		err = -EINVAL;
		goto out;
	}

	if (loc)
		p->flags |= BR_MRP_LOST_IN_CONT;
	else
		p->flags &= ~BR_MRP_LOST_IN_CONT;

	br_ifinfo_notify(RTM_NEWLINK, NULL, p);

out:
	return err;
}
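/*
 * Attribute layout sketch (illustrative, not part of the original file):
 * br_mrp_parse() above is reached from the bridge's RTM_SETLINK/RTM_DELLINK
 * af_spec handling, so creating an MRP instance means sending a message
 * nested roughly like this (bridge ifindex in ifi_index):
 *
 *	RTM_SETLINK
 *	  IFLA_AF_SPEC
 *	    IFLA_BRIDGE_MRP
 *	      IFLA_BRIDGE_MRP_INSTANCE
 *	        IFLA_BRIDGE_MRP_INSTANCE_RING_ID	(u32, required)
 *	        IFLA_BRIDGE_MRP_INSTANCE_P_IFINDEX	(u32, required)
 *	        IFLA_BRIDGE_MRP_INSTANCE_S_IFINDEX	(u32, required)
 *	        IFLA_BRIDGE_MRP_INSTANCE_PRIO		(u16, optional,
 *	                                                 defaults to MRP_DEFAULT_PRIO)
 *
 * RTM_SETLINK maps to br_mrp_add() and RTM_DELLINK to br_mrp_del(), as in
 * br_mrp_instance_parse() above.
 */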
3025 3026 3027 3028 3029 3030 3031 3032 3033 3034 3035 3036 3037 3038 3039 3040 3041 3042 3043 3044 3045 3046 3047 3048 3049 3050 3051 3052 3053 3054 3055 3056 3057 3058 3059 3060 3061 3062 3063 3064 3065 3066 3067 3068 3069 3070 3071 3072 3073 3074 3075 3076 3077 3078 3079 3080 3081 3082 3083 3084 3085 3086 3087 3088 3089 3090 3091 3092 3093 3094 3095 3096 3097 3098 3099 3100 3101 3102 3103 3104 3105 3106 3107 3108 3109 3110 3111 3112 3113 3114 3115 3116 3117 3118 3119 3120 3121 3122 3123 3124 3125 3126 3127 3128 3129 3130 3131 3132 3133 3134 3135 3136 3137 3138 3139 3140 3141 3142 3143 3144 3145 3146 3147 3148 3149 3150 3151 3152 3153 3154 3155 3156 3157 3158 3159 3160 3161 3162 3163 3164 3165 3166 3167 3168 3169 3170 3171 3172 3173 3174 3175 3176 3177 3178 3179 3180 3181 3182 3183 3184 3185 3186 3187 3188 3189 3190 3191 3192 3193 3194 3195 3196 3197 3198 3199 3200 3201 3202 3203 3204 3205 3206 3207 3208 3209 3210 3211 3212 3213 3214 3215 3216 3217 3218 3219 3220 3221 3222 3223 3224 3225 3226 3227 3228 3229 3230 3231 3232 3233 3234 3235 3236 3237 3238 3239 3240 3241 3242 3243 3244 3245 3246 3247 3248 3249 3250 3251 3252 3253 3254 3255 3256 3257 3258 3259 3260 3261 3262 3263 3264 3265 3266 3267 3268 3269 3270 3271 3272 3273 3274 3275 3276 3277 3278 3279 3280 3281 3282 3283 3284 3285 3286 3287 3288 3289 3290 3291 3292 3293 3294 3295 3296 3297 3298 3299 3300 3301 3302 3303 3304 3305 3306 3307 3308 3309 3310 3311 3312 3313 3314 3315 3316 3317 3318 3319 3320 3321 3322 3323 3324 3325 3326 3327 3328 3329 3330 3331 3332 3333 3334 3335 3336 3337 3338 3339 3340 3341 3342 3343 3344 3345 3346 3347 3348 3349 3350 3351 3352 3353 3354 3355 3356 3357 3358 3359 3360 3361 3362 3363 3364 3365 3366 3367 3368 3369 3370 3371 3372 3373 3374 3375 3376 3377 3378 3379 3380 3381 3382 3383 3384 3385 3386 3387 3388 3389 3390 3391 3392 3393 3394 3395 3396 3397 3398 3399 3400 3401 3402 3403 3404 3405 3406 3407 3408 3409 3410 3411 3412 3413 3414 3415 3416 3417 3418 3419 3420 3421 3422 3423 3424 3425 3426 3427 3428 3429 3430 3431 3432 3433 3434 3435 3436 3437 3438 3439 3440 3441 3442 3443 3444 3445 3446 3447 3448 3449 3450 3451 3452 3453 3454 3455 3456 3457 3458 3459 3460 3461 3462 3463 3464 3465 3466 3467 3468 3469 3470 3471 3472 3473 3474 3475 3476 3477 3478 3479 3480 3481 3482 3483 3484 3485 3486 3487 3488 3489 3490 3491 3492 3493 3494 3495 3496 3497 3498 3499 3500 3501 3502 3503 3504 3505 3506 3507 3508 3509 3510 3511 3512 3513 3514 3515 3516 3517 3518 3519 3520 3521 3522 3523 3524 3525 3526 3527 3528 3529 3530 3531 3532 3533 3534 3535 3536 3537 3538 3539 3540 3541 3542 3543 3544 3545 3546 3547 3548 3549 3550 3551 3552 3553 3554 3555 3556 3557 3558 3559 3560 3561 3562 3563 3564 3565 3566 3567 3568 3569 3570 3571 3572 3573 3574 3575 3576 3577 3578 3579 3580 3581 3582 3583 3584 3585 3586 3587 3588 3589 3590 3591 3592 3593 3594 3595 3596 3597 3598 3599 3600 3601 3602 3603 3604 3605 3606 3607 3608 3609 3610 3611 3612 3613 3614 3615 3616 3617 3618 3619 3620 3621 3622 3623 3624 3625 3626 3627 3628 3629 3630 3631 3632 3633 3634 3635 3636 3637 3638 3639 3640 3641 3642 3643 3644 3645 3646 3647 3648 3649 3650 3651 3652 3653 3654 3655 3656 3657 3658 3659 3660 3661 3662 3663 3664 3665 3666 3667 3668 3669 3670 3671 3672 3673 3674 3675 3676 3677 3678 3679 3680 3681 3682 3683 3684 3685 3686 3687 3688 3689 3690 3691 3692 3693 3694 3695 3696 3697 3698 3699 3700 3701 3702 3703 3704 3705 3706 3707 3708 3709 3710 3711 3712 3713 3714 3715 3716 3717 3718 3719 3720 3721 3722 3723 3724 3725 3726 3727 3728 3729 3730 3731 3732 3733 3734 3735 
3736 3737 3738 3739 3740 3741 3742 3743 3744 3745 3746 3747 3748 3749 3750 3751 3752 3753 3754 3755 3756 3757 3758 3759 3760 3761 3762 3763 3764 3765 3766 3767 3768 3769 3770 3771 3772 3773 3774 3775 3776 3777 3778 3779 3780 3781 3782 3783 3784 3785 3786 3787 3788 3789 3790 3791 3792 3793 3794 3795 3796 3797 3798 3799 3800 3801 3802 3803 3804 3805 3806 3807 3808 3809 3810 3811 3812 3813 3814 3815 3816 3817 3818 3819 3820 3821 3822 3823 3824 3825 3826 3827 3828 3829 3830 3831 3832 3833 3834 3835 3836 3837 3838 3839 3840 3841 3842 3843 3844 3845 3846 3847 3848 3849 3850 3851 3852 3853 3854 3855 3856 3857 3858 3859 3860 3861 3862 3863 3864 3865 3866 3867 3868 3869 3870 3871 3872 3873 3874 3875 3876 3877 3878 3879 3880 3881 3882 3883 3884 3885 3886 3887 3888 3889 3890 3891 3892 3893 3894 3895 3896 3897 3898 3899 3900 3901 3902 3903 3904 3905 3906 3907 3908 3909 3910 3911 3912 3913 3914 3915 3916 3917 3918 3919 3920 3921 3922 3923 3924 3925 3926 3927 3928 3929 3930 3931 3932 3933 3934 3935 3936 3937 3938 3939 3940 3941 3942 3943 3944 3945 3946 3947 3948 3949 3950 3951 3952 3953 3954 3955 3956 3957 3958 3959 3960 3961 3962 3963 3964 3965 3966 3967 3968 3969 3970 3971 3972 3973 3974 3975 3976 3977 3978 3979 3980 3981 3982 3983 3984 3985 3986 3987 3988 3989 3990 3991 3992 3993 3994 3995 3996 3997 3998 3999 4000 4001 4002 4003 4004 4005 4006 4007 4008 4009 4010 4011 4012 4013 4014 4015 4016 4017 4018 4019 4020 4021 4022 4023 4024 4025 4026 4027 4028 4029 4030 4031 4032 4033 4034 4035 4036 4037 4038 4039 4040 4041 4042 4043 4044 4045 4046 4047 4048 4049 4050 4051 4052 4053 4054 4055 4056 4057 4058 4059 4060 4061 4062 4063 4064 4065 4066 4067 4068 4069 4070 4071 4072 4073 4074 4075 4076 4077 4078 4079 4080 4081 4082 4083 4084 4085 4086 4087 4088 4089 4090 4091 4092 4093 4094 4095 4096 4097 4098 4099 4100 4101 4102 4103 4104 4105 4106 4107 4108 4109 4110 4111 4112 4113 4114 4115 4116 4117 4118 4119 4120 4121 4122 4123 4124 4125 4126 4127 4128 4129 4130 4131 4132 4133 4134 4135 4136 4137 4138 4139 4140 4141 4142 4143 4144 4145 4146 4147 4148 4149 4150 4151 4152 4153 4154 4155 4156 4157 4158 4159 4160 4161 4162 4163 4164 4165 4166 4167 4168 4169 4170 4171 4172 4173 4174 4175 4176 4177 4178 4179 4180 4181 4182 4183 4184 4185 4186 4187 4188 4189 4190 4191 4192 4193 4194 4195 4196 4197 4198 4199 4200 4201 4202 4203 4204 4205 4206 4207 4208 4209 4210 4211 4212 4213 4214 4215 4216 4217 4218 4219 4220 4221 4222 4223 4224 4225 4226 4227 4228 4229 4230 4231 4232 4233 4234 4235 4236 4237 4238 4239 4240 4241 4242 4243 4244 4245 4246 4247 4248 4249 4250 4251 4252 4253 4254 4255 4256 4257 4258 4259 4260 4261 4262 4263 4264 4265 4266 4267 4268 4269 4270 4271 4272 4273 4274 4275 4276 4277 4278 4279 4280 4281 4282 4283 4284 4285 4286 4287 4288 4289 4290 4291 4292 4293 4294 4295 4296 4297 4298 4299 4300 4301 4302 4303 4304 4305 4306 4307 4308 4309 4310 4311 4312 4313 4314 4315 4316 4317 4318 4319 4320 4321 4322 4323 4324 4325 4326 4327 4328 4329 4330 4331 4332 4333 4334 4335 4336 4337 4338 4339 4340 4341 4342 4343 4344 4345 4346 4347 4348 4349 4350 4351 4352 4353 4354 4355 4356 4357 4358 4359 4360 4361 4362 4363 4364 4365 4366 4367 4368 4369 4370 4371 4372 4373 4374 4375 4376 4377 4378 4379 4380 4381 4382 4383 4384 4385 4386 4387 4388 4389 4390 4391 4392 4393 4394 4395 4396 4397 4398 4399 4400 4401 4402 4403 4404 4405 4406 4407 4408 4409 4410 4411 4412 4413 4414 4415 4416 4417 4418 4419 4420 4421 4422 4423 4424 4425 4426 4427 4428 4429 4430 4431 4432 4433 4434 4435 4436 4437 4438 4439 4440 4441 4442 4443 4444 4445 4446 
// SPDX-License-Identifier: GPL-2.0-or-later
// SPI init/core code
//
// Copyright (C) 2005 David Brownell
// Copyright (C) 2008 Secret Lab Technologies Ltd.
#include <linux/acpi.h>
#include <linux/cache.h>
#include <linux/clk/clk-conf.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/gpio/consumer.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mod_devicetable.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/percpu.h>
#include <linux/platform_data/x86/apple.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/sched/rt.h>
#include <linux/slab.h>
#include <linux/spi/offload/types.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <uapi/linux/sched/types.h>

#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>
EXPORT_TRACEPOINT_SYMBOL(spi_transfer_start);
EXPORT_TRACEPOINT_SYMBOL(spi_transfer_stop);

#include "internals.h"

static DEFINE_IDR(spi_controller_idr);

static void spidev_release(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);

	spi_controller_put(spi->controller);
	kfree(spi->driver_override);
	free_percpu(spi->pcpu_statistics);
	kfree(spi);
}

static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
	const struct spi_device *spi = to_spi_device(dev);
	int len;

	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
	if (len != -ENODEV)
		return len;

	return sysfs_emit(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
static DEVICE_ATTR_RO(modalias);

static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *a,
				     const char *buf, size_t count)
{
	struct spi_device *spi = to_spi_device(dev);
	int ret;

	ret = driver_set_override(dev, &spi->driver_override, buf, count);
	if (ret)
		return ret;

	return count;
}

static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *a, char *buf)
{
	const struct spi_device *spi = to_spi_device(dev);
	ssize_t len;

	device_lock(dev);
	len = sysfs_emit(buf, "%s\n", spi->driver_override ? : "");
: ""); device_unlock(dev); return len; } static DEVICE_ATTR_RW(driver_override); static struct spi_statistics __percpu *spi_alloc_pcpu_stats(struct device *dev) { struct spi_statistics __percpu *pcpu_stats; if (dev) pcpu_stats = devm_alloc_percpu(dev, struct spi_statistics); else pcpu_stats = alloc_percpu_gfp(struct spi_statistics, GFP_KERNEL); if (pcpu_stats) { int cpu; for_each_possible_cpu(cpu) { struct spi_statistics *stat; stat = per_cpu_ptr(pcpu_stats, cpu); u64_stats_init(&stat->syncp); } } return pcpu_stats; } static ssize_t spi_emit_pcpu_stats(struct spi_statistics __percpu *stat, char *buf, size_t offset) { u64 val = 0; int i; for_each_possible_cpu(i) { const struct spi_statistics *pcpu_stats; u64_stats_t *field; unsigned int start; u64 inc; pcpu_stats = per_cpu_ptr(stat, i); field = (void *)pcpu_stats + offset; do { start = u64_stats_fetch_begin(&pcpu_stats->syncp); inc = u64_stats_read(field); } while (u64_stats_fetch_retry(&pcpu_stats->syncp, start)); val += inc; } return sysfs_emit(buf, "%llu\n", val); } #define SPI_STATISTICS_ATTRS(field, file) \ static ssize_t spi_controller_##field##_show(struct device *dev, \ struct device_attribute *attr, \ char *buf) \ { \ struct spi_controller *ctlr = container_of(dev, \ struct spi_controller, dev); \ return spi_statistics_##field##_show(ctlr->pcpu_statistics, buf); \ } \ static struct device_attribute dev_attr_spi_controller_##field = { \ .attr = { .name = file, .mode = 0444 }, \ .show = spi_controller_##field##_show, \ }; \ static ssize_t spi_device_##field##_show(struct device *dev, \ struct device_attribute *attr, \ char *buf) \ { \ struct spi_device *spi = to_spi_device(dev); \ return spi_statistics_##field##_show(spi->pcpu_statistics, buf); \ } \ static struct device_attribute dev_attr_spi_device_##field = { \ .attr = { .name = file, .mode = 0444 }, \ .show = spi_device_##field##_show, \ } #define SPI_STATISTICS_SHOW_NAME(name, file, field) \ static ssize_t spi_statistics_##name##_show(struct spi_statistics __percpu *stat, \ char *buf) \ { \ return spi_emit_pcpu_stats(stat, buf, \ offsetof(struct spi_statistics, field)); \ } \ SPI_STATISTICS_ATTRS(name, file) #define SPI_STATISTICS_SHOW(field) \ SPI_STATISTICS_SHOW_NAME(field, __stringify(field), \ field) SPI_STATISTICS_SHOW(messages); SPI_STATISTICS_SHOW(transfers); SPI_STATISTICS_SHOW(errors); SPI_STATISTICS_SHOW(timedout); SPI_STATISTICS_SHOW(spi_sync); SPI_STATISTICS_SHOW(spi_sync_immediate); SPI_STATISTICS_SHOW(spi_async); SPI_STATISTICS_SHOW(bytes); SPI_STATISTICS_SHOW(bytes_rx); SPI_STATISTICS_SHOW(bytes_tx); #define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number) \ SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index, \ "transfer_bytes_histo_" number, \ transfer_bytes_histo[index]) SPI_STATISTICS_TRANSFER_BYTES_HISTO(0, "0-1"); SPI_STATISTICS_TRANSFER_BYTES_HISTO(1, "2-3"); SPI_STATISTICS_TRANSFER_BYTES_HISTO(2, "4-7"); SPI_STATISTICS_TRANSFER_BYTES_HISTO(3, "8-15"); SPI_STATISTICS_TRANSFER_BYTES_HISTO(4, "16-31"); SPI_STATISTICS_TRANSFER_BYTES_HISTO(5, "32-63"); SPI_STATISTICS_TRANSFER_BYTES_HISTO(6, "64-127"); SPI_STATISTICS_TRANSFER_BYTES_HISTO(7, "128-255"); SPI_STATISTICS_TRANSFER_BYTES_HISTO(8, "256-511"); SPI_STATISTICS_TRANSFER_BYTES_HISTO(9, "512-1023"); SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047"); SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095"); SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191"); SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383"); SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767"); 
SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");

SPI_STATISTICS_SHOW(transfers_split_maxsize);

static struct attribute *spi_dev_attrs[] = {
	&dev_attr_modalias.attr,
	&dev_attr_driver_override.attr,
	NULL,
};

static const struct attribute_group spi_dev_group = {
	.attrs  = spi_dev_attrs,
};

static struct attribute *spi_device_statistics_attrs[] = {
	&dev_attr_spi_device_messages.attr,
	&dev_attr_spi_device_transfers.attr,
	&dev_attr_spi_device_errors.attr,
	&dev_attr_spi_device_timedout.attr,
	&dev_attr_spi_device_spi_sync.attr,
	&dev_attr_spi_device_spi_sync_immediate.attr,
	&dev_attr_spi_device_spi_async.attr,
	&dev_attr_spi_device_bytes.attr,
	&dev_attr_spi_device_bytes_rx.attr,
	&dev_attr_spi_device_bytes_tx.attr,
	&dev_attr_spi_device_transfer_bytes_histo0.attr,
	&dev_attr_spi_device_transfer_bytes_histo1.attr,
	&dev_attr_spi_device_transfer_bytes_histo2.attr,
	&dev_attr_spi_device_transfer_bytes_histo3.attr,
	&dev_attr_spi_device_transfer_bytes_histo4.attr,
	&dev_attr_spi_device_transfer_bytes_histo5.attr,
	&dev_attr_spi_device_transfer_bytes_histo6.attr,
	&dev_attr_spi_device_transfer_bytes_histo7.attr,
	&dev_attr_spi_device_transfer_bytes_histo8.attr,
	&dev_attr_spi_device_transfer_bytes_histo9.attr,
	&dev_attr_spi_device_transfer_bytes_histo10.attr,
	&dev_attr_spi_device_transfer_bytes_histo11.attr,
	&dev_attr_spi_device_transfer_bytes_histo12.attr,
	&dev_attr_spi_device_transfer_bytes_histo13.attr,
	&dev_attr_spi_device_transfer_bytes_histo14.attr,
	&dev_attr_spi_device_transfer_bytes_histo15.attr,
	&dev_attr_spi_device_transfer_bytes_histo16.attr,
	&dev_attr_spi_device_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_device_statistics_group = {
	.name  = "statistics",
	.attrs  = spi_device_statistics_attrs,
};

static const struct attribute_group *spi_dev_groups[] = {
	&spi_dev_group,
	&spi_device_statistics_group,
	NULL,
};

static struct attribute *spi_controller_statistics_attrs[] = {
	&dev_attr_spi_controller_messages.attr,
	&dev_attr_spi_controller_transfers.attr,
	&dev_attr_spi_controller_errors.attr,
	&dev_attr_spi_controller_timedout.attr,
	&dev_attr_spi_controller_spi_sync.attr,
	&dev_attr_spi_controller_spi_sync_immediate.attr,
	&dev_attr_spi_controller_spi_async.attr,
	&dev_attr_spi_controller_bytes.attr,
	&dev_attr_spi_controller_bytes_rx.attr,
	&dev_attr_spi_controller_bytes_tx.attr,
	&dev_attr_spi_controller_transfer_bytes_histo0.attr,
	&dev_attr_spi_controller_transfer_bytes_histo1.attr,
	&dev_attr_spi_controller_transfer_bytes_histo2.attr,
	&dev_attr_spi_controller_transfer_bytes_histo3.attr,
	&dev_attr_spi_controller_transfer_bytes_histo4.attr,
	&dev_attr_spi_controller_transfer_bytes_histo5.attr,
	&dev_attr_spi_controller_transfer_bytes_histo6.attr,
	&dev_attr_spi_controller_transfer_bytes_histo7.attr,
	&dev_attr_spi_controller_transfer_bytes_histo8.attr,
	&dev_attr_spi_controller_transfer_bytes_histo9.attr,
	&dev_attr_spi_controller_transfer_bytes_histo10.attr,
	&dev_attr_spi_controller_transfer_bytes_histo11.attr,
	&dev_attr_spi_controller_transfer_bytes_histo12.attr,
	&dev_attr_spi_controller_transfer_bytes_histo13.attr,
	&dev_attr_spi_controller_transfer_bytes_histo14.attr,
	&dev_attr_spi_controller_transfer_bytes_histo15.attr,
	&dev_attr_spi_controller_transfer_bytes_histo16.attr,
	&dev_attr_spi_controller_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_controller_statistics_group = {
	.name  = "statistics",
	.attrs  = spi_controller_statistics_attrs,
};
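/*
 * Usage sketch for the statistics groups defined above: every counter is
 * exported as a sysfs attribute under a "statistics" directory on both the
 * controller and each SPI device. A minimal userspace reader is shown
 * below; the device name "spi0.0" is an assumption and depends on the
 * actual bus and chip-select numbering.
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		unsigned long long bytes;
 *		FILE *f = fopen("/sys/bus/spi/devices/spi0.0/statistics/bytes", "r");
 *
 *		if (!f)
 *			return 1;
 *		if (fscanf(f, "%llu", &bytes) == 1)
 *			printf("bytes transferred: %llu\n", bytes);
 *		fclose(f);
 *		return 0;
 *	}
 */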
static const struct attribute_group *spi_controller_groups[] = {
	&spi_controller_statistics_group,
	NULL,
};

static void spi_statistics_add_transfer_stats(struct spi_statistics __percpu *pcpu_stats,
					      struct spi_transfer *xfer,
					      struct spi_message *msg)
{
	int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;
	struct spi_statistics *stats;

	if (l2len < 0)
		l2len = 0;

	get_cpu();
	stats = this_cpu_ptr(pcpu_stats);
	u64_stats_update_begin(&stats->syncp);

	u64_stats_inc(&stats->transfers);
	u64_stats_inc(&stats->transfer_bytes_histo[l2len]);

	u64_stats_add(&stats->bytes, xfer->len);
	if (spi_valid_txbuf(msg, xfer))
		u64_stats_add(&stats->bytes_tx, xfer->len);
	if (spi_valid_rxbuf(msg, xfer))
		u64_stats_add(&stats->bytes_rx, xfer->len);

	u64_stats_update_end(&stats->syncp);
	put_cpu();
}

/*
 * modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 * and the sysfs version makes coldplug work too.
 */
static const struct spi_device_id *spi_match_id(const struct spi_device_id *id, const char *name)
{
	while (id->name[0]) {
		if (!strcmp(name, id->name))
			return id;
		id++;
	}
	return NULL;
}

const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
	const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

	return spi_match_id(sdrv->id_table, sdev->modalias);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);

const void *spi_get_device_match_data(const struct spi_device *sdev)
{
	const void *match;

	match = device_get_match_data(&sdev->dev);
	if (match)
		return match;

	return (const void *)spi_get_device_id(sdev)->driver_data;
}
EXPORT_SYMBOL_GPL(spi_get_device_match_data);

static int spi_match_device(struct device *dev, const struct device_driver *drv)
{
	const struct spi_device	*spi = to_spi_device(dev);
	const struct spi_driver	*sdrv = to_spi_driver(drv);

	/* Check override first, and if set, only use the named driver */
	if (spi->driver_override)
		return strcmp(spi->driver_override, drv->name) == 0;

	/* Attempt an OF style match */
	if (of_driver_match_device(dev, drv))
		return 1;

	/* Then try ACPI */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	if (sdrv->id_table)
		return !!spi_match_id(sdrv->id_table, spi->modalias);

	return strcmp(spi->modalias, drv->name) == 0;
}

static int spi_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	const struct spi_device *spi = to_spi_device(dev);
	int rc;

	rc = acpi_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	return add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
}

static int spi_probe(struct device *dev)
{
	const struct spi_driver *sdrv = to_spi_driver(dev->driver);
	struct spi_device *spi = to_spi_device(dev);
	struct fwnode_handle *fwnode = dev_fwnode(dev);
	int ret;

	ret = of_clk_set_defaults(dev->of_node, false);
	if (ret)
		return ret;

	if (is_of_node(fwnode))
		spi->irq = of_irq_get(dev->of_node, 0);
	else if (is_acpi_device_node(fwnode) && spi->irq < 0)
		spi->irq = acpi_dev_gpio_irq_get(to_acpi_device_node(fwnode), 0);
	if (spi->irq == -EPROBE_DEFER)
		return dev_err_probe(dev, spi->irq, "Failed to get irq\n");
	if (spi->irq < 0)
		spi->irq = 0;

	ret = dev_pm_domain_attach(dev, PD_FLAG_ATTACH_POWER_ON);
	if (ret)
		return ret;

	if (sdrv->probe) {
		ret = sdrv->probe(spi);
		if (ret)
			dev_pm_domain_detach(dev, true);
	}

	return ret;
}

static void spi_remove(struct device *dev)
{
	const struct spi_driver *sdrv = to_spi_driver(dev->driver);

	if (sdrv->remove)
		sdrv->remove(to_spi_device(dev));

	dev_pm_domain_detach(dev, true);
}

static void spi_shutdown(struct device *dev)
{
	if (dev->driver) {
		const struct spi_driver *sdrv = to_spi_driver(dev->driver);

		if (sdrv->shutdown)
			sdrv->shutdown(to_spi_device(dev));
	}
}
const struct bus_type spi_bus_type = {
	.name		= "spi",
	.dev_groups	= spi_dev_groups,
	.match		= spi_match_device,
	.uevent		= spi_uevent,
	.probe		= spi_probe,
	.remove		= spi_remove,
	.shutdown	= spi_shutdown,
};
EXPORT_SYMBOL_GPL(spi_bus_type);

/**
 * __spi_register_driver - register a SPI driver
 * @owner: owner module of the driver to register
 * @sdrv: the driver to register
 * Context: can sleep
 *
 * Return: zero on success, else a negative error code.
 */
int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
{
	sdrv->driver.owner = owner;
	sdrv->driver.bus = &spi_bus_type;

	/*
	 * For Really Good Reasons we use spi: modaliases not of:
	 * modaliases for DT so module autoloading won't work if we
	 * don't have a spi_device_id as well as a compatible string.
	 */
	if (sdrv->driver.of_match_table) {
		const struct of_device_id *of_id;

		for (of_id = sdrv->driver.of_match_table; of_id->compatible[0];
		     of_id++) {
			const char *of_name;

			/* Strip off any vendor prefix */
			of_name = strnchr(of_id->compatible,
					  sizeof(of_id->compatible), ',');
			if (of_name)
				of_name++;
			else
				of_name = of_id->compatible;

			if (sdrv->id_table) {
				const struct spi_device_id *spi_id;

				spi_id = spi_match_id(sdrv->id_table, of_name);
				if (spi_id)
					continue;
			} else {
				if (strcmp(sdrv->driver.name, of_name) == 0)
					continue;
			}

			pr_warn("SPI driver %s has no spi_device_id for %s\n",
				sdrv->driver.name, of_id->compatible);
		}
	}

	return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(__spi_register_driver);
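/*
 * A minimal registration sketch following the rule warned about above:
 * the driver carries both an id_table and an of_match_table so that
 * "spi:" module autoloading keeps working on DT systems. All names here
 * ("acme,foo", foo_probe, foo_remove) are hypothetical.
 *
 *	static const struct spi_device_id foo_spi_ids[] = {
 *		{ "foo" },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(spi, foo_spi_ids);
 *
 *	static const struct of_device_id foo_of_match[] = {
 *		{ .compatible = "acme,foo" },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(of, foo_of_match);
 *
 *	static struct spi_driver foo_driver = {
 *		.driver = {
 *			.name		= "foo",
 *			.of_match_table	= foo_of_match,
 *		},
 *		.id_table	= foo_spi_ids,
 *		.probe		= foo_probe,
 *		.remove		= foo_remove,
 *	};
 *	module_spi_driver(foo_driver);
 */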
/*-------------------------------------------------------------------------*/

/*
 * SPI devices should normally not be created by SPI device drivers; that
 * would make them board-specific.  Similarly with SPI controller drivers.
 * Device registration normally goes into like arch/.../mach.../board-YYY.c
 * with other readonly (flashable) information about mainboard devices.
 */

struct boardinfo {
	struct list_head	list;
	struct spi_board_info	board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_controller_list);

/*
 * Used to protect add/del operation for board_info list and
 * spi_controller list, and their matching process also used
 * to protect object of type struct idr.
 */
static DEFINE_MUTEX(board_lock);

/**
 * spi_alloc_device - Allocate a new SPI device
 * @ctlr: Controller to which device is connected
 * Context: can sleep
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately.  This allows a driver to directly
 * fill the spi_device with device parameters before calling
 * spi_add_device() on it.
 *
 * Caller is responsible to call spi_add_device() on the returned
 * spi_device structure to add it to the SPI controller.  If the caller
 * needs to discard the spi_device without adding it, then it should
 * call spi_dev_put() on it.
 *
 * Return: a pointer to the new device, or NULL.
 */
struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
{
	struct spi_device	*spi;

	if (!spi_controller_get(ctlr))
		return NULL;

	spi = kzalloc(sizeof(*spi), GFP_KERNEL);
	if (!spi) {
		spi_controller_put(ctlr);
		return NULL;
	}

	spi->pcpu_statistics = spi_alloc_pcpu_stats(NULL);
	if (!spi->pcpu_statistics) {
		kfree(spi);
		spi_controller_put(ctlr);
		return NULL;
	}

	spi->controller = ctlr;
	spi->dev.parent = &ctlr->dev;
	spi->dev.bus = &spi_bus_type;
	spi->dev.release = spidev_release;
	spi->mode = ctlr->buswidth_override_bits;

	device_initialize(&spi->dev);
	return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);
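/*
 * A short usage sketch of the allocate-then-add pattern described in the
 * kernel-doc above; the controller pointer, modalias, chip select and
 * speed are assumptions supplied by the caller:
 *
 *	struct spi_device *spi = spi_alloc_device(ctlr);
 *
 *	if (!spi)
 *		return -ENOMEM;
 *	strscpy(spi->modalias, "foo", sizeof(spi->modalias));
 *	spi_set_chipselect(spi, 0, 0);
 *	spi->max_speed_hz = 1000000;
 *	if (spi_add_device(spi)) {
 *		spi_dev_put(spi);	(discard without adding)
 *		return -ENODEV;
 *	}
 */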
static void spi_dev_set_name(struct spi_device *spi)
{
	struct device *dev = &spi->dev;
	struct fwnode_handle *fwnode = dev_fwnode(dev);

	if (is_acpi_device_node(fwnode)) {
		dev_set_name(dev, "spi-%s", acpi_dev_name(to_acpi_device_node(fwnode)));
		return;
	}

	if (is_software_node(fwnode)) {
		dev_set_name(dev, "spi-%pfwP", fwnode);
		return;
	}

	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev),
		     spi_get_chipselect(spi, 0));
}

/*
 * Zero(0) is a valid physical CS value and can be located at any
 * logical CS in the spi->chip_select[]. If all the physical CS
 * are initialized to 0 then It would be difficult to differentiate
 * between a valid physical CS 0 & an unused logical CS whose physical
 * CS can be 0. As a solution to this issue initialize all the CS to -1.
 * Now all the unused logical CS will have -1 physical CS value & can be
 * ignored while performing physical CS validity checks.
 */
#define SPI_INVALID_CS		((s8)-1)

static inline bool is_valid_cs(s8 chip_select)
{
	return chip_select != SPI_INVALID_CS;
}

static inline int spi_dev_check_cs(struct device *dev,
				   struct spi_device *spi, u8 idx,
				   struct spi_device *new_spi, u8 new_idx)
{
	u8 cs, cs_new;
	u8 idx_new;

	cs = spi_get_chipselect(spi, idx);
	for (idx_new = new_idx; idx_new < SPI_CS_CNT_MAX; idx_new++) {
		cs_new = spi_get_chipselect(new_spi, idx_new);
		if (is_valid_cs(cs) && is_valid_cs(cs_new) && cs == cs_new) {
			dev_err(dev, "chipselect %u already in use\n", cs_new);
			return -EBUSY;
		}
	}
	return 0;
}

static int spi_dev_check(struct device *dev, void *data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct spi_device *new_spi = data;
	int status, idx;

	if (spi->controller == new_spi->controller) {
		for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
			status = spi_dev_check_cs(dev, spi, idx, new_spi, 0);
			if (status)
				return status;
		}
	}
	return 0;
}

static void spi_cleanup(struct spi_device *spi)
{
	if (spi->controller->cleanup)
		spi->controller->cleanup(spi);
}

static int __spi_add_device(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;
	struct device *dev = ctlr->dev.parent;
	int status, idx;
	u8 cs;

	for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
		/* Chipselects are numbered 0..max; validate. */
		cs = spi_get_chipselect(spi, idx);
		if (is_valid_cs(cs) && cs >= ctlr->num_chipselect) {
			dev_err(dev, "cs%d >= max %d\n", spi_get_chipselect(spi, idx),
				ctlr->num_chipselect);
			return -EINVAL;
		}
	}

	/*
	 * Make sure that multiple logical CS doesn't map to the same physical CS.
	 * For example, spi->chip_select[0] != spi->chip_select[1] and so on.
	 */
	if (!spi_controller_is_target(ctlr)) {
		for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
			status = spi_dev_check_cs(dev, spi, idx, spi, idx + 1);
			if (status)
				return status;
		}
	}

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	/*
	 * We need to make sure there's no other device with this
	 * chipselect **BEFORE** we call setup(), else we'll trash
	 * its configuration.
	 */
	status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
	if (status)
		return status;

	/* Controller may unregister concurrently */
	if (IS_ENABLED(CONFIG_SPI_DYNAMIC) &&
	    !device_is_registered(&ctlr->dev)) {
		return -ENODEV;
	}

	if (ctlr->cs_gpiods) {
		u8 cs;

		for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
			cs = spi_get_chipselect(spi, idx);
			if (is_valid_cs(cs))
				spi_set_csgpiod(spi, idx, ctlr->cs_gpiods[cs]);
		}
	}

	/*
	 * Drivers may modify this initial i/o setup, but will
	 * normally rely on the device being setup.  Devices
	 * using SPI_CS_HIGH can't coexist well otherwise...
	 */
	status = spi_setup(spi);
	if (status < 0) {
		dev_err(dev, "can't setup %s, status %d\n",
			dev_name(&spi->dev), status);
		return status;
	}

	/* Device may be bound to an active driver when this returns */
	status = device_add(&spi->dev);
	if (status < 0) {
		dev_err(dev, "can't add %s, status %d\n",
			dev_name(&spi->dev), status);
		spi_cleanup(spi);
	} else {
		dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
	}

	return status;
}

/**
 * spi_add_device - Add spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device.  Devices allocated with
 * spi_alloc_device can be added onto the SPI bus with this function.
 *
 * Return: 0 on success; negative errno on failure
 */
int spi_add_device(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;
	int status;

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	mutex_lock(&ctlr->add_lock);
	status = __spi_add_device(spi);
	mutex_unlock(&ctlr->add_lock);
	return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);

static void spi_set_all_cs_unused(struct spi_device *spi)
{
	u8 idx;

	for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
		spi_set_chipselect(spi, idx, SPI_INVALID_CS);
}

/**
 * spi_new_device - instantiate one new SPI device
 * @ctlr: Controller to which device is connected
 * @chip: Describes the SPI device
 * Context: can sleep
 *
 * On typical mainboards, this is purely internal; and it's not needed
 * after board init creates the hard-wired devices.  Some development
 * platforms may not be able to use spi_register_board_info though, and
 * this is exported so that for example a USB or parport based adapter
 * driver could add devices (which it would learn about out-of-band).
 *
 * Return: the new device, or NULL.
 */
struct spi_device *spi_new_device(struct spi_controller *ctlr,
				  struct spi_board_info *chip)
{
	struct spi_device	*proxy;
	int			status;

	/*
	 * NOTE:  caller did any chip->bus_num checks necessary.
	 *
	 * Also, unless we change the return value convention to use
	 * error-or-pointer (not NULL-or-pointer), troubleshootability
	 * suggests syslogged diagnostics are best here (ugh).
	 */

	proxy = spi_alloc_device(ctlr);
	if (!proxy)
		return NULL;

	WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));

	/* Use provided chip-select for proxy device */
	spi_set_all_cs_unused(proxy);
	spi_set_chipselect(proxy, 0, chip->chip_select);

	proxy->max_speed_hz = chip->max_speed_hz;
	proxy->mode = chip->mode;
	proxy->irq = chip->irq;
	strscpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
	proxy->dev.platform_data = (void *) chip->platform_data;
	proxy->controller_data = chip->controller_data;
	proxy->controller_state = NULL;

	/*
	 * By default spi->chip_select[0] will hold the physical CS number,
	 * so set bit 0 in spi->cs_index_mask.
	 */
	proxy->cs_index_mask = BIT(0);

	if (chip->swnode) {
		status = device_add_software_node(&proxy->dev, chip->swnode);
		if (status) {
			dev_err(&ctlr->dev, "failed to add software node to '%s': %d\n",
				chip->modalias, status);
			goto err_dev_put;
		}
	}

	status = spi_add_device(proxy);
	if (status < 0)
		goto err_dev_put;

	return proxy;

err_dev_put:
	device_remove_software_node(&proxy->dev);
	spi_dev_put(proxy);
	return NULL;
}
EXPORT_SYMBOL_GPL(spi_new_device);

/**
 * spi_unregister_device - unregister a single SPI device
 * @spi: spi_device to unregister
 *
 * Start making the passed SPI device vanish. Normally this would be handled
 * by spi_unregister_controller().
 */
void spi_unregister_device(struct spi_device *spi)
{
	struct fwnode_handle *fwnode;

	if (!spi)
		return;

	fwnode = dev_fwnode(&spi->dev);
	if (is_of_node(fwnode)) {
		of_node_clear_flag(to_of_node(fwnode), OF_POPULATED);
		of_node_put(to_of_node(fwnode));
	} else if (is_acpi_device_node(fwnode)) {
		acpi_device_clear_enumerated(to_acpi_device_node(fwnode));
	}
	device_remove_software_node(&spi->dev);
	device_del(&spi->dev);
	spi_cleanup(spi);
	put_device(&spi->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_device);

static void spi_match_controller_to_boardinfo(struct spi_controller *ctlr,
					      struct spi_board_info *bi)
{
	struct spi_device *dev;

	if (ctlr->bus_num != bi->bus_num)
		return;

	dev = spi_new_device(ctlr, bi);
	if (!dev)
		dev_err(ctlr->dev.parent, "can't create new device for %s\n",
			bi->modalias);
}

/**
 * spi_register_board_info - register SPI devices for a given board
 * @info: array of chip descriptors
 * @n: how many descriptors are provided
 * Context: can sleep
 *
 * Board-specific early init code calls this (probably during arch_initcall)
 * with segments of the SPI device table.  Any device nodes are created later,
 * after the relevant parent SPI controller (bus_num) is defined.  We keep
 * this table of devices forever, so that reloading a controller driver will
 * not make Linux forget about these hard-wired devices.
 *
 * Other code can also call this, e.g. a particular add-on board might provide
 * SPI devices through its expansion connector, so code initializing that board
 * would naturally declare its SPI devices.
 *
 * The board info passed can safely be __initdata ... but be careful of
 * any embedded pointers (platform_data, etc), they're copied as-is.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
	struct boardinfo *bi;
	int i;

	if (!n)
		return 0;

	bi = kcalloc(n, sizeof(*bi), GFP_KERNEL);
	if (!bi)
		return -ENOMEM;

	for (i = 0; i < n; i++, bi++, info++) {
		struct spi_controller *ctlr;

		memcpy(&bi->board_info, info, sizeof(*info));

		mutex_lock(&board_lock);
		list_add_tail(&bi->list, &board_list);
		list_for_each_entry(ctlr, &spi_controller_list, list)
			spi_match_controller_to_boardinfo(ctlr,
							  &bi->board_info);
		mutex_unlock(&board_lock);
	}

	return 0;
}
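/*
 * A board-file sketch of the registration described above; the modalias,
 * bus number, chip select and speed are illustrative assumptions:
 *
 *	static struct spi_board_info board_spi_devices[] __initdata = {
 *		{
 *			.modalias	= "foo",
 *			.max_speed_hz	= 1000000,
 *			.bus_num	= 0,
 *			.chip_select	= 1,
 *			.mode		= SPI_MODE_0,
 *		},
 *	};
 *
 *	spi_register_board_info(board_spi_devices,
 *				ARRAY_SIZE(board_spi_devices));
 */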
/*-------------------------------------------------------------------------*/

/* Core methods for SPI resource management */

/**
 * spi_res_alloc - allocate a spi resource that is life-cycle managed
 *                 during the processing of a spi_message while using
 *                 spi_transfer_one
 * @spi:     the SPI device for which we allocate memory
 * @release: the release code to execute for this resource
 * @size:    size to alloc and return
 * @gfp:     GFP allocation flags
 *
 * Return: the pointer to the allocated data
 *
 * This may get enhanced in the future to allocate from a memory pool
 * of the @spi_device or @spi_controller to avoid repeated allocations.
 */
static void *spi_res_alloc(struct spi_device *spi, spi_res_release_t release,
			   size_t size, gfp_t gfp)
{
	struct spi_res *sres;

	sres = kzalloc(sizeof(*sres) + size, gfp);
	if (!sres)
		return NULL;

	INIT_LIST_HEAD(&sres->entry);
	sres->release = release;

	return sres->data;
}

/**
 * spi_res_free - free an SPI resource
 * @res: pointer to the custom data of a resource
 */
static void spi_res_free(void *res)
{
	struct spi_res *sres = container_of(res, struct spi_res, data);

	WARN_ON(!list_empty(&sres->entry));
	kfree(sres);
}

/**
 * spi_res_add - add a spi_res to the spi_message
 * @message: the SPI message
 * @res:     the spi_resource
 */
static void spi_res_add(struct spi_message *message, void *res)
{
	struct spi_res *sres = container_of(res, struct spi_res, data);

	WARN_ON(!list_empty(&sres->entry));
	list_add_tail(&sres->entry, &message->resources);
}

/**
 * spi_res_release - release all SPI resources for this message
 * @ctlr:    the @spi_controller
 * @message: the @spi_message
 */
static void spi_res_release(struct spi_controller *ctlr, struct spi_message *message)
{
	struct spi_res *res, *tmp;

	list_for_each_entry_safe_reverse(res, tmp, &message->resources, entry) {
		if (res->release)
			res->release(ctlr, message, res->data);

		list_del(&res->entry);

		kfree(res);
	}
}
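/*
 * A sketch of the life-cycle pattern implemented above, assuming a caller
 * that needs some per-message scratch data; the release callback and the
 * payload type (struct foo_state) are hypothetical:
 *
 *	static void foo_res_release(struct spi_controller *ctlr,
 *				    struct spi_message *msg, void *res)
 *	{
 *	}
 *	(undo any side effects in the callback; the core itself frees
 *	the memory after calling it)
 *
 *	struct foo_state *state;
 *
 *	state = spi_res_alloc(spi, foo_res_release,
 *			      sizeof(*state), GFP_KERNEL);
 *	if (!state)
 *		return -ENOMEM;
 *	spi_res_add(msg, state);
 *
 * The resources are then released in reverse order by spi_res_release()
 * once the message completes.
 */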
/*-------------------------------------------------------------------------*/
#define spi_for_each_valid_cs(spi, idx)				\
	for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)		\
		if (!(spi->cs_index_mask & BIT(idx))) {} else

static inline bool spi_is_last_cs(struct spi_device *spi)
{
	u8 idx;
	bool last = false;

	spi_for_each_valid_cs(spi, idx) {
		if (spi->controller->last_cs[idx] == spi_get_chipselect(spi, idx))
			last = true;
	}
	return last;
}

static void spi_toggle_csgpiod(struct spi_device *spi, u8 idx, bool enable, bool activate)
{
	/*
	 * Historically ACPI has no means of the GPIO polarity and
	 * thus the SPISerialBus() resource defines it on the per-chip
	 * basis. In order to avoid a chain of negations, the GPIO
	 * polarity is considered being Active High. Even for the cases
	 * when _DSD() is involved (in the updated versions of ACPI)
	 * the GPIO CS polarity must be defined Active High to avoid
	 * ambiguity. That's why we use enable, that takes SPI_CS_HIGH
	 * into account.
	 */
	if (is_acpi_device_node(dev_fwnode(&spi->dev)))
		gpiod_set_value_cansleep(spi_get_csgpiod(spi, idx), !enable);
	else
		/* Polarity handled by GPIO library */
		gpiod_set_value_cansleep(spi_get_csgpiod(spi, idx), activate);

	if (activate)
		spi_delay_exec(&spi->cs_setup, NULL);
	else
		spi_delay_exec(&spi->cs_inactive, NULL);
}

static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
{
	bool activate = enable;
	u8 idx;

	/*
	 * Avoid calling into the driver (or doing delays) if the chip select
	 * isn't actually changing from the last time this was called.
	 */
	if (!force && (enable == spi_is_last_cs(spi)) &&
	    (spi->controller->last_cs_index_mask == spi->cs_index_mask) &&
	    (spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH)))
		return;

	trace_spi_set_cs(spi, activate);

	spi->controller->last_cs_index_mask = spi->cs_index_mask;
	for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
		spi->controller->last_cs[idx] = enable ? spi_get_chipselect(spi, 0) : SPI_INVALID_CS;
	spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH;

	if (spi->controller->last_cs_mode_high)
		enable = !enable;

	/*
	 * Handle chip select delays for GPIO based CS or controllers without
	 * programmable chip select timing.
	 */
	if ((spi_is_csgpiod(spi) || !spi->controller->set_cs_timing) && !activate)
		spi_delay_exec(&spi->cs_hold, NULL);

	if (spi_is_csgpiod(spi)) {
		if (!(spi->mode & SPI_NO_CS)) {
			spi_for_each_valid_cs(spi, idx) {
				if (spi_get_csgpiod(spi, idx))
					spi_toggle_csgpiod(spi, idx, enable, activate);
			}
		}
		/* Some SPI controllers need both GPIO CS & ->set_cs() */
		if ((spi->controller->flags & SPI_CONTROLLER_GPIO_SS) &&
		    spi->controller->set_cs)
			spi->controller->set_cs(spi, !enable);
	} else if (spi->controller->set_cs) {
		spi->controller->set_cs(spi, !enable);
	}

	if (spi_is_csgpiod(spi) || !spi->controller->set_cs_timing) {
		if (activate)
			spi_delay_exec(&spi->cs_setup, NULL);
		else
			spi_delay_exec(&spi->cs_inactive, NULL);
	}
}

#ifdef CONFIG_HAS_DMA
static int spi_map_buf_attrs(struct spi_controller *ctlr, struct device *dev,
			     struct sg_table *sgt, void *buf, size_t len,
			     enum dma_data_direction dir, unsigned long attrs)
{
	const bool vmalloced_buf = is_vmalloc_addr(buf);
	unsigned int max_seg_size = dma_get_max_seg_size(dev);
#ifdef CONFIG_HIGHMEM
	const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE &&
				(unsigned long)buf < (PKMAP_BASE +
					(LAST_PKMAP * PAGE_SIZE)));
#else
	const bool kmap_buf = false;
#endif
	int desc_len;
	int sgs;
	struct page *vm_page;
	struct scatterlist *sg;
	void *sg_buf;
	size_t min;
	int i, ret;

	if (vmalloced_buf || kmap_buf) {
		desc_len = min_t(unsigned long, max_seg_size, PAGE_SIZE);
		sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
	} else if (virt_addr_valid(buf)) {
		desc_len = min_t(size_t, max_seg_size, ctlr->max_dma_len);
		sgs = DIV_ROUND_UP(len, desc_len);
	} else {
		return -EINVAL;
	}

	ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
	if (ret != 0)
		return ret;

	sg = &sgt->sgl[0];
	for (i = 0; i < sgs; i++) {

		if (vmalloced_buf || kmap_buf) {
			/*
			 * Next scatterlist entry size is the minimum between
			 * the desc_len and the remaining buffer length that
			 * fits in a page.
			 */
			min = min_t(size_t, desc_len,
				    min_t(size_t, len,
					  PAGE_SIZE - offset_in_page(buf)));
			if (vmalloced_buf)
				vm_page = vmalloc_to_page(buf);
			else
				vm_page = kmap_to_page(buf);
			if (!vm_page) {
				sg_free_table(sgt);
				return -ENOMEM;
			}
			sg_set_page(sg, vm_page, min, offset_in_page(buf));
		} else {
			min = min_t(size_t, len, desc_len);
			sg_buf = buf;
			sg_set_buf(sg, sg_buf, min);
		}

		buf += min;
		len -= min;

		sg = sg_next(sg);
	}

	ret = dma_map_sgtable(dev, sgt, dir, attrs);
	if (ret < 0) {
		sg_free_table(sgt);
		return ret;
	}

	return 0;
}

int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
		struct sg_table *sgt, void *buf, size_t len,
		enum dma_data_direction dir)
{
	return spi_map_buf_attrs(ctlr, dev, sgt, buf, len, dir, 0);
}

static void spi_unmap_buf_attrs(struct spi_controller *ctlr,
				struct device *dev, struct sg_table *sgt,
				enum dma_data_direction dir,
				unsigned long attrs)
{
	dma_unmap_sgtable(dev, sgt, dir, attrs);
	sg_free_table(sgt);
	sgt->orig_nents = 0;
	sgt->nents = 0;
}

void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
		   struct sg_table *sgt, enum dma_data_direction dir)
{
	spi_unmap_buf_attrs(ctlr, dev, sgt, dir, 0);
}

static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct device *tx_dev, *rx_dev;
	struct spi_transfer *xfer;
	int ret;

	if (!ctlr->can_dma)
		return 0;

	if (ctlr->dma_tx)
		tx_dev = ctlr->dma_tx->device->dev;
	else if (ctlr->dma_map_dev)
		tx_dev = ctlr->dma_map_dev;
	else
		tx_dev = ctlr->dev.parent;

	if (ctlr->dma_rx)
		rx_dev = ctlr->dma_rx->device->dev;
	else if (ctlr->dma_map_dev)
		rx_dev = ctlr->dma_map_dev;
	else
		rx_dev = ctlr->dev.parent;

	ret = -ENOMSG;
	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/* The sync is done before each transfer. */
		unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;

		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
			continue;

		if (xfer->tx_buf != NULL) {
			ret = spi_map_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
						(void *)xfer->tx_buf,
						xfer->len, DMA_TO_DEVICE,
						attrs);
			if (ret != 0)
				return ret;

			xfer->tx_sg_mapped = true;
		}

		if (xfer->rx_buf != NULL) {
			ret = spi_map_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
						xfer->rx_buf, xfer->len,
						DMA_FROM_DEVICE, attrs);
			if (ret != 0) {
				spi_unmap_buf_attrs(ctlr, tx_dev,
						&xfer->tx_sg, DMA_TO_DEVICE,
						attrs);

				return ret;
			}

			xfer->rx_sg_mapped = true;
		}
	}
	/* No transfer has been mapped, bail out with success */
	if (ret)
		return 0;

	ctlr->cur_rx_dma_dev = rx_dev;
	ctlr->cur_tx_dma_dev = tx_dev;

	return 0;
}
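/*
 * A sketch of how a controller driver opts into the mapping done by
 * __spi_map_msg() above: it provides a can_dma() callback, and the core
 * then hands it pre-mapped scatterlists in xfer->tx_sg/rx_sg. The length
 * threshold and the names are illustrative assumptions:
 *
 *	static bool foo_can_dma(struct spi_controller *ctlr,
 *				struct spi_device *spi,
 *				struct spi_transfer *xfer)
 *	{
 *		return xfer->len > 32;
 *	}
 *
 *	ctlr->can_dma = foo_can_dma;
 */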
static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct device *rx_dev = ctlr->cur_rx_dma_dev;
	struct device *tx_dev = ctlr->cur_tx_dma_dev;
	struct spi_transfer *xfer;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/* The sync has already been done after each transfer. */
		unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;

		if (xfer->rx_sg_mapped)
			spi_unmap_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
					    DMA_FROM_DEVICE, attrs);
		xfer->rx_sg_mapped = false;

		if (xfer->tx_sg_mapped)
			spi_unmap_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
					    DMA_TO_DEVICE, attrs);
		xfer->tx_sg_mapped = false;
	}

	return 0;
}

static void spi_dma_sync_for_device(struct spi_controller *ctlr,
				    struct spi_transfer *xfer)
{
	struct device *rx_dev = ctlr->cur_rx_dma_dev;
	struct device *tx_dev = ctlr->cur_tx_dma_dev;

	if (xfer->tx_sg_mapped)
		dma_sync_sgtable_for_device(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
	if (xfer->rx_sg_mapped)
		dma_sync_sgtable_for_device(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
}

static void spi_dma_sync_for_cpu(struct spi_controller *ctlr,
				 struct spi_transfer *xfer)
{
	struct device *rx_dev = ctlr->cur_rx_dma_dev;
	struct device *tx_dev = ctlr->cur_tx_dma_dev;

	if (xfer->rx_sg_mapped)
		dma_sync_sgtable_for_cpu(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
	if (xfer->tx_sg_mapped)
		dma_sync_sgtable_for_cpu(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
}
#else /* !CONFIG_HAS_DMA */
static inline int __spi_map_msg(struct spi_controller *ctlr,
				struct spi_message *msg)
{
	return 0;
}

static inline int __spi_unmap_msg(struct spi_controller *ctlr,
				  struct spi_message *msg)
{
	return 0;
}

static void spi_dma_sync_for_device(struct spi_controller *ctrl,
				    struct spi_transfer *xfer)
{
}

static void spi_dma_sync_for_cpu(struct spi_controller *ctrl,
				 struct spi_transfer *xfer)
{
}
#endif /* !CONFIG_HAS_DMA */

static inline int spi_unmap_msg(struct spi_controller *ctlr,
				struct spi_message *msg)
{
	struct spi_transfer *xfer;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/*
		 * Restore the original value of tx_buf or rx_buf if they are
		 * NULL.
		 */
		if (xfer->tx_buf == ctlr->dummy_tx)
			xfer->tx_buf = NULL;
		if (xfer->rx_buf == ctlr->dummy_rx)
			xfer->rx_buf = NULL;
	}

	return __spi_unmap_msg(ctlr, msg);
}

static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	void *tmp;
	unsigned int max_tx, max_rx;

	if ((ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX))
		&& !(msg->spi->mode & SPI_3WIRE)) {
		max_tx = 0;
		max_rx = 0;

		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) &&
			    !xfer->tx_buf)
				max_tx = max(xfer->len, max_tx);
			if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) &&
			    !xfer->rx_buf)
				max_rx = max(xfer->len, max_rx);
		}

		if (max_tx) {
			tmp = krealloc(ctlr->dummy_tx, max_tx,
				       GFP_KERNEL | GFP_DMA | __GFP_ZERO);
			if (!tmp)
				return -ENOMEM;
			ctlr->dummy_tx = tmp;
		}

		if (max_rx) {
			tmp = krealloc(ctlr->dummy_rx, max_rx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			ctlr->dummy_rx = tmp;
		}

		if (max_tx || max_rx) {
			list_for_each_entry(xfer, &msg->transfers,
					    transfer_list) {
				if (!xfer->len)
					continue;
				if (!xfer->tx_buf)
					xfer->tx_buf = ctlr->dummy_tx;
				if (!xfer->rx_buf)
					xfer->rx_buf = ctlr->dummy_rx;
			}
		}
	}

	return __spi_map_msg(ctlr, msg);
}

static int spi_transfer_wait(struct spi_controller *ctlr,
			     struct spi_message *msg,
			     struct spi_transfer *xfer)
{
	struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
	struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;
	u32 speed_hz = xfer->speed_hz;
	unsigned long long ms;

	if (spi_controller_is_target(ctlr)) {
		if (wait_for_completion_interruptible(&ctlr->xfer_completion)) {
			dev_dbg(&msg->spi->dev, "SPI transfer interrupted\n");
			return -EINTR;
		}
	} else {
		if (!speed_hz)
			speed_hz = 100000;

		/*
		 * For each byte we wait for 8 cycles of the SPI clock.
		 * Since speed is defined in Hz and we want milliseconds,
		 * use respective multiplier, but before the division,
		 * otherwise we may get 0 for short transfers.
		 */
		ms = 8LL * MSEC_PER_SEC * xfer->len;
		do_div(ms, speed_hz);

		/*
		 * Increase it twice and add 200 ms tolerance, use
		 * predefined maximum in case of overflow.
		 */
		ms += ms + 200;
		if (ms > UINT_MAX)
			ms = UINT_MAX;

		ms = wait_for_completion_timeout(&ctlr->xfer_completion,
						 msecs_to_jiffies(ms));

		if (ms == 0) {
			SPI_STATISTICS_INCREMENT_FIELD(statm, timedout);
			SPI_STATISTICS_INCREMENT_FIELD(stats, timedout);
			dev_err(&msg->spi->dev,
				"SPI transfer timed out\n");
			return -ETIMEDOUT;
		}

		if (xfer->error & SPI_TRANS_FAIL_IO)
			return -EIO;
	}

	return 0;
}
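/*
 * Worked example of the timeout computed above, assuming a 1000-byte
 * transfer at 1 MHz: 8 bits per byte gives 8 * 1000 * 1000 / 1000000
 * = 8 ms of clocking; doubling it and adding the 200 ms tolerance
 * yields a 216 ms timeout before the transfer is declared timed out.
 */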
static void _spi_transfer_delay_ns(u32 ns)
{
	if (!ns)
		return;
	if (ns <= NSEC_PER_USEC) {
		ndelay(ns);
	} else {
		u32 us = DIV_ROUND_UP(ns, NSEC_PER_USEC);

		fsleep(us);
	}
}

int spi_delay_to_ns(struct spi_delay *_delay, struct spi_transfer *xfer)
{
	u32 delay = _delay->value;
	u32 unit = _delay->unit;
	u32 hz;

	if (!delay)
		return 0;

	switch (unit) {
	case SPI_DELAY_UNIT_USECS:
		delay *= NSEC_PER_USEC;
		break;
	case SPI_DELAY_UNIT_NSECS:
		/* Nothing to do here */
		break;
	case SPI_DELAY_UNIT_SCK:
		/* Clock cycles need to be obtained from spi_transfer */
		if (!xfer)
			return -EINVAL;
		/*
		 * If there is unknown effective speed, approximate it
		 * by underestimating with half of the requested Hz.
		 */
		hz = xfer->effective_speed_hz ?: xfer->speed_hz / 2;
		if (!hz)
			return -EINVAL;

		/* Convert delay to nanoseconds */
		delay *= DIV_ROUND_UP(NSEC_PER_SEC, hz);
		break;
	default:
		return -EINVAL;
	}

	return delay;
}
EXPORT_SYMBOL_GPL(spi_delay_to_ns);

int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer)
{
	int delay;

	might_sleep();

	if (!_delay)
		return -EINVAL;

	delay = spi_delay_to_ns(_delay, xfer);
	if (delay < 0)
		return delay;

	_spi_transfer_delay_ns(delay);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_delay_exec);
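/*
 * Worked example for the SPI_DELAY_UNIT_SCK conversion above: a delay of
 * 4 clock cycles on a transfer whose effective_speed_hz is 1000000
 * becomes 4 * DIV_ROUND_UP(1000000000, 1000000) = 4 * 1000 = 4000 ns.
 */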
static void _spi_transfer_cs_change_delay(struct spi_message *msg,
					  struct spi_transfer *xfer)
{
	u32 default_delay_ns = 10 * NSEC_PER_USEC;
	u32 delay = xfer->cs_change_delay.value;
	u32 unit = xfer->cs_change_delay.unit;
	int ret;

	/* Return early on "fast" mode - for everything but USECS */
	if (!delay) {
		if (unit == SPI_DELAY_UNIT_USECS)
			_spi_transfer_delay_ns(default_delay_ns);
		return;
	}

	ret = spi_delay_exec(&xfer->cs_change_delay, xfer);
	if (ret) {
		dev_err_once(&msg->spi->dev,
			     "Use of unsupported delay unit %i, using default of %luus\n",
			     unit, default_delay_ns / NSEC_PER_USEC);
		_spi_transfer_delay_ns(default_delay_ns);
	}
}

void spi_transfer_cs_change_delay_exec(struct spi_message *msg,
				       struct spi_transfer *xfer)
{
	_spi_transfer_cs_change_delay(msg, xfer);
}
EXPORT_SYMBOL_GPL(spi_transfer_cs_change_delay_exec);

/*
 * spi_transfer_one_message - Default implementation of transfer_one_message()
 *
 * This is a standard implementation of transfer_one_message() for
 * drivers which implement a transfer_one() operation.  It provides
 * standard handling of delays and chip select management.
 */
static int spi_transfer_one_message(struct spi_controller *ctlr,
				    struct spi_message *msg)
{
	struct spi_transfer *xfer;
	bool keep_cs = false;
	int ret = 0;
	struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
	struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;

	xfer = list_first_entry(&msg->transfers, struct spi_transfer, transfer_list);
	spi_set_cs(msg->spi, !xfer->cs_off, false);

	SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
	SPI_STATISTICS_INCREMENT_FIELD(stats, messages);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		trace_spi_transfer_start(msg, xfer);

		spi_statistics_add_transfer_stats(statm, xfer, msg);
		spi_statistics_add_transfer_stats(stats, xfer, msg);

		if (!ctlr->ptp_sts_supported) {
			xfer->ptp_sts_word_pre = 0;
			ptp_read_system_prets(xfer->ptp_sts);
		}

		if ((xfer->tx_buf || xfer->rx_buf) && xfer->len) {
			reinit_completion(&ctlr->xfer_completion);

fallback_pio:
			spi_dma_sync_for_device(ctlr, xfer);
			ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
			if (ret < 0) {
				spi_dma_sync_for_cpu(ctlr, xfer);

				if ((xfer->tx_sg_mapped || xfer->rx_sg_mapped) &&
				   (xfer->error & SPI_TRANS_FAIL_NO_START)) {
					__spi_unmap_msg(ctlr, msg);
					ctlr->fallback = true;
					xfer->error &= ~SPI_TRANS_FAIL_NO_START;
					goto fallback_pio;
				}

				SPI_STATISTICS_INCREMENT_FIELD(statm, errors);
				SPI_STATISTICS_INCREMENT_FIELD(stats, errors);
				dev_err(&msg->spi->dev,
					"SPI transfer failed: %d\n", ret);
				goto out;
			}

			if (ret > 0) {
				ret = spi_transfer_wait(ctlr, msg, xfer);
				if (ret < 0)
					msg->status = ret;
			}

			spi_dma_sync_for_cpu(ctlr, xfer);
		} else {
			if (xfer->len)
				dev_err(&msg->spi->dev,
					"Bufferless transfer has length %u\n",
					xfer->len);
		}

		if (!ctlr->ptp_sts_supported) {
			ptp_read_system_postts(xfer->ptp_sts);
			xfer->ptp_sts_word_post = xfer->len;
		}

		trace_spi_transfer_stop(msg, xfer);

		if (msg->status != -EINPROGRESS)
			goto out;

		spi_transfer_delay_exec(xfer);

		if (xfer->cs_change) {
			if (list_is_last(&xfer->transfer_list,
					 &msg->transfers)) {
				keep_cs = true;
			} else {
				if (!xfer->cs_off)
					spi_set_cs(msg->spi, false, false);
				_spi_transfer_cs_change_delay(msg, xfer);
				if (!list_next_entry(xfer, transfer_list)->cs_off)
					spi_set_cs(msg->spi, true, false);
			}
		} else if (!list_is_last(&xfer->transfer_list, &msg->transfers) &&
			   xfer->cs_off != list_next_entry(xfer, transfer_list)->cs_off) {
			spi_set_cs(msg->spi, xfer->cs_off, false);
		}

		msg->actual_length += xfer->len;
	}

out:
	if (ret != 0 || !keep_cs)
		spi_set_cs(msg->spi, false, false);

	if (msg->status == -EINPROGRESS)
		msg->status = ret;

	if (msg->status && ctlr->handle_err)
		ctlr->handle_err(ctlr, msg);

	spi_finalize_current_message(ctlr);

	return ret;
}
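/*
 * A sketch of the contract implemented above, from a driver's point of
 * view: transfer_one() starts the hardware and returns a positive value,
 * after which the core waits in spi_transfer_wait() until the driver's
 * (hypothetical) interrupt handler reports completion:
 *
 *	static irqreturn_t foo_irq(int irq, void *dev_id)
 *	{
 *		struct spi_controller *ctlr = dev_id;
 *
 *		(drain the FIFO, ack the interrupt, ...)
 *		spi_finalize_current_transfer(ctlr);
 *		return IRQ_HANDLED;
 *	}
 */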
*/ void spi_finalize_current_transfer(struct spi_controller *ctlr) { complete(&ctlr->xfer_completion); } EXPORT_SYMBOL_GPL(spi_finalize_current_transfer); static void spi_idle_runtime_pm(struct spi_controller *ctlr) { if (ctlr->auto_runtime_pm) { pm_runtime_put_autosuspend(ctlr->dev.parent); } } static int __spi_pump_transfer_message(struct spi_controller *ctlr, struct spi_message *msg, bool was_busy) { struct spi_transfer *xfer; int ret; if (!was_busy && ctlr->auto_runtime_pm) { ret = pm_runtime_get_sync(ctlr->dev.parent); if (ret < 0) { pm_runtime_put_noidle(ctlr->dev.parent); dev_err(&ctlr->dev, "Failed to power device: %d\n", ret); msg->status = ret; spi_finalize_current_message(ctlr); return ret; } } if (!was_busy) trace_spi_controller_busy(ctlr); if (!was_busy && ctlr->prepare_transfer_hardware) { ret = ctlr->prepare_transfer_hardware(ctlr); if (ret) { dev_err(&ctlr->dev, "failed to prepare transfer hardware: %d\n", ret); if (ctlr->auto_runtime_pm) pm_runtime_put(ctlr->dev.parent); msg->status = ret; spi_finalize_current_message(ctlr); return ret; } } trace_spi_message_start(msg); if (ctlr->prepare_message) { ret = ctlr->prepare_message(ctlr, msg); if (ret) { dev_err(&ctlr->dev, "failed to prepare message: %d\n", ret); msg->status = ret; spi_finalize_current_message(ctlr); return ret; } msg->prepared = true; } ret = spi_map_msg(ctlr, msg); if (ret) { msg->status = ret; spi_finalize_current_message(ctlr); return ret; } if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) { list_for_each_entry(xfer, &msg->transfers, transfer_list) { xfer->ptp_sts_word_pre = 0; ptp_read_system_prets(xfer->ptp_sts); } } /* * A driver's implementation of transfer_one_message() must arrange for * spi_finalize_current_message() to get called. Most drivers will do * this in the calling context, but some don't. For those cases, a * completion is used to guarantee that this function does not return * until spi_finalize_current_message() is done accessing * ctlr->cur_msg. * The following two flags make it possible to opportunistically skip the * use of the completion, since taking it involves expensive spin locks. * In case of a race with the context that calls * spi_finalize_current_message(), the completion will always be used, * due to strict ordering of these flags using barriers. */ WRITE_ONCE(ctlr->cur_msg_incomplete, true); WRITE_ONCE(ctlr->cur_msg_need_completion, false); reinit_completion(&ctlr->cur_msg_completion); smp_wmb(); /* Make these available to spi_finalize_current_message() */ ret = ctlr->transfer_one_message(ctlr, msg); if (ret) { dev_err(&ctlr->dev, "failed to transfer one message from queue\n"); return ret; } WRITE_ONCE(ctlr->cur_msg_need_completion, true); smp_mb(); /* See spi_finalize_current_message()... */ if (READ_ONCE(ctlr->cur_msg_incomplete)) wait_for_completion(&ctlr->cur_msg_completion); return 0; } /** * __spi_pump_messages - function which processes SPI message queue * @ctlr: controller to process queue for * @in_kthread: true if we are in the context of the message pump thread * * This function checks if there is any SPI message in the queue that * needs processing and, if so, calls out to the driver to initialize hardware * and transfer each message. * * Note that it is called both from the kthread itself and also from * inside spi_sync(); the queue extraction handling at the top of the * function should deal with this safely.
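*
* For orientation, the completion handshake between this pump and
* spi_finalize_current_message() orders roughly as follows (a summary of
* the existing code above, not a new interface):
*
*	pump context:				finalizing context:
*	cur_msg_incomplete = true;
*	smp_wmb();
*	transfer_one_message(ctlr, msg);	cur_msg_incomplete = false;
*	cur_msg_need_completion = true;		smp_mb();
*	smp_mb();				if (cur_msg_need_completion)
*	if (cur_msg_incomplete)			complete(&cur_msg_completion);
*		wait_for_completion(&cur_msg_completion);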
*/ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread) { struct spi_message *msg; bool was_busy = false; unsigned long flags; int ret; /* Take the I/O mutex */ mutex_lock(&ctlr->io_mutex); /* Lock queue */ spin_lock_irqsave(&ctlr->queue_lock, flags); /* Make sure we are not already running a message */ if (ctlr->cur_msg) goto out_unlock; /* Check if the queue is idle */ if (list_empty(&ctlr->queue) || !ctlr->running) { if (!ctlr->busy) goto out_unlock; /* Defer any non-atomic teardown to the thread */ if (!in_kthread) { if (!ctlr->dummy_rx && !ctlr->dummy_tx && !ctlr->unprepare_transfer_hardware) { spi_idle_runtime_pm(ctlr); ctlr->busy = false; ctlr->queue_empty = true; trace_spi_controller_idle(ctlr); } else { kthread_queue_work(ctlr->kworker, &ctlr->pump_messages); } goto out_unlock; } ctlr->busy = false; spin_unlock_irqrestore(&ctlr->queue_lock, flags); kfree(ctlr->dummy_rx); ctlr->dummy_rx = NULL; kfree(ctlr->dummy_tx); ctlr->dummy_tx = NULL; if (ctlr->unprepare_transfer_hardware && ctlr->unprepare_transfer_hardware(ctlr)) dev_err(&ctlr->dev, "failed to unprepare transfer hardware\n"); spi_idle_runtime_pm(ctlr); trace_spi_controller_idle(ctlr); spin_lock_irqsave(&ctlr->queue_lock, flags); ctlr->queue_empty = true; goto out_unlock; } /* Extract head of queue */ msg = list_first_entry(&ctlr->queue, struct spi_message, queue); ctlr->cur_msg = msg; list_del_init(&msg->queue); if (ctlr->busy) was_busy = true; else ctlr->busy = true; spin_unlock_irqrestore(&ctlr->queue_lock, flags); ret = __spi_pump_transfer_message(ctlr, msg, was_busy); kthread_queue_work(ctlr->kworker, &ctlr->pump_messages); ctlr->cur_msg = NULL; ctlr->fallback = false; mutex_unlock(&ctlr->io_mutex); /* Prod the scheduler in case transfer_one() was busy waiting */ if (!ret) cond_resched(); return; out_unlock: spin_unlock_irqrestore(&ctlr->queue_lock, flags); mutex_unlock(&ctlr->io_mutex); } /** * spi_pump_messages - kthread work function which processes spi message queue * @work: pointer to kthread work struct contained in the controller struct */ static void spi_pump_messages(struct kthread_work *work) { struct spi_controller *ctlr = container_of(work, struct spi_controller, pump_messages); __spi_pump_messages(ctlr, true); } /** * spi_take_timestamp_pre - helper to collect the beginning of the TX timestamp * @ctlr: Pointer to the spi_controller structure of the driver * @xfer: Pointer to the transfer being timestamped * @progress: How many words (not bytes) have been transferred so far * @irqs_off: If true, will disable IRQs and preemption for the duration of the * transfer, for less jitter in time measurement. Only compatible * with PIO drivers. If true, must follow up with * spi_take_timestamp_post or otherwise system will crash. * WARNING: for fully predictable results, the CPU frequency must * also be under control (governor). * * This is a helper for drivers to collect the beginning of the TX timestamp * for the requested byte from the SPI transfer. The frequency with which this * function must be called (once per word, once for the whole transfer, once * per batch of words etc) is arbitrary as long as the @tx buffer offset is * greater than or equal to the requested byte at the time of the call. The * timestamp is only taken once, at the first such call. It is assumed that * the driver advances its @tx buffer pointer monotonically. 
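*
* A hedged sketch of a PIO TX loop using the pre/post pair; the register
* name FOO_TX_FIFO and the tx[] handling are made up, and 8-bit words are
* assumed so that one byte equals one word of @progress:
*
*	for (i = 0; i < xfer->len; i++) {
*		spi_take_timestamp_pre(ctlr, xfer, i, irqs_off);
*		writel(tx[i], base + FOO_TX_FIFO);
*		spi_take_timestamp_post(ctlr, xfer, i, irqs_off);
*	}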
*/ void spi_take_timestamp_pre(struct spi_controller *ctlr, struct spi_transfer *xfer, size_t progress, bool irqs_off) { if (!xfer->ptp_sts) return; if (xfer->timestamped) return; if (progress > xfer->ptp_sts_word_pre) return; /* Capture the resolution of the timestamp */ xfer->ptp_sts_word_pre = progress; if (irqs_off) { local_irq_save(ctlr->irq_flags); preempt_disable(); } ptp_read_system_prets(xfer->ptp_sts); } EXPORT_SYMBOL_GPL(spi_take_timestamp_pre); /** * spi_take_timestamp_post - helper to collect the end of the TX timestamp * @ctlr: Pointer to the spi_controller structure of the driver * @xfer: Pointer to the transfer being timestamped * @progress: How many words (not bytes) have been transferred so far * @irqs_off: If true, will re-enable IRQs and preemption for the local CPU. * * This is a helper for drivers to collect the end of the TX timestamp for * the requested byte from the SPI transfer. Can be called with an arbitrary * frequency: only the first call where @tx exceeds or is equal to the * requested word will be timestamped. */ void spi_take_timestamp_post(struct spi_controller *ctlr, struct spi_transfer *xfer, size_t progress, bool irqs_off) { if (!xfer->ptp_sts) return; if (xfer->timestamped) return; if (progress < xfer->ptp_sts_word_post) return; ptp_read_system_postts(xfer->ptp_sts); if (irqs_off) { local_irq_restore(ctlr->irq_flags); preempt_enable(); } /* Capture the resolution of the timestamp */ xfer->ptp_sts_word_post = progress; xfer->timestamped = 1; } EXPORT_SYMBOL_GPL(spi_take_timestamp_post); /** * spi_set_thread_rt - set the controller to pump at realtime priority * @ctlr: controller to boost priority of * * This can be called because the controller requested realtime priority * (by setting the ->rt value before calling spi_register_controller()) or * because a device on the bus said that its transfers needed realtime * priority. * * NOTE: at the moment if any device on a bus says it needs realtime then * the thread will be at realtime priority for all transfers on that * controller. If this eventually becomes a problem we may see if we can * find a way to boost the priority only temporarily during relevant * transfers. */ static void spi_set_thread_rt(struct spi_controller *ctlr) { dev_info(&ctlr->dev, "will run message pump with realtime priority\n"); sched_set_fifo(ctlr->kworker->task); } static int spi_init_queue(struct spi_controller *ctlr) { ctlr->running = false; ctlr->busy = false; ctlr->queue_empty = true; ctlr->kworker = kthread_run_worker(0, dev_name(&ctlr->dev)); if (IS_ERR(ctlr->kworker)) { dev_err(&ctlr->dev, "failed to create message pump kworker\n"); return PTR_ERR(ctlr->kworker); } kthread_init_work(&ctlr->pump_messages, spi_pump_messages); /* * Controller config will indicate if this controller should run the * message pump with high (realtime) priority to reduce the transfer * latency on the bus by minimising the delay between a transfer * request and the scheduling of the message pump thread. Without this * setting the message pump thread will remain at default priority. */ if (ctlr->rt) spi_set_thread_rt(ctlr); return 0; } /** * spi_get_next_queued_message() - called by driver to check for queued * messages * @ctlr: the controller to check for queued messages * * If there are more messages in the queue, the next message is returned from * this call. * * Return: the next message in the queue, else NULL if the queue is empty. 
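*
* One possible use, sketched with a hypothetical foo_keep_bus_hot() helper:
* a driver's transfer_one_message() may peek at the next message to decide
* whether the current bus configuration can be kept:
*
*	next = spi_get_next_queued_message(ctlr);
*	if (next && next->spi == msg->spi)
*		foo_keep_bus_hot(ctlr);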
*/ struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr) { struct spi_message *next; unsigned long flags; /* Get a pointer to the next message, if any */ spin_lock_irqsave(&ctlr->queue_lock, flags); next = list_first_entry_or_null(&ctlr->queue, struct spi_message, queue); spin_unlock_irqrestore(&ctlr->queue_lock, flags); return next; } EXPORT_SYMBOL_GPL(spi_get_next_queued_message); /* * __spi_unoptimize_message - shared implementation of spi_unoptimize_message() * and spi_maybe_unoptimize_message() * @msg: the message to unoptimize * * Peripheral drivers should use spi_unoptimize_message() and callers inside * core should use spi_maybe_unoptimize_message() rather than calling this * function directly. * * It is not valid to call this on a message that is not currently optimized. */ static void __spi_unoptimize_message(struct spi_message *msg) { struct spi_controller *ctlr = msg->spi->controller; if (ctlr->unoptimize_message) ctlr->unoptimize_message(msg); spi_res_release(ctlr, msg); msg->optimized = false; msg->opt_state = NULL; } /* * spi_maybe_unoptimize_message - unoptimize msg not managed by a peripheral * @msg: the message to unoptimize * * This function is used to unoptimize a message if and only if it was * optimized by the core (via spi_maybe_optimize_message()). */ static void spi_maybe_unoptimize_message(struct spi_message *msg) { if (!msg->pre_optimized && msg->optimized && !msg->spi->controller->defer_optimize_message) __spi_unoptimize_message(msg); } /** * spi_finalize_current_message() - the current message is complete * @ctlr: the controller to return the message to * * Called by the driver to notify the core that the message in the front of the * queue is complete and can be removed from the queue. */ void spi_finalize_current_message(struct spi_controller *ctlr) { struct spi_transfer *xfer; struct spi_message *mesg; int ret; mesg = ctlr->cur_msg; if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) { list_for_each_entry(xfer, &mesg->transfers, transfer_list) { ptp_read_system_postts(xfer->ptp_sts); xfer->ptp_sts_word_post = xfer->len; } } if (unlikely(ctlr->ptp_sts_supported)) list_for_each_entry(xfer, &mesg->transfers, transfer_list) WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped); spi_unmap_msg(ctlr, mesg); if (mesg->prepared && ctlr->unprepare_message) { ret = ctlr->unprepare_message(ctlr, mesg); if (ret) { dev_err(&ctlr->dev, "failed to unprepare message: %d\n", ret); } } mesg->prepared = false; spi_maybe_unoptimize_message(mesg); WRITE_ONCE(ctlr->cur_msg_incomplete, false); smp_mb(); /* See __spi_pump_transfer_message()... */ if (READ_ONCE(ctlr->cur_msg_need_completion)) complete(&ctlr->cur_msg_completion); trace_spi_message_done(mesg); mesg->state = NULL; if (mesg->complete) mesg->complete(mesg->context); } EXPORT_SYMBOL_GPL(spi_finalize_current_message); static int spi_start_queue(struct spi_controller *ctlr) { unsigned long flags; spin_lock_irqsave(&ctlr->queue_lock, flags); if (ctlr->running || ctlr->busy) { spin_unlock_irqrestore(&ctlr->queue_lock, flags); return -EBUSY; } ctlr->running = true; ctlr->cur_msg = NULL; spin_unlock_irqrestore(&ctlr->queue_lock, flags); kthread_queue_work(ctlr->kworker, &ctlr->pump_messages); return 0; } static int spi_stop_queue(struct spi_controller *ctlr) { unsigned int limit = 500; unsigned long flags; /* * This is a bit lame, but is optimized for the common execution path. 
* A wait_queue on the ctlr->busy could be used, but then the common * execution path (pump_messages) would be required to call wake_up or * friends on every SPI message. Do this instead. */ do { spin_lock_irqsave(&ctlr->queue_lock, flags); if (list_empty(&ctlr->queue) && !ctlr->busy) { ctlr->running = false; spin_unlock_irqrestore(&ctlr->queue_lock, flags); return 0; } spin_unlock_irqrestore(&ctlr->queue_lock, flags); usleep_range(10000, 11000); } while (--limit); return -EBUSY; } static int spi_destroy_queue(struct spi_controller *ctlr) { int ret; ret = spi_stop_queue(ctlr); /* * kthread_flush_worker will block until all work is done. * If the reason that stop_queue timed out is that the work will never * finish, then it does no good to call flush/stop thread, so * return anyway. */ if (ret) { dev_err(&ctlr->dev, "problem destroying queue\n"); return ret; } kthread_destroy_worker(ctlr->kworker); return 0; } static int __spi_queued_transfer(struct spi_device *spi, struct spi_message *msg, bool need_pump) { struct spi_controller *ctlr = spi->controller; unsigned long flags; spin_lock_irqsave(&ctlr->queue_lock, flags); if (!ctlr->running) { spin_unlock_irqrestore(&ctlr->queue_lock, flags); return -ESHUTDOWN; } msg->actual_length = 0; msg->status = -EINPROGRESS; list_add_tail(&msg->queue, &ctlr->queue); ctlr->queue_empty = false; if (!ctlr->busy && need_pump) kthread_queue_work(ctlr->kworker, &ctlr->pump_messages); spin_unlock_irqrestore(&ctlr->queue_lock, flags); return 0; } /** * spi_queued_transfer - transfer function for queued transfers * @spi: SPI device which is requesting transfer * @msg: SPI message which is to be handled and queued to the driver queue * * Return: zero on success, else a negative error code. */ static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg) { return __spi_queued_transfer(spi, msg, true); } static int spi_controller_initialize_queue(struct spi_controller *ctlr) { int ret; ctlr->transfer = spi_queued_transfer; if (!ctlr->transfer_one_message) ctlr->transfer_one_message = spi_transfer_one_message; /* Initialize and start queue */ ret = spi_init_queue(ctlr); if (ret) { dev_err(&ctlr->dev, "problem initializing queue\n"); goto err_init_queue; } ctlr->queued = true; ret = spi_start_queue(ctlr); if (ret) { dev_err(&ctlr->dev, "problem starting queue\n"); goto err_start_queue; } return 0; err_start_queue: spi_destroy_queue(ctlr); err_init_queue: return ret; } /** * spi_flush_queue - Send all pending messages in the queue from the caller's * context * @ctlr: controller to process queue for * * This should be used when one wants to ensure all pending messages have been * sent before doing something. It is used by the spi-mem code to make sure SPI * memory operations do not preempt regular SPI transfers that have been queued * before the spi-mem operation.
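*
* Sketch of the intended call pattern; foo_exec_mem_op() stands in for the
* spi-mem operation that follows the flush:
*
*	spi_flush_queue(ctlr);		// drain previously queued messages
*	foo_exec_mem_op(ctlr, op);	// then run the memory operation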
*/ void spi_flush_queue(struct spi_controller *ctlr) { if (ctlr->transfer == spi_queued_transfer) __spi_pump_messages(ctlr, false); } /*-------------------------------------------------------------------------*/ #if defined(CONFIG_OF) static void of_spi_parse_dt_cs_delay(struct device_node *nc, struct spi_delay *delay, const char *prop) { u32 value; if (!of_property_read_u32(nc, prop, &value)) { if (value > U16_MAX) { delay->value = DIV_ROUND_UP(value, 1000); delay->unit = SPI_DELAY_UNIT_USECS; } else { delay->value = value; delay->unit = SPI_DELAY_UNIT_NSECS; } } } static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi, struct device_node *nc) { u32 value, cs[SPI_CS_CNT_MAX]; int rc, idx; /* Mode (clock phase/polarity/etc.) */ if (of_property_read_bool(nc, "spi-cpha")) spi->mode |= SPI_CPHA; if (of_property_read_bool(nc, "spi-cpol")) spi->mode |= SPI_CPOL; if (of_property_read_bool(nc, "spi-3wire")) spi->mode |= SPI_3WIRE; if (of_property_read_bool(nc, "spi-lsb-first")) spi->mode |= SPI_LSB_FIRST; if (of_property_read_bool(nc, "spi-cs-high")) spi->mode |= SPI_CS_HIGH; /* Device DUAL/QUAD mode */ if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) { switch (value) { case 0: spi->mode |= SPI_NO_TX; break; case 1: break; case 2: spi->mode |= SPI_TX_DUAL; break; case 4: spi->mode |= SPI_TX_QUAD; break; case 8: spi->mode |= SPI_TX_OCTAL; break; default: dev_warn(&ctlr->dev, "spi-tx-bus-width %d not supported\n", value); break; } } if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) { switch (value) { case 0: spi->mode |= SPI_NO_RX; break; case 1: break; case 2: spi->mode |= SPI_RX_DUAL; break; case 4: spi->mode |= SPI_RX_QUAD; break; case 8: spi->mode |= SPI_RX_OCTAL; break; default: dev_warn(&ctlr->dev, "spi-rx-bus-width %d not supported\n", value); break; } } if (spi_controller_is_target(ctlr)) { if (!of_node_name_eq(nc, "slave")) { dev_err(&ctlr->dev, "%pOF is not called 'slave'\n", nc); return -EINVAL; } return 0; } if (ctlr->num_chipselect > SPI_CS_CNT_MAX) { dev_err(&ctlr->dev, "No. of CS is more than max. no. of supported CS\n"); return -EINVAL; } spi_set_all_cs_unused(spi); /* Device address */ rc = of_property_read_variable_u32_array(nc, "reg", &cs[0], 1, SPI_CS_CNT_MAX); if (rc < 0) { dev_err(&ctlr->dev, "%pOF has no valid 'reg' property (%d)\n", nc, rc); return rc; } if (rc > ctlr->num_chipselect) { dev_err(&ctlr->dev, "%pOF has number of CS > ctlr->num_chipselect (%d)\n", nc, rc); return rc; } if ((of_property_present(nc, "parallel-memories")) && (!(ctlr->flags & SPI_CONTROLLER_MULTI_CS))) { dev_err(&ctlr->dev, "SPI controller doesn't support multi CS\n"); return -EINVAL; } for (idx = 0; idx < rc; idx++) spi_set_chipselect(spi, idx, cs[idx]); /* * By default spi->chip_select[0] will hold the physical CS number, * so set bit 0 in spi->cs_index_mask. 
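*
* For example, a "parallel-memories" device wired to CS 0 and CS 1 keeps
* both values in spi->chip_select[] and could later be driven with
* cs_index_mask set to BIT(0) | BIT(1), assuming the controller advertises
* SPI_CONTROLLER_MULTI_CS as checked above.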
*/ spi->cs_index_mask = BIT(0); /* Device speed */ if (!of_property_read_u32(nc, "spi-max-frequency", &value)) spi->max_speed_hz = value; /* Device CS delays */ of_spi_parse_dt_cs_delay(nc, &spi->cs_setup, "spi-cs-setup-delay-ns"); of_spi_parse_dt_cs_delay(nc, &spi->cs_hold, "spi-cs-hold-delay-ns"); of_spi_parse_dt_cs_delay(nc, &spi->cs_inactive, "spi-cs-inactive-delay-ns"); return 0; } static struct spi_device * of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc) { struct spi_device *spi; int rc; /* Alloc an spi_device */ spi = spi_alloc_device(ctlr); if (!spi) { dev_err(&ctlr->dev, "spi_device alloc error for %pOF\n", nc); rc = -ENOMEM; goto err_out; } /* Select device driver */ rc = of_alias_from_compatible(nc, spi->modalias, sizeof(spi->modalias)); if (rc < 0) { dev_err(&ctlr->dev, "cannot find modalias for %pOF\n", nc); goto err_out; } rc = of_spi_parse_dt(ctlr, spi, nc); if (rc) goto err_out; /* Store a pointer to the node in the device structure */ of_node_get(nc); device_set_node(&spi->dev, of_fwnode_handle(nc)); /* Register the new device */ rc = spi_add_device(spi); if (rc) { dev_err(&ctlr->dev, "spi_device register error %pOF\n", nc); goto err_of_node_put; } return spi; err_of_node_put: of_node_put(nc); err_out: spi_dev_put(spi); return ERR_PTR(rc); } /** * of_register_spi_devices() - Register child devices onto the SPI bus * @ctlr: Pointer to spi_controller device * * Registers an spi_device for each child node of controller node which * represents a valid SPI target device. */ static void of_register_spi_devices(struct spi_controller *ctlr) { struct spi_device *spi; struct device_node *nc; for_each_available_child_of_node(ctlr->dev.of_node, nc) { if (of_node_test_and_set_flag(nc, OF_POPULATED)) continue; spi = of_register_spi_device(ctlr, nc); if (IS_ERR(spi)) { dev_warn(&ctlr->dev, "Failed to create SPI device for %pOF\n", nc); of_node_clear_flag(nc, OF_POPULATED); } } } #else static void of_register_spi_devices(struct spi_controller *ctlr) { } #endif /** * spi_new_ancillary_device() - Register ancillary SPI device * @spi: Pointer to the main SPI device registering the ancillary device * @chip_select: Chip Select of the ancillary device * * Register an ancillary SPI device; for example some chips have a chip-select * for normal device usage and another one for setup/firmware upload. * * This may only be called from main SPI device's probe routine. * * Return: 0 on success; negative errno on failure */ struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 chip_select) { struct spi_controller *ctlr = spi->controller; struct spi_device *ancillary; int rc; /* Alloc an spi_device */ ancillary = spi_alloc_device(ctlr); if (!ancillary) { rc = -ENOMEM; goto err_out; } strscpy(ancillary->modalias, "dummy", sizeof(ancillary->modalias)); /* Use provided chip-select for ancillary device */ spi_set_all_cs_unused(ancillary); spi_set_chipselect(ancillary, 0, chip_select); /* Take over SPI mode/speed from SPI main device */ ancillary->max_speed_hz = spi->max_speed_hz; ancillary->mode = spi->mode; /* * By default spi->chip_select[0] will hold the physical CS number, * so set bit 0 in spi->cs_index_mask. 
*/ ancillary->cs_index_mask = BIT(0); WARN_ON(!mutex_is_locked(&ctlr->add_lock)); /* Register the new device */ rc = __spi_add_device(ancillary); if (rc) { dev_err(&spi->dev, "failed to register ancillary device\n"); goto err_out; } return ancillary; err_out: spi_dev_put(ancillary); return ERR_PTR(rc); } EXPORT_SYMBOL_GPL(spi_new_ancillary_device); #ifdef CONFIG_ACPI struct acpi_spi_lookup { struct spi_controller *ctlr; u32 max_speed_hz; u32 mode; int irq; u8 bits_per_word; u8 chip_select; int n; int index; }; static int acpi_spi_count(struct acpi_resource *ares, void *data) { struct acpi_resource_spi_serialbus *sb; int *count = data; if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS) return 1; sb = &ares->data.spi_serial_bus; if (sb->type != ACPI_RESOURCE_SERIAL_TYPE_SPI) return 1; *count = *count + 1; return 1; } /** * acpi_spi_count_resources - Count the number of SpiSerialBus resources * @adev: ACPI device * * Return: the number of SpiSerialBus resources in the ACPI-device's * resource-list; or a negative error code. */ int acpi_spi_count_resources(struct acpi_device *adev) { LIST_HEAD(r); int count = 0; int ret; ret = acpi_dev_get_resources(adev, &r, acpi_spi_count, &count); if (ret < 0) return ret; acpi_dev_free_resource_list(&r); return count; } EXPORT_SYMBOL_GPL(acpi_spi_count_resources); static void acpi_spi_parse_apple_properties(struct acpi_device *dev, struct acpi_spi_lookup *lookup) { const union acpi_object *obj; if (!x86_apple_machine) return; if (!acpi_dev_get_property(dev, "spiSclkPeriod", ACPI_TYPE_BUFFER, &obj) && obj->buffer.length >= 4) lookup->max_speed_hz = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer; if (!acpi_dev_get_property(dev, "spiWordSize", ACPI_TYPE_BUFFER, &obj) && obj->buffer.length == 8) lookup->bits_per_word = *(u64 *)obj->buffer.pointer; if (!acpi_dev_get_property(dev, "spiBitOrder", ACPI_TYPE_BUFFER, &obj) && obj->buffer.length == 8 && !*(u64 *)obj->buffer.pointer) lookup->mode |= SPI_LSB_FIRST; if (!acpi_dev_get_property(dev, "spiSPO", ACPI_TYPE_BUFFER, &obj) && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer) lookup->mode |= SPI_CPOL; if (!acpi_dev_get_property(dev, "spiSPH", ACPI_TYPE_BUFFER, &obj) && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer) lookup->mode |= SPI_CPHA; } static int acpi_spi_add_resource(struct acpi_resource *ares, void *data) { struct acpi_spi_lookup *lookup = data; struct spi_controller *ctlr = lookup->ctlr; if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) { struct acpi_resource_spi_serialbus *sb; acpi_handle parent_handle; acpi_status status; sb = &ares->data.spi_serial_bus; if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) { if (lookup->index != -1 && lookup->n++ != lookup->index) return 1; status = acpi_get_handle(NULL, sb->resource_source.string_ptr, &parent_handle); if (ACPI_FAILURE(status)) return -ENODEV; if (ctlr) { if (!device_match_acpi_handle(ctlr->dev.parent, parent_handle)) return -ENODEV; } else { struct acpi_device *adev; adev = acpi_fetch_acpi_dev(parent_handle); if (!adev) return -ENODEV; ctlr = acpi_spi_find_controller_by_adev(adev); if (!ctlr) return -EPROBE_DEFER; lookup->ctlr = ctlr; } /* * ACPI DeviceSelection numbering is handled by the * host controller driver in Windows and can vary * from driver to driver. In Linux we always expect * 0 .. max - 1 so we need to ask the driver to * translate between the two schemes. 
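*
* For example, firmware that numbers device selections starting at 1 would
* need fw_translate_cs() to map DeviceSelection 1 onto Linux chip select 0;
* the actual mapping is entirely driver-defined.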
*/ if (ctlr->fw_translate_cs) { int cs = ctlr->fw_translate_cs(ctlr, sb->device_selection); if (cs < 0) return cs; lookup->chip_select = cs; } else { lookup->chip_select = sb->device_selection; } lookup->max_speed_hz = sb->connection_speed; lookup->bits_per_word = sb->data_bit_length; if (sb->clock_phase == ACPI_SPI_SECOND_PHASE) lookup->mode |= SPI_CPHA; if (sb->clock_polarity == ACPI_SPI_START_HIGH) lookup->mode |= SPI_CPOL; if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH) lookup->mode |= SPI_CS_HIGH; } } else if (lookup->irq < 0) { struct resource r; if (acpi_dev_resource_interrupt(ares, 0, &r)) lookup->irq = r.start; } /* Always tell the ACPI core to skip this resource */ return 1; } /** * acpi_spi_device_alloc - Allocate an SPI device, and fill it in with ACPI information * @ctlr: controller to which the spi device belongs * @adev: ACPI Device for the spi device * @index: Index of the spi resource inside the ACPI Node * * This should be used to allocate a new SPI device from an ACPI Device node. * The caller is responsible for calling spi_add_device() to register the SPI device. * * If ctlr is set to NULL, the controller for the SPI device will be looked up * using the resource. * If index is set to -1, index is not used. * Note: If index is -1, ctlr must be set. * * Return: a pointer to the new device, or ERR_PTR on error. */ struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr, struct acpi_device *adev, int index) { acpi_handle parent_handle = NULL; struct list_head resource_list; struct acpi_spi_lookup lookup = {}; struct spi_device *spi; int ret; if (!ctlr && index == -1) return ERR_PTR(-EINVAL); lookup.ctlr = ctlr; lookup.irq = -1; lookup.index = index; lookup.n = 0; INIT_LIST_HEAD(&resource_list); ret = acpi_dev_get_resources(adev, &resource_list, acpi_spi_add_resource, &lookup); acpi_dev_free_resource_list(&resource_list); if (ret < 0) /* Found SPI in _CRS but it points to another controller */ return ERR_PTR(ret); if (!lookup.max_speed_hz && ACPI_SUCCESS(acpi_get_parent(adev->handle, &parent_handle)) && device_match_acpi_handle(lookup.ctlr->dev.parent, parent_handle)) { /* Apple does not use _CRS but nested devices for SPI target devices */ acpi_spi_parse_apple_properties(adev, &lookup); } if (!lookup.max_speed_hz) return ERR_PTR(-ENODEV); spi = spi_alloc_device(lookup.ctlr); if (!spi) { dev_err(&lookup.ctlr->dev, "failed to allocate SPI device for %s\n", dev_name(&adev->dev)); return ERR_PTR(-ENOMEM); } spi_set_all_cs_unused(spi); spi_set_chipselect(spi, 0, lookup.chip_select); ACPI_COMPANION_SET(&spi->dev, adev); spi->max_speed_hz = lookup.max_speed_hz; spi->mode |= lookup.mode; spi->irq = lookup.irq; spi->bits_per_word = lookup.bits_per_word; /* * By default spi->chip_select[0] will hold the physical CS number, * so set bit 0 in spi->cs_index_mask.
*/ spi->cs_index_mask = BIT(0); return spi; } EXPORT_SYMBOL_GPL(acpi_spi_device_alloc); static acpi_status acpi_register_spi_device(struct spi_controller *ctlr, struct acpi_device *adev) { struct spi_device *spi; if (acpi_bus_get_status(adev) || !adev->status.present || acpi_device_enumerated(adev)) return AE_OK; spi = acpi_spi_device_alloc(ctlr, adev, -1); if (IS_ERR(spi)) { if (PTR_ERR(spi) == -ENOMEM) return AE_NO_MEMORY; else return AE_OK; } acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias, sizeof(spi->modalias)); acpi_device_set_enumerated(adev); adev->power.flags.ignore_parent = true; if (spi_add_device(spi)) { adev->power.flags.ignore_parent = false; dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n", dev_name(&adev->dev)); spi_dev_put(spi); } return AE_OK; } static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level, void *data, void **return_value) { struct acpi_device *adev = acpi_fetch_acpi_dev(handle); struct spi_controller *ctlr = data; if (!adev) return AE_OK; return acpi_register_spi_device(ctlr, adev); } #define SPI_ACPI_ENUMERATE_MAX_DEPTH 32 static void acpi_register_spi_devices(struct spi_controller *ctlr) { acpi_status status; acpi_handle handle; handle = ACPI_HANDLE(ctlr->dev.parent); if (!handle) return; status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, SPI_ACPI_ENUMERATE_MAX_DEPTH, acpi_spi_add_device, NULL, ctlr, NULL); if (ACPI_FAILURE(status)) dev_warn(&ctlr->dev, "failed to enumerate SPI target devices\n"); } #else static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {} #endif /* CONFIG_ACPI */ static void spi_controller_release(struct device *dev) { struct spi_controller *ctlr; ctlr = container_of(dev, struct spi_controller, dev); kfree(ctlr); } static const struct class spi_controller_class = { .name = "spi_master", .dev_release = spi_controller_release, .dev_groups = spi_controller_groups, }; #ifdef CONFIG_SPI_SLAVE /** * spi_target_abort - abort the ongoing transfer request on an SPI target controller * @spi: device used for the current transfer */ int spi_target_abort(struct spi_device *spi) { struct spi_controller *ctlr = spi->controller; if (spi_controller_is_target(ctlr) && ctlr->target_abort) return ctlr->target_abort(ctlr); return -ENOTSUPP; } EXPORT_SYMBOL_GPL(spi_target_abort); static ssize_t slave_show(struct device *dev, struct device_attribute *attr, char *buf) { struct spi_controller *ctlr = container_of(dev, struct spi_controller, dev); struct device *child; int ret; child = device_find_any_child(&ctlr->dev); ret = sysfs_emit(buf, "%s\n", child ? 
to_spi_device(child)->modalias : NULL); put_device(child); return ret; } static ssize_t slave_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct spi_controller *ctlr = container_of(dev, struct spi_controller, dev); struct spi_device *spi; struct device *child; char name[32]; int rc; rc = sscanf(buf, "%31s", name); if (rc != 1 || !name[0]) return -EINVAL; child = device_find_any_child(&ctlr->dev); if (child) { /* Remove registered target device */ device_unregister(child); put_device(child); } if (strcmp(name, "(null)")) { /* Register new target device */ spi = spi_alloc_device(ctlr); if (!spi) return -ENOMEM; strscpy(spi->modalias, name, sizeof(spi->modalias)); rc = spi_add_device(spi); if (rc) { spi_dev_put(spi); return rc; } } return count; } static DEVICE_ATTR_RW(slave); static struct attribute *spi_target_attrs[] = { &dev_attr_slave.attr, NULL, }; static const struct attribute_group spi_target_group = { .attrs = spi_target_attrs, }; static const struct attribute_group *spi_target_groups[] = { &spi_controller_statistics_group, &spi_target_group, NULL, }; static const struct class spi_target_class = { .name = "spi_slave", .dev_release = spi_controller_release, .dev_groups = spi_target_groups, }; #else extern struct class spi_target_class; /* dummy */ #endif /** * __spi_alloc_controller - allocate an SPI host or target controller * @dev: the controller, possibly using the platform_bus * @size: how much zeroed driver-private data to allocate; the pointer to this * memory is in the driver_data field of the returned device, accessible * with spi_controller_get_devdata(); the memory is cacheline aligned; * drivers granting DMA access to portions of their private data need to * round up @size using ALIGN(size, dma_get_cache_alignment()). * @target: flag indicating whether to allocate an SPI host (false) or SPI target (true) * controller * Context: can sleep * * This call is used only by SPI controller drivers, which are the * only ones directly touching chip registers. It's how they allocate * an spi_controller structure, prior to calling spi_register_controller(). * * This must be called from context that can sleep. * * The caller is responsible for assigning the bus number and initializing the * controller's methods before calling spi_register_controller(); and (after * errors adding the device) calling spi_controller_put() to prevent a memory * leak. * * Return: the SPI controller structure on success, else NULL. 
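*
* Hedged sketch of the usual call through the spi_alloc_host() wrapper from
* a platform driver's probe(); struct foo_priv is hypothetical driver data:
*
*	ctlr = spi_alloc_host(&pdev->dev, sizeof(struct foo_priv));
*	if (!ctlr)
*		return -ENOMEM;
*	priv = spi_controller_get_devdata(ctlr);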
*/ struct spi_controller *__spi_alloc_controller(struct device *dev, unsigned int size, bool target) { struct spi_controller *ctlr; size_t ctlr_size = ALIGN(sizeof(*ctlr), dma_get_cache_alignment()); if (!dev) return NULL; ctlr = kzalloc(size + ctlr_size, GFP_KERNEL); if (!ctlr) return NULL; device_initialize(&ctlr->dev); INIT_LIST_HEAD(&ctlr->queue); spin_lock_init(&ctlr->queue_lock); spin_lock_init(&ctlr->bus_lock_spinlock); mutex_init(&ctlr->bus_lock_mutex); mutex_init(&ctlr->io_mutex); mutex_init(&ctlr->add_lock); ctlr->bus_num = -1; ctlr->num_chipselect = 1; ctlr->target = target; if (IS_ENABLED(CONFIG_SPI_SLAVE) && target) ctlr->dev.class = &spi_target_class; else ctlr->dev.class = &spi_controller_class; ctlr->dev.parent = dev; pm_suspend_ignore_children(&ctlr->dev, true); spi_controller_set_devdata(ctlr, (void *)ctlr + ctlr_size); return ctlr; } EXPORT_SYMBOL_GPL(__spi_alloc_controller); static void devm_spi_release_controller(struct device *dev, void *ctlr) { spi_controller_put(*(struct spi_controller **)ctlr); } /** * __devm_spi_alloc_controller - resource-managed __spi_alloc_controller() * @dev: physical device of SPI controller * @size: how much zeroed driver-private data to allocate * @target: whether to allocate an SPI host (false) or SPI target (true) controller * Context: can sleep * * Allocate an SPI controller and automatically release a reference on it * when @dev is unbound from its driver. Drivers are thus relieved from * having to call spi_controller_put(). * * The arguments to this function are identical to __spi_alloc_controller(). * * Return: the SPI controller structure on success, else NULL. */ struct spi_controller *__devm_spi_alloc_controller(struct device *dev, unsigned int size, bool target) { struct spi_controller **ptr, *ctlr; ptr = devres_alloc(devm_spi_release_controller, sizeof(*ptr), GFP_KERNEL); if (!ptr) return NULL; ctlr = __spi_alloc_controller(dev, size, target); if (ctlr) { ctlr->devm_allocated = true; *ptr = ctlr; devres_add(dev, ptr); } else { devres_free(ptr); } return ctlr; } EXPORT_SYMBOL_GPL(__devm_spi_alloc_controller); /** * spi_get_gpio_descs() - grab chip select GPIOs for the controller * @ctlr: The SPI controller to grab GPIO descriptors for */ static int spi_get_gpio_descs(struct spi_controller *ctlr) { int nb, i; struct gpio_desc **cs; struct device *dev = &ctlr->dev; unsigned long native_cs_mask = 0; unsigned int num_cs_gpios = 0; nb = gpiod_count(dev, "cs"); if (nb < 0) { /* No GPIOs at all is fine, else return the error */ if (nb == -ENOENT) return 0; return nb; } ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect); cs = devm_kcalloc(dev, ctlr->num_chipselect, sizeof(*cs), GFP_KERNEL); if (!cs) return -ENOMEM; ctlr->cs_gpiods = cs; for (i = 0; i < nb; i++) { /* * Most chipselects are active low, the inverted * semantics are handled by special quirks in gpiolib, * so initializing them GPIOD_OUT_LOW here means * "unasserted", in most cases this will drive the physical * line high. */ cs[i] = devm_gpiod_get_index_optional(dev, "cs", i, GPIOD_OUT_LOW); if (IS_ERR(cs[i])) return PTR_ERR(cs[i]); if (cs[i]) { /* * If we find a CS GPIO, name it after the device and * chip select line. 
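* For a controller registered as "spi1", the first CS GPIO would thus be
* named "spi1 CS0".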
*/ char *gpioname; gpioname = devm_kasprintf(dev, GFP_KERNEL, "%s CS%d", dev_name(dev), i); if (!gpioname) return -ENOMEM; gpiod_set_consumer_name(cs[i], gpioname); num_cs_gpios++; continue; } if (ctlr->max_native_cs && i >= ctlr->max_native_cs) { dev_err(dev, "Invalid native chip select %d\n", i); return -EINVAL; } native_cs_mask |= BIT(i); } ctlr->unused_native_cs = ffs(~native_cs_mask) - 1; if ((ctlr->flags & SPI_CONTROLLER_GPIO_SS) && num_cs_gpios && ctlr->max_native_cs && ctlr->unused_native_cs >= ctlr->max_native_cs) { dev_err(dev, "No unused native chip select available\n"); return -EINVAL; } return 0; } static int spi_controller_check_ops(struct spi_controller *ctlr) { /* * The controller may implement only the high-level SPI-memory-like * operations if it does not support regular SPI transfers, and this is * a valid use case. * If ->mem_ops or ->mem_ops->exec_op is NULL, we request that at least * one of the ->transfer_xxx() methods be implemented. */ if (!ctlr->mem_ops || !ctlr->mem_ops->exec_op) { if (!ctlr->transfer && !ctlr->transfer_one && !ctlr->transfer_one_message) { return -EINVAL; } } return 0; } /* Allocate dynamic bus number using Linux idr */ static int spi_controller_id_alloc(struct spi_controller *ctlr, int start, int end) { int id; mutex_lock(&board_lock); id = idr_alloc(&spi_controller_idr, ctlr, start, end, GFP_KERNEL); mutex_unlock(&board_lock); if (WARN(id < 0, "couldn't get idr")) return id == -ENOSPC ? -EBUSY : id; ctlr->bus_num = id; return 0; } /** * spi_register_controller - register SPI host or target controller * @ctlr: initialized controller, originally from spi_alloc_host() or * spi_alloc_target() * Context: can sleep * * SPI controllers connect to their drivers using some non-SPI bus, * such as the platform bus. The final stage of probe() in that code * includes calling spi_register_controller() to hook up to this SPI bus glue. * * SPI controllers use board-specific (often SoC-specific) bus numbers, * and board-specific addressing for SPI devices combines those numbers * with chip select numbers. Since SPI does not directly support dynamic * device identification, boards need configuration tables telling which * chip is at which address. * * This must be called from context that can sleep. It returns zero on * success, else a negative error code (dropping the controller's refcount). * After a successful return, the caller is responsible for calling * spi_unregister_controller(). * * Return: zero on success, else a negative error code. */ int spi_register_controller(struct spi_controller *ctlr) { struct device *dev = ctlr->dev.parent; struct boardinfo *bi; int first_dynamic; int status; int idx; if (!dev) return -ENODEV; /* * Make sure all necessary hooks are implemented before registering * the SPI controller.
*/ status = spi_controller_check_ops(ctlr); if (status) return status; if (ctlr->bus_num < 0) ctlr->bus_num = of_alias_get_id(ctlr->dev.of_node, "spi"); if (ctlr->bus_num >= 0) { /* Devices with a fixed bus num must check-in with the num */ status = spi_controller_id_alloc(ctlr, ctlr->bus_num, ctlr->bus_num + 1); if (status) return status; } if (ctlr->bus_num < 0) { first_dynamic = of_alias_get_highest_id("spi"); if (first_dynamic < 0) first_dynamic = 0; else first_dynamic++; status = spi_controller_id_alloc(ctlr, first_dynamic, 0); if (status) return status; } ctlr->bus_lock_flag = 0; init_completion(&ctlr->xfer_completion); init_completion(&ctlr->cur_msg_completion); if (!ctlr->max_dma_len) ctlr->max_dma_len = INT_MAX; /* * Register the device, then userspace will see it. * Registration fails if the bus ID is in use. */ dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num); if (!spi_controller_is_target(ctlr) && ctlr->use_gpio_descriptors) { status = spi_get_gpio_descs(ctlr); if (status) goto free_bus_id; /* * A controller using GPIO descriptors always * supports SPI_CS_HIGH if need be. */ ctlr->mode_bits |= SPI_CS_HIGH; } /* * Even if it's just one always-selected device, there must * be at least one chipselect. */ if (!ctlr->num_chipselect) { status = -EINVAL; goto free_bus_id; } /* Setting last_cs to SPI_INVALID_CS means no chip selected */ for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) ctlr->last_cs[idx] = SPI_INVALID_CS; status = device_add(&ctlr->dev); if (status < 0) goto free_bus_id; dev_dbg(dev, "registered %s %s\n", spi_controller_is_target(ctlr) ? "target" : "host", dev_name(&ctlr->dev)); /* * If we're using a queued driver, start the queue. Note that we don't * need the queueing logic if the driver is only supporting high-level * memory operations. */ if (ctlr->transfer) { dev_info(dev, "controller is unqueued, this is deprecated\n"); } else if (ctlr->transfer_one || ctlr->transfer_one_message) { status = spi_controller_initialize_queue(ctlr); if (status) { device_del(&ctlr->dev); goto free_bus_id; } } /* Add statistics */ ctlr->pcpu_statistics = spi_alloc_pcpu_stats(dev); if (!ctlr->pcpu_statistics) { dev_err(dev, "Error allocating per-cpu statistics\n"); status = -ENOMEM; goto destroy_queue; } mutex_lock(&board_lock); list_add_tail(&ctlr->list, &spi_controller_list); list_for_each_entry(bi, &board_list, list) spi_match_controller_to_boardinfo(ctlr, &bi->board_info); mutex_unlock(&board_lock); /* Register devices from the device tree and ACPI */ of_register_spi_devices(ctlr); acpi_register_spi_devices(ctlr); return status; destroy_queue: spi_destroy_queue(ctlr); free_bus_id: mutex_lock(&board_lock); idr_remove(&spi_controller_idr, ctlr->bus_num); mutex_unlock(&board_lock); return status; } EXPORT_SYMBOL_GPL(spi_register_controller); static void devm_spi_unregister(struct device *dev, void *res) { spi_unregister_controller(*(struct spi_controller **)res); } /** * devm_spi_register_controller - register managed SPI host or target controller * @dev: device managing SPI controller * @ctlr: initialized controller, originally from spi_alloc_host() or * spi_alloc_target() * Context: can sleep * * Register a SPI device as with spi_register_controller() which will * automatically be unregistered and freed. * * Return: zero on success, else a negative error code. 
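*
* Typical tail of a probe() using this helper, sketched with hypothetical
* foo_* callbacks:
*
*	ctlr->setup = foo_setup;
*	ctlr->transfer_one = foo_transfer_one;
*	return devm_spi_register_controller(&pdev->dev, ctlr);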
*/ int devm_spi_register_controller(struct device *dev, struct spi_controller *ctlr) { struct spi_controller **ptr; int ret; ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL); if (!ptr) return -ENOMEM; ret = spi_register_controller(ctlr); if (!ret) { *ptr = ctlr; devres_add(dev, ptr); } else { devres_free(ptr); } return ret; } EXPORT_SYMBOL_GPL(devm_spi_register_controller); static int __unregister(struct device *dev, void *null) { spi_unregister_device(to_spi_device(dev)); return 0; } /** * spi_unregister_controller - unregister SPI host or target controller * @ctlr: the controller being unregistered * Context: can sleep * * This call is used only by SPI controller drivers, which are the * only ones directly touching chip registers. * * This must be called from context that can sleep. * * Note that this function also drops a reference to the controller. */ void spi_unregister_controller(struct spi_controller *ctlr) { struct spi_controller *found; int id = ctlr->bus_num; /* Prevent addition of new devices, unregister existing ones */ if (IS_ENABLED(CONFIG_SPI_DYNAMIC)) mutex_lock(&ctlr->add_lock); device_for_each_child(&ctlr->dev, NULL, __unregister); /* First make sure that this controller was ever added */ mutex_lock(&board_lock); found = idr_find(&spi_controller_idr, id); mutex_unlock(&board_lock); if (ctlr->queued) { if (spi_destroy_queue(ctlr)) dev_err(&ctlr->dev, "queue remove failed\n"); } mutex_lock(&board_lock); list_del(&ctlr->list); mutex_unlock(&board_lock); device_del(&ctlr->dev); /* Free bus id */ mutex_lock(&board_lock); if (found == ctlr) idr_remove(&spi_controller_idr, id); mutex_unlock(&board_lock); if (IS_ENABLED(CONFIG_SPI_DYNAMIC)) mutex_unlock(&ctlr->add_lock); /* * Release the last reference on the controller if its driver * has not yet been converted to devm_spi_alloc_host/target(). */ if (!ctlr->devm_allocated) put_device(&ctlr->dev); } EXPORT_SYMBOL_GPL(spi_unregister_controller); static inline int __spi_check_suspended(const struct spi_controller *ctlr) { return ctlr->flags & SPI_CONTROLLER_SUSPENDED ? 
-ESHUTDOWN : 0; } static inline void __spi_mark_suspended(struct spi_controller *ctlr) { mutex_lock(&ctlr->bus_lock_mutex); ctlr->flags |= SPI_CONTROLLER_SUSPENDED; mutex_unlock(&ctlr->bus_lock_mutex); } static inline void __spi_mark_resumed(struct spi_controller *ctlr) { mutex_lock(&ctlr->bus_lock_mutex); ctlr->flags &= ~SPI_CONTROLLER_SUSPENDED; mutex_unlock(&ctlr->bus_lock_mutex); } int spi_controller_suspend(struct spi_controller *ctlr) { int ret = 0; /* Basically no-ops for non-queued controllers */ if (ctlr->queued) { ret = spi_stop_queue(ctlr); if (ret) dev_err(&ctlr->dev, "queue stop failed\n"); } __spi_mark_suspended(ctlr); return ret; } EXPORT_SYMBOL_GPL(spi_controller_suspend); int spi_controller_resume(struct spi_controller *ctlr) { int ret = 0; __spi_mark_resumed(ctlr); if (ctlr->queued) { ret = spi_start_queue(ctlr); if (ret) dev_err(&ctlr->dev, "queue restart failed\n"); } return ret; } EXPORT_SYMBOL_GPL(spi_controller_resume); /*-------------------------------------------------------------------------*/ /* Core methods for spi_message alterations */ static void __spi_replace_transfers_release(struct spi_controller *ctlr, struct spi_message *msg, void *res) { struct spi_replaced_transfers *rxfer = res; size_t i; /* Call extra callback if requested */ if (rxfer->release) rxfer->release(ctlr, msg, res); /* Insert replaced transfers back into the message */ list_splice(&rxfer->replaced_transfers, rxfer->replaced_after); /* Remove the formerly inserted entries */ for (i = 0; i < rxfer->inserted; i++) list_del(&rxfer->inserted_transfers[i].transfer_list); } /** * spi_replace_transfers - replace transfers with several transfers * and register change with spi_message.resources * @msg: the spi_message we work upon * @xfer_first: the first spi_transfer we want to replace * @remove: number of transfers to remove * @insert: the number of transfers we want to insert instead * @release: extra release code necessary in some circumstances * @extradatasize: extra data to allocate (with alignment guarantees * of struct @spi_transfer) * @gfp: gfp flags * * Returns: pointer to @spi_replaced_transfers, * PTR_ERR(...) in case of errors. */ static struct spi_replaced_transfers *spi_replace_transfers( struct spi_message *msg, struct spi_transfer *xfer_first, size_t remove, size_t insert, spi_replaced_release_t release, size_t extradatasize, gfp_t gfp) { struct spi_replaced_transfers *rxfer; struct spi_transfer *xfer; size_t i; /* Allocate the structure using spi_res */ rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release, struct_size(rxfer, inserted_transfers, insert) + extradatasize, gfp); if (!rxfer) return ERR_PTR(-ENOMEM); /* The release code to invoke before running the generic release */ rxfer->release = release; /* Assign extradata */ if (extradatasize) rxfer->extradata = &rxfer->inserted_transfers[insert]; /* Init the replaced_transfers list */ INIT_LIST_HEAD(&rxfer->replaced_transfers); /* * Assign the list_entry after which we should reinsert * the @replaced_transfers - it may be spi_message.transfers! */ rxfer->replaced_after = xfer_first->transfer_list.prev; /* Remove the requested number of transfers */ for (i = 0; i < remove; i++) { /* * If the entry after replaced_after is msg->transfers, * then we have been requested to remove more transfers * than are in the list.
*/ if (rxfer->replaced_after->next == &msg->transfers) { dev_err(&msg->spi->dev, "requested to remove more spi_transfers than are available\n"); /* Insert replaced transfers back into the message */ list_splice(&rxfer->replaced_transfers, rxfer->replaced_after); /* Free the spi_replace_transfer structure... */ spi_res_free(rxfer); /* ...and return with an error */ return ERR_PTR(-EINVAL); } /* * Remove the entry after replaced_after from the list of * transfers and add it to the list of replaced_transfers. */ list_move_tail(rxfer->replaced_after->next, &rxfer->replaced_transfers); } /* * Create copies of the given xfer with identical settings, * based on the first transfer that is being removed. */ for (i = 0; i < insert; i++) { /* We need to run in reverse order */ xfer = &rxfer->inserted_transfers[insert - 1 - i]; /* Copy all spi_transfer data */ memcpy(xfer, xfer_first, sizeof(*xfer)); /* Add to list */ list_add(&xfer->transfer_list, rxfer->replaced_after); /* Clear cs_change and delay for all but the last */ if (i) { xfer->cs_change = false; xfer->delay.value = 0; } } /* Set up inserted... */ rxfer->inserted = insert; /* ...and register it with spi_res/spi_message */ spi_res_add(msg, rxfer); return rxfer; } static int __spi_split_transfer_maxsize(struct spi_controller *ctlr, struct spi_message *msg, struct spi_transfer **xferp, size_t maxsize) { struct spi_transfer *xfer = *xferp, *xfers; struct spi_replaced_transfers *srt; size_t offset; size_t count, i; /* Calculate how many we have to replace */ count = DIV_ROUND_UP(xfer->len, maxsize); /* Create replacement */ srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, GFP_KERNEL); if (IS_ERR(srt)) return PTR_ERR(srt); xfers = srt->inserted_transfers; /* * Now handle each of those newly inserted spi_transfers. * Note that the replacement spi_transfers are all preset * to the same values as *xferp, so tx_buf, rx_buf and len * are all identical (as are most other fields), * so we just have to fix up len and the pointers. */ /* * The first transfer just needs the length modified, so we * run it outside the loop. */ xfers[0].len = min_t(size_t, maxsize, xfer[0].len); /* All the others need rx_buf/tx_buf also set */ for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) { /* Update rx_buf, tx_buf and DMA */ if (xfers[i].rx_buf) xfers[i].rx_buf += offset; if (xfers[i].tx_buf) xfers[i].tx_buf += offset; /* Update length */ xfers[i].len = min(maxsize, xfers[i].len - offset); } /* * We set up xferp to the last entry we have inserted, * so that we skip those already split transfers. */ *xferp = &xfers[count - 1]; /* Increment statistics counters */ SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, transfers_split_maxsize); SPI_STATISTICS_INCREMENT_FIELD(msg->spi->pcpu_statistics, transfers_split_maxsize); return 0; } /** * spi_split_transfers_maxsize - split SPI transfers into multiple transfers * when an individual transfer exceeds a * certain size * @ctlr: the @spi_controller for this transfer * @msg: the @spi_message to transform * @maxsize: the maximum length, in bytes, above which a transfer is split * * This function allocates resources that are automatically freed during the * spi message unoptimize phase so this function should only be called from * optimize_message callbacks.
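*
* As a worked example: with @maxsize = 64, a 200-byte transfer is replaced
* by DIV_ROUND_UP(200, 64) = 4 transfers of 64, 64, 64 and 8 bytes, with
* tx_buf/rx_buf advanced by 64 bytes at each step.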
* * Return: status of transformation */ int spi_split_transfers_maxsize(struct spi_controller *ctlr, struct spi_message *msg, size_t maxsize) { struct spi_transfer *xfer; int ret; /* * Iterate over the transfer_list, * but note that xfer is advanced to the last transfer inserted * to avoid checking sizes again unnecessarily (also xfer does * potentially belong to a different list by the time the * replacement has happened). */ list_for_each_entry(xfer, &msg->transfers, transfer_list) { if (xfer->len > maxsize) { ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer, maxsize); if (ret) return ret; } } return 0; } EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize); /** * spi_split_transfers_maxwords - split SPI transfers into multiple transfers * when an individual transfer exceeds a * certain number of SPI words * @ctlr: the @spi_controller for this transfer * @msg: the @spi_message to transform * @maxwords: the number of words to limit each transfer to * * This function allocates resources that are automatically freed during the * spi message unoptimize phase so this function should only be called from * optimize_message callbacks. * * Return: status of transformation */ int spi_split_transfers_maxwords(struct spi_controller *ctlr, struct spi_message *msg, size_t maxwords) { struct spi_transfer *xfer; /* * Iterate over the transfer_list, * but note that xfer is advanced to the last transfer inserted * to avoid checking sizes again unnecessarily (also xfer does * potentially belong to a different list by the time the * replacement has happened). */ list_for_each_entry(xfer, &msg->transfers, transfer_list) { size_t maxsize; int ret; maxsize = maxwords * spi_bpw_to_bytes(xfer->bits_per_word); if (xfer->len > maxsize) { ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer, maxsize); if (ret) return ret; } } return 0; } EXPORT_SYMBOL_GPL(spi_split_transfers_maxwords); /*-------------------------------------------------------------------------*/ /* * Core methods for SPI controller protocol drivers. Some of the * other core methods are currently defined as inline functions. */ static int __spi_validate_bits_per_word(struct spi_controller *ctlr, u8 bits_per_word) { if (ctlr->bits_per_word_mask) { /* Only 32 bits fit in the mask */ if (bits_per_word > 32) return -EINVAL; if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word))) return -EINVAL; } return 0; } /** * spi_set_cs_timing - configure CS setup, hold, and inactive delays * @spi: the device that requires specific CS timing configuration * * Return: zero on success, else a negative error code. */ static int spi_set_cs_timing(struct spi_device *spi) { struct device *parent = spi->controller->dev.parent; int status = 0; if (spi->controller->set_cs_timing && !spi_get_csgpiod(spi, 0)) { if (spi->controller->auto_runtime_pm) { status = pm_runtime_get_sync(parent); if (status < 0) { pm_runtime_put_noidle(parent); dev_err(&spi->controller->dev, "Failed to power device: %d\n", status); return status; } status = spi->controller->set_cs_timing(spi); pm_runtime_put_autosuspend(parent); } else { status = spi->controller->set_cs_timing(spi); } } return status; } /** * spi_setup - setup SPI mode and clock rate * @spi: the device whose settings are being modified * Context: can sleep, and no requests are queued to the device * * SPI protocol drivers may need to update the transfer mode if the * device doesn't work with its default. They may likewise need * to update clock rates or word sizes from initial values. 
This function * changes those settings, and must be called from a context that can sleep. * Except for SPI_CS_HIGH, which takes effect immediately, the changes take * effect the next time the device is selected and data is transferred to * or from it. When this function returns, the SPI device is deselected. * * Note that this call will fail if the protocol driver specifies an option * that the underlying controller or its driver does not support. For * example, not all hardware supports wire transfers using nine bit words, * LSB-first wire encoding, or active-high chipselects. * * Return: zero on success, else a negative error code. */ int spi_setup(struct spi_device *spi) { unsigned bad_bits, ugly_bits; int status; /* * Check mode to prevent that any two of DUAL, QUAD and NO_MOSI/MISO * are set at the same time. */ if ((hweight_long(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD | SPI_NO_TX)) > 1) || (hweight_long(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD | SPI_NO_RX)) > 1)) { dev_err(&spi->dev, "setup: can not select any two of dual, quad and no-rx/tx at the same time\n"); return -EINVAL; } /* If it is SPI_3WIRE mode, DUAL and QUAD should be forbidden */ if ((spi->mode & SPI_3WIRE) && (spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL | SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL))) return -EINVAL; /* Check against conflicting MOSI idle configuration */ if ((spi->mode & SPI_MOSI_IDLE_LOW) && (spi->mode & SPI_MOSI_IDLE_HIGH)) { dev_err(&spi->dev, "setup: MOSI configured to idle low and high at the same time.\n"); return -EINVAL; } /* * Help drivers fail *cleanly* when they need options * that aren't supported with their current controller. * SPI_CS_WORD has a fallback software implementation, * so it is ignored here. */ bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD | SPI_NO_TX | SPI_NO_RX); ugly_bits = bad_bits & (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL | SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL); if (ugly_bits) { dev_warn(&spi->dev, "setup: ignoring unsupported mode bits %x\n", ugly_bits); spi->mode &= ~ugly_bits; bad_bits &= ~ugly_bits; } if (bad_bits) { dev_err(&spi->dev, "setup: unsupported mode bits %x\n", bad_bits); return -EINVAL; } if (!spi->bits_per_word) { spi->bits_per_word = 8; } else { /* * Some controllers may not support the default 8 bits-per-word * so only perform the check when this is explicitly provided. */ status = __spi_validate_bits_per_word(spi->controller, spi->bits_per_word); if (status) return status; } if (spi->controller->max_speed_hz && (!spi->max_speed_hz || spi->max_speed_hz > spi->controller->max_speed_hz)) spi->max_speed_hz = spi->controller->max_speed_hz; mutex_lock(&spi->controller->io_mutex); if (spi->controller->setup) { status = spi->controller->setup(spi); if (status) { mutex_unlock(&spi->controller->io_mutex); dev_err(&spi->controller->dev, "Failed to setup device: %d\n", status); return status; } } status = spi_set_cs_timing(spi); if (status) { mutex_unlock(&spi->controller->io_mutex); return status; } if (spi->controller->auto_runtime_pm && spi->controller->set_cs) { status = pm_runtime_resume_and_get(spi->controller->dev.parent); if (status < 0) { mutex_unlock(&spi->controller->io_mutex); dev_err(&spi->controller->dev, "Failed to power device: %d\n", status); return status; } /* * We do not want to return positive value from pm_runtime_get, * there are many instances of devices calling spi_setup() and * checking for a non-zero return value instead of a negative * return value. 
*/ status = 0; spi_set_cs(spi, false, true); pm_runtime_put_autosuspend(spi->controller->dev.parent); } else { spi_set_cs(spi, false, true); } mutex_unlock(&spi->controller->io_mutex); if (spi->rt && !spi->controller->rt) { spi->controller->rt = true; spi_set_thread_rt(spi->controller); } trace_spi_setup(spi, status); dev_dbg(&spi->dev, "setup mode %lu, %s%s%s%s%u bits/w, %u Hz max --> %d\n", spi->mode & SPI_MODE_X_MASK, (spi->mode & SPI_CS_HIGH) ? "cs_high, " : "", (spi->mode & SPI_LSB_FIRST) ? "lsb, " : "", (spi->mode & SPI_3WIRE) ? "3wire, " : "", (spi->mode & SPI_LOOP) ? "loopback, " : "", spi->bits_per_word, spi->max_speed_hz, status); return status; } EXPORT_SYMBOL_GPL(spi_setup); static int _spi_xfer_word_delay_update(struct spi_transfer *xfer, struct spi_device *spi) { int delay1, delay2; delay1 = spi_delay_to_ns(&xfer->word_delay, xfer); if (delay1 < 0) return delay1; delay2 = spi_delay_to_ns(&spi->word_delay, xfer); if (delay2 < 0) return delay2; if (delay1 < delay2) memcpy(&xfer->word_delay, &spi->word_delay, sizeof(xfer->word_delay)); return 0; } static int __spi_validate(struct spi_device *spi, struct spi_message *message) { struct spi_controller *ctlr = spi->controller; struct spi_transfer *xfer; int w_size; if (list_empty(&message->transfers)) return -EINVAL; message->spi = spi; /* * Half-duplex links include original MicroWire, and ones with * only one data pin like SPI_3WIRE (switches direction) or where * either MOSI or MISO is missing. They can also be caused by * software limitations. */ if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) || (spi->mode & SPI_3WIRE)) { unsigned flags = ctlr->flags; list_for_each_entry(xfer, &message->transfers, transfer_list) { if (xfer->rx_buf && xfer->tx_buf) return -EINVAL; if ((flags & SPI_CONTROLLER_NO_TX) && xfer->tx_buf) return -EINVAL; if ((flags & SPI_CONTROLLER_NO_RX) && xfer->rx_buf) return -EINVAL; } } /* * Set transfer bits_per_word and max speed as spi device default if * it is not set for this transfer. * Set transfer tx_nbits and rx_nbits as single transfer default * (SPI_NBITS_SINGLE) if it is not set for this transfer. * Ensure transfer word_delay is at least as long as that required by * device itself. */ message->frame_length = 0; list_for_each_entry(xfer, &message->transfers, transfer_list) { xfer->effective_speed_hz = 0; message->frame_length += xfer->len; if (!xfer->bits_per_word) xfer->bits_per_word = spi->bits_per_word; if (!xfer->speed_hz) xfer->speed_hz = spi->max_speed_hz; if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz) xfer->speed_hz = ctlr->max_speed_hz; if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word)) return -EINVAL; /* DDR mode is supported only if controller has dtr_caps=true. * default considered as SDR mode for SPI and QSPI controller. * Note: This is applicable only to QSPI controller. */ if (xfer->dtr_mode && !ctlr->dtr_caps) return -EINVAL; /* * SPI transfer length should be multiple of SPI word size * where SPI word size should be power-of-two multiple. */ if (xfer->bits_per_word <= 8) w_size = 1; else if (xfer->bits_per_word <= 16) w_size = 2; else w_size = 4; /* No partial transfers accepted */ if (xfer->len % w_size) return -EINVAL; if (xfer->speed_hz && ctlr->min_speed_hz && xfer->speed_hz < ctlr->min_speed_hz) return -EINVAL; if (xfer->tx_buf && !xfer->tx_nbits) xfer->tx_nbits = SPI_NBITS_SINGLE; if (xfer->rx_buf && !xfer->rx_nbits) xfer->rx_nbits = SPI_NBITS_SINGLE; /* * Check transfer tx/rx_nbits: * 1. check the value matches one of single, dual and quad * 2. 
check tx/rx_nbits match the mode in spi_device */ if (xfer->tx_buf) { if (spi->mode & SPI_NO_TX) return -EINVAL; if (xfer->tx_nbits != SPI_NBITS_SINGLE && xfer->tx_nbits != SPI_NBITS_DUAL && xfer->tx_nbits != SPI_NBITS_QUAD && xfer->tx_nbits != SPI_NBITS_OCTAL) return -EINVAL; if ((xfer->tx_nbits == SPI_NBITS_DUAL) && !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL))) return -EINVAL; if ((xfer->tx_nbits == SPI_NBITS_QUAD) && !(spi->mode & (SPI_TX_QUAD | SPI_TX_OCTAL))) return -EINVAL; if ((xfer->tx_nbits == SPI_NBITS_OCTAL) && !(spi->mode & SPI_TX_OCTAL)) return -EINVAL; } /* Check transfer rx_nbits */ if (xfer->rx_buf) { if (spi->mode & SPI_NO_RX) return -EINVAL; if (xfer->rx_nbits != SPI_NBITS_SINGLE && xfer->rx_nbits != SPI_NBITS_DUAL && xfer->rx_nbits != SPI_NBITS_QUAD && xfer->rx_nbits != SPI_NBITS_OCTAL) return -EINVAL; if ((xfer->rx_nbits == SPI_NBITS_DUAL) && !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL))) return -EINVAL; if ((xfer->rx_nbits == SPI_NBITS_QUAD) && !(spi->mode & (SPI_RX_QUAD | SPI_RX_OCTAL))) return -EINVAL; if ((xfer->rx_nbits == SPI_NBITS_OCTAL) && !(spi->mode & SPI_RX_OCTAL)) return -EINVAL; } if (_spi_xfer_word_delay_update(xfer, spi)) return -EINVAL; /* Make sure controller supports required offload features. */ if (xfer->offload_flags) { if (!message->offload) return -EINVAL; if (xfer->offload_flags & ~message->offload->xfer_flags) return -EINVAL; } } message->status = -EINPROGRESS; return 0; } /* * spi_split_transfers - generic handling of transfer splitting * @msg: the message to split * * Under certain conditions, a SPI controller may not support arbitrary * transfer sizes or other features required by a peripheral. This function * will split the transfers in the message into smaller transfers that are * supported by the controller. * * Controllers with special requirements not covered here can also split * transfers in the optimize_message() callback. * * Context: can sleep * Return: zero on success, else a negative error code */ static int spi_split_transfers(struct spi_message *msg) { struct spi_controller *ctlr = msg->spi->controller; struct spi_transfer *xfer; int ret; /* * If an SPI controller does not support toggling the CS line on each * transfer (indicated by the SPI_CS_WORD flag) or we are using a GPIO * for the CS line, we can emulate the CS-per-word hardware function by * splitting transfers into one-word transfers and ensuring that * cs_change is set for each transfer. */ if ((msg->spi->mode & SPI_CS_WORD) && (!(ctlr->mode_bits & SPI_CS_WORD) || spi_is_csgpiod(msg->spi))) { ret = spi_split_transfers_maxwords(ctlr, msg, 1); if (ret) return ret; list_for_each_entry(xfer, &msg->transfers, transfer_list) { /* Don't change cs_change on the last entry in the list */ if (list_is_last(&xfer->transfer_list, &msg->transfers)) break; xfer->cs_change = 1; } } else { ret = spi_split_transfers_maxsize(ctlr, msg, spi_max_transfer_size(msg->spi)); if (ret) return ret; } return 0; } /* * __spi_optimize_message - shared implementation for spi_optimize_message() * and spi_maybe_optimize_message() * @spi: the device that will be used for the message * @msg: the message to optimize * * Peripheral drivers will call spi_optimize_message() and the spi core will * call spi_maybe_optimize_message() instead of calling this directly. * * It is not valid to call this on a message that has already been optimized. 
* * Return: zero on success, else a negative error code */ static int __spi_optimize_message(struct spi_device *spi, struct spi_message *msg) { struct spi_controller *ctlr = spi->controller; int ret; ret = __spi_validate(spi, msg); if (ret) return ret; ret = spi_split_transfers(msg); if (ret) return ret; if (ctlr->optimize_message) { ret = ctlr->optimize_message(msg); if (ret) { spi_res_release(ctlr, msg); return ret; } } msg->optimized = true; return 0; } /* * spi_maybe_optimize_message - optimize message if it isn't already pre-optimized * @spi: the device that will be used for the message * @msg: the message to optimize * Return: zero on success, else a negative error code */ static int spi_maybe_optimize_message(struct spi_device *spi, struct spi_message *msg) { if (spi->controller->defer_optimize_message) { msg->spi = spi; return 0; } if (msg->pre_optimized) return 0; return __spi_optimize_message(spi, msg); } /** * spi_optimize_message - do any one-time validation and setup for a SPI message * @spi: the device that will be used for the message * @msg: the message to optimize * * Peripheral drivers that reuse the same message repeatedly may call this to * perform as much message prep as possible once, rather than repeating it each * time a message transfer is performed to improve throughput and reduce CPU * usage. * * Once a message has been optimized, it cannot be modified with the exception * of updating the contents of any xfer->tx_buf (the pointer can't be changed, * only the data in the memory it points to). * * Calls to this function must be balanced with calls to spi_unoptimize_message() * to avoid leaking resources. * * Context: can sleep * Return: zero on success, else a negative error code */ int spi_optimize_message(struct spi_device *spi, struct spi_message *msg) { int ret; /* * Pre-optimization is not supported and optimization is deferred e.g. * when using spi-mux. */ if (spi->controller->defer_optimize_message) return 0; ret = __spi_optimize_message(spi, msg); if (ret) return ret; /* * This flag indicates that the peripheral driver called spi_optimize_message() * and therefore we shouldn't unoptimize message automatically when finalizing * the message but rather wait until spi_unoptimize_message() is called * by the peripheral driver. */ msg->pre_optimized = true; return 0; } EXPORT_SYMBOL_GPL(spi_optimize_message); /** * spi_unoptimize_message - releases any resources allocated by spi_optimize_message() * @msg: the message to unoptimize * * Calls to this function must be balanced with calls to spi_optimize_message(). * * Context: can sleep */ void spi_unoptimize_message(struct spi_message *msg) { if (msg->spi->controller->defer_optimize_message) return; __spi_unoptimize_message(msg); msg->pre_optimized = false; } EXPORT_SYMBOL_GPL(spi_unoptimize_message); static int __spi_async(struct spi_device *spi, struct spi_message *message) { struct spi_controller *ctlr = spi->controller; struct spi_transfer *xfer; /* * Some controllers do not support doing regular SPI transfers. Return * ENOTSUPP when this is the case. 
*/ if (!ctlr->transfer) return -ENOTSUPP; SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_async); SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_async); trace_spi_message_submit(message); if (!ctlr->ptp_sts_supported) { list_for_each_entry(xfer, &message->transfers, transfer_list) { xfer->ptp_sts_word_pre = 0; ptp_read_system_prets(xfer->ptp_sts); } } return ctlr->transfer(spi, message); } static void devm_spi_unoptimize_message(void *msg) { spi_unoptimize_message(msg); } /** * devm_spi_optimize_message - managed version of spi_optimize_message() * @dev: the device that manages @msg (usually @spi->dev) * @spi: the device that will be used for the message * @msg: the message to optimize * Return: zero on success, else a negative error code * * spi_unoptimize_message() will automatically be called when the device is * removed. */ int devm_spi_optimize_message(struct device *dev, struct spi_device *spi, struct spi_message *msg) { int ret; ret = spi_optimize_message(spi, msg); if (ret) return ret; return devm_add_action_or_reset(dev, devm_spi_unoptimize_message, msg); } EXPORT_SYMBOL_GPL(devm_spi_optimize_message); /** * spi_async - asynchronous SPI transfer * @spi: device with which data will be exchanged * @message: describes the data transfers, including completion callback * Context: any (IRQs may be blocked, etc) * * This call may be used in_irq and other contexts which can't sleep, * as well as from task contexts which can sleep. * * The completion callback is invoked in a context which can't sleep. * Before that invocation, the value of message->status is undefined. * When the callback is issued, message->status holds either zero (to * indicate complete success) or a negative error code. After that * callback returns, the driver which issued the transfer request may * deallocate the associated memory; it's no longer in use by any SPI * core or controller driver code. * * Note that although all messages to a spi_device are handled in * FIFO order, messages may go to different devices in other orders. * Some device might be higher priority, or have various "hard" access * time requirements, for example. * * On detection of any fault during the transfer, processing of * the entire message is aborted, and the device is deselected. * Until returning from the associated message completion callback, * no other spi_message queued to that device will be processed. * (This rule applies equally to all the synchronous transfer calls, * which are wrappers around this core asynchronous primitive.) * * Return: zero on success, else a negative error code. 
*/ int spi_async(struct spi_device *spi, struct spi_message *message) { struct spi_controller *ctlr = spi->controller; int ret; unsigned long flags; ret = spi_maybe_optimize_message(spi, message); if (ret) return ret; spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags); if (ctlr->bus_lock_flag) ret = -EBUSY; else ret = __spi_async(spi, message); spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags); return ret; } EXPORT_SYMBOL_GPL(spi_async); static void __spi_transfer_message_noqueue(struct spi_controller *ctlr, struct spi_message *msg) { bool was_busy; int ret; mutex_lock(&ctlr->io_mutex); was_busy = ctlr->busy; ctlr->cur_msg = msg; ret = __spi_pump_transfer_message(ctlr, msg, was_busy); if (ret) dev_err(&ctlr->dev, "noqueue transfer failed\n"); ctlr->cur_msg = NULL; ctlr->fallback = false; if (!was_busy) { kfree(ctlr->dummy_rx); ctlr->dummy_rx = NULL; kfree(ctlr->dummy_tx); ctlr->dummy_tx = NULL; if (ctlr->unprepare_transfer_hardware && ctlr->unprepare_transfer_hardware(ctlr)) dev_err(&ctlr->dev, "failed to unprepare transfer hardware\n"); spi_idle_runtime_pm(ctlr); } mutex_unlock(&ctlr->io_mutex); } /*-------------------------------------------------------------------------*/ /* * Utility methods for SPI protocol drivers, layered on * top of the core. Some other utility methods are defined as * inline functions. */ static void spi_complete(void *arg) { complete(arg); } static int __spi_sync(struct spi_device *spi, struct spi_message *message) { DECLARE_COMPLETION_ONSTACK(done); unsigned long flags; int status; struct spi_controller *ctlr = spi->controller; if (__spi_check_suspended(ctlr)) { dev_warn_once(&spi->dev, "Attempted to sync while suspended\n"); return -ESHUTDOWN; } status = spi_maybe_optimize_message(spi, message); if (status) return status; SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync); SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync); /* * Checking queue_empty here only guarantees async/sync message * ordering when coming from the same context. It does not need to * guard against reentrancy from a different context. The io_mutex * will catch those cases. */ if (READ_ONCE(ctlr->queue_empty) && !ctlr->must_async) { message->actual_length = 0; message->status = -EINPROGRESS; trace_spi_message_submit(message); SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync_immediate); SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync_immediate); __spi_transfer_message_noqueue(ctlr, message); return message->status; } /* * There are messages in the async queue that could have originated * from the same context, so we need to preserve ordering. * Therefore we send the message to the async queue and wait until it * has completed. */ message->complete = spi_complete; message->context = &done; spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags); status = __spi_async(spi, message); spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags); if (status == 0) { wait_for_completion(&done); status = message->status; } message->complete = NULL; message->context = NULL; return status; } /** * spi_sync - blocking/synchronous SPI data transfers * @spi: device with which data will be exchanged * @message: describes the data transfers * Context: can sleep * * This call may only be used from a context that may sleep. The sleep * is non-interruptible, and has no timeout. Low-overhead controller * drivers may DMA directly into and out of the message buffers.
* * Note that the SPI device's chip select is active during the message, * and then is normally disabled between messages. Drivers for some * frequently-used devices may want to minimize costs of selecting a chip, * by leaving it selected in anticipation that the next message will go * to the same chip. (That may increase power usage.) * * Also, the caller is guaranteeing that the memory associated with the * message will not be freed before this call returns. * * Return: zero on success, else a negative error code. */ int spi_sync(struct spi_device *spi, struct spi_message *message) { int ret; mutex_lock(&spi->controller->bus_lock_mutex); ret = __spi_sync(spi, message); mutex_unlock(&spi->controller->bus_lock_mutex); return ret; } EXPORT_SYMBOL_GPL(spi_sync); /** * spi_sync_locked - version of spi_sync with exclusive bus usage * @spi: device with which data will be exchanged * @message: describes the data transfers * Context: can sleep * * This call may only be used from a context that may sleep. The sleep * is non-interruptible, and has no timeout. Low-overhead controller * drivers may DMA directly into and out of the message buffers. * * This call should be used by drivers that require exclusive access to the * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must * be released by a spi_bus_unlock call when the exclusive access is over. * * Return: zero on success, else a negative error code. */ int spi_sync_locked(struct spi_device *spi, struct spi_message *message) { return __spi_sync(spi, message); } EXPORT_SYMBOL_GPL(spi_sync_locked); /** * spi_bus_lock - obtain a lock for exclusive SPI bus usage * @ctlr: SPI bus controller that should be locked for exclusive bus access * Context: can sleep * * This call may only be used from a context that may sleep. The sleep * is non-interruptible, and has no timeout. * * This call should be used by drivers that require exclusive access to the * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the * exclusive access is over. Data transfer must be done by spi_sync_locked * and spi_async_locked calls when the SPI bus lock is held. * * Return: always zero. */ int spi_bus_lock(struct spi_controller *ctlr) { unsigned long flags; mutex_lock(&ctlr->bus_lock_mutex); spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags); ctlr->bus_lock_flag = 1; spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags); /* Mutex remains locked until spi_bus_unlock() is called */ return 0; } EXPORT_SYMBOL_GPL(spi_bus_lock); /** * spi_bus_unlock - release the lock for exclusive SPI bus usage * @ctlr: SPI bus controller that was locked for exclusive bus access * Context: can sleep * * This call may only be used from a context that may sleep. The sleep * is non-interruptible, and has no timeout. * * This call releases an SPI bus lock previously obtained by an spi_bus_lock * call. * * Return: always zero. 
*/ int spi_bus_unlock(struct spi_controller *ctlr) { ctlr->bus_lock_flag = 0; mutex_unlock(&ctlr->bus_lock_mutex); return 0; } EXPORT_SYMBOL_GPL(spi_bus_unlock); /* Portable code must never pass more than 32 bytes */ #define SPI_BUFSIZ max(32, SMP_CACHE_BYTES) static u8 *buf; /** * spi_write_then_read - SPI synchronous write followed by read * @spi: device with which data will be exchanged * @txbuf: data to be written (need not be DMA-safe) * @n_tx: size of txbuf, in bytes * @rxbuf: buffer into which data will be read (need not be DMA-safe) * @n_rx: size of rxbuf, in bytes * Context: can sleep * * This performs a half duplex MicroWire style transaction with the * device, sending txbuf and then reading rxbuf. The return value * is zero for success, else a negative errno status code. * This call may only be used from a context that may sleep. * * Parameters to this routine are always copied using a small buffer. * Performance-sensitive or bulk transfer code should instead use * spi_{async,sync}() calls with DMA-safe buffers. * * Return: zero on success, else a negative error code. */ int spi_write_then_read(struct spi_device *spi, const void *txbuf, unsigned n_tx, void *rxbuf, unsigned n_rx) { static DEFINE_MUTEX(lock); int status; struct spi_message message; struct spi_transfer x[2]; u8 *local_buf; /* * Use preallocated DMA-safe buffer if we can. We can't avoid * copying here, (as a pure convenience thing), but we can * keep heap costs out of the hot path unless someone else is * using the pre-allocated buffer or the transfer is too large. */ if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) { local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx), GFP_KERNEL | GFP_DMA); if (!local_buf) return -ENOMEM; } else { local_buf = buf; } spi_message_init(&message); memset(x, 0, sizeof(x)); if (n_tx) { x[0].len = n_tx; spi_message_add_tail(&x[0], &message); } if (n_rx) { x[1].len = n_rx; spi_message_add_tail(&x[1], &message); } memcpy(local_buf, txbuf, n_tx); x[0].tx_buf = local_buf; x[1].rx_buf = local_buf + n_tx; /* Do the I/O */ status = spi_sync(spi, &message); if (status == 0) memcpy(rxbuf, x[1].rx_buf, n_rx); if (x[0].tx_buf == buf) mutex_unlock(&lock); else kfree(local_buf); return status; } EXPORT_SYMBOL_GPL(spi_write_then_read); /*-------------------------------------------------------------------------*/ #if IS_ENABLED(CONFIG_OF_DYNAMIC) /* Must call put_device() when done with returned spi_device device */ static struct spi_device *of_find_spi_device_by_node(struct device_node *node) { struct device *dev = bus_find_device_by_of_node(&spi_bus_type, node); return dev ? 
to_spi_device(dev) : NULL; } /* The spi controllers are not using spi_bus, so we find it with another way */ static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node) { struct device *dev; dev = class_find_device_by_of_node(&spi_controller_class, node); if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE)) dev = class_find_device_by_of_node(&spi_target_class, node); if (!dev) return NULL; /* Reference got in class_find_device */ return container_of(dev, struct spi_controller, dev); } static int of_spi_notify(struct notifier_block *nb, unsigned long action, void *arg) { struct of_reconfig_data *rd = arg; struct spi_controller *ctlr; struct spi_device *spi; switch (of_reconfig_get_state_change(action, arg)) { case OF_RECONFIG_CHANGE_ADD: ctlr = of_find_spi_controller_by_node(rd->dn->parent); if (ctlr == NULL) return NOTIFY_OK; /* Not for us */ if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) { put_device(&ctlr->dev); return NOTIFY_OK; } /* * Clear the flag before adding the device so that fw_devlink * doesn't skip adding consumers to this device. */ rd->dn->fwnode.flags &= ~FWNODE_FLAG_NOT_DEVICE; spi = of_register_spi_device(ctlr, rd->dn); put_device(&ctlr->dev); if (IS_ERR(spi)) { pr_err("%s: failed to create for '%pOF'\n", __func__, rd->dn); of_node_clear_flag(rd->dn, OF_POPULATED); return notifier_from_errno(PTR_ERR(spi)); } break; case OF_RECONFIG_CHANGE_REMOVE: /* Already depopulated? */ if (!of_node_check_flag(rd->dn, OF_POPULATED)) return NOTIFY_OK; /* Find our device by node */ spi = of_find_spi_device_by_node(rd->dn); if (spi == NULL) return NOTIFY_OK; /* No? not meant for us */ /* Unregister takes one ref away */ spi_unregister_device(spi); /* And put the reference of the find */ put_device(&spi->dev); break; } return NOTIFY_OK; } static struct notifier_block spi_of_notifier = { .notifier_call = of_spi_notify, }; #else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */ extern struct notifier_block spi_of_notifier; #endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */ #if IS_ENABLED(CONFIG_ACPI) static int spi_acpi_controller_match(struct device *dev, const void *data) { return device_match_acpi_dev(dev->parent, data); } struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev) { struct device *dev; dev = class_find_device(&spi_controller_class, NULL, adev, spi_acpi_controller_match); if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE)) dev = class_find_device(&spi_target_class, NULL, adev, spi_acpi_controller_match); if (!dev) return NULL; return container_of(dev, struct spi_controller, dev); } EXPORT_SYMBOL_GPL(acpi_spi_find_controller_by_adev); static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev) { struct device *dev; dev = bus_find_device_by_acpi_dev(&spi_bus_type, adev); return to_spi_device(dev); } static int acpi_spi_notify(struct notifier_block *nb, unsigned long value, void *arg) { struct acpi_device *adev = arg; struct spi_controller *ctlr; struct spi_device *spi; switch (value) { case ACPI_RECONFIG_DEVICE_ADD: ctlr = acpi_spi_find_controller_by_adev(acpi_dev_parent(adev)); if (!ctlr) break; acpi_register_spi_device(ctlr, adev); put_device(&ctlr->dev); break; case ACPI_RECONFIG_DEVICE_REMOVE: if (!acpi_device_enumerated(adev)) break; spi = acpi_spi_find_device_by_adev(adev); if (!spi) break; spi_unregister_device(spi); put_device(&spi->dev); break; } return NOTIFY_OK; } static struct notifier_block spi_acpi_notifier = { .notifier_call = acpi_spi_notify, }; #else extern struct notifier_block spi_acpi_notifier; #endif static int __init 
spi_init(void) { int status; buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL); if (!buf) { status = -ENOMEM; goto err0; } status = bus_register(&spi_bus_type); if (status < 0) goto err1; status = class_register(&spi_controller_class); if (status < 0) goto err2; if (IS_ENABLED(CONFIG_SPI_SLAVE)) { status = class_register(&spi_target_class); if (status < 0) goto err3; } if (IS_ENABLED(CONFIG_OF_DYNAMIC)) WARN_ON(of_reconfig_notifier_register(&spi_of_notifier)); if (IS_ENABLED(CONFIG_ACPI)) WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier)); return 0; err3: class_unregister(&spi_controller_class); err2: bus_unregister(&spi_bus_type); err1: kfree(buf); buf = NULL; err0: return status; } /* * A board_info is normally registered in arch_initcall(), * but even essential drivers wait till later. * * REVISIT only boardinfo really needs static linking. The rest (device and * driver registration) _could_ be dynamically linked (modular) ... Costs * include needing to have boardinfo data structures be much more public. */ postcore_initcall(spi_init);
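/*
 * A minimal usage sketch for the consumer-facing API above (not part of
 * spi.c): the peripheral, its "read ID" opcode 0x9f and the chosen clock
 * rate are hypothetical; only the spi_* calls are the ones documented
 * above. spi_write_then_read() is used because it bounces through the
 * driver's pre-allocated scratch buffer, so the on-stack buffers here do
 * not need to be DMA-safe, and both transfers go out as one SPI message
 * with chip select held asserted across the write and the read.
 */
static int example_read_chip_id(struct spi_device *spi)
{
	u8 cmd = 0x9f;	/* hypothetical "read ID" opcode */
	u8 id[3];
	int ret;

	/* Declare the link parameters, then let spi_setup() validate them */
	spi->mode = SPI_MODE_0;
	spi->bits_per_word = 8;
	spi->max_speed_hz = 1 * 1000 * 1000;
	ret = spi_setup(spi);
	if (ret)
		return ret;

	/* Half-duplex write-then-read, copied via the bounce buffer */
	ret = spi_write_then_read(spi, &cmd, sizeof(cmd), id, sizeof(id));
	if (ret)
		return ret;

	dev_info(&spi->dev, "chip ID %02x %02x %02x\n", id[0], id[1], id[2]);
	return 0;
}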
// SPDX-License-Identifier: GPL-2.0-or-later
/* Socket buffer accounting
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

#define select_skb_count(skb) (&rxrpc_n_rx_skbs)

/*
 * Note the allocation or reception of a socket buffer.
 */
void rxrpc_new_skb(struct sk_buff *skb, enum rxrpc_skb_trace why)
{
	int n = atomic_inc_return(select_skb_count(skb));

	trace_rxrpc_skb(skb, refcount_read(&skb->users), n, why);
}

/*
 * Note the re-emergence of a socket buffer from a queue or buffer.
 */
void rxrpc_see_skb(struct sk_buff *skb, enum rxrpc_skb_trace why)
{
	if (skb) {
		int n = atomic_read(select_skb_count(skb));

		trace_rxrpc_skb(skb, refcount_read(&skb->users), n, why);
	}
}

/*
 * Note the addition of a ref on a socket buffer.
 */
void rxrpc_get_skb(struct sk_buff *skb, enum rxrpc_skb_trace why)
{
	int n = atomic_inc_return(select_skb_count(skb));

	trace_rxrpc_skb(skb, refcount_read(&skb->users), n, why);
	skb_get(skb);
}

/*
 * Note the dropping of a ref on a socket buffer by the core.
 */
void rxrpc_eaten_skb(struct sk_buff *skb, enum rxrpc_skb_trace why)
{
	int n = atomic_inc_return(&rxrpc_n_rx_skbs);

	trace_rxrpc_skb(skb, 0, n, why);
}

/*
 * Note the destruction of a socket buffer.
 */
void rxrpc_free_skb(struct sk_buff *skb, enum rxrpc_skb_trace why)
{
	if (skb) {
		int n = atomic_dec_return(select_skb_count(skb));

		trace_rxrpc_skb(skb, refcount_read(&skb->users), n, why);
		consume_skb(skb);
	}
}

/*
 * Clear a queue of socket buffers.
 */
void rxrpc_purge_queue(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(list)) != NULL) {
		int n = atomic_dec_return(select_skb_count(skb));

		trace_rxrpc_skb(skb, refcount_read(&skb->users), n,
				rxrpc_skb_put_purge);
		consume_skb(skb);
	}
}
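/*
 * A sketch of the accounting contract above (not part of this file): every
 * count taken via rxrpc_new_skb() or rxrpc_get_skb() must be returned
 * through rxrpc_free_skb(), and queues are drained with
 * rxrpc_purge_queue(). The trace reasons are parameters here because the
 * real values come from enum rxrpc_skb_trace in the rxrpc trace header.
 */
static void example_balanced_use(struct sk_buff *skb,
				 enum rxrpc_skb_trace get_why,
				 enum rxrpc_skb_trace put_why)
{
	rxrpc_get_skb(skb, get_why);	/* +1 count, +1 skb reference */
	/* ... hand the skb to a queue or worker and finish with it ... */
	rxrpc_free_skb(skb, put_why);	/* -1 count, consumes the reference */
}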
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __IPC_NAMESPACE_H__
#define __IPC_NAMESPACE_H__

#include <linux/err.h>
#include <linux/idr.h>
#include <linux/rwsem.h>
#include <linux/notifier.h>
#include <linux/nsproxy.h>
#include <linux/ns_common.h>
#include <linux/refcount.h>
#include <linux/rhashtable-types.h>
#include <linux/sysctl.h>
#include <linux/percpu_counter.h>

struct user_namespace;

struct ipc_ids {
	int in_use;
	unsigned short seq;
	struct rw_semaphore rwsem;
	struct idr ipcs_idr;
	int max_idx;
	int last_idx;	/* For wrap around detection */
#ifdef CONFIG_CHECKPOINT_RESTORE
	int next_id;
#endif
	struct rhashtable key_ht;
};

struct ipc_namespace {
	struct ipc_ids	ids[3];

	int		sem_ctls[4];
	int		used_sems;

	unsigned int	msg_ctlmax;
	unsigned int	msg_ctlmnb;
	unsigned int	msg_ctlmni;
	struct percpu_counter percpu_msg_bytes;
	struct percpu_counter percpu_msg_hdrs;

	size_t		shm_ctlmax;
	size_t		shm_ctlall;
	unsigned long	shm_tot;
	int		shm_ctlmni;
	/*
	 * Defines whether IPC_RMID is forced for _all_ shm segments regardless
	 * of shmctl()
	 */
	int		shm_rmid_forced;

	struct notifier_block ipcns_nb;

	/* The kern_mount of the mqueuefs sb. We take a ref on it */
	struct vfsmount	*mq_mnt;

	/* # queues in this ns, protected by mq_lock */
	unsigned int	mq_queues_count;

	/* next fields are set through sysctl */
	unsigned int	mq_queues_max;	 /* initialized to DFLT_QUEUESMAX */
	unsigned int	mq_msg_max;	 /* initialized to DFLT_MSGMAX */
	unsigned int	mq_msgsize_max;	 /* initialized to DFLT_MSGSIZEMAX */
	unsigned int	mq_msg_default;
	unsigned int	mq_msgsize_default;

	struct ctl_table_set	mq_set;
	struct ctl_table_header	*mq_sysctls;

	struct ctl_table_set	ipc_set;
	struct ctl_table_header	*ipc_sysctls;

	/* user_ns which owns the ipc ns */
	struct user_namespace *user_ns;
	struct ucounts *ucounts;

	struct llist_node mnt_llist;

	struct ns_common ns;
} __randomize_layout;

extern struct ipc_namespace init_ipc_ns;
extern spinlock_t mq_lock;

#ifdef CONFIG_SYSVIPC
extern void shm_destroy_orphaned(struct ipc_namespace *ns);
#else /* CONFIG_SYSVIPC */
static inline void shm_destroy_orphaned(struct ipc_namespace *ns) {}
#endif /* CONFIG_SYSVIPC */

#ifdef CONFIG_POSIX_MQUEUE
extern int mq_init_ns(struct ipc_namespace *ns);
/*
 * POSIX Message Queue default values:
 *
 * MIN_*: Lowest value an admin can set the maximum unprivileged limit to
 * DFLT_*MAX: Default values for the maximum unprivileged limits
 * DFLT_{MSG,MSGSIZE}: Default values used when the user doesn't supply
 *   an attribute to the open call and the queue must be created
 * HARD_*: Highest value the maximums can be set to. These are enforced
 *   on CAP_SYS_RESOURCE apps as well making them inviolate (so make them
 *   suitably high)
 *
 * POSIX Requirements:
 *   Per app minimum openable message queues - 8. This does not map well
 *     to the fact that we limit the number of queues on a per namespace
 *     basis instead of a per app basis. So, make the default high enough
 *     that no given app should have a hard time opening 8 queues.
 *   Minimum maximum for HARD_MSGMAX - 32767. I bumped this to 65536.
 *   Minimum maximum for HARD_MSGSIZEMAX - POSIX is silent on this. However,
 *     we have run into a situation where running applications in the wild
 *     require this to be at least 5MB, and preferably 10MB, so I set the
 *     value to 16MB in hopes that this user is the worst of the bunch and
 *     the new maximum will handle anyone else. I may have to revisit this
 *     in the future.
 */
#define DFLT_QUEUESMAX	256
#define MIN_MSGMAX	1
#define DFLT_MSG	10U
#define DFLT_MSGMAX	10
#define HARD_MSGMAX	65536
#define MIN_MSGSIZEMAX	128
#define DFLT_MSGSIZE	8192U
#define DFLT_MSGSIZEMAX	8192
#define HARD_MSGSIZEMAX	(16*1024*1024)
#else
static inline int mq_init_ns(struct ipc_namespace *ns) { return 0; }
#endif

#if defined(CONFIG_IPC_NS)
extern struct ipc_namespace *copy_ipcs(unsigned long flags,
	struct user_namespace *user_ns, struct ipc_namespace *ns);

static inline struct ipc_namespace *get_ipc_ns(struct ipc_namespace *ns)
{
	if (ns)
		refcount_inc(&ns->ns.count);
	return ns;
}

static inline struct ipc_namespace *get_ipc_ns_not_zero(struct ipc_namespace *ns)
{
	if (ns) {
		if (refcount_inc_not_zero(&ns->ns.count))
			return ns;
	}

	return NULL;
}

extern void put_ipc_ns(struct ipc_namespace *ns);
#else
static inline struct ipc_namespace *copy_ipcs(unsigned long flags,
	struct user_namespace *user_ns, struct ipc_namespace *ns)
{
	if (flags & CLONE_NEWIPC)
		return ERR_PTR(-EINVAL);

	return ns;
}

static inline struct ipc_namespace *get_ipc_ns(struct ipc_namespace *ns)
{
	return ns;
}

static inline struct ipc_namespace *get_ipc_ns_not_zero(struct ipc_namespace *ns)
{
	return ns;
}

static inline void put_ipc_ns(struct ipc_namespace *ns)
{
}
#endif

#ifdef CONFIG_POSIX_MQUEUE_SYSCTL
void retire_mq_sysctls(struct ipc_namespace *ns);
bool setup_mq_sysctls(struct ipc_namespace *ns);
#else /* CONFIG_POSIX_MQUEUE_SYSCTL */
static inline void retire_mq_sysctls(struct ipc_namespace *ns) {}
static inline bool setup_mq_sysctls(struct ipc_namespace *ns)
{
	return true;
}
#endif /* CONFIG_POSIX_MQUEUE_SYSCTL */

#ifdef CONFIG_SYSVIPC_SYSCTL
bool setup_ipc_sysctls(struct ipc_namespace *ns);
void retire_ipc_sysctls(struct ipc_namespace *ns);
#else /* CONFIG_SYSVIPC_SYSCTL */
static inline void retire_ipc_sysctls(struct ipc_namespace *ns) {}
static inline bool setup_ipc_sysctls(struct ipc_namespace *ns)
{
	return true;
}
#endif /* CONFIG_SYSVIPC_SYSCTL */
#endif
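/*
 * A minimal sketch of the refcounting contract above (not part of this
 * header, and the function name is hypothetical). While the caller pins a
 * task's nsproxy the ipc_ns pointer is stable, so a plain get_ipc_ns()
 * suffices; get_ipc_ns_not_zero() is for callers that may race with the
 * final put_ipc_ns().
 */
static inline struct ipc_namespace *example_pin_ipc_ns(struct nsproxy *proxy)
{
	struct ipc_namespace *ns = get_ipc_ns(proxy->ipc_ns);

	/* ... read ns->mq_queues_max, ns->shm_ctlmax, etc. ... */
	return ns;	/* caller must balance with put_ipc_ns(ns) */
}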
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _NF_CONNTRACK_LABELS_H
#define _NF_CONNTRACK_LABELS_H

#include <linux/netfilter/nf_conntrack_common.h>
#include <linux/netfilter/nf_conntrack_tuple_common.h>
#include <linux/types.h>
#include <net/net_namespace.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_extend.h>
#include <uapi/linux/netfilter/xt_connlabel.h>

#define NF_CT_LABELS_MAX_SIZE ((XT_CONNLABEL_MAXBIT + 1) / BITS_PER_BYTE)

struct nf_conn_labels {
	unsigned long bits[NF_CT_LABELS_MAX_SIZE / sizeof(long)];
};

/* Can't use nf_ct_ext_find(), flow dissector cannot use symbols
 * exported by nf_conntrack module.
 */
static inline struct nf_conn_labels *nf_ct_labels_find(const struct nf_conn *ct)
{
#ifdef CONFIG_NF_CONNTRACK_LABELS
	struct nf_ct_ext *ext = ct->ext;

	if (!ext || !__nf_ct_ext_exist(ext, NF_CT_EXT_LABELS))
		return NULL;

	return (void *)ct->ext + ct->ext->offset[NF_CT_EXT_LABELS];
#else
	return NULL;
#endif
}

static inline struct nf_conn_labels *nf_ct_labels_ext_add(struct nf_conn *ct)
{
#ifdef CONFIG_NF_CONNTRACK_LABELS
	struct net *net = nf_ct_net(ct);

	if (atomic_read(&net->ct.labels_used) == 0)
		return NULL;

	return nf_ct_ext_add(ct, NF_CT_EXT_LABELS, GFP_ATOMIC);
#else
	return NULL;
#endif
}

int nf_connlabels_replace(struct nf_conn *ct,
			  const u32 *data, const u32 *mask,
			  unsigned int words);

#ifdef CONFIG_NF_CONNTRACK_LABELS
int nf_connlabels_get(struct net *net, unsigned int bit);
void nf_connlabels_put(struct net *net);
#else
static inline int nf_connlabels_get(struct net *net, unsigned int bit)
{
	return 0;
}

static inline void nf_connlabels_put(struct net *net) {}
#endif

#endif /* _NF_CONNTRACK_LABELS_H */
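/*
 * A minimal sketch of a connlabel user (not part of this header; the
 * function name is hypothetical and error handling is condensed): reserve
 * the highest label bit you will use with nf_connlabels_get(), make sure
 * the extension exists on the conntrack, then set bits through
 * nf_connlabels_replace().
 */
static int example_mark_connection(struct net *net, struct nf_conn *ct)
{
	u32 value[NF_CT_LABELS_MAX_SIZE / sizeof(u32)] = { 0x1 }; /* bit 0 */
	u32 mask[NF_CT_LABELS_MAX_SIZE / sizeof(u32)]  = { 0x1 };
	int err;

	err = nf_connlabels_get(net, 0);	/* we only use label bit 0 */
	if (err)
		return err;

	if (!nf_ct_labels_ext_add(ct)) {
		nf_connlabels_put(net);
		return -ENOSPC;
	}

	return nf_connlabels_replace(ct, value, mask,
				     NF_CT_LABELS_MAX_SIZE / sizeof(u32));
}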
#undef TRACE_SYSTEM
#define TRACE_SYSTEM qdisc

#if !defined(_TRACE_QDISC_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_QDISC_H

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/tracepoint.h>
#include <linux/ftrace.h>
#include <linux/pkt_sched.h>
#include <net/sch_generic.h>

TRACE_EVENT(qdisc_dequeue,

	TP_PROTO(struct Qdisc *qdisc, const struct netdev_queue *txq,
		 int packets, struct sk_buff *skb),

	TP_ARGS(qdisc, txq, packets, skb),

	TP_STRUCT__entry(
		__field(struct Qdisc *,			qdisc)
		__field(const struct netdev_queue *,	txq)
		__field(int,				packets)
		__field(void *,				skbaddr)
		__field(int,				ifindex)
		__field(u32,				handle)
		__field(u32,				parent)
		__field(unsigned long,			txq_state)
	),

	/* skb==NULL indicates that 0 packets were dequeued, even when packets==1 */
	TP_fast_assign(
		__entry->qdisc		= qdisc;
		__entry->txq		= txq;
		__entry->packets	= skb ? packets : 0;
		__entry->skbaddr	= skb;
		__entry->ifindex	= txq->dev ? txq->dev->ifindex : 0;
		__entry->handle		= qdisc->handle;
		__entry->parent		= qdisc->parent;
		__entry->txq_state	= txq->state;
	),

	TP_printk("dequeue ifindex=%d qdisc handle=0x%X parent=0x%X txq_state=0x%lX packets=%d skbaddr=%p",
		  __entry->ifindex, __entry->handle, __entry->parent,
		  __entry->txq_state, __entry->packets, __entry->skbaddr)
);

TRACE_EVENT(qdisc_enqueue,

	TP_PROTO(struct Qdisc *qdisc, const struct netdev_queue *txq,
		 struct sk_buff *skb),

	TP_ARGS(qdisc, txq, skb),

	TP_STRUCT__entry(
		__field(struct Qdisc *,			qdisc)
		__field(const struct netdev_queue *,	txq)
		__field(void *,				skbaddr)
		__field(int,				ifindex)
		__field(u32,				handle)
		__field(u32,				parent)
	),

	TP_fast_assign(
		__entry->qdisc		= qdisc;
		__entry->txq		= txq;
		__entry->skbaddr	= skb;
		__entry->ifindex	= txq->dev ? txq->dev->ifindex : 0;
		__entry->handle		= qdisc->handle;
		__entry->parent		= qdisc->parent;
	),

	TP_printk("enqueue ifindex=%d qdisc handle=0x%X parent=0x%X skbaddr=%p",
		  __entry->ifindex, __entry->handle, __entry->parent,
		  __entry->skbaddr)
);

TRACE_EVENT(qdisc_reset,

	TP_PROTO(struct Qdisc *q),

	TP_ARGS(q),

	TP_STRUCT__entry(
		__string(dev, qdisc_dev(q) ? qdisc_dev(q)->name : "(null)")
		__string(kind, q->ops->id)
		__field(u32, parent)
		__field(u32, handle)
	),

	TP_fast_assign(
		__assign_str(dev);
		__assign_str(kind);
		__entry->parent = q->parent;
		__entry->handle = q->handle;
	),

	TP_printk("dev=%s kind=%s parent=%x:%x handle=%x:%x",
		  __get_str(dev), __get_str(kind),
		  TC_H_MAJ(__entry->parent) >> 16, TC_H_MIN(__entry->parent),
		  TC_H_MAJ(__entry->handle) >> 16, TC_H_MIN(__entry->handle))
);

TRACE_EVENT(qdisc_destroy,

	TP_PROTO(struct Qdisc *q),

	TP_ARGS(q),

	TP_STRUCT__entry(
		__string(dev, qdisc_dev(q)->name)
		__string(kind, q->ops->id)
		__field(u32, parent)
		__field(u32, handle)
	),

	TP_fast_assign(
		__assign_str(dev);
		__assign_str(kind);
		__entry->parent = q->parent;
		__entry->handle = q->handle;
	),

	TP_printk("dev=%s kind=%s parent=%x:%x handle=%x:%x",
		  __get_str(dev), __get_str(kind),
		  TC_H_MAJ(__entry->parent) >> 16, TC_H_MIN(__entry->parent),
		  TC_H_MAJ(__entry->handle) >> 16, TC_H_MIN(__entry->handle))
);

TRACE_EVENT(qdisc_create,

	TP_PROTO(const struct Qdisc_ops *ops, struct net_device *dev,
		 u32 parent),

	TP_ARGS(ops, dev, parent),

	TP_STRUCT__entry(
		__string(dev, dev->name)
		__string(kind, ops->id)
		__field(u32, parent)
	),

	TP_fast_assign(
		__assign_str(dev);
		__assign_str(kind);
		__entry->parent = parent;
	),

	TP_printk("dev=%s kind=%s parent=%x:%x",
		  __get_str(dev), __get_str(kind),
		  TC_H_MAJ(__entry->parent) >> 16, TC_H_MIN(__entry->parent))
);

#endif /* _TRACE_QDISC_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
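/*
 * For reference, a sketch of how the events above are wired up and fired
 * (the function below is a stand-in, not part of this header). Exactly one
 * compilation unit in the kernel defines CREATE_TRACE_POINTS before
 * including this header, which emits the tracepoint bodies; every other
 * caller just includes the header and calls trace_<event>().
 */
#define CREATE_TRACE_POINTS
#include <trace/events/qdisc.h>

static void example_destroy_path(struct Qdisc *q)
{
	/* Compiles to a patched-out no-op unless the event is enabled */
	trace_qdisc_destroy(q);
}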
// SPDX-License-Identifier: GPL-2.0-only
/* DVB frontend part of the Linux driver for TwinhanDTV Alpha/MagicBoxII USB2.0
 * DVB-T receiver.
 *
 * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de)
 *
 * Thanks to Twinhan who kindly provided hardware and information.
 *
 * see Documentation/driver-api/media/drivers/dvb-usb.rst for more information
 */
#include "vp7045.h"

/* It is a Zarlink MT352 within a Samsung Tuner (DNOS404ZH102A) - 040929 - AAT
 *
 * Programming is hidden inside the firmware, so set_frontend is very easy.
 * There is, however, a firmware command that can be used to access the demod
 * via its registers; this is used for status information.
 */
struct vp7045_fe_state {
	struct dvb_frontend fe;
	struct dvb_usb_device *d;
};

static int vp7045_fe_read_status(struct dvb_frontend *fe,
				 enum fe_status *status)
{
	struct vp7045_fe_state *state = fe->demodulator_priv;
	u8 s0 = vp7045_read_reg(state->d, 0x00),
	   s1 = vp7045_read_reg(state->d, 0x01),
	   s3 = vp7045_read_reg(state->d, 0x03);

	*status = 0;
	if (s0 & (1 << 4))
		*status |= FE_HAS_CARRIER;
	if (s0 & (1 << 1))
		*status |= FE_HAS_VITERBI;
	if (s0 & (1 << 5))
		*status |= FE_HAS_LOCK;
	if (s1 & (1 << 1))
		*status |= FE_HAS_SYNC;
	if (s3 & (1 << 6))
		*status |= FE_HAS_SIGNAL;

	if ((*status & (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC)) !=
			(FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC))
		*status &= ~FE_HAS_LOCK;

	return 0;
}

static int vp7045_fe_read_ber(struct dvb_frontend *fe, u32 *ber)
{
	struct vp7045_fe_state *state = fe->demodulator_priv;
	*ber = (vp7045_read_reg(state->d, 0x0D) << 16) |
	       (vp7045_read_reg(state->d, 0x0E) << 8) |
		vp7045_read_reg(state->d, 0x0F);
	return 0;
}

static int vp7045_fe_read_unc_blocks(struct dvb_frontend *fe, u32 *unc)
{
	struct vp7045_fe_state *state = fe->demodulator_priv;
	*unc = (vp7045_read_reg(state->d, 0x10) << 8) |
		vp7045_read_reg(state->d, 0x11);
	return 0;
}

static int vp7045_fe_read_signal_strength(struct dvb_frontend *fe,
					  u16 *strength)
{
	struct vp7045_fe_state *state = fe->demodulator_priv;
	u16 signal = (vp7045_read_reg(state->d, 0x14) << 8) |
		      vp7045_read_reg(state->d, 0x15);

	*strength = ~signal;
	return 0;
}

static int vp7045_fe_read_snr(struct dvb_frontend *fe, u16 *snr)
{
	struct vp7045_fe_state *state = fe->demodulator_priv;
	u8 _snr = vp7045_read_reg(state->d, 0x09);

	*snr = (_snr << 8) | _snr;
	return 0;
}

static int vp7045_fe_init(struct dvb_frontend *fe)
{
	return 0;
}

static int vp7045_fe_sleep(struct dvb_frontend *fe)
{
	return 0;
}

static int vp7045_fe_get_tune_settings(struct dvb_frontend *fe,
				       struct dvb_frontend_tune_settings *tune)
{
	tune->min_delay_ms = 800;
	return 0;
}

static int vp7045_fe_set_frontend(struct dvb_frontend *fe)
{
	struct dtv_frontend_properties *fep = &fe->dtv_property_cache;
	struct vp7045_fe_state *state = fe->demodulator_priv;
	u8 buf[5];
	u32 freq = fep->frequency / 1000;

	buf[0] = (freq >> 16) & 0xff;
	buf[1] = (freq >> 8) & 0xff;
	buf[2] = freq & 0xff;
	buf[3] = 0;

	switch (fep->bandwidth_hz) {
	case 8000000:
		buf[4] = 8;
		break;
	case 7000000:
		buf[4] = 7;
		break;
	case 6000000:
		buf[4] = 6;
		break;
	default:
		return -EINVAL;
	}

	vp7045_usb_op(state->d, LOCK_TUNER_COMMAND, buf, 5, NULL, 0, 200);
	return 0;
}

static void vp7045_fe_release(struct dvb_frontend *fe)
{
	struct vp7045_fe_state *state = fe->demodulator_priv;
	kfree(state);
}

static const struct dvb_frontend_ops vp7045_fe_ops;

struct dvb_frontend *vp7045_fe_attach(struct dvb_usb_device *d)
{
	struct vp7045_fe_state *s = kzalloc(sizeof(struct vp7045_fe_state),
					    GFP_KERNEL);
	if (s == NULL)
		goto error;

	s->d = d;
	memcpy(&s->fe.ops, &vp7045_fe_ops, sizeof(struct dvb_frontend_ops));
	s->fe.demodulator_priv = s;

	return &s->fe;
error:
	return NULL;
}

static const struct dvb_frontend_ops vp7045_fe_ops = {
	.delsys = { SYS_DVBT },
	.info = {
		.name			= "Twinhan VP7045/46 USB DVB-T",
		.frequency_min_hz	=  44250 * kHz,
		.frequency_max_hz	= 867250 * kHz,
		.frequency_stepsize_hz	=      1 * kHz,
		.caps = FE_CAN_INVERSION_AUTO |
			FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
			FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
			FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_QAM_64 |
			FE_CAN_QAM_AUTO | FE_CAN_TRANSMISSION_MODE_AUTO |
			FE_CAN_GUARD_INTERVAL_AUTO | FE_CAN_RECOVER |
			FE_CAN_HIERARCHY_AUTO,
	},

	.release = vp7045_fe_release,

	.init = vp7045_fe_init,
	.sleep = vp7045_fe_sleep,

	.set_frontend = vp7045_fe_set_frontend,
	.get_tune_settings = vp7045_fe_get_tune_settings,

	.read_status = vp7045_fe_read_status,
	.read_ber = vp7045_fe_read_ber,
	.read_signal_strength = vp7045_fe_read_signal_strength,
	.read_snr = vp7045_fe_read_snr,
	.read_ucblocks = vp7045_fe_read_unc_blocks,
};
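/*
 * A worked example of the tuning command assembled in
 * vp7045_fe_set_frontend() above (the numbers are illustrative, not from
 * the original file): tuning to 474 MHz with 8 MHz bandwidth gives
 * freq = 474000000 / 1000 = 474000 = 0x073b90, so the firmware receives:
 *
 *   buf[0] = 0x07	(freq >> 16)
 *   buf[1] = 0x3b	(freq >> 8)
 *   buf[2] = 0x90	(freq & 0xff)
 *   buf[3] = 0
 *   buf[4] = 8		(bandwidth in MHz)
 */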
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Linux driver for TerraTec DMX 6Fire USB
 *
 * Main routines and module definitions.
 *
 * Author:	Torsten Schenk <torsten.schenk@zoho.com>
 * Created:	Jan 01, 2011
 * Copyright:	(C) Torsten Schenk
 */

#include "chip.h"
#include "firmware.h"
#include "pcm.h"
#include "control.h"
#include "comm.h"
#include "midi.h"

#include <linux/moduleparam.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <sound/initval.h>

MODULE_AUTHOR("Torsten Schenk <torsten.schenk@zoho.com>");
MODULE_DESCRIPTION("TerraTec DMX 6Fire USB audio driver");
MODULE_LICENSE("GPL v2");

static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-max */
static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* Id for card */
static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; /* Enable card */
static struct sfire_chip *chips[SNDRV_CARDS] = SNDRV_DEFAULT_PTR;
static struct usb_device *devices[SNDRV_CARDS] = SNDRV_DEFAULT_PTR;

module_param_array(index, int, NULL, 0444);
MODULE_PARM_DESC(index, "Index value for the 6fire sound device");
module_param_array(id, charp, NULL, 0444);
MODULE_PARM_DESC(id, "ID string for the 6fire sound device.");
module_param_array(enable, bool, NULL, 0444);
MODULE_PARM_DESC(enable, "Enable the 6fire sound device.");

static DEFINE_MUTEX(register_mutex);

static void usb6fire_chip_abort(struct sfire_chip *chip)
{
	if (chip) {
		if (chip->pcm)
			usb6fire_pcm_abort(chip);
		if (chip->midi)
			usb6fire_midi_abort(chip);
		if (chip->comm)
			usb6fire_comm_abort(chip);
		if (chip->control)
			usb6fire_control_abort(chip);
		if (chip->card) {
			snd_card_disconnect(chip->card);
			snd_card_free_when_closed(chip->card);
			chip->card = NULL;
		}
	}
}

static void usb6fire_card_free(struct snd_card *card)
{
	struct sfire_chip *chip = card->private_data;

	if (chip) {
		if (chip->pcm)
			usb6fire_pcm_destroy(chip);
		if (chip->midi)
			usb6fire_midi_destroy(chip);
		if (chip->comm)
			usb6fire_comm_destroy(chip);
		if (chip->control)
			usb6fire_control_destroy(chip);
	}
}

static int usb6fire_chip_probe(struct usb_interface *intf,
			       const struct usb_device_id *usb_id)
{
	int ret;
	int i;
	struct sfire_chip *chip = NULL;
	struct usb_device *device = interface_to_usbdev(intf);
	int regidx = -1; /* index in module parameter array */
	struct snd_card *card = NULL;

	/* Check if we already serve this card and return if so */
	mutex_lock(&register_mutex);
	for (i = 0; i < SNDRV_CARDS; i++) {
		if (devices[i] == device) {
			if (chips[i])
				chips[i]->intf_count++;
			usb_set_intfdata(intf, chips[i]);
			mutex_unlock(&register_mutex);
			return 0;
		} else if (!devices[i] && regidx < 0)
			regidx = i;
	}
	if (regidx < 0) {
		mutex_unlock(&register_mutex);
		dev_err(&intf->dev, "too many cards registered.\n");
		return -ENODEV;
	}
	devices[regidx] = device;
	mutex_unlock(&register_mutex);

	/* Check if firmware is present on the device; upload it if not */
	ret = usb6fire_fw_init(intf);
	if (ret < 0)
		return ret;
	else if (ret == FW_NOT_READY) /* firmware update performed */
		return 0;

	/* if we are here, card can be registered in alsa. */
	if (usb_set_interface(device, 0, 0) != 0) {
		dev_err(&intf->dev, "can't set first interface.\n");
		return -EIO;
	}
	ret = snd_card_new(&intf->dev, index[regidx], id[regidx],
			   THIS_MODULE, sizeof(struct sfire_chip), &card);
	if (ret < 0) {
		dev_err(&intf->dev, "cannot create alsa card.\n");
		return ret;
	}
	strscpy(card->driver, "6FireUSB");
	strscpy(card->shortname, "TerraTec DMX6FireUSB");
	sprintf(card->longname, "%s at %d:%d", card->shortname,
		device->bus->busnum, device->devnum);

	chip = card->private_data;
	chips[regidx] = chip;
	chip->dev = device;
	chip->regidx = regidx;
	chip->intf_count = 1;
	chip->card = card;
	card->private_free = usb6fire_card_free;

	ret = usb6fire_comm_init(chip);
	if (ret < 0)
		goto destroy_chip;

	ret = usb6fire_midi_init(chip);
	if (ret < 0)
		goto destroy_chip;

	ret = usb6fire_pcm_init(chip);
	if (ret < 0)
		goto destroy_chip;

	ret = usb6fire_control_init(chip);
	if (ret < 0)
		goto destroy_chip;

	ret = snd_card_register(card);
	if (ret < 0) {
		dev_err(&intf->dev, "cannot register card.\n");
		goto destroy_chip;
	}
	usb_set_intfdata(intf, chip);
	return 0;

destroy_chip:
	snd_card_free(card);
	return ret;
}

static void usb6fire_chip_disconnect(struct usb_interface *intf)
{
	struct sfire_chip *chip;

	chip = usb_get_intfdata(intf);
	if (chip) { /* if !chip, fw upload has been performed */
		chip->intf_count--;
		if (!chip->intf_count) {
			mutex_lock(&register_mutex);
			devices[chip->regidx] = NULL;
			chips[chip->regidx] = NULL;
			mutex_unlock(&register_mutex);

			chip->shutdown = true;
			usb6fire_chip_abort(chip);
		}
	}
}

static const struct usb_device_id device_table[] = {
	{
		.match_flags = USB_DEVICE_ID_MATCH_DEVICE,
		.idVendor = 0x0ccd,
		.idProduct = 0x0080
	},
	{}
};

MODULE_DEVICE_TABLE(usb, device_table);

static struct usb_driver usb_driver = {
	.name = "snd-usb-6fire",
	.probe = usb6fire_chip_probe,
	.disconnect = usb6fire_chip_disconnect,
	.id_table = device_table,
};

module_usb_driver(usb_driver);
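/*
 * The probe/disconnect pair above implements a small "one ALSA card per
 * USB device, shared by several interfaces" registry. A condensed sketch
 * of that pattern with generic names (not part of the driver):
 */
static DEFINE_MUTEX(example_mutex);
static struct usb_device *example_devices[SNDRV_CARDS];

static int example_claim_slot(struct usb_device *device)
{
	int i, free_slot = -1;

	mutex_lock(&example_mutex);
	for (i = 0; i < SNDRV_CARDS; i++) {
		if (example_devices[i] == device) {
			/* Another interface of a device we already serve */
			mutex_unlock(&example_mutex);
			return i;
		}
		if (!example_devices[i] && free_slot < 0)
			free_slot = i;	/* remember the first free slot */
	}
	if (free_slot >= 0)
		example_devices[free_slot] = device;
	mutex_unlock(&example_mutex);
	return free_slot;	/* -1: table full, probe must fail */
}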
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Binary Increase Congestion control for TCP
 * Home page:
 *	http://netsrv.csc.ncsu.edu/twiki/bin/view/Main/BIC
 * This is from the implementation of BICTCP in
 *	Lisong Xu, Khaled Harfoush, and Injong Rhee.
 *	"Binary Increase Congestion Control for Fast, Long Distance
 *	Networks" in INFOCOM 2004
 * Available from:
 *	http://netsrv.csc.ncsu.edu/export/bitcp.pdf
 *
 * Unless BIC is enabled and congestion window is large
 * this behaves the same as the original Reno.
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <net/tcp.h>

#define BICTCP_BETA_SCALE	1024	/* Scale factor beta calculation
					 * max_cwnd = snd_cwnd * beta
					 */
#define BICTCP_B		4	/*
					 * In binary search,
					 * go to point (max+min)/N
					 */

static int fast_convergence = 1;
static int max_increment = 16;
static int low_window = 14;
static int beta = 819;		/* = 819/1024 (BICTCP_BETA_SCALE) */
static int initial_ssthresh;
static int smooth_part = 20;

module_param(fast_convergence, int, 0644);
MODULE_PARM_DESC(fast_convergence, "turn on/off fast convergence");
module_param(max_increment, int, 0644);
MODULE_PARM_DESC(max_increment, "Limit on increment allowed during binary search");
module_param(low_window, int, 0644);
MODULE_PARM_DESC(low_window, "lower bound on congestion window (for TCP friendliness)");
module_param(beta, int, 0644);
MODULE_PARM_DESC(beta, "beta for multiplicative increase");
module_param(initial_ssthresh, int, 0644);
MODULE_PARM_DESC(initial_ssthresh, "initial value of slow start threshold");
module_param(smooth_part, int, 0644);
MODULE_PARM_DESC(smooth_part, "log(B/(B*Smin))/log(B/(B-1))+B, # of RTT from Wmax-B to Wmax");

/* BIC TCP Parameters */
struct bictcp {
	u32	cnt;		/* increase cwnd by 1 after ACKs */
	u32	last_max_cwnd;	/* last maximum snd_cwnd */
	u32	last_cwnd;	/* the last snd_cwnd */
	u32	last_time;	/* time when updated last_cwnd */
	u32	epoch_start;	/* beginning of an epoch */
#define ACK_RATIO_SHIFT	4
	u32	delayed_ack;	/* estimate the ratio of Packets/ACKs << 4 */
};

static inline void bictcp_reset(struct bictcp *ca)
{
	ca->cnt = 0;
	ca->last_max_cwnd = 0;
	ca->last_cwnd = 0;
	ca->last_time = 0;
	ca->epoch_start = 0;
	ca->delayed_ack = 2 << ACK_RATIO_SHIFT;
}

static void bictcp_init(struct sock *sk)
{
	struct bictcp *ca = inet_csk_ca(sk);

	bictcp_reset(ca);

	if (initial_ssthresh)
		tcp_sk(sk)->snd_ssthresh = initial_ssthresh;
}

/*
 * Compute congestion window to use.
 */
static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
{
	if (ca->last_cwnd == cwnd &&
	    (s32)(tcp_jiffies32 - ca->last_time) <= HZ / 32)
		return;

	ca->last_cwnd = cwnd;
	ca->last_time = tcp_jiffies32;

	if (ca->epoch_start == 0) /* record the beginning of an epoch */
		ca->epoch_start = tcp_jiffies32;

	/* start off normal */
	if (cwnd <= low_window) {
		ca->cnt = cwnd;
		return;
	}

	/* binary increase */
	if (cwnd < ca->last_max_cwnd) {
		__u32	dist = (ca->last_max_cwnd - cwnd) / BICTCP_B;

		if (dist > max_increment)
			/* linear increase */
			ca->cnt = cwnd / max_increment;
		else if (dist <= 1U)
			/* binary search increase */
			ca->cnt = (cwnd * smooth_part) / BICTCP_B;
		else
			/* binary search increase */
			ca->cnt = cwnd / dist;
	} else {
		/* slow start and linear increase */
		if (cwnd < ca->last_max_cwnd + BICTCP_B)
			/* slow start */
			ca->cnt = (cwnd * smooth_part) / BICTCP_B;
		else if (cwnd < ca->last_max_cwnd + max_increment*(BICTCP_B-1))
			/* slow start */
			ca->cnt = (cwnd * (BICTCP_B-1)) /
				(cwnd - ca->last_max_cwnd);
		else
			/* linear increase */
			ca->cnt = cwnd / max_increment;
	}

	/* if in slow start or link utilization is very low */
	if (ca->last_max_cwnd == 0) {
		if (ca->cnt > 20) /* increase cwnd 5% per RTT */
			ca->cnt = 20;
	}

	ca->cnt = (ca->cnt << ACK_RATIO_SHIFT) / ca->delayed_ack;
	if (ca->cnt == 0)		/* cannot be zero */
		ca->cnt = 1;
}

static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bictcp *ca = inet_csk_ca(sk);

	if (!tcp_is_cwnd_limited(sk))
		return;

	if (tcp_in_slow_start(tp)) {
		acked = tcp_slow_start(tp, acked);
		if (!acked)
			return;
	}
	bictcp_update(ca, tcp_snd_cwnd(tp));
	tcp_cong_avoid_ai(tp, ca->cnt, acked);
}

/*
 * behave like Reno until low_window is reached,
 * then increase congestion window slowly
 */
static u32 bictcp_recalc_ssthresh(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct bictcp *ca = inet_csk_ca(sk);

	ca->epoch_start = 0;	/* end of epoch */

	/* Wmax and fast convergence */
	if (tcp_snd_cwnd(tp) < ca->last_max_cwnd && fast_convergence)
		ca->last_max_cwnd = (tcp_snd_cwnd(tp) * (BICTCP_BETA_SCALE + beta))
			/ (2 * BICTCP_BETA_SCALE);
	else
		ca->last_max_cwnd = tcp_snd_cwnd(tp);

	if (tcp_snd_cwnd(tp) <= low_window)
		return max(tcp_snd_cwnd(tp) >> 1U, 2U);
	else
		return max((tcp_snd_cwnd(tp) * beta) / BICTCP_BETA_SCALE, 2U);
}

static void bictcp_state(struct sock *sk, u8 new_state)
{
	if (new_state == TCP_CA_Loss)
		bictcp_reset(inet_csk_ca(sk));
}

/* Track delayed acknowledgment ratio using sliding window
 * ratio = (15*ratio + sample) / 16
 */
static void bictcp_acked(struct sock *sk, const struct ack_sample *sample)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_state == TCP_CA_Open) {
		struct bictcp *ca = inet_csk_ca(sk);

		ca->delayed_ack += sample->pkts_acked -
			(ca->delayed_ack >> ACK_RATIO_SHIFT);
	}
}

static struct tcp_congestion_ops bictcp __read_mostly = {
	.init		= bictcp_init,
	.ssthresh	= bictcp_recalc_ssthresh,
	.cong_avoid	= bictcp_cong_avoid,
	.set_state	= bictcp_state,
	.undo_cwnd	= tcp_reno_undo_cwnd,
	.pkts_acked	= bictcp_acked,
	.owner		= THIS_MODULE,
	.name		= "bic",
};

static int __init bictcp_register(void)
{
	BUILD_BUG_ON(sizeof(struct bictcp) > ICSK_CA_PRIV_SIZE);
	return tcp_register_congestion_control(&bictcp);
}

static void __exit bictcp_unregister(void)
{
	tcp_unregister_congestion_control(&bictcp);
}

module_init(bictcp_register);
module_exit(bictcp_unregister);

MODULE_AUTHOR("Stephen Hemminger");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("BIC TCP");
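/*
 * A worked pass through bictcp_update() above (illustrative numbers, not
 * from the original file): with last_max_cwnd = 100, cwnd = 60,
 * BICTCP_B = 4 and max_increment = 16, the binary-search distance is
 * dist = (100 - 60) / 4 = 10. Since 1 < dist <= max_increment, the code
 * picks cnt = cwnd / dist = 6, i.e. snd_cwnd grows by one segment per six
 * ACKs, roughly dist = 10 segments per RTT, closing a quarter of the gap
 * to the last maximum each round trip. The final scaling
 * cnt = (cnt << ACK_RATIO_SHIFT) / delayed_ack, with the initial
 * delayed_ack estimate of 2 << 4, halves cnt to 3, compensating for
 * receivers that ACK only every second segment.
 */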
// SPDX-License-Identifier: GPL-2.0-only
/* Network filesystem write subrequest result collection, assessment
 * and retrying.
 *
 * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com) */ #include <linux/export.h> #include <linux/fs.h> #include <linux/mm.h> #include <linux/pagemap.h> #include <linux/slab.h> #include "internal.h" /* Notes made in the collector */ #define HIT_PENDING 0x01 /* A front op was still pending */ #define NEED_REASSESS 0x02 /* Need to loop round and reassess */ #define MADE_PROGRESS 0x04 /* Made progress cleaning up a stream or the folio set */ #define NEED_UNLOCK 0x08 /* The pagecache needs unlocking */ #define NEED_RETRY 0x10 /* A front op requests retrying */ #define SAW_FAILURE 0x20 /* One stream or hit a permanent failure */ static void netfs_dump_request(const struct netfs_io_request *rreq) { pr_err("Request R=%08x r=%d fl=%lx or=%x e=%ld\n", rreq->debug_id, refcount_read(&rreq->ref), rreq->flags, rreq->origin, rreq->error); pr_err(" st=%llx tsl=%zx/%llx/%llx\n", rreq->start, rreq->transferred, rreq->submitted, rreq->len); pr_err(" cci=%llx/%llx/%llx\n", rreq->cleaned_to, rreq->collected_to, atomic64_read(&rreq->issued_to)); pr_err(" iw=%pSR\n", rreq->netfs_ops->issue_write); for (int i = 0; i < NR_IO_STREAMS; i++) { const struct netfs_io_subrequest *sreq; const struct netfs_io_stream *s = &rreq->io_streams[i]; pr_err(" str[%x] s=%x e=%d acnf=%u,%u,%u,%u\n", s->stream_nr, s->source, s->error, s->avail, s->active, s->need_retry, s->failed); pr_err(" str[%x] ct=%llx t=%zx\n", s->stream_nr, s->collected_to, s->transferred); list_for_each_entry(sreq, &s->subrequests, rreq_link) { pr_err(" sreq[%x:%x] sc=%u s=%llx t=%zx/%zx r=%d f=%lx\n", sreq->stream_nr, sreq->debug_index, sreq->source, sreq->start, sreq->transferred, sreq->len, refcount_read(&sreq->ref), sreq->flags); } } } /* * Successful completion of write of a folio to the server and/or cache. Note * that we are not allowed to lock the folio here on pain of deadlocking with * truncate. */ int netfs_folio_written_back(struct folio *folio) { enum netfs_folio_trace why = netfs_folio_trace_clear; struct netfs_inode *ictx = netfs_inode(folio->mapping->host); struct netfs_folio *finfo; struct netfs_group *group = NULL; int gcount = 0; if ((finfo = netfs_folio_info(folio))) { /* Streaming writes cannot be redirtied whilst under writeback, * so discard the streaming record. */ unsigned long long fend; fend = folio_pos(folio) + finfo->dirty_offset + finfo->dirty_len; if (fend > ictx->zero_point) ictx->zero_point = fend; folio_detach_private(folio); group = finfo->netfs_group; gcount++; kfree(finfo); why = netfs_folio_trace_clear_s; goto end_wb; } if ((group = netfs_folio_group(folio))) { if (group == NETFS_FOLIO_COPY_TO_CACHE) { why = netfs_folio_trace_clear_cc; folio_detach_private(folio); goto end_wb; } /* Need to detach the group pointer if the page didn't get * redirtied. If it has been redirtied, then it must be within * the same group. */ why = netfs_folio_trace_redirtied; if (!folio_test_dirty(folio)) { folio_detach_private(folio); gcount++; why = netfs_folio_trace_clear_g; } } end_wb: trace_netfs_folio(folio, why); folio_end_writeback(folio); return gcount; } /* * Unlock any folios we've finished with. */ static void netfs_writeback_unlock_folios(struct netfs_io_request *wreq, unsigned int *notes) { struct folio_queue *folioq = wreq->buffer.tail; unsigned long long collected_to = wreq->collected_to; unsigned int slot = wreq->buffer.first_tail_slot; if (WARN_ON_ONCE(!folioq)) { pr_err("[!] 
Writeback unlock found empty rolling buffer!\n"); netfs_dump_request(wreq); return; } if (wreq->origin == NETFS_PGPRIV2_COPY_TO_CACHE) { if (netfs_pgpriv2_unlock_copied_folios(wreq)) *notes |= MADE_PROGRESS; return; } if (slot >= folioq_nr_slots(folioq)) { folioq = rolling_buffer_delete_spent(&wreq->buffer); if (!folioq) return; slot = 0; } for (;;) { struct folio *folio; struct netfs_folio *finfo; unsigned long long fpos, fend; size_t fsize, flen; folio = folioq_folio(folioq, slot); if (WARN_ONCE(!folio_test_writeback(folio), "R=%08x: folio %lx is not under writeback\n", wreq->debug_id, folio->index)) trace_netfs_folio(folio, netfs_folio_trace_not_under_wback); fpos = folio_pos(folio); fsize = folio_size(folio); finfo = netfs_folio_info(folio); flen = finfo ? finfo->dirty_offset + finfo->dirty_len : fsize; fend = min_t(unsigned long long, fpos + flen, wreq->i_size); trace_netfs_collect_folio(wreq, folio, fend, collected_to); /* Unlock any folio we've transferred all of. */ if (collected_to < fend) break; wreq->nr_group_rel += netfs_folio_written_back(folio); wreq->cleaned_to = fpos + fsize; *notes |= MADE_PROGRESS; /* Clean up the head folioq. If we clear an entire folioq, then * we can get rid of it provided it's not also the tail folioq * being filled by the issuer. */ folioq_clear(folioq, slot); slot++; if (slot >= folioq_nr_slots(folioq)) { folioq = rolling_buffer_delete_spent(&wreq->buffer); if (!folioq) goto done; slot = 0; } if (fpos + fsize >= collected_to) break; } wreq->buffer.tail = folioq; done: wreq->buffer.first_tail_slot = slot; } /* * Collect and assess the results of various write subrequests. We may need to * retry some of the results - or even do an RMW cycle for content crypto. * * Note that we have a number of parallel, overlapping lists of subrequests, * one to the server and one to the local cache for example, which may not be * the same size or starting position and may not even correspond in boundary * alignment. */ static void netfs_collect_write_results(struct netfs_io_request *wreq) { struct netfs_io_subrequest *front, *remove; struct netfs_io_stream *stream; unsigned long long collected_to, issued_to; unsigned int notes; int s; _enter("%llx-%llx", wreq->start, wreq->start + wreq->len); trace_netfs_collect(wreq); trace_netfs_rreq(wreq, netfs_rreq_trace_collect); reassess_streams: issued_to = atomic64_read(&wreq->issued_to); smp_rmb(); collected_to = ULLONG_MAX; if (wreq->origin == NETFS_WRITEBACK || wreq->origin == NETFS_WRITETHROUGH || wreq->origin == NETFS_PGPRIV2_COPY_TO_CACHE) notes = NEED_UNLOCK; else notes = 0; /* Remove completed subrequests from the front of the streams and * advance the completion point on each stream. We stop when we hit * something that's in progress. The issuer thread may be adding stuff * to the tail whilst we're doing this. */ for (s = 0; s < NR_IO_STREAMS; s++) { stream = &wreq->io_streams[s]; /* Read active flag before list pointers */ if (!smp_load_acquire(&stream->active)) continue; front = stream->front; while (front) { trace_netfs_collect_sreq(wreq, front); //_debug("sreq [%x] %llx %zx/%zx", // front->debug_index, front->start, front->transferred, front->len); if (stream->collected_to < front->start) { trace_netfs_collect_gap(wreq, stream, issued_to, 'F'); stream->collected_to = front->start; } /* Stall if the front is still undergoing I/O. */ if (netfs_check_subreq_in_progress(front)) { notes |= HIT_PENDING; break; } smp_rmb(); /* Read counters after I-P flag. 
*/ if (stream->failed) { stream->collected_to = front->start + front->len; notes |= MADE_PROGRESS | SAW_FAILURE; goto cancel; } if (front->start + front->transferred > stream->collected_to) { stream->collected_to = front->start + front->transferred; stream->transferred = stream->collected_to - wreq->start; notes |= MADE_PROGRESS; } if (test_bit(NETFS_SREQ_FAILED, &front->flags)) { stream->failed = true; stream->error = front->error; if (stream->source == NETFS_UPLOAD_TO_SERVER) mapping_set_error(wreq->mapping, front->error); notes |= NEED_REASSESS | SAW_FAILURE; break; } if (front->transferred < front->len) { stream->need_retry = true; notes |= NEED_RETRY | MADE_PROGRESS; break; } cancel: /* Remove if completely consumed. */ spin_lock(&wreq->lock); remove = front; list_del_init(&front->rreq_link); front = list_first_entry_or_null(&stream->subrequests, struct netfs_io_subrequest, rreq_link); stream->front = front; spin_unlock(&wreq->lock); netfs_put_subrequest(remove, notes & SAW_FAILURE ? netfs_sreq_trace_put_cancel : netfs_sreq_trace_put_done); } /* If we have an empty stream, we need to jump it forward * otherwise the collection point will never advance. */ if (!front && issued_to > stream->collected_to) { trace_netfs_collect_gap(wreq, stream, issued_to, 'E'); stream->collected_to = issued_to; } if (stream->collected_to < collected_to) collected_to = stream->collected_to; } if (collected_to != ULLONG_MAX && collected_to > wreq->collected_to) wreq->collected_to = collected_to; for (s = 0; s < NR_IO_STREAMS; s++) { stream = &wreq->io_streams[s]; if (stream->active) trace_netfs_collect_stream(wreq, stream); } trace_netfs_collect_state(wreq, wreq->collected_to, notes); /* Unlock any folios that we have now finished with. */ if (notes & NEED_UNLOCK) { if (wreq->cleaned_to < wreq->collected_to) netfs_writeback_unlock_folios(wreq, &notes); } else { wreq->cleaned_to = wreq->collected_to; } // TODO: Discard encryption buffers if (notes & NEED_RETRY) goto need_retry; if (notes & MADE_PROGRESS) { netfs_wake_rreq_flag(wreq, NETFS_RREQ_PAUSE, netfs_rreq_trace_unpause); //cond_resched(); goto reassess_streams; } if (notes & NEED_REASSESS) { //cond_resched(); goto reassess_streams; } out: netfs_put_group_many(wreq->group, wreq->nr_group_rel); wreq->nr_group_rel = 0; _leave(" = %x", notes); return; need_retry: /* Okay... We're going to have to retry one or both streams. Note * that any partially completed op will have had any wholly transferred * folios removed from it. */ _debug("retry"); netfs_retry_writes(wreq); goto out; } /* * Perform the collection of subrequests, folios and encryption buffers. */ bool netfs_write_collection(struct netfs_io_request *wreq) { struct netfs_inode *ictx = netfs_inode(wreq->inode); size_t transferred; int s; _enter("R=%x", wreq->debug_id); netfs_collect_write_results(wreq); /* We're done when the app thread has finished posting subreqs and all * the queues in all the streams are empty. */ if (!test_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags)) return false; smp_rmb(); /* Read ALL_QUEUED before lists. */ transferred = LONG_MAX; for (s = 0; s < NR_IO_STREAMS; s++) { struct netfs_io_stream *stream = &wreq->io_streams[s]; if (!stream->active) continue; if (!list_empty(&stream->subrequests)) return false; if (stream->transferred < transferred) transferred = stream->transferred; } /* Okay, declare that all I/O is complete. 
*/ wreq->transferred = transferred; trace_netfs_rreq(wreq, netfs_rreq_trace_write_done); if (wreq->io_streams[1].active && wreq->io_streams[1].failed && ictx->ops->invalidate_cache) { /* Cache write failure doesn't prevent writeback completion * unless we're in disconnected mode. */ ictx->ops->invalidate_cache(wreq); } if ((wreq->origin == NETFS_UNBUFFERED_WRITE || wreq->origin == NETFS_DIO_WRITE) && !wreq->error) netfs_update_i_size(ictx, &ictx->inode, wreq->start, wreq->transferred); if (wreq->origin == NETFS_DIO_WRITE && wreq->mapping->nrpages) { /* mmap may have got underfoot and we may now have folios * locally covering the region we just wrote. Attempt to * discard the folios, but leave in place any modified locally. * ->write_iter() is prevented from interfering by the DIO * counter. */ pgoff_t first = wreq->start >> PAGE_SHIFT; pgoff_t last = (wreq->start + wreq->transferred - 1) >> PAGE_SHIFT; invalidate_inode_pages2_range(wreq->mapping, first, last); } if (wreq->origin == NETFS_DIO_WRITE) inode_dio_end(wreq->inode); _debug("finished"); netfs_wake_rreq_flag(wreq, NETFS_RREQ_IN_PROGRESS, netfs_rreq_trace_wake_ip); /* As we cleared NETFS_RREQ_IN_PROGRESS, we acquired its ref. */ if (wreq->iocb) { size_t written = min(wreq->transferred, wreq->len); wreq->iocb->ki_pos += written; if (wreq->iocb->ki_complete) { trace_netfs_rreq(wreq, netfs_rreq_trace_ki_complete); wreq->iocb->ki_complete( wreq->iocb, wreq->error ? wreq->error : written); } wreq->iocb = VFS_PTR_POISON; } netfs_clear_subrequests(wreq); return true; } void netfs_write_collection_worker(struct work_struct *work) { struct netfs_io_request *rreq = container_of(work, struct netfs_io_request, work); netfs_see_request(rreq, netfs_rreq_trace_see_work); if (netfs_check_rreq_in_progress(rreq)) { if (netfs_write_collection(rreq)) /* Drop the ref from the IN_PROGRESS flag. */ netfs_put_request(rreq, netfs_rreq_trace_put_work_ip); else netfs_see_request(rreq, netfs_rreq_trace_see_work_complete); } } /** * netfs_write_subrequest_terminated - Note the termination of a write operation. * @_op: The I/O request that has terminated. * @transferred_or_error: The amount of data transferred or an error code. * * This tells the library that a contributory write I/O operation has * terminated, one way or another, and that it should collect the results. * * The caller indicates in @transferred_or_error the outcome of the operation, * supplying a positive value to indicate the number of bytes transferred or a * negative error code. The library will look after reissuing I/O operations * as appropriate and writing downloaded data to the cache. * * When this is called, ownership of the subrequest is transferred back to the * library, along with a ref. * * Note that %_op is a void* so that the function can be passed to * kiocb::term_func without the need for a casting wrapper. 
*/ void netfs_write_subrequest_terminated(void *_op, ssize_t transferred_or_error) { struct netfs_io_subrequest *subreq = _op; struct netfs_io_request *wreq = subreq->rreq; _enter("%x[%x] %zd", wreq->debug_id, subreq->debug_index, transferred_or_error); switch (subreq->source) { case NETFS_UPLOAD_TO_SERVER: netfs_stat(&netfs_n_wh_upload_done); break; case NETFS_WRITE_TO_CACHE: netfs_stat(&netfs_n_wh_write_done); break; default: BUG(); } if (IS_ERR_VALUE(transferred_or_error)) { subreq->error = transferred_or_error; if (subreq->error == -EAGAIN) set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags); else set_bit(NETFS_SREQ_FAILED, &subreq->flags); trace_netfs_failure(wreq, subreq, transferred_or_error, netfs_fail_write); switch (subreq->source) { case NETFS_WRITE_TO_CACHE: netfs_stat(&netfs_n_wh_write_failed); break; case NETFS_UPLOAD_TO_SERVER: netfs_stat(&netfs_n_wh_upload_failed); break; default: break; } trace_netfs_rreq(wreq, netfs_rreq_trace_set_pause); set_bit(NETFS_RREQ_PAUSE, &wreq->flags); } else { if (WARN(transferred_or_error > subreq->len - subreq->transferred, "Subreq excess write: R=%x[%x] %zd > %zu - %zu", wreq->debug_id, subreq->debug_index, transferred_or_error, subreq->len, subreq->transferred)) transferred_or_error = subreq->len - subreq->transferred; subreq->error = 0; subreq->transferred += transferred_or_error; if (subreq->transferred < subreq->len) set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags); } trace_netfs_sreq(subreq, netfs_sreq_trace_terminated); netfs_subreq_clear_in_progress(subreq); netfs_put_subrequest(subreq, netfs_sreq_trace_put_terminated); } EXPORT_SYMBOL(netfs_write_subrequest_terminated);
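/*
 * Illustrative sketch, not part of the file above: how a network filesystem
 * backend might feed results back into the collector. The myfs_* names are
 * hypothetical; the only real API used is netfs_write_subrequest_terminated(),
 * whose contract is documented above (positive byte count on success,
 * negative errno on failure, with -EAGAIN requesting a retry). A real
 * filesystem would point its netfs_ops->issue_write at a function like
 * myfs_issue_write().
 */
static void myfs_write_rpc_done(void *priv, ssize_t transferred_or_error)
{
	/* Hand the outcome straight back to the collector. */
	netfs_write_subrequest_terminated(priv, transferred_or_error);
}

static void myfs_issue_write(struct netfs_io_subrequest *subreq)
{
	/* A real backend would queue an RPC here and call the completion
	 * from its reply handler; this sketch completes synchronously,
	 * claiming the remainder of the subrequest was written.
	 */
	myfs_write_rpc_done(subreq, subreq->len - subreq->transferred);
}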
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011 Novell Inc.
*/ #include <linux/module.h> #include <linux/fs.h> #include <linux/slab.h> #include <linux/file.h> #include <linux/fileattr.h> #include <linux/splice.h> #include <linux/xattr.h> #include <linux/security.h> #include <linux/uaccess.h> #include <linux/sched/signal.h> #include <linux/cred.h> #include <linux/namei.h> #include <linux/ratelimit.h> #include <linux/exportfs.h> #include "overlayfs.h" #define OVL_COPY_UP_CHUNK_SIZE (1 << 20) static int ovl_ccup_set(const char *buf, const struct kernel_param *param) { pr_warn("\"check_copy_up\" module option is obsolete\n"); return 0; } static int ovl_ccup_get(char *buf, const struct kernel_param *param) { return sprintf(buf, "N\n"); } module_param_call(check_copy_up, ovl_ccup_set, ovl_ccup_get, NULL, 0644); MODULE_PARM_DESC(check_copy_up, "Obsolete; does nothing"); static bool ovl_must_copy_xattr(const char *name) { return !strcmp(name, XATTR_POSIX_ACL_ACCESS) || !strcmp(name, XATTR_POSIX_ACL_DEFAULT) || !strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN); } static int ovl_copy_acl(struct ovl_fs *ofs, const struct path *path, struct dentry *dentry, const char *acl_name) { int err; struct posix_acl *clone, *real_acl = NULL; real_acl = ovl_get_acl_path(path, acl_name, false); if (!real_acl) return 0; if (IS_ERR(real_acl)) { err = PTR_ERR(real_acl); if (err == -ENODATA || err == -EOPNOTSUPP) return 0; return err; } clone = posix_acl_clone(real_acl, GFP_KERNEL); posix_acl_release(real_acl); /* release original acl */ if (!clone) return -ENOMEM; err = ovl_do_set_acl(ofs, dentry, acl_name, clone); /* release cloned acl */ posix_acl_release(clone); return err; } int ovl_copy_xattr(struct super_block *sb, const struct path *oldpath, struct dentry *new) { struct dentry *old = oldpath->dentry; ssize_t list_size, size, value_size = 0; char *buf, *name, *value = NULL; int error = 0; size_t slen; if (!old->d_inode->i_op->listxattr || !new->d_inode->i_op->listxattr) return 0; list_size = vfs_listxattr(old, NULL, 0); if (list_size <= 0) { if (list_size == -EOPNOTSUPP) return 0; return list_size; } buf = kvzalloc(list_size, GFP_KERNEL); if (!buf) return -ENOMEM; list_size = vfs_listxattr(old, buf, list_size); if (list_size <= 0) { error = list_size; goto out; } for (name = buf; list_size; name += slen) { slen = strnlen(name, list_size) + 1; /* underlying fs providing us with an broken xattr list? */ if (WARN_ON(slen > list_size)) { error = -EIO; break; } list_size -= slen; if (ovl_is_private_xattr(sb, name)) continue; error = security_inode_copy_up_xattr(old, name); if (error == -ECANCELED) { error = 0; continue; /* Discard */ } if (error < 0 && error != -EOPNOTSUPP) break; if (is_posix_acl_xattr(name)) { error = ovl_copy_acl(OVL_FS(sb), oldpath, new, name); if (!error) continue; /* POSIX ACLs must be copied. 
 */
			break;
		}

retry:
		size = ovl_do_getxattr(oldpath, name, value, value_size);
		if (size == -ERANGE)
			size = ovl_do_getxattr(oldpath, name, NULL, 0);

		if (size < 0) {
			error = size;
			break;
		}

		if (size > value_size) {
			void *new;

			new = kvmalloc(size, GFP_KERNEL);
			if (!new) {
				error = -ENOMEM;
				break;
			}
			kvfree(value);
			value = new;
			value_size = size;
			goto retry;
		}

		error = ovl_do_setxattr(OVL_FS(sb), new, name, value, size, 0);
		if (error) {
			if (error != -EOPNOTSUPP || ovl_must_copy_xattr(name))
				break;

			/* Ignore failure to copy unknown xattrs */
			error = 0;
		}
	}
	kvfree(value);
out:
	kvfree(buf);
	return error;
}

static int ovl_copy_fileattr(struct inode *inode, const struct path *old,
			     const struct path *new)
{
	struct file_kattr oldfa = { .flags_valid = true };
	struct file_kattr newfa = { .flags_valid = true };
	int err;

	err = ovl_real_fileattr_get(old, &oldfa);
	if (err) {
		/* Ntfs-3g returns -EINVAL for "no fileattr support" */
		if (err == -EOPNOTSUPP || err == -EINVAL)
			return 0;
		pr_warn("failed to retrieve lower fileattr (%pd2, err=%i)\n",
			old->dentry, err);
		return err;
	}

	/*
	 * We cannot set immutable and append-only flags on upper inode,
	 * because we would not be able to link upper inode to upper dir
	 * nor set overlay private xattr on upper inode.
	 * Store these flags in overlay.protattr xattr instead.
	 */
	if (oldfa.flags & OVL_PROT_FS_FLAGS_MASK) {
		err = ovl_set_protattr(inode, new->dentry, &oldfa);
		if (err == -EPERM)
			pr_warn_once("copying fileattr: no xattr on upper\n");
		else if (err)
			return err;
	}

	/* Don't bother copying flags if none are set */
	if (!(oldfa.flags & OVL_COPY_FS_FLAGS_MASK))
		return 0;

	err = ovl_real_fileattr_get(new, &newfa);
	if (err) {
		/*
		 * Returning an error if upper doesn't support fileattr will
		 * result in a regression, so revert to the old behavior.
*/ if (err == -ENOTTY || err == -EINVAL) { pr_warn_once("copying fileattr: no support on upper\n"); return 0; } pr_warn("failed to retrieve upper fileattr (%pd2, err=%i)\n", new->dentry, err); return err; } BUILD_BUG_ON(OVL_COPY_FS_FLAGS_MASK & ~FS_COMMON_FL); newfa.flags &= ~OVL_COPY_FS_FLAGS_MASK; newfa.flags |= (oldfa.flags & OVL_COPY_FS_FLAGS_MASK); BUILD_BUG_ON(OVL_COPY_FSX_FLAGS_MASK & ~FS_XFLAG_COMMON); newfa.fsx_xflags &= ~OVL_COPY_FSX_FLAGS_MASK; newfa.fsx_xflags |= (oldfa.fsx_xflags & OVL_COPY_FSX_FLAGS_MASK); return ovl_real_fileattr_set(new, &newfa); } static int ovl_verify_area(loff_t pos, loff_t pos2, loff_t len, loff_t totlen) { loff_t tmp; if (pos != pos2) return -EIO; if (pos < 0 || len < 0 || totlen < 0) return -EIO; if (check_add_overflow(pos, len, &tmp)) return -EIO; return 0; } static int ovl_sync_file(struct path *path) { struct file *new_file; int err; new_file = ovl_path_open(path, O_LARGEFILE | O_RDONLY); if (IS_ERR(new_file)) return PTR_ERR(new_file); err = vfs_fsync(new_file, 0); fput(new_file); return err; } static int ovl_copy_up_file(struct ovl_fs *ofs, struct dentry *dentry, struct file *new_file, loff_t len, bool datasync) { struct path datapath; struct file *old_file; loff_t old_pos = 0; loff_t new_pos = 0; loff_t cloned; loff_t data_pos = -1; loff_t hole_len; bool skip_hole = false; int error = 0; ovl_path_lowerdata(dentry, &datapath); if (WARN_ON_ONCE(datapath.dentry == NULL) || WARN_ON_ONCE(len < 0)) return -EIO; old_file = ovl_path_open(&datapath, O_LARGEFILE | O_RDONLY); if (IS_ERR(old_file)) return PTR_ERR(old_file); /* Try to use clone_file_range to clone up within the same fs */ cloned = vfs_clone_file_range(old_file, 0, new_file, 0, len, 0); if (cloned == len) goto out_fput; /* Couldn't clone, so now we try to copy the data */ error = rw_verify_area(READ, old_file, &old_pos, len); if (!error) error = rw_verify_area(WRITE, new_file, &new_pos, len); if (error) goto out_fput; /* Check if lower fs supports seek operation */ if (old_file->f_mode & FMODE_LSEEK) skip_hole = true; while (len) { size_t this_len = OVL_COPY_UP_CHUNK_SIZE; ssize_t bytes; if (len < this_len) this_len = len; if (signal_pending_state(TASK_KILLABLE, current)) { error = -EINTR; break; } /* * Fill zero for hole will cost unnecessary disk space * and meanwhile slow down the copy-up speed, so we do * an optimization for hole during copy-up, it relies * on SEEK_DATA implementation in lower fs so if lower * fs does not support it, copy-up will behave as before. * * Detail logic of hole detection as below: * When we detect next data position is larger than current * position we will skip that hole, otherwise we copy * data in the size of OVL_COPY_UP_CHUNK_SIZE. Actually, * it may not recognize all kind of holes and sometimes * only skips partial of hole area. However, it will be * enough for most of the use cases. 
* * We do not hold upper sb_writers throughout the loop to avert * lockdep warning with llseek of lower file in nested overlay: * - upper sb_writers * -- lower ovl_inode_lock (ovl_llseek) */ if (skip_hole && data_pos < old_pos) { data_pos = vfs_llseek(old_file, old_pos, SEEK_DATA); if (data_pos > old_pos) { hole_len = data_pos - old_pos; len -= hole_len; old_pos = new_pos = data_pos; continue; } else if (data_pos == -ENXIO) { break; } else if (data_pos < 0) { skip_hole = false; } } error = ovl_verify_area(old_pos, new_pos, this_len, len); if (error) break; bytes = do_splice_direct(old_file, &old_pos, new_file, &new_pos, this_len, SPLICE_F_MOVE); if (bytes <= 0) { error = bytes; break; } WARN_ON(old_pos != new_pos); len -= bytes; } /* call fsync once, either now or later along with metadata */ if (!error && ovl_should_sync(ofs) && datasync) error = vfs_fsync(new_file, 0); out_fput: fput(old_file); return error; } static int ovl_set_size(struct ovl_fs *ofs, struct dentry *upperdentry, struct kstat *stat) { struct iattr attr = { .ia_valid = ATTR_SIZE, .ia_size = stat->size, }; return ovl_do_notify_change(ofs, upperdentry, &attr); } static int ovl_set_timestamps(struct ovl_fs *ofs, struct dentry *upperdentry, struct kstat *stat) { struct iattr attr = { .ia_valid = ATTR_ATIME | ATTR_MTIME | ATTR_ATIME_SET | ATTR_MTIME_SET | ATTR_CTIME, .ia_atime = stat->atime, .ia_mtime = stat->mtime, }; return ovl_do_notify_change(ofs, upperdentry, &attr); } int ovl_set_attr(struct ovl_fs *ofs, struct dentry *upperdentry, struct kstat *stat) { int err = 0; if (!S_ISLNK(stat->mode)) { struct iattr attr = { .ia_valid = ATTR_MODE, .ia_mode = stat->mode, }; err = ovl_do_notify_change(ofs, upperdentry, &attr); } if (!err) { struct iattr attr = { .ia_valid = ATTR_UID | ATTR_GID, .ia_vfsuid = VFSUIDT_INIT(stat->uid), .ia_vfsgid = VFSGIDT_INIT(stat->gid), }; err = ovl_do_notify_change(ofs, upperdentry, &attr); } if (!err) ovl_set_timestamps(ofs, upperdentry, stat); return err; } struct ovl_fh *ovl_encode_real_fh(struct ovl_fs *ofs, struct inode *realinode, bool is_upper) { struct ovl_fh *fh; int fh_type, dwords; int buflen = MAX_HANDLE_SZ; uuid_t *uuid = &realinode->i_sb->s_uuid; int err; /* Make sure the real fid stays 32bit aligned */ BUILD_BUG_ON(OVL_FH_FID_OFFSET % 4); BUILD_BUG_ON(MAX_HANDLE_SZ + OVL_FH_FID_OFFSET > 255); fh = kzalloc(buflen + OVL_FH_FID_OFFSET, GFP_KERNEL); if (!fh) return ERR_PTR(-ENOMEM); /* * We encode a non-connectable file handle for non-dir, because we * only need to find the lower inode number and we don't want to pay * the price or reconnecting the dentry. */ dwords = buflen >> 2; fh_type = exportfs_encode_inode_fh(realinode, (void *)fh->fb.fid, &dwords, NULL, 0); buflen = (dwords << 2); err = -EIO; if (fh_type < 0 || fh_type == FILEID_INVALID || WARN_ON(buflen > MAX_HANDLE_SZ)) goto out_err; fh->fb.version = OVL_FH_VERSION; fh->fb.magic = OVL_FH_MAGIC; fh->fb.type = fh_type; fh->fb.flags = OVL_FH_FLAG_CPU_ENDIAN; /* * When we will want to decode an overlay dentry from this handle * and all layers are on the same fs, if we get a disconncted real * dentry when we decode fid, the only way to tell if we should assign * it to upperdentry or to lowerstack is by checking this flag. 
	 */
	if (is_upper)
		fh->fb.flags |= OVL_FH_FLAG_PATH_UPPER;
	fh->fb.len = sizeof(fh->fb) + buflen;
	if (ovl_origin_uuid(ofs))
		fh->fb.uuid = *uuid;

	return fh;

out_err:
	kfree(fh);
	return ERR_PTR(err);
}

struct ovl_fh *ovl_get_origin_fh(struct ovl_fs *ofs, struct dentry *origin)
{
	/*
	 * When lower layer doesn't support export operations store a 'null' fh,
	 * so we can use the overlay.origin xattr to distinguish between a copy
	 * up and a pure upper inode.
	 */
	if (!ovl_can_decode_fh(origin->d_sb))
		return NULL;

	return ovl_encode_real_fh(ofs, d_inode(origin), false);
}

int ovl_set_origin_fh(struct ovl_fs *ofs, const struct ovl_fh *fh,
		      struct dentry *upper)
{
	int err;

	/*
	 * Do not fail when upper doesn't support xattrs.
	 */
	err = ovl_check_setxattr(ofs, upper, OVL_XATTR_ORIGIN, fh->buf,
				 fh ? fh->fb.len : 0, 0);

	/* Ignore -EPERM from setting "user.*" on symlink/special */
	return err == -EPERM ? 0 : err;
}

/* Store file handle of @upper dir in @index dir entry */
static int ovl_set_upper_fh(struct ovl_fs *ofs, struct dentry *upper,
			    struct dentry *index)
{
	const struct ovl_fh *fh;
	int err;

	fh = ovl_encode_real_fh(ofs, d_inode(upper), true);
	if (IS_ERR(fh))
		return PTR_ERR(fh);

	err = ovl_setxattr(ofs, index, OVL_XATTR_UPPER, fh->buf, fh->fb.len);

	kfree(fh);
	return err;
}

/*
 * Create and install index entry.
 */
static int ovl_create_index(struct dentry *dentry, const struct ovl_fh *fh,
			    struct dentry *upper)
{
	struct ovl_fs *ofs = OVL_FS(dentry->d_sb);
	struct dentry *indexdir = ovl_indexdir(dentry->d_sb);
	struct dentry *index = NULL;
	struct dentry *temp = NULL;
	struct qstr name = { };
	int err;

	/*
	 * For now this is only used for creating index entry for directories,
	 * because non-dir are copied up directly to index and then hardlinked
	 * to upper dir.
	 *
	 * TODO: implement create index for non-dir, so we can call it when
	 * encoding file handle for non-dir in case index does not exist.
*/ if (WARN_ON(!d_is_dir(dentry))) return -EIO; /* Directory not expected to be indexed before copy up */ if (WARN_ON(ovl_test_flag(OVL_INDEX, d_inode(dentry)))) return -EIO; err = ovl_get_index_name_fh(fh, &name); if (err) return err; temp = ovl_create_temp(ofs, indexdir, OVL_CATTR(S_IFDIR | 0)); err = PTR_ERR(temp); if (IS_ERR(temp)) goto free_name; err = ovl_set_upper_fh(ofs, upper, temp); if (err) goto out; err = ovl_parent_lock(indexdir, temp); if (err) goto out; index = ovl_lookup_upper(ofs, name.name, indexdir, name.len); if (IS_ERR(index)) { err = PTR_ERR(index); } else { err = ovl_do_rename(ofs, indexdir, temp, indexdir, index, 0); dput(index); } ovl_parent_unlock(indexdir); out: if (err) ovl_cleanup(ofs, indexdir, temp); dput(temp); free_name: kfree(name.name); return err; } struct ovl_copy_up_ctx { struct dentry *parent; struct dentry *dentry; struct path lowerpath; struct kstat stat; struct kstat pstat; const char *link; struct dentry *destdir; struct qstr destname; struct dentry *workdir; const struct ovl_fh *origin_fh; bool origin; bool indexed; bool metacopy; bool metacopy_digest; bool metadata_fsync; }; static int ovl_link_up(struct ovl_copy_up_ctx *c) { int err; struct dentry *upper; struct dentry *upperdir = ovl_dentry_upper(c->parent); struct ovl_fs *ofs = OVL_FS(c->dentry->d_sb); struct inode *udir = d_inode(upperdir); ovl_start_write(c->dentry); /* Mark parent "impure" because it may now contain non-pure upper */ err = ovl_set_impure(c->parent, upperdir); if (err) goto out; err = ovl_set_nlink_lower(c->dentry); if (err) goto out; inode_lock_nested(udir, I_MUTEX_PARENT); upper = ovl_lookup_upper(ofs, c->dentry->d_name.name, upperdir, c->dentry->d_name.len); err = PTR_ERR(upper); if (!IS_ERR(upper)) { err = ovl_do_link(ofs, ovl_dentry_upper(c->dentry), udir, upper); if (!err) { /* Restore timestamps on parent (best effort) */ ovl_set_timestamps(ofs, upperdir, &c->pstat); ovl_dentry_set_upper_alias(c->dentry); ovl_dentry_update_reval(c->dentry, upper); } dput(upper); } inode_unlock(udir); if (err) goto out; err = ovl_set_nlink_upper(c->dentry); out: ovl_end_write(c->dentry); return err; } static int ovl_copy_up_data(struct ovl_copy_up_ctx *c, const struct path *temp) { struct ovl_fs *ofs = OVL_FS(c->dentry->d_sb); struct file *new_file; int err; if (!S_ISREG(c->stat.mode) || c->metacopy || !c->stat.size) return 0; new_file = ovl_path_open(temp, O_LARGEFILE | O_WRONLY); if (IS_ERR(new_file)) return PTR_ERR(new_file); err = ovl_copy_up_file(ofs, c->dentry, new_file, c->stat.size, !c->metadata_fsync); fput(new_file); return err; } static int ovl_copy_up_metadata(struct ovl_copy_up_ctx *c, struct dentry *temp) { struct ovl_fs *ofs = OVL_FS(c->dentry->d_sb); struct inode *inode = d_inode(c->dentry); struct path upperpath = { .mnt = ovl_upper_mnt(ofs), .dentry = temp }; int err; err = ovl_copy_xattr(c->dentry->d_sb, &c->lowerpath, temp); if (err) return err; if (inode->i_flags & OVL_COPY_I_FLAGS_MASK && (S_ISREG(c->stat.mode) || S_ISDIR(c->stat.mode))) { /* * Copy the fileattr inode flags that are the source of already * copied i_flags */ err = ovl_copy_fileattr(inode, &c->lowerpath, &upperpath); if (err) return err; } /* * Store identifier of lower inode in upper inode xattr to * allow lookup of the copy up origin inode. * * Don't set origin when we are breaking the association with a lower * hard link. 
*/ if (c->origin) { err = ovl_set_origin_fh(ofs, c->origin_fh, temp); if (err) return err; } if (c->metacopy) { struct path lowerdatapath; struct ovl_metacopy metacopy_data = OVL_METACOPY_INIT; ovl_path_lowerdata(c->dentry, &lowerdatapath); if (WARN_ON_ONCE(lowerdatapath.dentry == NULL)) return -EIO; err = ovl_get_verity_digest(ofs, &lowerdatapath, &metacopy_data); if (err) return err; if (metacopy_data.digest_algo) c->metacopy_digest = true; err = ovl_set_metacopy_xattr(ofs, temp, &metacopy_data); if (err) return err; } inode_lock(temp->d_inode); if (S_ISREG(c->stat.mode)) err = ovl_set_size(ofs, temp, &c->stat); if (!err) err = ovl_set_attr(ofs, temp, &c->stat); inode_unlock(temp->d_inode); /* fsync metadata before moving it into upper dir */ if (!err && ovl_should_sync(ofs) && c->metadata_fsync) err = ovl_sync_file(&upperpath); return err; } struct ovl_cu_creds { const struct cred *old; struct cred *new; }; static int ovl_prep_cu_creds(struct dentry *dentry, struct ovl_cu_creds *cc) { int err; cc->old = cc->new = NULL; err = security_inode_copy_up(dentry, &cc->new); if (err < 0) return err; if (cc->new) cc->old = override_creds(cc->new); return 0; } static void ovl_revert_cu_creds(struct ovl_cu_creds *cc) { if (cc->new) { revert_creds(cc->old); put_cred(cc->new); } } /* * Copyup using workdir to prepare temp file. Used when copying up directories, * special files or when upper fs doesn't support O_TMPFILE. */ static int ovl_copy_up_workdir(struct ovl_copy_up_ctx *c) { struct ovl_fs *ofs = OVL_FS(c->dentry->d_sb); struct inode *inode; struct path path = { .mnt = ovl_upper_mnt(ofs) }; struct dentry *temp, *upper, *trap; struct ovl_cu_creds cc; int err; struct ovl_cattr cattr = { /* Can't properly set mode on creation because of the umask */ .mode = c->stat.mode & S_IFMT, .rdev = c->stat.rdev, .link = c->link }; err = ovl_prep_cu_creds(c->dentry, &cc); if (err) return err; ovl_start_write(c->dentry); temp = ovl_create_temp(ofs, c->workdir, &cattr); ovl_end_write(c->dentry); ovl_revert_cu_creds(&cc); if (IS_ERR(temp)) return PTR_ERR(temp); /* * Copy up data first and then xattrs. Writing data after * xattrs will remove security.capability xattr automatically. */ path.dentry = temp; err = ovl_copy_up_data(c, &path); ovl_start_write(c->dentry); if (err) goto cleanup_unlocked; if (S_ISDIR(c->stat.mode) && c->indexed) { err = ovl_create_index(c->dentry, c->origin_fh, temp); if (err) goto cleanup_unlocked; } /* * We cannot hold lock_rename() throughout this helper, because of * lock ordering with sb_writers, which shouldn't be held when calling * ovl_copy_up_data(), so lock workdir and destdir and make sure that * temp wasn't moved before copy up completion or cleanup. */ trap = lock_rename(c->workdir, c->destdir); if (trap || temp->d_parent != c->workdir) { /* temp or workdir moved underneath us? 
abort without cleanup */ dput(temp); err = -EIO; if (!IS_ERR(trap)) unlock_rename(c->workdir, c->destdir); goto out; } err = ovl_copy_up_metadata(c, temp); if (err) goto cleanup; upper = ovl_lookup_upper(ofs, c->destname.name, c->destdir, c->destname.len); err = PTR_ERR(upper); if (IS_ERR(upper)) goto cleanup; err = ovl_do_rename(ofs, c->workdir, temp, c->destdir, upper, 0); unlock_rename(c->workdir, c->destdir); dput(upper); if (err) goto cleanup_unlocked; inode = d_inode(c->dentry); if (c->metacopy_digest) ovl_set_flag(OVL_HAS_DIGEST, inode); else ovl_clear_flag(OVL_HAS_DIGEST, inode); ovl_clear_flag(OVL_VERIFIED_DIGEST, inode); if (!c->metacopy) ovl_set_upperdata(inode); ovl_inode_update(inode, temp); if (S_ISDIR(inode->i_mode)) ovl_set_flag(OVL_WHITEOUTS, inode); out: ovl_end_write(c->dentry); return err; cleanup: unlock_rename(c->workdir, c->destdir); cleanup_unlocked: ovl_cleanup(ofs, c->workdir, temp); dput(temp); goto out; } /* Copyup using O_TMPFILE which does not require cross dir locking */ static int ovl_copy_up_tmpfile(struct ovl_copy_up_ctx *c) { struct ovl_fs *ofs = OVL_FS(c->dentry->d_sb); struct inode *udir = d_inode(c->destdir); struct dentry *temp, *upper; struct file *tmpfile; struct ovl_cu_creds cc; int err; err = ovl_prep_cu_creds(c->dentry, &cc); if (err) return err; ovl_start_write(c->dentry); tmpfile = ovl_do_tmpfile(ofs, c->workdir, c->stat.mode); ovl_end_write(c->dentry); ovl_revert_cu_creds(&cc); if (IS_ERR(tmpfile)) return PTR_ERR(tmpfile); temp = tmpfile->f_path.dentry; if (!c->metacopy && c->stat.size) { err = ovl_copy_up_file(ofs, c->dentry, tmpfile, c->stat.size, !c->metadata_fsync); if (err) goto out_fput; } ovl_start_write(c->dentry); err = ovl_copy_up_metadata(c, temp); if (err) goto out; inode_lock_nested(udir, I_MUTEX_PARENT); upper = ovl_lookup_upper(ofs, c->destname.name, c->destdir, c->destname.len); err = PTR_ERR(upper); if (!IS_ERR(upper)) { err = ovl_do_link(ofs, temp, udir, upper); dput(upper); } inode_unlock(udir); if (err) goto out; if (c->metacopy_digest) ovl_set_flag(OVL_HAS_DIGEST, d_inode(c->dentry)); else ovl_clear_flag(OVL_HAS_DIGEST, d_inode(c->dentry)); ovl_clear_flag(OVL_VERIFIED_DIGEST, d_inode(c->dentry)); if (!c->metacopy) ovl_set_upperdata(d_inode(c->dentry)); ovl_inode_update(d_inode(c->dentry), dget(temp)); out: ovl_end_write(c->dentry); out_fput: fput(tmpfile); return err; } /* * Copy up a single dentry * * All renames start with copy up of source if necessary. The actual * rename will only proceed once the copy up was successful. Copy up uses * upper parent i_mutex for exclusion. Since rename can change d_parent it * is possible that the copy up will lock the old parent. At that point * the file will have already been copied up anyway. */ static int ovl_do_copy_up(struct ovl_copy_up_ctx *c) { int err; struct ovl_fs *ofs = OVL_FS(c->dentry->d_sb); struct dentry *origin = c->lowerpath.dentry; struct ovl_fh *fh = NULL; bool to_index = false; /* * Indexed non-dir is copied up directly to the index entry and then * hardlinked to upper dir. Indexed dir is copied up to indexdir, * then index entry is created and then copied up dir installed. * Copying dir up to indexdir instead of workdir simplifies locking. 
*/ if (ovl_need_index(c->dentry)) { c->indexed = true; if (S_ISDIR(c->stat.mode)) c->workdir = ovl_indexdir(c->dentry->d_sb); else to_index = true; } if (S_ISDIR(c->stat.mode) || c->stat.nlink == 1 || to_index) { fh = ovl_get_origin_fh(ofs, origin); if (IS_ERR(fh)) return PTR_ERR(fh); /* origin_fh may be NULL */ c->origin_fh = fh; c->origin = true; } if (to_index) { c->destdir = ovl_indexdir(c->dentry->d_sb); err = ovl_get_index_name(ofs, origin, &c->destname); if (err) goto out_free_fh; } else if (WARN_ON(!c->parent)) { /* Disconnected dentry must be copied up to index dir */ err = -EIO; goto out_free_fh; } else { /* * c->dentry->d_name is stabilzed by ovl_copy_up_start(), * because if we got here, it means that c->dentry has no upper * alias and changing ->d_name means going through ovl_rename() * that will call ovl_copy_up() on source and target dentry. */ c->destname = c->dentry->d_name; /* * Mark parent "impure" because it may now contain non-pure * upper */ ovl_start_write(c->dentry); err = ovl_set_impure(c->parent, c->destdir); ovl_end_write(c->dentry); if (err) goto out_free_fh; } /* Should we copyup with O_TMPFILE or with workdir? */ if (S_ISREG(c->stat.mode) && ofs->tmpfile) err = ovl_copy_up_tmpfile(c); else err = ovl_copy_up_workdir(c); if (err) goto out; if (c->indexed) ovl_set_flag(OVL_INDEX, d_inode(c->dentry)); ovl_start_write(c->dentry); if (to_index) { /* Initialize nlink for copy up of disconnected dentry */ err = ovl_set_nlink_upper(c->dentry); } else { struct inode *udir = d_inode(c->destdir); /* Restore timestamps on parent (best effort) */ inode_lock(udir); ovl_set_timestamps(ofs, c->destdir, &c->pstat); inode_unlock(udir); ovl_dentry_set_upper_alias(c->dentry); ovl_dentry_update_reval(c->dentry, ovl_dentry_upper(c->dentry)); } ovl_end_write(c->dentry); out: if (to_index) kfree(c->destname.name); out_free_fh: kfree(fh); return err; } static bool ovl_need_meta_copy_up(struct dentry *dentry, umode_t mode, int flags) { struct ovl_fs *ofs = OVL_FS(dentry->d_sb); if (!ofs->config.metacopy) return false; if (!S_ISREG(mode)) return false; if (flags && ((OPEN_FMODE(flags) & FMODE_WRITE) || (flags & O_TRUNC))) return false; /* Fall back to full copy if no fsverity on source data and we require verity */ if (ofs->config.verity_mode == OVL_VERITY_REQUIRE) { struct path lowerdata; ovl_path_lowerdata(dentry, &lowerdata); if (WARN_ON_ONCE(lowerdata.dentry == NULL) || ovl_ensure_verity_loaded(&lowerdata) || !fsverity_active(d_inode(lowerdata.dentry))) { return false; } } return true; } static ssize_t ovl_getxattr_value(const struct path *path, char *name, char **value) { ssize_t res; char *buf; res = ovl_do_getxattr(path, name, NULL, 0); if (res == -ENODATA || res == -EOPNOTSUPP) res = 0; if (res > 0) { buf = kzalloc(res, GFP_KERNEL); if (!buf) return -ENOMEM; res = ovl_do_getxattr(path, name, buf, res); if (res < 0) kfree(buf); else *value = buf; } return res; } /* Copy up data of an inode which was copied up metadata only in the past. */ static int ovl_copy_up_meta_inode_data(struct ovl_copy_up_ctx *c) { struct ovl_fs *ofs = OVL_FS(c->dentry->d_sb); struct path upperpath; int err; char *capability = NULL; ssize_t cap_size; ovl_path_upper(c->dentry, &upperpath); if (WARN_ON(upperpath.dentry == NULL)) return -EIO; if (c->stat.size) { err = cap_size = ovl_getxattr_value(&upperpath, XATTR_NAME_CAPS, &capability); if (cap_size < 0) goto out; } err = ovl_copy_up_data(c, &upperpath); if (err) goto out_free; /* * Writing to upper file will clear security.capability xattr. 
We * don't want that to happen for normal copy-up operation. */ ovl_start_write(c->dentry); if (capability) { err = ovl_do_setxattr(ofs, upperpath.dentry, XATTR_NAME_CAPS, capability, cap_size, 0); } if (!err) { err = ovl_removexattr(ofs, upperpath.dentry, OVL_XATTR_METACOPY); } ovl_end_write(c->dentry); if (err) goto out_free; ovl_clear_flag(OVL_HAS_DIGEST, d_inode(c->dentry)); ovl_clear_flag(OVL_VERIFIED_DIGEST, d_inode(c->dentry)); ovl_set_upperdata(d_inode(c->dentry)); out_free: kfree(capability); out: return err; } static int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry, int flags) { int err; DEFINE_DELAYED_CALL(done); struct path parentpath; struct ovl_copy_up_ctx ctx = { .parent = parent, .dentry = dentry, .workdir = ovl_workdir(dentry), }; if (WARN_ON(!ctx.workdir)) return -EROFS; ovl_path_lower(dentry, &ctx.lowerpath); err = vfs_getattr(&ctx.lowerpath, &ctx.stat, STATX_BASIC_STATS, AT_STATX_SYNC_AS_STAT); if (err) return err; if (!kuid_has_mapping(current_user_ns(), ctx.stat.uid) || !kgid_has_mapping(current_user_ns(), ctx.stat.gid)) return -EOVERFLOW; /* * With metacopy disabled, we fsync after final metadata copyup, for * both regular files and directories to get atomic copyup semantics * on filesystems that do not use strict metadata ordering (e.g. ubifs). * * With metacopy enabled we want to avoid fsync on all meta copyup * that will hurt performance of workloads such as chown -R, so we * only fsync on data copyup as legacy behavior. */ ctx.metadata_fsync = !OVL_FS(dentry->d_sb)->config.metacopy && (S_ISREG(ctx.stat.mode) || S_ISDIR(ctx.stat.mode)); ctx.metacopy = ovl_need_meta_copy_up(dentry, ctx.stat.mode, flags); if (parent) { ovl_path_upper(parent, &parentpath); ctx.destdir = parentpath.dentry; err = vfs_getattr(&parentpath, &ctx.pstat, STATX_ATIME | STATX_MTIME, AT_STATX_SYNC_AS_STAT); if (err) return err; } /* maybe truncate regular file. this has no effect on dirs */ if (flags & O_TRUNC) ctx.stat.size = 0; if (S_ISLNK(ctx.stat.mode)) { ctx.link = vfs_get_link(ctx.lowerpath.dentry, &done); if (IS_ERR(ctx.link)) return PTR_ERR(ctx.link); } err = ovl_copy_up_start(dentry, flags); /* err < 0: interrupted, err > 0: raced with another copy-up */ if (unlikely(err)) { if (err > 0) err = 0; } else { if (!ovl_dentry_upper(dentry)) err = ovl_do_copy_up(&ctx); if (!err && parent && !ovl_dentry_has_upper_alias(dentry)) err = ovl_link_up(&ctx); if (!err && ovl_dentry_needs_data_copy_up_locked(dentry, flags)) err = ovl_copy_up_meta_inode_data(&ctx); ovl_copy_up_end(dentry); } do_delayed_call(&done); return err; } static int ovl_copy_up_flags(struct dentry *dentry, int flags) { int err = 0; const struct cred *old_cred; bool disconnected = (dentry->d_flags & DCACHE_DISCONNECTED); /* * With NFS export, copy up can get called for a disconnected non-dir. * In this case, we will copy up lower inode to index dir without * linking it to upper dir. */ if (WARN_ON(disconnected && d_is_dir(dentry))) return -EIO; /* * We may not need lowerdata if we are only doing metacopy up, but it is * not very important to optimize this case, so do lazy lowerdata lookup * before any copy up, so we can do it before taking ovl_inode_lock(). 
*/ err = ovl_verify_lowerdata(dentry); if (err) return err; old_cred = ovl_override_creds(dentry->d_sb); while (!err) { struct dentry *next; struct dentry *parent = NULL; if (ovl_already_copied_up(dentry, flags)) break; next = dget(dentry); /* find the topmost dentry not yet copied up */ for (; !disconnected;) { parent = dget_parent(next); if (ovl_dentry_upper(parent)) break; dput(next); next = parent; } err = ovl_copy_up_one(parent, next, flags); dput(parent); dput(next); } ovl_revert_creds(old_cred); return err; } static bool ovl_open_need_copy_up(struct dentry *dentry, int flags) { /* Copy up of disconnected dentry does not set upper alias */ if (ovl_already_copied_up(dentry, flags)) return false; if (special_file(d_inode(dentry)->i_mode)) return false; if (!ovl_open_flags_need_copy_up(flags)) return false; return true; } int ovl_maybe_copy_up(struct dentry *dentry, int flags) { if (!ovl_open_need_copy_up(dentry, flags)) return 0; return ovl_copy_up_flags(dentry, flags); } int ovl_copy_up_with_data(struct dentry *dentry) { return ovl_copy_up_flags(dentry, O_WRONLY); } int ovl_copy_up(struct dentry *dentry) { return ovl_copy_up_flags(dentry, 0); }
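/*
 * Illustrative sketch, not part of copy_up.c: the exported entry points
 * above are called from other overlayfs code before any operation that
 * needs a writable upper copy of the object. A condensed, hypothetical
 * caller (real ones live in the open/write paths elsewhere in overlayfs):
 */
static int example_modify_path(struct dentry *dentry, int open_flags)
{
	int err;

	/* No-op if the dentry is already copied up or the open flags do
	 * not require a writable upper copy.
	 */
	err = ovl_maybe_copy_up(dentry, open_flags);
	if (err)
		return err;

	/* ... proceed against ovl_dentry_upper(dentry) ... */
	return 0;
}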
// SPDX-License-Identifier: GPL-2.0
#include <linux/proc_fs.h>
#include <linux/nsproxy.h>
#include <linux/ptrace.h>
#include <linux/namei.h>
#include <linux/file.h>
#include <linux/utsname.h>
#include <net/net_namespace.h>
#include <linux/ipc_namespace.h>
#include <linux/pid_namespace.h>
#include <linux/user_namespace.h>
#include "internal.h"

static const struct proc_ns_operations *ns_entries[] = {
#ifdef CONFIG_NET_NS
	&netns_operations,
#endif
#ifdef CONFIG_UTS_NS
	&utsns_operations,
#endif
#ifdef CONFIG_IPC_NS
	&ipcns_operations,
#endif
#ifdef CONFIG_PID_NS
	&pidns_operations,
	&pidns_for_children_operations,
#endif
#ifdef CONFIG_USER_NS
	&userns_operations,
#endif
	&mntns_operations,
#ifdef CONFIG_CGROUPS
	&cgroupns_operations,
#endif
#ifdef CONFIG_TIME_NS
	&timens_operations,
	&timens_for_children_operations,
#endif
};

static const char *proc_ns_get_link(struct dentry *dentry,
				    struct inode *inode,
				    struct delayed_call *done)
{
	const struct proc_ns_operations *ns_ops = PROC_I(inode)->ns_ops;
	struct task_struct *task;
	struct path ns_path;
	int error = -EACCES;

	if (!dentry)
		return ERR_PTR(-ECHILD);

	task = get_proc_task(inode);
	if (!task)
		return ERR_PTR(-EACCES);

	if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS))
		goto out;

	error = ns_get_path(&ns_path, task, ns_ops);
	if (error)
		goto out;

	error = nd_jump_link(&ns_path);
out:
	put_task_struct(task);
	return ERR_PTR(error);
}

static int proc_ns_readlink(struct dentry *dentry, char __user *buffer, int buflen)
{
	struct inode *inode = d_inode(dentry);
	const struct proc_ns_operations *ns_ops = PROC_I(inode)->ns_ops;
	struct task_struct *task;
	char name[50];
	int res = -EACCES;

	task = get_proc_task(inode);
	if (!task)
		return res;

	if (ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS)) {
		res = ns_get_name(name, sizeof(name), task, ns_ops);
		if (res >= 0)
			res = readlink_copy(buffer, buflen, name, strlen(name));
	}
	put_task_struct(task);
	return res;
}

static const struct inode_operations proc_ns_link_inode_operations = {
	.readlink	= proc_ns_readlink,
	.get_link	= proc_ns_get_link,
	.setattr	= proc_setattr,
};

static struct dentry *proc_ns_instantiate(struct dentry *dentry,
	struct task_struct *task, const void *ptr)
{
	const struct proc_ns_operations *ns_ops = ptr;
	struct inode *inode;
	struct proc_inode *ei;

	inode = proc_pid_make_inode(dentry->d_sb, task, S_IFLNK | S_IRWXUGO);
	if (!inode)
		return ERR_PTR(-ENOENT);

	ei = PROC_I(inode);
	inode->i_op = &proc_ns_link_inode_operations;
	ei->ns_ops = ns_ops;
	pid_update_inode(task, inode);

	return d_splice_alias_ops(inode, dentry, &pid_dentry_operations);
}

static int proc_ns_dir_readdir(struct file *file, struct dir_context *ctx)
{
	struct task_struct *task = get_proc_task(file_inode(file));
	const struct proc_ns_operations **entry, **last;

	if (!task)
		return -ENOENT;

	if (!dir_emit_dots(file, ctx))
		goto out;
	if (ctx->pos >= 2 + ARRAY_SIZE(ns_entries))
		goto out;
	entry = ns_entries + (ctx->pos - 2);
	last = &ns_entries[ARRAY_SIZE(ns_entries) - 1];
	while (entry <= last) {
		const struct proc_ns_operations *ops = *entry;

		if (!proc_fill_cache(file, ctx, ops->name, strlen(ops->name),
				     proc_ns_instantiate, task, ops))
			break;
		ctx->pos++;
		entry++;
	}
out:
	put_task_struct(task);
	return 0;
}

const struct file_operations proc_ns_dir_operations = {
	.read		= generic_read_dir,
	.iterate_shared	= proc_ns_dir_readdir,
	.llseek		= generic_file_llseek,
};

static struct dentry *proc_ns_dir_lookup(struct inode *dir,
				struct dentry *dentry, unsigned int flags)
{
	struct task_struct *task = get_proc_task(dir);
	const struct proc_ns_operations **entry, **last;
	unsigned int len = dentry->d_name.len;
	struct dentry *res = ERR_PTR(-ENOENT);

	if (!task)
		goto out_no_task;

	last = &ns_entries[ARRAY_SIZE(ns_entries)];
	for (entry = ns_entries; entry < last; entry++) {
		if (strlen((*entry)->name) != len)
			continue;
		if (!memcmp(dentry->d_name.name, (*entry)->name, len))
			break;
	}
	if (entry == last)
		goto out;

	res = proc_ns_instantiate(dentry, task, *entry);
out:
	put_task_struct(task);
out_no_task:
	return res;
}

const struct inode_operations proc_ns_dir_inode_operations = {
	.lookup		= proc_ns_dir_lookup,
	.getattr	= pid_getattr,
	.setattr	= proc_setattr,
};
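/*
 * Illustrative sketch, not part of the file above: the symlinks
 * instantiated here appear as /proc/<pid>/ns/<name> and resolve to strings
 * such as "net:[4026531992]", produced by proc_ns_readlink(). A userspace
 * probe using only readlink(2):
 */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	static const char *names[] = { "net", "uts", "ipc", "pid", "user",
				       "mnt", "cgroup", "time" };
	char path[64], buf[64];

	for (unsigned int i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
		ssize_t n;

		snprintf(path, sizeof(path), "/proc/self/ns/%s", names[i]);
		n = readlink(path, buf, sizeof(buf) - 1);
		if (n < 0)
			continue;	/* namespace type not configured */
		buf[n] = '\0';
		printf("%s -> %s\n", path, buf);
	}
	return 0;
}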
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 */

#include "peer.h"
#include "device.h"
#include "queueing.h"
#include "timers.h"
#include "peerlookup.h"
#include "noise.h"

#include <linux/kref.h>
#include <linux/lockdep.h>
#include <linux/rcupdate.h>
#include <linux/list.h>

static struct kmem_cache *peer_cache;
static atomic64_t peer_counter = ATOMIC64_INIT(0);

struct wg_peer *wg_peer_create(struct wg_device *wg,
			       const u8 public_key[NOISE_PUBLIC_KEY_LEN],
			       const u8 preshared_key[NOISE_SYMMETRIC_KEY_LEN])
{
	struct wg_peer *peer;
	int ret = -ENOMEM;

	lockdep_assert_held(&wg->device_update_lock);

	if (wg->num_peers >= MAX_PEERS_PER_DEVICE)
		return ERR_PTR(ret);

	peer = kmem_cache_zalloc(peer_cache, GFP_KERNEL);
	if (unlikely(!peer))
		return ERR_PTR(ret);
	if (unlikely(dst_cache_init(&peer->endpoint_cache, GFP_KERNEL)))
		goto err;

	peer->device = wg;
	wg_noise_handshake_init(&peer->handshake, &wg->static_identity,
				public_key, preshared_key, peer);
	peer->internal_id = atomic64_inc_return(&peer_counter);
	peer->serial_work_cpu = nr_cpumask_bits;
	wg_cookie_init(&peer->latest_cookie);
	wg_timers_init(peer);
	wg_cookie_checker_precompute_peer_keys(peer);
	spin_lock_init(&peer->keypairs.keypair_update_lock);
	INIT_WORK(&peer->transmit_handshake_work, wg_packet_handshake_send_worker);
	INIT_WORK(&peer->transmit_packet_work, wg_packet_tx_worker);
	wg_prev_queue_init(&peer->tx_queue);
	wg_prev_queue_init(&peer->rx_queue);
	rwlock_init(&peer->endpoint_lock);
	kref_init(&peer->refcount);
	skb_queue_head_init(&peer->staged_packet_queue);
	wg_noise_reset_last_sent_handshake(&peer->last_sent_handshake);
	set_bit(NAPI_STATE_NO_BUSY_POLL, &peer->napi.state);
	netif_napi_add(wg->dev, &peer->napi, wg_packet_rx_poll);
	napi_enable(&peer->napi);
	list_add_tail(&peer->peer_list, &wg->peer_list);
	INIT_LIST_HEAD(&peer->allowedips_list);
	wg_pubkey_hashtable_add(wg->peer_hashtable, peer);
	++wg->num_peers;
	pr_debug("%s: Peer %llu created\n", wg->dev->name, peer->internal_id);
	return peer;

err:
	kmem_cache_free(peer_cache, peer);
	return ERR_PTR(ret);
}

struct wg_peer *wg_peer_get_maybe_zero(struct wg_peer *peer)
{
	RCU_LOCKDEP_WARN(!rcu_read_lock_bh_held(),
			 "Taking peer reference without holding the RCU read lock");
	if (unlikely(!peer || !kref_get_unless_zero(&peer->refcount)))
		return NULL;
	return peer;
}

static void peer_make_dead(struct wg_peer *peer)
{
	/* Remove from configuration-time lookup structures. */
	list_del_init(&peer->peer_list);
	wg_allowedips_remove_by_peer(&peer->device->peer_allowedips, peer,
				     &peer->device->device_update_lock);
	wg_pubkey_hashtable_remove(peer->device->peer_hashtable, peer);

	/* Mark as dead, so that we don't allow jumping contexts after. */
	WRITE_ONCE(peer->is_dead, true);

	/* The caller must now synchronize_net() for this to take effect. */
}

static void peer_remove_after_dead(struct wg_peer *peer)
{
	WARN_ON(!peer->is_dead);

	/* No more keypairs can be created for this peer, since is_dead protects
	 * add_new_keypair, so we can now destroy existing ones.
	 */
	wg_noise_keypairs_clear(&peer->keypairs);

	/* Destroy all ongoing timers that were in-flight at the beginning of
	 * this function.
	 */
	wg_timers_stop(peer);

	/* The transition between packet encryption/decryption queues isn't
	 * guarded by is_dead, but each reference's life is strictly bounded by
	 * two generations: once for parallel crypto and once for serial
	 * ingestion, so we can simply flush twice, and be sure that we no
	 * longer have references inside these queues.
	 */

	/* a) For encrypt/decrypt. */
	flush_workqueue(peer->device->packet_crypt_wq);
	/* b.1) For send (but not receive, since that's napi). */
	flush_workqueue(peer->device->packet_crypt_wq);
	/* b.2.1) For receive (but not send, since that's wq). */
	napi_disable(&peer->napi);
	/* b.2.2) It's now safe to remove the napi struct, which must be done
	 * here from process context.
	 */
	netif_napi_del(&peer->napi);
	/* Ensure any workstructs we own (like transmit_handshake_work or
	 * clear_peer_work) no longer are in use.
	 */
	flush_workqueue(peer->device->handshake_send_wq);

	/* After the above flushes, a peer might still be active in a few
	 * different contexts: 1) from xmit(), before hitting is_dead and
	 * returning, 2) from wg_packet_consume_data(), before hitting is_dead
	 * and returning, 3) from wg_receive_handshake_packet() after a point
	 * where it has processed an incoming handshake packet, but where
	 * all calls to pass it off to timers fail because of is_dead. We won't
	 * have new references in (1) eventually, because we're removed from
	 * allowedips; we won't have new references in (2) eventually, because
	 * wg_index_hashtable_lookup will always return NULL, since we removed
	 * all existing keypairs and no more can be created; we won't have new
	 * references in (3) eventually, because we're removed from the pubkey
	 * hash table, which allows for a maximum of one handshake response,
	 * via the still-uncleared index hashtable entry, but not more than one,
	 * and in wg_cookie_message_consume, the lookup eventually gets a peer
	 * with a refcount of zero, so no new reference is taken.
	 */

	--peer->device->num_peers;
	wg_peer_put(peer);
}

/* We have a separate "remove" function to make sure that all active places
 * where a peer is currently operating will eventually come to an end and not
 * pass their reference onto another context.
 */
void wg_peer_remove(struct wg_peer *peer)
{
	if (unlikely(!peer))
		return;
	lockdep_assert_held(&peer->device->device_update_lock);

	peer_make_dead(peer);
	synchronize_net();
	peer_remove_after_dead(peer);
}

void wg_peer_remove_all(struct wg_device *wg)
{
	struct wg_peer *peer, *temp;
	LIST_HEAD(dead_peers);

	lockdep_assert_held(&wg->device_update_lock);

	/* Avoid having to traverse individually for each one. */
	wg_allowedips_free(&wg->peer_allowedips, &wg->device_update_lock);

	list_for_each_entry_safe(peer, temp, &wg->peer_list, peer_list) {
		peer_make_dead(peer);
		list_add_tail(&peer->peer_list, &dead_peers);
	}
	synchronize_net();
	list_for_each_entry_safe(peer, temp, &dead_peers, peer_list)
		peer_remove_after_dead(peer);
}

static void rcu_release(struct rcu_head *rcu)
{
	struct wg_peer *peer = container_of(rcu, struct wg_peer, rcu);

	dst_cache_destroy(&peer->endpoint_cache);
	WARN_ON(wg_prev_queue_peek(&peer->tx_queue) ||
		wg_prev_queue_peek(&peer->rx_queue));

	/* The final zeroing takes care of clearing any remaining handshake key
	 * material and other potentially sensitive information.
	 */
	memzero_explicit(peer, sizeof(*peer));
	kmem_cache_free(peer_cache, peer);
}

static void kref_release(struct kref *refcount)
{
	struct wg_peer *peer = container_of(refcount, struct wg_peer, refcount);

	pr_debug("%s: Peer %llu (%pISpfsc) destroyed\n",
		 peer->device->dev->name, peer->internal_id,
		 &peer->endpoint.addr);

	/* Remove ourself from dynamic runtime lookup structures, now that the
	 * last reference is gone.
	 */
	wg_index_hashtable_remove(peer->device->index_hashtable,
				  &peer->handshake.entry);

	/* Remove any lingering packets that didn't have a chance to be
	 * transmitted.
	 */
	wg_packet_purge_staged_packets(peer);

	/* Free the memory used. */
	call_rcu(&peer->rcu, rcu_release);
}

void wg_peer_put(struct wg_peer *peer)
{
	if (unlikely(!peer))
		return;
	kref_put(&peer->refcount, kref_release);
}

int __init wg_peer_init(void)
{
	peer_cache = KMEM_CACHE(wg_peer, 0);
	return peer_cache ? 0 : -ENOMEM;
}

void wg_peer_uninit(void)
{
	kmem_cache_destroy(peer_cache);
}
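The RCU/kref interplay above follows the standard lookup-side pattern: a reader finds a peer under rcu_read_lock_bh() and may race with the final wg_peer_put(), so a reference is taken only while the refcount is still non-zero. A minimal sketch of such a caller; example_find_peer_rcu() is a hypothetical RCU-protected lookup that returns a peer pointer without taking a reference:

static struct wg_peer *example_lookup_hold(struct wg_device *wg,
					   const u8 pubkey[NOISE_PUBLIC_KEY_LEN])
{
	struct wg_peer *peer;

	rcu_read_lock_bh();
	/* Hypothetical lookup: any RCU list/hashtable walk that yields a
	 * peer pointer without having incremented its refcount.
	 */
	peer = example_find_peer_rcu(wg, pubkey);
	/* wg_peer_get_maybe_zero() returns NULL if the refcount already
	 * dropped to zero, i.e. the peer is being torn down concurrently
	 * and must not be used.
	 */
	peer = wg_peer_get_maybe_zero(peer);
	rcu_read_unlock_bh();

	return peer; /* caller must balance with wg_peer_put() */
}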
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2023 Isovalent */

#ifndef __BPF_MPROG_H
#define __BPF_MPROG_H

#include <linux/bpf.h>

/* bpf_mprog framework:
 *
 * bpf_mprog is a generic layer for multi-program attachment. In-kernel users
 * of the bpf_mprog don't need to care about the dependency resolution
 * internals, they can just consume it with few API calls. Currently available
 * dependency directives are BPF_F_{BEFORE,AFTER} which enable insertion of
 * a BPF program or BPF link relative to an existing BPF program or BPF link
 * inside the multi-program array as well as prepend and append behavior if
 * no relative object was specified, see corresponding selftests for concrete
 * examples (e.g. tc_links and tc_opts test cases of test_progs).
 *
 * Usage of bpf_mprog_{attach,detach,query}() core APIs with pseudo code:
 *
 *  Attach case:
 *
 *   struct bpf_mprog_entry *entry, *entry_new;
 *   int ret;
 *
 *   // bpf_mprog user-side lock
 *   // fetch active @entry from attach location
 *   [...]
 *   ret = bpf_mprog_attach(entry, &entry_new, [...]);
 *   if (!ret) {
 *       if (entry != entry_new) {
 *           // swap @entry to @entry_new at attach location
 *           // ensure there are no inflight users of @entry:
 *           synchronize_rcu();
 *       }
 *       bpf_mprog_commit(entry);
 *   } else {
 *       // error path, bail out, propagate @ret
 *   }
 *   // bpf_mprog user-side unlock
 *
 *  Detach case:
 *
 *   struct bpf_mprog_entry *entry, *entry_new;
 *   int ret;
 *
 *   // bpf_mprog user-side lock
 *   // fetch active @entry from attach location
 *   [...]
 *   ret = bpf_mprog_detach(entry, &entry_new, [...]);
 *   if (!ret) {
 *       // all (*) marked is optional and depends on the use-case
 *       // whether bpf_mprog_bundle should be freed or not
 *       if (!bpf_mprog_total(entry_new))     (*)
 *           entry_new = NULL                 (*)
 *       // swap @entry to @entry_new at attach location
 *       // ensure there are no inflight users of @entry:
 *       synchronize_rcu();
 *       bpf_mprog_commit(entry);
 *       if (!entry_new)                      (*)
 *           // free bpf_mprog_bundle         (*)
 *   } else {
 *       // error path, bail out, propagate @ret
 *   }
 *   // bpf_mprog user-side unlock
 *
 *  Query case:
 *
 *   struct bpf_mprog_entry *entry;
 *   int ret;
 *
 *   // bpf_mprog user-side lock
 *   // fetch active @entry from attach location
 *   [...]
 *   ret = bpf_mprog_query(attr, uattr, entry);
 *   // bpf_mprog user-side unlock
 *
 *  Data/fast path:
 *
 *   struct bpf_mprog_entry *entry;
 *   struct bpf_mprog_fp *fp;
 *   struct bpf_prog *prog;
 *   int ret = [...];
 *
 *   rcu_read_lock();
 *   // fetch active @entry from attach location
 *   [...]
 *   bpf_mprog_foreach_prog(entry, fp, prog) {
 *       ret = bpf_prog_run(prog, [...]);
 *       // process @ret from program
 *   }
 *   [...]
 *   rcu_read_unlock();
 *
 * bpf_mprog locking considerations:
 *
 * bpf_mprog_{attach,detach,query}() must be protected by an external lock
 * (like RTNL in case of tcx).
 *
 * bpf_mprog_entry pointer can be an __rcu annotated pointer (in case of tcx
 * the netdevice has tcx_ingress and tcx_egress __rcu pointer) which gets
 * updated via rcu_assign_pointer() pointing to the active bpf_mprog_entry of
 * the bpf_mprog_bundle.
 *
 * Fast path accesses the active bpf_mprog_entry within RCU critical section
 * (in case of tcx it runs in NAPI which provides RCU protection there,
 * other users might need explicit rcu_read_lock()). The bpf_mprog_commit()
 * assumes that for the old bpf_mprog_entry there are no inflight users
 * anymore.
 *
 * The READ_ONCE()/WRITE_ONCE() pairing for bpf_mprog_fp's prog access is for
 * the replacement case where we don't swap the bpf_mprog_entry.
 */

#define bpf_mprog_foreach_tuple(entry, fp, cp, t)			\
	for (fp = &entry->fp_items[0], cp = &entry->parent->cp_items[0];\
	     ({								\
		t.prog = READ_ONCE(fp->prog);				\
		t.link = cp->link;					\
		t.prog;							\
	      });							\
	     fp++, cp++)

#define bpf_mprog_foreach_prog(entry, fp, p)				\
	for (fp = &entry->fp_items[0];					\
	     (p = READ_ONCE(fp->prog));					\
	     fp++)

#define BPF_MPROG_MAX 64

struct bpf_mprog_fp {
	struct bpf_prog *prog;
};

struct bpf_mprog_cp {
	struct bpf_link *link;
};

struct bpf_mprog_entry {
	struct bpf_mprog_fp fp_items[BPF_MPROG_MAX];
	struct bpf_mprog_bundle *parent;
};

struct bpf_mprog_bundle {
	struct bpf_mprog_entry a;
	struct bpf_mprog_entry b;
	struct bpf_mprog_cp cp_items[BPF_MPROG_MAX];
	struct bpf_prog *ref;
	atomic64_t revision;
	u32 count;
};

struct bpf_tuple {
	struct bpf_prog *prog;
	struct bpf_link *link;
};

static inline struct bpf_mprog_entry *
bpf_mprog_peer(const struct bpf_mprog_entry *entry)
{
	if (entry == &entry->parent->a)
		return &entry->parent->b;
	else
		return &entry->parent->a;
}

static inline void bpf_mprog_bundle_init(struct bpf_mprog_bundle *bundle)
{
	BUILD_BUG_ON(sizeof(bundle->a.fp_items[0]) > sizeof(u64));
	BUILD_BUG_ON(ARRAY_SIZE(bundle->a.fp_items) !=
		     ARRAY_SIZE(bundle->cp_items));

	memset(bundle, 0, sizeof(*bundle));
	atomic64_set(&bundle->revision, 1);
	bundle->a.parent = bundle;
	bundle->b.parent = bundle;
}

static inline void bpf_mprog_inc(struct bpf_mprog_entry *entry)
{
	entry->parent->count++;
}

static inline void bpf_mprog_dec(struct bpf_mprog_entry *entry)
{
	entry->parent->count--;
}

static inline int bpf_mprog_max(void)
{
	return ARRAY_SIZE(((struct bpf_mprog_entry *)NULL)->fp_items) - 1;
}

static inline int bpf_mprog_total(struct bpf_mprog_entry *entry)
{
	int total = entry->parent->count;

	WARN_ON_ONCE(total > bpf_mprog_max());
	return total;
}

static inline bool bpf_mprog_exists(struct bpf_mprog_entry *entry,
				    struct bpf_prog *prog)
{
	const struct bpf_mprog_fp *fp;
	const struct bpf_prog *tmp;

	bpf_mprog_foreach_prog(entry, fp, tmp) {
		if (tmp == prog)
			return true;
	}
	return false;
}

static inline void bpf_mprog_mark_for_release(struct bpf_mprog_entry *entry,
					      struct bpf_tuple *tuple)
{
	WARN_ON_ONCE(entry->parent->ref);
	if (!tuple->link)
		entry->parent->ref = tuple->prog;
}

static inline void bpf_mprog_complete_release(struct bpf_mprog_entry *entry)
{
	/* In the non-link case prog deletions can only drop the reference
	 * to the prog after the bpf_mprog_entry got swapped and the
	 * bpf_mprog ensured that there are no inflight users anymore.
	 *
	 * Paired with bpf_mprog_mark_for_release().
	 */
	if (entry->parent->ref) {
		bpf_prog_put(entry->parent->ref);
		entry->parent->ref = NULL;
	}
}

static inline void bpf_mprog_revision_new(struct bpf_mprog_entry *entry)
{
	atomic64_inc(&entry->parent->revision);
}

static inline void bpf_mprog_commit(struct bpf_mprog_entry *entry)
{
	bpf_mprog_complete_release(entry);
	bpf_mprog_revision_new(entry);
}

static inline u64 bpf_mprog_revision(struct bpf_mprog_entry *entry)
{
	return atomic64_read(&entry->parent->revision);
}

static inline void bpf_mprog_entry_copy(struct bpf_mprog_entry *dst,
					struct bpf_mprog_entry *src)
{
	memcpy(dst->fp_items, src->fp_items, sizeof(src->fp_items));
}

static inline void bpf_mprog_entry_clear(struct bpf_mprog_entry *dst)
{
	memset(dst->fp_items, 0, sizeof(dst->fp_items));
}

static inline void bpf_mprog_clear_all(struct bpf_mprog_entry *entry,
				       struct bpf_mprog_entry **entry_new)
{
	struct bpf_mprog_entry *peer;

	peer = bpf_mprog_peer(entry);
	bpf_mprog_entry_clear(peer);
	peer->parent->count = 0;
	*entry_new = peer;
}

static inline void bpf_mprog_entry_grow(struct bpf_mprog_entry *entry, int idx)
{
	int total = bpf_mprog_total(entry);

	memmove(entry->fp_items + idx + 1,
		entry->fp_items + idx,
		(total - idx) * sizeof(struct bpf_mprog_fp));

	memmove(entry->parent->cp_items + idx + 1,
		entry->parent->cp_items + idx,
		(total - idx) * sizeof(struct bpf_mprog_cp));
}

static inline void bpf_mprog_entry_shrink(struct bpf_mprog_entry *entry, int idx)
{
	/* Total array size is needed in this case to ensure the NULL
	 * entry is copied at the end.
	 */
	int total = ARRAY_SIZE(entry->fp_items);

	memmove(entry->fp_items + idx,
		entry->fp_items + idx + 1,
		(total - idx - 1) * sizeof(struct bpf_mprog_fp));

	memmove(entry->parent->cp_items + idx,
		entry->parent->cp_items + idx + 1,
		(total - idx - 1) * sizeof(struct bpf_mprog_cp));
}

static inline void bpf_mprog_read(struct bpf_mprog_entry *entry, u32 idx,
				  struct bpf_mprog_fp **fp,
				  struct bpf_mprog_cp **cp)
{
	*fp = &entry->fp_items[idx];
	*cp = &entry->parent->cp_items[idx];
}

static inline void bpf_mprog_write(struct bpf_mprog_fp *fp,
				   struct bpf_mprog_cp *cp,
				   struct bpf_tuple *tuple)
{
	WRITE_ONCE(fp->prog, tuple->prog);
	cp->link = tuple->link;
}

int bpf_mprog_attach(struct bpf_mprog_entry *entry,
		     struct bpf_mprog_entry **entry_new,
		     struct bpf_prog *prog_new, struct bpf_link *link,
		     struct bpf_prog *prog_old,
		     u32 flags, u32 id_or_fd, u64 revision);

int bpf_mprog_detach(struct bpf_mprog_entry *entry,
		     struct bpf_mprog_entry **entry_new,
		     struct bpf_prog *prog, struct bpf_link *link,
		     u32 flags, u32 id_or_fd, u64 revision);

int bpf_mprog_query(const union bpf_attr *attr, union bpf_attr __user *uattr,
		    struct bpf_mprog_entry *entry);

static inline bool bpf_mprog_supported(enum bpf_prog_type type)
{
	switch (type) {
	case BPF_PROG_TYPE_SCHED_CLS:
		return true;
	default:
		return false;
	}
}
#endif /* __BPF_MPROG_H */
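A sketch of how an in-kernel user might wire the attach pseudo code from the header comment to the APIs declared above. example_lock and example_entry are stand-ins for the user-side lock and the active __rcu entry pointer (RTNL and the tcx_ingress/tcx_egress pointers in the tcx case); this is illustrative, not an actual user:

static DEFINE_MUTEX(example_lock);
static struct bpf_mprog_entry __rcu *example_entry;

static int example_mprog_attach(struct bpf_prog *prog, u32 flags,
				u32 id_or_fd, u64 revision)
{
	struct bpf_mprog_entry *entry, *entry_new;
	int ret;

	mutex_lock(&example_lock);
	/* Fetch the active entry under the user-side lock. */
	entry = rcu_dereference_protected(example_entry,
					  lockdep_is_held(&example_lock));
	ret = bpf_mprog_attach(entry, &entry_new, prog, NULL, NULL,
			       flags, id_or_fd, revision);
	if (!ret) {
		if (entry != entry_new) {
			/* Swap in the peer entry and wait out inflight
			 * fast-path users of the old one.
			 */
			rcu_assign_pointer(example_entry, entry_new);
			synchronize_rcu();
		}
		bpf_mprog_commit(entry);
	}
	mutex_unlock(&example_lock);
	return ret;
}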
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/fs/adfs/super.c
 *
 *  Copyright (C) 1997-1999 Russell King
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs_parser.h>
#include <linux/fs_context.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/statfs.h>
#include <linux/user_namespace.h>
#include <linux/blkdev.h>
#include "adfs.h"
#include "dir_f.h"
#include "dir_fplus.h"

#define ADFS_SB_FLAGS SB_NOATIME

#define ADFS_DEFAULT_OWNER_MASK S_IRWXU
#define ADFS_DEFAULT_OTHER_MASK (S_IRWXG | S_IRWXO)

void __adfs_error(struct super_block *sb, const char *function, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	printk(KERN_CRIT "ADFS-fs error (device %s)%s%s: %pV\n",
		sb->s_id, function ? ": " : "",
		function ? function : "", &vaf);

	va_end(args);
}

void adfs_msg(struct super_block *sb, const char *pfx, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk("%sADFS-fs (%s): %pV\n", pfx, sb->s_id, &vaf);
	va_end(args);
}

static int adfs_checkdiscrecord(struct adfs_discrecord *dr)
{
	unsigned int max_idlen;
	int i;

	/* sector size must be 256, 512 or 1024 bytes */
	if (dr->log2secsize != 8 &&
	    dr->log2secsize != 9 &&
	    dr->log2secsize != 10)
		return 1;

	/* idlen must be at least log2secsize + 3 */
	if (dr->idlen < dr->log2secsize + 3)
		return 1;

	/* we cannot have such a large disc that we
	 * are unable to represent sector offsets in
	 * 32 bits.  This works out at 2.0 TB.
	 */
	if (le32_to_cpu(dr->disc_size_high) >> dr->log2secsize)
		return 1;

	/*
	 * Maximum idlen is limited to 16 bits for new directories by
	 * the three-byte storage of an indirect disc address.  For
	 * big directories, idlen must be no greater than 19 v2 [1.0]
	 */
	max_idlen = dr->format_version ? 19 : 16;
	if (dr->idlen > max_idlen)
		return 1;

	/* reserved bytes should be zero */
	for (i = 0; i < sizeof(dr->unused52); i++)
		if (dr->unused52[i] != 0)
			return 1;

	return 0;
}

static void adfs_put_super(struct super_block *sb)
{
	struct adfs_sb_info *asb = ADFS_SB(sb);

	adfs_free_map(sb);
	kfree_rcu(asb, rcu);
}

static int adfs_show_options(struct seq_file *seq, struct dentry *root)
{
	struct adfs_sb_info *asb = ADFS_SB(root->d_sb);

	if (!uid_eq(asb->s_uid, GLOBAL_ROOT_UID))
		seq_printf(seq, ",uid=%u", from_kuid_munged(&init_user_ns, asb->s_uid));
	if (!gid_eq(asb->s_gid, GLOBAL_ROOT_GID))
		seq_printf(seq, ",gid=%u", from_kgid_munged(&init_user_ns, asb->s_gid));
	if (asb->s_owner_mask != ADFS_DEFAULT_OWNER_MASK)
		seq_printf(seq, ",ownmask=%o", asb->s_owner_mask);
	if (asb->s_other_mask != ADFS_DEFAULT_OTHER_MASK)
		seq_printf(seq, ",othmask=%o", asb->s_other_mask);
	if (asb->s_ftsuffix != 0)
		seq_printf(seq, ",ftsuffix=%u", asb->s_ftsuffix);

	return 0;
}

enum {Opt_uid, Opt_gid, Opt_ownmask, Opt_othmask, Opt_ftsuffix};

static const struct fs_parameter_spec adfs_param_spec[] = {
	fsparam_uid	("uid",		Opt_uid),
	fsparam_gid	("gid",		Opt_gid),
	fsparam_u32oct	("ownmask",	Opt_ownmask),
	fsparam_u32oct	("othmask",	Opt_othmask),
	fsparam_u32	("ftsuffix",	Opt_ftsuffix),
	{}
};

static int adfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct adfs_sb_info *asb = fc->s_fs_info;
	struct fs_parse_result result;
	int opt;

	opt = fs_parse(fc, adfs_param_spec, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_uid:
		asb->s_uid = result.uid;
		break;
	case Opt_gid:
		asb->s_gid = result.gid;
		break;
	case Opt_ownmask:
		asb->s_owner_mask = result.uint_32;
		break;
	case Opt_othmask:
		asb->s_other_mask = result.uint_32;
		break;
	case Opt_ftsuffix:
		asb->s_ftsuffix = result.uint_32;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static int adfs_reconfigure(struct fs_context *fc)
{
	struct adfs_sb_info *new_asb = fc->s_fs_info;
	struct adfs_sb_info *asb = ADFS_SB(fc->root->d_sb);

	sync_filesystem(fc->root->d_sb);
	fc->sb_flags |= ADFS_SB_FLAGS;

	/* Structure copy newly parsed options */
	*asb = *new_asb;

	return 0;
}

static int adfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct adfs_sb_info *sbi = ADFS_SB(sb);
	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);

	adfs_map_statfs(sb, buf);

	buf->f_type    = ADFS_SUPER_MAGIC;
	buf->f_namelen = sbi->s_namelen;
	buf->f_bsize   = sb->s_blocksize;
	buf->f_ffree   = (long)(buf->f_bfree * buf->f_files) / (long)buf->f_blocks;
	buf->f_fsid    = u64_to_fsid(id);

	return 0;
}

static struct kmem_cache *adfs_inode_cachep;

static struct inode *adfs_alloc_inode(struct super_block *sb)
{
	struct adfs_inode_info *ei;

	ei = alloc_inode_sb(sb, adfs_inode_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;
	return &ei->vfs_inode;
}

static void adfs_free_inode(struct inode *inode)
{
	kmem_cache_free(adfs_inode_cachep, ADFS_I(inode));
}

static int adfs_drop_inode(struct inode *inode)
{
	/* always drop inodes if we are read-only */
	return !IS_ENABLED(CONFIG_ADFS_FS_RW) || IS_RDONLY(inode);
}

static void init_once(void *foo)
{
	struct adfs_inode_info *ei = (struct adfs_inode_info *) foo;

	inode_init_once(&ei->vfs_inode);
}

static int __init init_inodecache(void)
{
	adfs_inode_cachep = kmem_cache_create("adfs_inode_cache",
					     sizeof(struct adfs_inode_info),
					     0, (SLAB_RECLAIM_ACCOUNT|
						SLAB_ACCOUNT),
					     init_once);
	if (adfs_inode_cachep == NULL)
		return -ENOMEM;
	return 0;
}

static void destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(adfs_inode_cachep);
}

static const struct super_operations adfs_sops = {
	.alloc_inode	= adfs_alloc_inode,
	.free_inode	= adfs_free_inode,
	.drop_inode	= adfs_drop_inode,
	.write_inode	= adfs_write_inode,
	.put_super	= adfs_put_super,
	.statfs		= adfs_statfs,
	.show_options	= adfs_show_options,
};

static int adfs_probe(struct super_block *sb, unsigned int offset, int silent,
		      int (*validate)(struct super_block *sb,
				      struct buffer_head *bh,
				      struct adfs_discrecord **bhp))
{
	struct adfs_sb_info *asb = ADFS_SB(sb);
	struct adfs_discrecord *dr;
	struct buffer_head *bh;
	unsigned int blocksize = BLOCK_SIZE;
	int ret, try;

	for (try = 0; try < 2; try++) {
		/* try to set the requested block size */
		if (sb->s_blocksize != blocksize &&
		    !sb_set_blocksize(sb, blocksize)) {
			if (!silent)
				adfs_msg(sb, KERN_ERR,
					 "error: unsupported blocksize");
			return -EINVAL;
		}

		/* read the buffer */
		bh = sb_bread(sb, offset >> sb->s_blocksize_bits);
		if (!bh) {
			adfs_msg(sb, KERN_ERR,
				 "error: unable to read block %u, try %d",
				 offset >> sb->s_blocksize_bits, try);
			return -EIO;
		}

		/* validate it */
		ret = validate(sb, bh, &dr);
		if (ret) {
			brelse(bh);
			return ret;
		}

		/* does the block size match the filesystem block size? */
		blocksize = 1 << dr->log2secsize;
		if (sb->s_blocksize == blocksize) {
			asb->s_map = adfs_read_map(sb, dr);
			brelse(bh);
			return PTR_ERR_OR_ZERO(asb->s_map);
		}

		brelse(bh);
	}

	return -EIO;
}

static int adfs_validate_bblk(struct super_block *sb, struct buffer_head *bh,
			      struct adfs_discrecord **drp)
{
	struct adfs_discrecord *dr;
	unsigned char *b_data;

	b_data = bh->b_data + (ADFS_DISCRECORD % sb->s_blocksize);
	if (adfs_checkbblk(b_data))
		return -EILSEQ;

	/* Do some sanity checks on the ADFS disc record */
	dr = (struct adfs_discrecord *)(b_data + ADFS_DR_OFFSET);
	if (adfs_checkdiscrecord(dr))
		return -EILSEQ;

	*drp = dr;
	return 0;
}

static int adfs_validate_dr0(struct super_block *sb, struct buffer_head *bh,
			     struct adfs_discrecord **drp)
{
	struct adfs_discrecord *dr;

	/* Do some sanity checks on the ADFS disc record */
	dr = (struct adfs_discrecord *)(bh->b_data + 4);
	if (adfs_checkdiscrecord(dr) || dr->nzones_high || dr->nzones != 1)
		return -EILSEQ;

	*drp = dr;
	return 0;
}

static int adfs_fill_super(struct super_block *sb, struct fs_context *fc)
{
	struct adfs_discrecord *dr;
	struct object_info root_obj;
	struct adfs_sb_info *asb = sb->s_fs_info;
	struct inode *root;
	int ret = -EINVAL;
	int silent = fc->sb_flags & SB_SILENT;

	sb->s_flags |= ADFS_SB_FLAGS;

	sb->s_fs_info = asb;
	sb->s_magic = ADFS_SUPER_MAGIC;
	sb->s_time_gran = 10000000;

	/* Try to probe the filesystem boot block */
	ret = adfs_probe(sb, ADFS_DISCRECORD, 1, adfs_validate_bblk);
	if (ret == -EILSEQ)
		ret = adfs_probe(sb, 0, silent, adfs_validate_dr0);
	if (ret == -EILSEQ) {
		if (!silent)
			adfs_msg(sb, KERN_ERR,
				 "error: can't find an ADFS filesystem on dev %s.",
				 sb->s_id);
		ret = -EINVAL;
	}
	if (ret)
		goto error;

	/* set up enough so that we can read an inode */
	sb->s_op = &adfs_sops;

	dr = adfs_map_discrecord(asb->s_map);

	root_obj.parent_id = root_obj.indaddr = le32_to_cpu(dr->root);
	root_obj.name_len  = 0;
	/* Set root object date as 01 Jan 1987 00:00:00 */
	root_obj.loadaddr  = 0xfff0003f;
	root_obj.execaddr  = 0xec22c000;
	root_obj.size	   = ADFS_NEWDIR_SIZE;
	root_obj.attr	   = ADFS_NDA_DIRECTORY   | ADFS_NDA_OWNER_READ |
			     ADFS_NDA_OWNER_WRITE | ADFS_NDA_PUBLIC_READ;

	/*
	 * If this is a F+ disk with variable length directories,
	 * get the root_size from the disc record.
	 */
	if (dr->format_version) {
		root_obj.size = le32_to_cpu(dr->root_size);
		asb->s_dir     = &adfs_fplus_dir_ops;
		asb->s_namelen = ADFS_FPLUS_NAME_LEN;
	} else {
		asb->s_dir     = &adfs_f_dir_ops;
		asb->s_namelen = ADFS_F_NAME_LEN;
	}
	/*
	 * ,xyz hex filetype suffix may be added by driver
	 * to files that have valid RISC OS filetype
	 */
	if (asb->s_ftsuffix)
		asb->s_namelen += 4;

	set_default_d_op(sb, &adfs_dentry_operations);
	root = adfs_iget(sb, &root_obj);
	sb->s_root = d_make_root(root);
	if (!sb->s_root) {
		adfs_free_map(sb);
		adfs_error(sb, "get root inode failed\n");
		ret = -EIO;
		goto error;
	}
	return 0;

error:
	sb->s_fs_info = NULL;
	kfree(asb);
	return ret;
}

static int adfs_get_tree(struct fs_context *fc)
{
	return get_tree_bdev(fc, adfs_fill_super);
}

static void adfs_free_fc(struct fs_context *fc)
{
	struct adfs_context *asb = fc->s_fs_info;

	kfree(asb);
}

static const struct fs_context_operations adfs_context_ops = {
	.parse_param	= adfs_parse_param,
	.get_tree	= adfs_get_tree,
	.reconfigure	= adfs_reconfigure,
	.free		= adfs_free_fc,
};

static int adfs_init_fs_context(struct fs_context *fc)
{
	struct adfs_sb_info *asb;

	asb = kzalloc(sizeof(struct adfs_sb_info), GFP_KERNEL);
	if (!asb)
		return -ENOMEM;

	if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE) {
		struct super_block *sb = fc->root->d_sb;
		struct adfs_sb_info *old_asb = ADFS_SB(sb);

		/* structure copy existing options before parsing */
		*asb = *old_asb;
	} else {
		/* set default options */
		asb->s_uid = GLOBAL_ROOT_UID;
		asb->s_gid = GLOBAL_ROOT_GID;
		asb->s_owner_mask = ADFS_DEFAULT_OWNER_MASK;
		asb->s_other_mask = ADFS_DEFAULT_OTHER_MASK;
		asb->s_ftsuffix = 0;
	}

	fc->ops = &adfs_context_ops;
	fc->s_fs_info = asb;

	return 0;
}

static struct file_system_type adfs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "adfs",
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
	.init_fs_context = adfs_init_fs_context,
	.parameters	= adfs_param_spec,
};
MODULE_ALIAS_FS("adfs");

static int __init init_adfs_fs(void)
{
	int err = init_inodecache();
	if (err)
		goto out1;
	err = register_filesystem(&adfs_fs_type);
	if (err)
		goto out;
	return 0;
out:
	destroy_inodecache();
out1:
	return err;
}

static void __exit exit_adfs_fs(void)
{
	unregister_filesystem(&adfs_fs_type);
	destroy_inodecache();
}

module_init(init_adfs_fs)
module_exit(exit_adfs_fs)
MODULE_DESCRIPTION("Acorn Disc Filing System");
MODULE_LICENSE("GPL");
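Because adfs registers an init_fs_context hook and a parameter table, it can be driven through the new mount API as well as classic mount(2). A userspace sketch; the device path, mount point, and option values are examples only, and raw syscall() is used in case the libc wrappers (glibc 2.36+) are unavailable:

/* Userspace sketch: mount adfs via fsopen()/fsconfig()/fsmount(),
 * exercising adfs_init_fs_context() and adfs_parse_param() above.
 */
#include <fcntl.h>
#include <linux/mount.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	int fsfd, mntfd;

	fsfd = syscall(SYS_fsopen, "adfs", 0);
	if (fsfd < 0)
		return 1;
	/* Each key below lands in adfs_parse_param(). */
	syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_STRING, "source", "/dev/sdb", 0);
	syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_STRING, "uid", "1000", 0);
	syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_STRING, "ownmask", "700", 0);
	/* Triggers adfs_get_tree() -> adfs_fill_super(). */
	syscall(SYS_fsconfig, fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);
	mntfd = syscall(SYS_fsmount, fsfd, 0, 0);
	if (mntfd < 0)
		return 1;
	syscall(SYS_move_mount, mntfd, "", AT_FDCWD, "/mnt",
		MOVE_MOUNT_F_EMPTY_PATH);
	return 0;
}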
/* SPDX-License-Identifier: GPL-2.0 */
/* XDP user-space ring structure
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef _LINUX_XSK_QUEUE_H
#define _LINUX_XSK_QUEUE_H

#include <linux/types.h>
#include <linux/if_xdp.h>
#include <net/xdp_sock.h>
#include <net/xsk_buff_pool.h>

#include "xsk.h"

struct xdp_ring {
	u32 producer ____cacheline_aligned_in_smp;
	/* Hinder the adjacent cache prefetcher to prefetch the consumer
	 * pointer if the producer pointer is touched and vice versa.
	 */
	u32 pad1 ____cacheline_aligned_in_smp;
	u32 consumer ____cacheline_aligned_in_smp;
	u32 pad2 ____cacheline_aligned_in_smp;
	u32 flags;
	u32 pad3 ____cacheline_aligned_in_smp;
};

/* Used for the RX and TX queues for packets */
struct xdp_rxtx_ring {
	struct xdp_ring ptrs;
	struct xdp_desc desc[] ____cacheline_aligned_in_smp;
};

/* Used for the fill and completion queues for buffers */
struct xdp_umem_ring {
	struct xdp_ring ptrs;
	u64 desc[] ____cacheline_aligned_in_smp;
};

struct xsk_queue {
	u32 ring_mask;
	u32 nentries;
	u32 cached_prod;
	u32 cached_cons;
	struct xdp_ring *ring;
	u64 invalid_descs;
	u64 queue_empty_descs;
	size_t ring_vmalloc_size;
};

struct parsed_desc {
	u32 mb;
	u32 valid;
};

/* The structure of the shared state of the rings are a simple
 * circular buffer, as outlined in
 * Documentation/core-api/circular-buffers.rst. For the Rx and
 * completion ring, the kernel is the producer and user space is the
 * consumer. For the Tx and fill rings, the kernel is the consumer and
 * user space is the producer.
 *
 * producer                         consumer
 *
 * if (LOAD ->consumer) {  (A)      LOAD.acq ->producer  (C)
 *    STORE $data                   LOAD $data
 *    STORE.rel ->producer (B)      STORE.rel ->consumer (D)
 * }
 *
 * (A) pairs with (D), and (B) pairs with (C).
 *
 * Starting with (B), it protects the data from being written after
 * the producer pointer.
 * If this barrier was missing, the consumer
 * could observe the producer pointer being set and thus load the data
 * before the producer has written the new data. The consumer would in
 * this case load the old data.
 *
 * (C) protects the consumer from speculatively loading the data before
 * the producer pointer actually has been read. If we do not have this
 * barrier, some architectures could load old data as speculative loads
 * are not discarded as the CPU does not know there is a dependency
 * between ->producer and data.
 *
 * (A) is a control dependency that separates the load of ->consumer
 * from the stores of $data. In case ->consumer indicates there is no
 * room in the buffer to store $data we do not. The dependency will
 * order both of the stores after the loads. So no barrier is needed.
 *
 * (D) protects the load of the data to be observed to happen after the
 * store of the consumer pointer. If we did not have this memory
 * barrier, the producer could observe the consumer pointer being set
 * and overwrite the data with a new value before the consumer got the
 * chance to read the old value. The consumer would thus miss reading
 * the old entry and very likely read the new entry twice, once right
 * now and again after circling through the ring.
 */

/* The operations on the rings are the following:
 *
 * producer                       consumer
 *
 * RESERVE entries                PEEK in the ring for entries
 * WRITE data into the ring       READ data from the ring
 * SUBMIT entries                 RELEASE entries
 *
 * The producer reserves one or more entries in the ring. It can then
 * fill in these entries and finally submit them so that they can be
 * seen and read by the consumer.
 *
 * The consumer peeks into the ring to see if the producer has written
 * any new entries. If so, the consumer can then read these entries
 * and when it is done reading them release them back to the producer
 * so that the producer can use these slots to fill in new entries.
 *
 * The function names below reflect these operations.
 */

/* Functions that read and validate content from consumer rings. */
static inline void __xskq_cons_read_addr_unchecked(struct xsk_queue *q, u32 cached_cons, u64 *addr)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
	u32 idx = cached_cons & q->ring_mask;

	*addr = ring->desc[idx];
}

static inline bool xskq_cons_read_addr_unchecked(struct xsk_queue *q, u64 *addr)
{
	if (q->cached_cons != q->cached_prod) {
		__xskq_cons_read_addr_unchecked(q, q->cached_cons, addr);
		return true;
	}

	return false;
}

static inline bool xp_unused_options_set(u32 options)
{
	return options & ~(XDP_PKT_CONTD | XDP_TX_METADATA);
}

static inline bool xp_aligned_validate_desc(struct xsk_buff_pool *pool,
					    struct xdp_desc *desc)
{
	u64 addr = desc->addr - pool->tx_metadata_len;
	u64 len = desc->len + pool->tx_metadata_len;
	u64 offset = addr & (pool->chunk_size - 1);

	if (!desc->len)
		return false;

	if (offset + len > pool->chunk_size)
		return false;

	if (addr >= pool->addrs_cnt)
		return false;

	if (xp_unused_options_set(desc->options))
		return false;
	return true;
}

static inline bool xp_unaligned_validate_desc(struct xsk_buff_pool *pool,
					      struct xdp_desc *desc)
{
	u64 addr = xp_unaligned_add_offset_to_addr(desc->addr) - pool->tx_metadata_len;
	u64 len = desc->len + pool->tx_metadata_len;

	if (!desc->len)
		return false;

	if (len > pool->chunk_size)
		return false;

	if (addr >= pool->addrs_cnt || addr + len > pool->addrs_cnt ||
	    xp_desc_crosses_non_contig_pg(pool, addr, len))
		return false;

	if (xp_unused_options_set(desc->options))
		return false;
	return true;
}

static inline bool xp_validate_desc(struct xsk_buff_pool *pool,
				    struct xdp_desc *desc)
{
	return pool->unaligned ? xp_unaligned_validate_desc(pool, desc) :
				 xp_aligned_validate_desc(pool, desc);
}

static inline bool xskq_has_descs(struct xsk_queue *q)
{
	return q->cached_cons != q->cached_prod;
}

static inline bool xskq_cons_is_valid_desc(struct xsk_queue *q,
					   struct xdp_desc *d,
					   struct xsk_buff_pool *pool)
{
	if (!xp_validate_desc(pool, d)) {
		q->invalid_descs++;
		return false;
	}
	return true;
}

static inline bool xskq_cons_read_desc(struct xsk_queue *q,
				       struct xdp_desc *desc,
				       struct xsk_buff_pool *pool)
{
	if (q->cached_cons != q->cached_prod) {
		struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
		u32 idx = q->cached_cons & q->ring_mask;

		*desc = ring->desc[idx];
		return xskq_cons_is_valid_desc(q, desc, pool);
	}

	q->queue_empty_descs++;
	return false;
}

static inline void xskq_cons_release_n(struct xsk_queue *q, u32 cnt)
{
	q->cached_cons += cnt;
}

static inline void parse_desc(struct xsk_queue *q, struct xsk_buff_pool *pool,
			      struct xdp_desc *desc, struct parsed_desc *parsed)
{
	parsed->valid = xskq_cons_is_valid_desc(q, desc, pool);
	parsed->mb = xp_mb_desc(desc);
}

static inline
u32 xskq_cons_read_desc_batch(struct xsk_queue *q, struct xsk_buff_pool *pool,
			      u32 max)
{
	u32 cached_cons = q->cached_cons, nb_entries = 0;
	struct xdp_desc *descs = pool->tx_descs;
	u32 total_descs = 0, nr_frags = 0;

	/* track first entry, if we stumble upon *any* invalid descriptor, rewind
	 * the current packet that consists of frags and stop the processing
	 */
	while (cached_cons != q->cached_prod && nb_entries < max) {
		struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
		u32 idx = cached_cons & q->ring_mask;
		struct parsed_desc parsed;

		descs[nb_entries] = ring->desc[idx];
		cached_cons++;
		parse_desc(q, pool, &descs[nb_entries], &parsed);
		if (unlikely(!parsed.valid))
			break;

		if (likely(!parsed.mb)) {
			total_descs += (nr_frags + 1);
			nr_frags = 0;
		} else {
			nr_frags++;
			if (nr_frags == pool->xdp_zc_max_segs) {
				nr_frags = 0;
				break;
			}
		}
		nb_entries++;
	}

	cached_cons -= nr_frags;
	/* Release valid plus any invalid entries */
	xskq_cons_release_n(q, cached_cons - q->cached_cons);
	return total_descs;
}

/* Functions for consumers */

static inline void __xskq_cons_release(struct xsk_queue *q)
{
	smp_store_release(&q->ring->consumer, q->cached_cons); /* D, matches A */
}

static inline void __xskq_cons_peek(struct xsk_queue *q)
{
	/* Refresh the local pointer */
	q->cached_prod = smp_load_acquire(&q->ring->producer);  /* C, matches B */
}

static inline void xskq_cons_get_entries(struct xsk_queue *q)
{
	__xskq_cons_release(q);
	__xskq_cons_peek(q);
}

static inline u32 xskq_cons_nb_entries(struct xsk_queue *q, u32 max)
{
	u32 entries = q->cached_prod - q->cached_cons;

	if (entries >= max)
		return max;

	__xskq_cons_peek(q);
	entries = q->cached_prod - q->cached_cons;

	return entries >= max ? max : entries;
}

static inline bool xskq_cons_peek_addr_unchecked(struct xsk_queue *q, u64 *addr)
{
	if (q->cached_prod == q->cached_cons)
		xskq_cons_get_entries(q);
	return xskq_cons_read_addr_unchecked(q, addr);
}

static inline bool xskq_cons_peek_desc(struct xsk_queue *q,
				       struct xdp_desc *desc,
				       struct xsk_buff_pool *pool)
{
	if (q->cached_prod == q->cached_cons)
		xskq_cons_get_entries(q);
	return xskq_cons_read_desc(q, desc, pool);
}

/* To improve performance in the xskq_cons_release functions, only update local state here.
 * Reflect this to global state when we get new entries from the ring in
 * xskq_cons_get_entries() and whenever Rx or Tx processing are completed in the NAPI loop.
 */
static inline void xskq_cons_release(struct xsk_queue *q)
{
	q->cached_cons++;
}

static inline void xskq_cons_cancel_n(struct xsk_queue *q, u32 cnt)
{
	q->cached_cons -= cnt;
}

static inline u32 xskq_cons_present_entries(struct xsk_queue *q)
{
	/* No barriers needed since data is not accessed */
	return READ_ONCE(q->ring->producer) - READ_ONCE(q->ring->consumer);
}

/* Functions for producers */

static inline u32 xskq_prod_nb_free(struct xsk_queue *q, u32 max)
{
	u32 free_entries = q->nentries - (q->cached_prod - q->cached_cons);

	if (free_entries >= max)
		return max;

	/* Refresh the local tail pointer */
	q->cached_cons = READ_ONCE(q->ring->consumer);
	free_entries = q->nentries - (q->cached_prod - q->cached_cons);

	return free_entries >= max ? max : free_entries;
}

static inline bool xskq_prod_is_full(struct xsk_queue *q)
{
	return xskq_prod_nb_free(q, 1) ? false : true;
}

static inline void xskq_prod_cancel_n(struct xsk_queue *q, u32 cnt)
{
	q->cached_prod -= cnt;
}

static inline int xskq_prod_reserve(struct xsk_queue *q)
{
	if (xskq_prod_is_full(q))
		return -ENOSPC;

	/* A, matches D */
	q->cached_prod++;
	return 0;
}

static inline int xskq_prod_reserve_addr(struct xsk_queue *q, u64 addr)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;

	if (xskq_prod_is_full(q))
		return -ENOSPC;

	/* A, matches D */
	ring->desc[q->cached_prod++ & q->ring_mask] = addr;
	return 0;
}

static inline void xskq_prod_write_addr_batch(struct xsk_queue *q, struct xdp_desc *descs,
					      u32 nb_entries)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
	u32 i, cached_prod;

	/* A, matches D */
	cached_prod = q->cached_prod;
	for (i = 0; i < nb_entries; i++)
		ring->desc[cached_prod++ & q->ring_mask] = descs[i].addr;
	q->cached_prod = cached_prod;
}

static inline int xskq_prod_reserve_desc(struct xsk_queue *q,
					 u64 addr, u32 len, u32 flags)
{
	struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
	u32 idx;

	if (xskq_prod_is_full(q))
		return -ENOBUFS;

	/* A, matches D */
	idx = q->cached_prod++ & q->ring_mask;
	ring->desc[idx].addr = addr;
	ring->desc[idx].len = len;
	ring->desc[idx].options = flags;

	return 0;
}

static inline void __xskq_prod_submit(struct xsk_queue *q, u32 idx)
{
	smp_store_release(&q->ring->producer, idx); /* B, matches C */
}

static inline void xskq_prod_submit(struct xsk_queue *q)
{
	__xskq_prod_submit(q, q->cached_prod);
}

static inline void xskq_prod_submit_n(struct xsk_queue *q, u32 nb_entries)
{
	__xskq_prod_submit(q, q->ring->producer + nb_entries);
}

static inline bool xskq_prod_is_empty(struct xsk_queue *q)
{
	/* No barriers needed since data is not accessed */
	return READ_ONCE(q->ring->consumer) == READ_ONCE(q->ring->producer);
}

/* For both producers and consumers */

static inline u64 xskq_nb_invalid_descs(struct xsk_queue *q)
{
	return q ? q->invalid_descs : 0;
}

static inline u64 xskq_nb_queue_empty_descs(struct xsk_queue *q)
{
	return q ? q->queue_empty_descs : 0;
}

struct xsk_queue *xskq_create(u32 nentries, bool umem_queue);
void xskq_destroy(struct xsk_queue *q_ops);

#endif /* _LINUX_XSK_QUEUE_H */
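The (A)-(D) pairing documented above generalizes to any single-producer/single-consumer ring. A reduced, illustrative one-slot version (not part of this header) that keeps the same barrier placement, using free-running counters so that "equal" means empty and "one apart" means full:

struct example_ring {
	u32 producer;
	u32 consumer;
	u64 data;
};

static inline bool example_produce(struct example_ring *r, u64 val)
{
	/* A: plain load plus control dependency; orders the data store
	 * after this load, pairs with D.
	 */
	if (READ_ONCE(r->consumer) != r->producer)
		return false;			/* slot still in use */
	r->data = val;				/* write payload */
	smp_store_release(&r->producer, r->producer + 1); /* B, pairs with C */
	return true;
}

static inline bool example_consume(struct example_ring *r, u64 *val)
{
	/* C: acquire load, so the payload read below cannot be
	 * speculated ahead of observing the producer pointer.
	 */
	if (smp_load_acquire(&r->producer) == r->consumer)
		return false;			/* ring empty */
	*val = r->data;				/* read payload */
	smp_store_release(&r->consumer, r->consumer + 1); /* D, pairs with A */
	return true;
}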
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
 */

#include <linux/device.h>
#include <net/genetlink.h>
#include <net/sock.h>
#include "devl_internal.h"

struct devlink_info_req {
	struct sk_buff *msg;
	void (*version_cb)(const char *version_name,
			   enum devlink_info_version_type version_type,
			   void *version_cb_priv);
	void *version_cb_priv;
};

struct devlink_reload_combination {
	enum devlink_reload_action action;
	enum devlink_reload_limit limit;
};

static const struct devlink_reload_combination devlink_reload_invalid_combinations[] = {
	{
		/* can't reinitialize driver with no down time */
		.action = DEVLINK_RELOAD_ACTION_DRIVER_REINIT,
		.limit = DEVLINK_RELOAD_LIMIT_NO_RESET,
	},
};

static bool
devlink_reload_combination_is_invalid(enum devlink_reload_action action,
				      enum devlink_reload_limit limit)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(devlink_reload_invalid_combinations); i++)
		if (devlink_reload_invalid_combinations[i].action == action &&
		    devlink_reload_invalid_combinations[i].limit == limit)
			return true;
	return false;
}

static bool
devlink_reload_action_is_supported(struct devlink *devlink, enum devlink_reload_action action)
{
	return test_bit(action, &devlink->ops->reload_actions);
}

static bool
devlink_reload_limit_is_supported(struct devlink *devlink, enum devlink_reload_limit limit)
{
	return test_bit(limit, &devlink->ops->reload_limits);
}

static int devlink_reload_stat_put(struct sk_buff *msg,
				   enum devlink_reload_limit limit, u32 value)
{
	struct nlattr *reload_stats_entry;

	reload_stats_entry = nla_nest_start(msg, DEVLINK_ATTR_RELOAD_STATS_ENTRY);
	if (!reload_stats_entry)
		return -EMSGSIZE;

	if (nla_put_u8(msg, DEVLINK_ATTR_RELOAD_STATS_LIMIT, limit) ||
	    nla_put_u32(msg, DEVLINK_ATTR_RELOAD_STATS_VALUE, value))
		goto nla_put_failure;
	nla_nest_end(msg, reload_stats_entry);
	return 0;

nla_put_failure:
	nla_nest_cancel(msg, reload_stats_entry);
	return -EMSGSIZE;
}

static int
devlink_reload_stats_put(struct sk_buff *msg, struct devlink *devlink, bool is_remote)
{
	struct nlattr *reload_stats_attr, *act_info, *act_stats;
	int i, j, stat_idx;
	u32 value;

	if (!is_remote)
		reload_stats_attr = nla_nest_start(msg, DEVLINK_ATTR_RELOAD_STATS);
	else
		reload_stats_attr = nla_nest_start(msg, DEVLINK_ATTR_REMOTE_RELOAD_STATS);

	if (!reload_stats_attr)
		return -EMSGSIZE;

	for (i = 0; i <= DEVLINK_RELOAD_ACTION_MAX; i++) {
		if ((!is_remote &&
		     !devlink_reload_action_is_supported(devlink, i)) ||
		    i == DEVLINK_RELOAD_ACTION_UNSPEC)
			continue;
		act_info = nla_nest_start(msg, DEVLINK_ATTR_RELOAD_ACTION_INFO);
		if (!act_info)
			goto nla_put_failure;

		if (nla_put_u8(msg, DEVLINK_ATTR_RELOAD_ACTION, i))
			goto action_info_nest_cancel;
		act_stats = nla_nest_start(msg, DEVLINK_ATTR_RELOAD_ACTION_STATS);
		if (!act_stats)
			goto action_info_nest_cancel;

		for (j = 0; j <= DEVLINK_RELOAD_LIMIT_MAX; j++) {
			/* Remote stats are shown even if not locally supported.
			 * Stats of actions with unspecified limit are shown
			 * though drivers don't need to register unspecified
			 * limit.
			 */
			if ((!is_remote && j != DEVLINK_RELOAD_LIMIT_UNSPEC &&
			     !devlink_reload_limit_is_supported(devlink, j)) ||
			    devlink_reload_combination_is_invalid(i, j))
				continue;

			stat_idx = j * __DEVLINK_RELOAD_ACTION_MAX + i;
			if (!is_remote)
				value = devlink->stats.reload_stats[stat_idx];
			else
				value = devlink->stats.remote_reload_stats[stat_idx];
			if (devlink_reload_stat_put(msg, j, value))
				goto action_stats_nest_cancel;
		}
		nla_nest_end(msg, act_stats);
		nla_nest_end(msg, act_info);
	}

	nla_nest_end(msg, reload_stats_attr);
	return 0;

action_stats_nest_cancel:
	nla_nest_cancel(msg, act_stats);
action_info_nest_cancel:
	nla_nest_cancel(msg, act_info);
nla_put_failure:
	nla_nest_cancel(msg, reload_stats_attr);
	return -EMSGSIZE;
}

static int devlink_nl_nested_fill(struct sk_buff *msg, struct devlink *devlink)
{
	unsigned long rel_index;
	void *unused;
	int err;

	xa_for_each(&devlink->nested_rels, rel_index, unused) {
		err = devlink_rel_devlink_handle_put(msg, devlink,
						     rel_index,
						     DEVLINK_ATTR_NESTED_DEVLINK,
						     NULL);
		if (err)
			return err;
	}

	return 0;
}

static int devlink_nl_fill(struct sk_buff *msg, struct devlink *devlink,
			   enum devlink_command cmd, u32 portid,
			   u32 seq, int flags)
{
	struct nlattr *dev_stats;
	void *hdr;

	hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
	if (!hdr)
		return -EMSGSIZE;

	if (devlink_nl_put_handle(msg, devlink))
		goto nla_put_failure;
	if (nla_put_u8(msg, DEVLINK_ATTR_RELOAD_FAILED, devlink->reload_failed))
		goto nla_put_failure;

	dev_stats = nla_nest_start(msg, DEVLINK_ATTR_DEV_STATS);
	if (!dev_stats)
		goto nla_put_failure;

	if (devlink_reload_stats_put(msg, devlink, false))
		goto dev_stats_nest_cancel;
	if (devlink_reload_stats_put(msg, devlink, true))
		goto dev_stats_nest_cancel;

	nla_nest_end(msg, dev_stats);

	if (devlink_nl_nested_fill(msg, devlink))
		goto nla_put_failure;

	genlmsg_end(msg, hdr);
	return 0;

dev_stats_nest_cancel:
	nla_nest_cancel(msg, dev_stats);
nla_put_failure:
	genlmsg_cancel(msg, hdr);
	return -EMSGSIZE;
}

static void devlink_notify(struct devlink *devlink, enum devlink_command cmd)
{
	struct sk_buff *msg;
	int err;

	WARN_ON(cmd != DEVLINK_CMD_NEW && cmd != DEVLINK_CMD_DEL);
	WARN_ON(!devl_is_registered(devlink));

	if (!devlink_nl_notify_need(devlink))
		return;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return;

	err = devlink_nl_fill(msg, devlink, cmd, 0, 0, 0);
	if (err) {
		nlmsg_free(msg);
		return;
	}

	devlink_nl_notify_send(devlink, msg);
}

int devlink_nl_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct devlink *devlink = info->user_ptr[0];
	struct sk_buff *msg;
	int err;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	err = devlink_nl_fill(msg, devlink, DEVLINK_CMD_NEW,
			      info->snd_portid, info->snd_seq, 0);
	if (err) {
		nlmsg_free(msg);
		return err;
	}

	return genlmsg_reply(msg, info);
}

static int
devlink_nl_get_dump_one(struct sk_buff *msg, struct devlink *devlink,
			struct netlink_callback *cb, int flags)
{
	return devlink_nl_fill(msg, devlink, DEVLINK_CMD_NEW,
			       NETLINK_CB(cb->skb).portid,
			       cb->nlh->nlmsg_seq, flags);
}

int devlink_nl_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb)
{
	return devlink_nl_dumpit(msg, cb, devlink_nl_get_dump_one);
}

static void devlink_rel_notify_cb(struct devlink *devlink, u32 obj_index)
{
	devlink_notify(devlink, DEVLINK_CMD_NEW);
}

static void devlink_rel_cleanup_cb(struct devlink *devlink, u32 obj_index,
				   u32 rel_index)
{
	xa_erase(&devlink->nested_rels, rel_index);
}

int devl_nested_devlink_set(struct devlink *devlink,
			    struct devlink *nested_devlink)
{
	u32 rel_index;
	int err;
devlink_rel_nested_in_add(&rel_index, devlink->index, 0, devlink_rel_notify_cb, devlink_rel_cleanup_cb, nested_devlink); if (err) return err; return xa_insert(&devlink->nested_rels, rel_index, xa_mk_value(0), GFP_KERNEL); } EXPORT_SYMBOL_GPL(devl_nested_devlink_set); void devlink_notify_register(struct devlink *devlink) { devlink_notify(devlink, DEVLINK_CMD_NEW); devlink_linecards_notify_register(devlink); devlink_ports_notify_register(devlink); devlink_trap_policers_notify_register(devlink); devlink_trap_groups_notify_register(devlink); devlink_traps_notify_register(devlink); devlink_rates_notify_register(devlink); devlink_regions_notify_register(devlink); devlink_params_notify_register(devlink); } void devlink_notify_unregister(struct devlink *devlink) { devlink_params_notify_unregister(devlink); devlink_regions_notify_unregister(devlink); devlink_rates_notify_unregister(devlink); devlink_traps_notify_unregister(devlink); devlink_trap_groups_notify_unregister(devlink); devlink_trap_policers_notify_unregister(devlink); devlink_ports_notify_unregister(devlink); devlink_linecards_notify_unregister(devlink); devlink_notify(devlink, DEVLINK_CMD_DEL); } static void devlink_reload_failed_set(struct devlink *devlink, bool reload_failed) { if (devlink->reload_failed == reload_failed) return; devlink->reload_failed = reload_failed; devlink_notify(devlink, DEVLINK_CMD_NEW); } bool devlink_is_reload_failed(const struct devlink *devlink) { return devlink->reload_failed; } EXPORT_SYMBOL_GPL(devlink_is_reload_failed); static void __devlink_reload_stats_update(struct devlink *devlink, u32 *reload_stats, enum devlink_reload_limit limit, u32 actions_performed) { unsigned long actions = actions_performed; int stat_idx; int action; for_each_set_bit(action, &actions, __DEVLINK_RELOAD_ACTION_MAX) { stat_idx = limit * __DEVLINK_RELOAD_ACTION_MAX + action; reload_stats[stat_idx]++; } devlink_notify(devlink, DEVLINK_CMD_NEW); } static void devlink_reload_stats_update(struct devlink *devlink, enum devlink_reload_limit limit, u32 actions_performed) { __devlink_reload_stats_update(devlink, devlink->stats.reload_stats, limit, actions_performed); } /** * devlink_remote_reload_actions_performed - Update devlink on reload actions * performed which are not a direct result of devlink reload call. * * This should be called by a driver after performing reload actions in case it was not * a result of a devlink reload call. For example, fw_activate may have been performed as a result * of a devlink reload on another host that triggered fw_activate. * The motivation for this function is to keep track of the reload actions performed on this * device, whether or not they were the result of a direct devlink reload call.
* * @devlink: devlink * @limit: reload limit * @actions_performed: bitmask of actions performed */ void devlink_remote_reload_actions_performed(struct devlink *devlink, enum devlink_reload_limit limit, u32 actions_performed) { if (WARN_ON(!actions_performed || actions_performed & BIT(DEVLINK_RELOAD_ACTION_UNSPEC) || actions_performed >= BIT(__DEVLINK_RELOAD_ACTION_MAX) || limit > DEVLINK_RELOAD_LIMIT_MAX)) return; __devlink_reload_stats_update(devlink, devlink->stats.remote_reload_stats, limit, actions_performed); } EXPORT_SYMBOL_GPL(devlink_remote_reload_actions_performed); static struct net *devlink_netns_get(struct sk_buff *skb, struct genl_info *info) { struct nlattr *netns_pid_attr = info->attrs[DEVLINK_ATTR_NETNS_PID]; struct nlattr *netns_fd_attr = info->attrs[DEVLINK_ATTR_NETNS_FD]; struct nlattr *netns_id_attr = info->attrs[DEVLINK_ATTR_NETNS_ID]; struct net *net; if (!!netns_pid_attr + !!netns_fd_attr + !!netns_id_attr > 1) { NL_SET_ERR_MSG(info->extack, "multiple netns identifying attributes specified"); return ERR_PTR(-EINVAL); } if (netns_pid_attr) { net = get_net_ns_by_pid(nla_get_u32(netns_pid_attr)); } else if (netns_fd_attr) { net = get_net_ns_by_fd(nla_get_u32(netns_fd_attr)); } else if (netns_id_attr) { net = get_net_ns_by_id(sock_net(skb->sk), nla_get_u32(netns_id_attr)); if (!net) net = ERR_PTR(-EINVAL); } else { WARN_ON(1); net = ERR_PTR(-EINVAL); } if (IS_ERR(net)) { NL_SET_ERR_MSG(info->extack, "Unknown network namespace"); return ERR_PTR(-EINVAL); } if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) { put_net(net); return ERR_PTR(-EPERM); } return net; } static void devlink_reload_netns_change(struct devlink *devlink, struct net *curr_net, struct net *dest_net) { /* Userspace needs to be notified about devlink objects * removed from the original and entering the new network namespace. * The rest of the devlink objects are re-created during the * reload process, so the notifications are generated separately. */ devlink_notify_unregister(devlink); write_pnet(&devlink->_net, dest_net); devlink_notify_register(devlink); devlink_rel_nested_in_notify(devlink); } static void devlink_reload_reinit_sanity_check(struct devlink *devlink) { WARN_ON(!list_empty(&devlink->trap_policer_list)); WARN_ON(!list_empty(&devlink->trap_group_list)); WARN_ON(!list_empty(&devlink->trap_list)); WARN_ON(!list_empty(&devlink->dpipe_table_list)); WARN_ON(!list_empty(&devlink->sb_list)); WARN_ON(!list_empty(&devlink->rate_list)); WARN_ON(!list_empty(&devlink->linecard_list)); WARN_ON(!xa_empty(&devlink->ports)); } int devlink_reload(struct devlink *devlink, struct net *dest_net, enum devlink_reload_action action, enum devlink_reload_limit limit, u32 *actions_performed, struct netlink_ext_ack *extack) { u32 remote_reload_stats[DEVLINK_RELOAD_STATS_ARRAY_SIZE]; struct net *curr_net; int err; /* Make sure the reload operations are invoked with the device lock * held to allow drivers to trigger functionality that expects it * (e.g., PCI reset) and to close possible races between these * operations and probe/remove.
*/ device_lock_assert(devlink->dev); memcpy(remote_reload_stats, devlink->stats.remote_reload_stats, sizeof(remote_reload_stats)); err = devlink->ops->reload_down(devlink, !!dest_net, action, limit, extack); if (err) return err; curr_net = devlink_net(devlink); if (dest_net && !net_eq(dest_net, curr_net)) devlink_reload_netns_change(devlink, curr_net, dest_net); if (action == DEVLINK_RELOAD_ACTION_DRIVER_REINIT) { devlink_params_driverinit_load_new(devlink); devlink_reload_reinit_sanity_check(devlink); } err = devlink->ops->reload_up(devlink, action, limit, actions_performed, extack); devlink_reload_failed_set(devlink, !!err); if (err) return err; WARN_ON(!(*actions_performed & BIT(action))); /* Catch drivers that update the remote reload stats from within devlink reload */ WARN_ON(memcmp(remote_reload_stats, devlink->stats.remote_reload_stats, sizeof(remote_reload_stats))); devlink_reload_stats_update(devlink, limit, *actions_performed); return 0; } static int devlink_nl_reload_actions_performed_snd(struct devlink *devlink, u32 actions_performed, enum devlink_command cmd, struct genl_info *info) { struct sk_buff *msg; void *hdr; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq, &devlink_nl_family, 0, cmd); if (!hdr) goto free_msg; if (devlink_nl_put_handle(msg, devlink)) goto nla_put_failure; if (nla_put_bitfield32(msg, DEVLINK_ATTR_RELOAD_ACTIONS_PERFORMED, actions_performed, actions_performed)) goto nla_put_failure; genlmsg_end(msg, hdr); return genlmsg_reply(msg, info); nla_put_failure: genlmsg_cancel(msg, hdr); free_msg: nlmsg_free(msg); return -EMSGSIZE; } int devlink_nl_reload_doit(struct sk_buff *skb, struct genl_info *info) { struct devlink *devlink = info->user_ptr[0]; enum devlink_reload_action action; enum devlink_reload_limit limit; struct net *dest_net = NULL; u32 actions_performed; int err; err = devlink_resources_validate(devlink, NULL, info); if (err) { NL_SET_ERR_MSG(info->extack, "resources size validation failed"); return err; } action = nla_get_u8_default(info->attrs[DEVLINK_ATTR_RELOAD_ACTION], DEVLINK_RELOAD_ACTION_DRIVER_REINIT); if (!devlink_reload_action_is_supported(devlink, action)) { NL_SET_ERR_MSG(info->extack, "Requested reload action is not supported by the driver"); return -EOPNOTSUPP; } limit = DEVLINK_RELOAD_LIMIT_UNSPEC; if (info->attrs[DEVLINK_ATTR_RELOAD_LIMITS]) { struct nla_bitfield32 limits; u32 limits_selected; limits = nla_get_bitfield32(info->attrs[DEVLINK_ATTR_RELOAD_LIMITS]); limits_selected = limits.value & limits.selector; if (!limits_selected) { NL_SET_ERR_MSG(info->extack, "Invalid limit selected"); return -EINVAL; } for (limit = 0 ; limit <= DEVLINK_RELOAD_LIMIT_MAX ; limit++) if (limits_selected & BIT(limit)) break; /* UAPI enables multiselection, but currently it is not used */ if (limits_selected != BIT(limit)) { NL_SET_ERR_MSG(info->extack, "Multiselection of limit is not supported"); return -EOPNOTSUPP; } if (!devlink_reload_limit_is_supported(devlink, limit)) { NL_SET_ERR_MSG(info->extack, "Requested limit is not supported by the driver"); return -EOPNOTSUPP; } if (devlink_reload_combination_is_invalid(action, limit)) { NL_SET_ERR_MSG(info->extack, "Requested limit is invalid for this action"); return -EINVAL; } } if (info->attrs[DEVLINK_ATTR_NETNS_PID] || info->attrs[DEVLINK_ATTR_NETNS_FD] || info->attrs[DEVLINK_ATTR_NETNS_ID]) { dest_net = devlink_netns_get(skb, info); if (IS_ERR(dest_net)) return PTR_ERR(dest_net); if (!net_eq(dest_net, devlink_net(devlink)) &&
action != DEVLINK_RELOAD_ACTION_DRIVER_REINIT) { NL_SET_ERR_MSG_MOD(info->extack, "Changing namespace is only supported for reinit action"); return -EOPNOTSUPP; } } err = devlink_reload(devlink, dest_net, action, limit, &actions_performed, info->extack); if (dest_net) put_net(dest_net); if (err) return err; /* For backward compatibility generate reply only if attributes used by user */ if (!info->attrs[DEVLINK_ATTR_RELOAD_ACTION] && !info->attrs[DEVLINK_ATTR_RELOAD_LIMITS]) return 0; return devlink_nl_reload_actions_performed_snd(devlink, actions_performed, DEVLINK_CMD_RELOAD, info); } bool devlink_reload_actions_valid(const struct devlink_ops *ops) { const struct devlink_reload_combination *comb; int i; if (!devlink_reload_supported(ops)) { if (WARN_ON(ops->reload_actions)) return false; return true; } if (WARN_ON(!ops->reload_actions || ops->reload_actions & BIT(DEVLINK_RELOAD_ACTION_UNSPEC) || ops->reload_actions >= BIT(__DEVLINK_RELOAD_ACTION_MAX))) return false; if (WARN_ON(ops->reload_limits & BIT(DEVLINK_RELOAD_LIMIT_UNSPEC) || ops->reload_limits >= BIT(__DEVLINK_RELOAD_LIMIT_MAX))) return false; for (i = 0; i < ARRAY_SIZE(devlink_reload_invalid_combinations); i++) { comb = &devlink_reload_invalid_combinations[i]; if (ops->reload_actions == BIT(comb->action) && ops->reload_limits == BIT(comb->limit)) return false; } return true; } static int devlink_nl_eswitch_fill(struct sk_buff *msg, struct devlink *devlink, enum devlink_command cmd, u32 portid, u32 seq, int flags) { const struct devlink_ops *ops = devlink->ops; enum devlink_eswitch_encap_mode encap_mode; u8 inline_mode; void *hdr; int err = 0; u16 mode; hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd); if (!hdr) return -EMSGSIZE; err = devlink_nl_put_handle(msg, devlink); if (err) goto nla_put_failure; if (ops->eswitch_mode_get) { err = ops->eswitch_mode_get(devlink, &mode); if (err) goto nla_put_failure; err = nla_put_u16(msg, DEVLINK_ATTR_ESWITCH_MODE, mode); if (err) goto nla_put_failure; } if (ops->eswitch_inline_mode_get) { err = ops->eswitch_inline_mode_get(devlink, &inline_mode); if (err) goto nla_put_failure; err = nla_put_u8(msg, DEVLINK_ATTR_ESWITCH_INLINE_MODE, inline_mode); if (err) goto nla_put_failure; } if (ops->eswitch_encap_mode_get) { err = ops->eswitch_encap_mode_get(devlink, &encap_mode); if (err) goto nla_put_failure; err = nla_put_u8(msg, DEVLINK_ATTR_ESWITCH_ENCAP_MODE, encap_mode); if (err) goto nla_put_failure; } genlmsg_end(msg, hdr); return 0; nla_put_failure: genlmsg_cancel(msg, hdr); return err; } int devlink_nl_eswitch_get_doit(struct sk_buff *skb, struct genl_info *info) { struct devlink *devlink = info->user_ptr[0]; struct sk_buff *msg; int err; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; err = devlink_nl_eswitch_fill(msg, devlink, DEVLINK_CMD_ESWITCH_GET, info->snd_portid, info->snd_seq, 0); if (err) { nlmsg_free(msg); return err; } return genlmsg_reply(msg, info); } int devlink_nl_eswitch_set_doit(struct sk_buff *skb, struct genl_info *info) { struct devlink *devlink = info->user_ptr[0]; const struct devlink_ops *ops = devlink->ops; enum devlink_eswitch_encap_mode encap_mode; u8 inline_mode; int err = 0; u16 mode; if (info->attrs[DEVLINK_ATTR_ESWITCH_MODE]) { if (!ops->eswitch_mode_set) return -EOPNOTSUPP; mode = nla_get_u16(info->attrs[DEVLINK_ATTR_ESWITCH_MODE]); err = devlink_rate_nodes_check(devlink, mode, info->extack); if (err) return err; err = ops->eswitch_mode_set(devlink, mode, info->extack); if (err) return err; } if 
(info->attrs[DEVLINK_ATTR_ESWITCH_INLINE_MODE]) { if (!ops->eswitch_inline_mode_set) return -EOPNOTSUPP; inline_mode = nla_get_u8(info->attrs[DEVLINK_ATTR_ESWITCH_INLINE_MODE]); err = ops->eswitch_inline_mode_set(devlink, inline_mode, info->extack); if (err) return err; } if (info->attrs[DEVLINK_ATTR_ESWITCH_ENCAP_MODE]) { if (!ops->eswitch_encap_mode_set) return -EOPNOTSUPP; encap_mode = nla_get_u8(info->attrs[DEVLINK_ATTR_ESWITCH_ENCAP_MODE]); err = ops->eswitch_encap_mode_set(devlink, encap_mode, info->extack); if (err) return err; } return 0; } int devlink_info_serial_number_put(struct devlink_info_req *req, const char *sn) { if (!req->msg) return 0; return nla_put_string(req->msg, DEVLINK_ATTR_INFO_SERIAL_NUMBER, sn); } EXPORT_SYMBOL_GPL(devlink_info_serial_number_put); int devlink_info_board_serial_number_put(struct devlink_info_req *req, const char *bsn) { if (!req->msg) return 0; return nla_put_string(req->msg, DEVLINK_ATTR_INFO_BOARD_SERIAL_NUMBER, bsn); } EXPORT_SYMBOL_GPL(devlink_info_board_serial_number_put); static int devlink_info_version_put(struct devlink_info_req *req, int attr, const char *version_name, const char *version_value, enum devlink_info_version_type version_type) { struct nlattr *nest; int err; if (req->version_cb) req->version_cb(version_name, version_type, req->version_cb_priv); if (!req->msg || !*version_value) return 0; nest = nla_nest_start_noflag(req->msg, attr); if (!nest) return -EMSGSIZE; err = nla_put_string(req->msg, DEVLINK_ATTR_INFO_VERSION_NAME, version_name); if (err) goto nla_put_failure; err = nla_put_string(req->msg, DEVLINK_ATTR_INFO_VERSION_VALUE, version_value); if (err) goto nla_put_failure; nla_nest_end(req->msg, nest); return 0; nla_put_failure: nla_nest_cancel(req->msg, nest); return err; } int devlink_info_version_fixed_put(struct devlink_info_req *req, const char *version_name, const char *version_value) { return devlink_info_version_put(req, DEVLINK_ATTR_INFO_VERSION_FIXED, version_name, version_value, DEVLINK_INFO_VERSION_TYPE_NONE); } EXPORT_SYMBOL_GPL(devlink_info_version_fixed_put); int devlink_info_version_stored_put(struct devlink_info_req *req, const char *version_name, const char *version_value) { return devlink_info_version_put(req, DEVLINK_ATTR_INFO_VERSION_STORED, version_name, version_value, DEVLINK_INFO_VERSION_TYPE_NONE); } EXPORT_SYMBOL_GPL(devlink_info_version_stored_put); int devlink_info_version_stored_put_ext(struct devlink_info_req *req, const char *version_name, const char *version_value, enum devlink_info_version_type version_type) { return devlink_info_version_put(req, DEVLINK_ATTR_INFO_VERSION_STORED, version_name, version_value, version_type); } EXPORT_SYMBOL_GPL(devlink_info_version_stored_put_ext); int devlink_info_version_running_put(struct devlink_info_req *req, const char *version_name, const char *version_value) { return devlink_info_version_put(req, DEVLINK_ATTR_INFO_VERSION_RUNNING, version_name, version_value, DEVLINK_INFO_VERSION_TYPE_NONE); } EXPORT_SYMBOL_GPL(devlink_info_version_running_put); int devlink_info_version_running_put_ext(struct devlink_info_req *req, const char *version_name, const char *version_value, enum devlink_info_version_type version_type) { return devlink_info_version_put(req, DEVLINK_ATTR_INFO_VERSION_RUNNING, version_name, version_value, version_type); } EXPORT_SYMBOL_GPL(devlink_info_version_running_put_ext); static int devlink_nl_driver_info_get(struct device_driver *drv, struct devlink_info_req *req) { if (!drv) return 0; if (drv->name[0]) return nla_put_string(req->msg, 
DEVLINK_ATTR_INFO_DRIVER_NAME, drv->name); return 0; } static int devlink_nl_info_fill(struct sk_buff *msg, struct devlink *devlink, enum devlink_command cmd, u32 portid, u32 seq, int flags, struct netlink_ext_ack *extack) { struct device *dev = devlink_to_dev(devlink); struct devlink_info_req req = {}; void *hdr; int err; hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd); if (!hdr) return -EMSGSIZE; err = -EMSGSIZE; if (devlink_nl_put_handle(msg, devlink)) goto err_cancel_msg; req.msg = msg; if (devlink->ops->info_get) { err = devlink->ops->info_get(devlink, &req, extack); if (err) goto err_cancel_msg; } err = devlink_nl_driver_info_get(dev->driver, &req); if (err) goto err_cancel_msg; genlmsg_end(msg, hdr); return 0; err_cancel_msg: genlmsg_cancel(msg, hdr); return err; } int devlink_nl_info_get_doit(struct sk_buff *skb, struct genl_info *info) { struct devlink *devlink = info->user_ptr[0]; struct sk_buff *msg; int err; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; err = devlink_nl_info_fill(msg, devlink, DEVLINK_CMD_INFO_GET, info->snd_portid, info->snd_seq, 0, info->extack); if (err) { nlmsg_free(msg); return err; } return genlmsg_reply(msg, info); } static int devlink_nl_info_get_dump_one(struct sk_buff *msg, struct devlink *devlink, struct netlink_callback *cb, int flags) { int err; err = devlink_nl_info_fill(msg, devlink, DEVLINK_CMD_INFO_GET, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, flags, cb->extack); if (err == -EOPNOTSUPP) err = 0; return err; } int devlink_nl_info_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb) { return devlink_nl_dumpit(msg, cb, devlink_nl_info_get_dump_one); } static int devlink_nl_flash_update_fill(struct sk_buff *msg, struct devlink *devlink, enum devlink_command cmd, struct devlink_flash_notify *params) { void *hdr; hdr = genlmsg_put(msg, 0, 0, &devlink_nl_family, 0, cmd); if (!hdr) return -EMSGSIZE; if (devlink_nl_put_handle(msg, devlink)) goto nla_put_failure; if (cmd != DEVLINK_CMD_FLASH_UPDATE_STATUS) goto out; if (params->status_msg && nla_put_string(msg, DEVLINK_ATTR_FLASH_UPDATE_STATUS_MSG, params->status_msg)) goto nla_put_failure; if (params->component && nla_put_string(msg, DEVLINK_ATTR_FLASH_UPDATE_COMPONENT, params->component)) goto nla_put_failure; if (devlink_nl_put_u64(msg, DEVLINK_ATTR_FLASH_UPDATE_STATUS_DONE, params->done)) goto nla_put_failure; if (devlink_nl_put_u64(msg, DEVLINK_ATTR_FLASH_UPDATE_STATUS_TOTAL, params->total)) goto nla_put_failure; if (devlink_nl_put_u64(msg, DEVLINK_ATTR_FLASH_UPDATE_STATUS_TIMEOUT, params->timeout)) goto nla_put_failure; out: genlmsg_end(msg, hdr); return 0; nla_put_failure: genlmsg_cancel(msg, hdr); return -EMSGSIZE; } static void __devlink_flash_update_notify(struct devlink *devlink, enum devlink_command cmd, struct devlink_flash_notify *params) { struct sk_buff *msg; int err; WARN_ON(cmd != DEVLINK_CMD_FLASH_UPDATE && cmd != DEVLINK_CMD_FLASH_UPDATE_END && cmd != DEVLINK_CMD_FLASH_UPDATE_STATUS); if (!devl_is_registered(devlink) || !devlink_nl_notify_need(devlink)) return; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return; err = devlink_nl_flash_update_fill(msg, devlink, cmd, params); if (err) goto out_free_msg; devlink_nl_notify_send(devlink, msg); return; out_free_msg: nlmsg_free(msg); } static void devlink_flash_update_begin_notify(struct devlink *devlink) { struct devlink_flash_notify params = {}; __devlink_flash_update_notify(devlink, DEVLINK_CMD_FLASH_UPDATE, &params); } static void 
devlink_flash_update_end_notify(struct devlink *devlink) { struct devlink_flash_notify params = {}; __devlink_flash_update_notify(devlink, DEVLINK_CMD_FLASH_UPDATE_END, &params); } void devlink_flash_update_status_notify(struct devlink *devlink, const char *status_msg, const char *component, unsigned long done, unsigned long total) { struct devlink_flash_notify params = { .status_msg = status_msg, .component = component, .done = done, .total = total, }; __devlink_flash_update_notify(devlink, DEVLINK_CMD_FLASH_UPDATE_STATUS, &params); } EXPORT_SYMBOL_GPL(devlink_flash_update_status_notify); void devlink_flash_update_timeout_notify(struct devlink *devlink, const char *status_msg, const char *component, unsigned long timeout) { struct devlink_flash_notify params = { .status_msg = status_msg, .component = component, .timeout = timeout, }; __devlink_flash_update_notify(devlink, DEVLINK_CMD_FLASH_UPDATE_STATUS, &params); } EXPORT_SYMBOL_GPL(devlink_flash_update_timeout_notify); struct devlink_flash_component_lookup_ctx { const char *lookup_name; bool lookup_name_found; }; static void devlink_flash_component_lookup_cb(const char *version_name, enum devlink_info_version_type version_type, void *version_cb_priv) { struct devlink_flash_component_lookup_ctx *lookup_ctx = version_cb_priv; if (version_type != DEVLINK_INFO_VERSION_TYPE_COMPONENT || lookup_ctx->lookup_name_found) return; lookup_ctx->lookup_name_found = !strcmp(lookup_ctx->lookup_name, version_name); } static int devlink_flash_component_get(struct devlink *devlink, struct nlattr *nla_component, const char **p_component, struct netlink_ext_ack *extack) { struct devlink_flash_component_lookup_ctx lookup_ctx = {}; struct devlink_info_req req = {}; const char *component; int ret; if (!nla_component) return 0; component = nla_data(nla_component); if (!devlink->ops->info_get) { NL_SET_ERR_MSG_ATTR(extack, nla_component, "component update is not supported by this device"); return -EOPNOTSUPP; } lookup_ctx.lookup_name = component; req.version_cb = devlink_flash_component_lookup_cb; req.version_cb_priv = &lookup_ctx; ret = devlink->ops->info_get(devlink, &req, NULL); if (ret) return ret; if (!lookup_ctx.lookup_name_found) { NL_SET_ERR_MSG_ATTR(extack, nla_component, "selected component is not supported by this device"); return -EINVAL; } *p_component = component; return 0; } int devlink_nl_flash_update_doit(struct sk_buff *skb, struct genl_info *info) { struct nlattr *nla_overwrite_mask, *nla_file_name; struct devlink_flash_update_params params = {}; struct devlink *devlink = info->user_ptr[0]; const char *file_name; u32 supported_params; int ret; if (!devlink->ops->flash_update) return -EOPNOTSUPP; if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_FLASH_UPDATE_FILE_NAME)) return -EINVAL; ret = devlink_flash_component_get(devlink, info->attrs[DEVLINK_ATTR_FLASH_UPDATE_COMPONENT], &params.component, info->extack); if (ret) return ret; supported_params = devlink->ops->supported_flash_update_params; nla_overwrite_mask = info->attrs[DEVLINK_ATTR_FLASH_UPDATE_OVERWRITE_MASK]; if (nla_overwrite_mask) { struct nla_bitfield32 sections; if (!(supported_params & DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK)) { NL_SET_ERR_MSG_ATTR(info->extack, nla_overwrite_mask, "overwrite settings are not supported by this device"); return -EOPNOTSUPP; } sections = nla_get_bitfield32(nla_overwrite_mask); params.overwrite_mask = sections.value & sections.selector; } nla_file_name = info->attrs[DEVLINK_ATTR_FLASH_UPDATE_FILE_NAME]; file_name = nla_data(nla_file_name); ret = 
request_firmware(&params.fw, file_name, devlink->dev); if (ret) { NL_SET_ERR_MSG_ATTR(info->extack, nla_file_name, "failed to locate the requested firmware file"); return ret; } devlink_flash_update_begin_notify(devlink); ret = devlink->ops->flash_update(devlink, &params, info->extack); devlink_flash_update_end_notify(devlink); release_firmware(params.fw); return ret; } static void __devlink_compat_running_version(struct devlink *devlink, char *buf, size_t len) { struct devlink_info_req req = {}; const struct nlattr *nlattr; struct sk_buff *msg; int rem, err; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return; req.msg = msg; err = devlink->ops->info_get(devlink, &req, NULL); if (err) goto free_msg; nla_for_each_attr_type(nlattr, DEVLINK_ATTR_INFO_VERSION_RUNNING, (void *)msg->data, msg->len, rem) { const struct nlattr *kv; int rem_kv; nla_for_each_nested_type(kv, DEVLINK_ATTR_INFO_VERSION_VALUE, nlattr, rem_kv) { strlcat(buf, nla_data(kv), len); strlcat(buf, " ", len); } } free_msg: nlmsg_consume(msg); } void devlink_compat_running_version(struct devlink *devlink, char *buf, size_t len) { if (!devlink->ops->info_get) return; devl_lock(devlink); if (devl_is_registered(devlink)) __devlink_compat_running_version(devlink, buf, len); devl_unlock(devlink); } int devlink_compat_flash_update(struct devlink *devlink, const char *file_name) { struct devlink_flash_update_params params = {}; int ret; devl_lock(devlink); if (!devl_is_registered(devlink)) { ret = -ENODEV; goto out_unlock; } if (!devlink->ops->flash_update) { ret = -EOPNOTSUPP; goto out_unlock; } ret = request_firmware(&params.fw, file_name, devlink->dev); if (ret) goto out_unlock; devlink_flash_update_begin_notify(devlink); ret = devlink->ops->flash_update(devlink, &params, NULL); devlink_flash_update_end_notify(devlink); release_firmware(params.fw); out_unlock: devl_unlock(devlink); return ret; } static int devlink_nl_selftests_fill(struct sk_buff *msg, struct devlink *devlink, u32 portid, u32 seq, int flags, struct netlink_ext_ack *extack) { struct nlattr *selftests; void *hdr; int err; int i; hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, DEVLINK_CMD_SELFTESTS_GET); if (!hdr) return -EMSGSIZE; err = -EMSGSIZE; if (devlink_nl_put_handle(msg, devlink)) goto err_cancel_msg; selftests = nla_nest_start(msg, DEVLINK_ATTR_SELFTESTS); if (!selftests) goto err_cancel_msg; for (i = DEVLINK_ATTR_SELFTEST_ID_UNSPEC + 1; i <= DEVLINK_ATTR_SELFTEST_ID_MAX; i++) { if (devlink->ops->selftest_check(devlink, i, extack)) { err = nla_put_flag(msg, i); if (err) goto err_cancel_msg; } } nla_nest_end(msg, selftests); genlmsg_end(msg, hdr); return 0; err_cancel_msg: genlmsg_cancel(msg, hdr); return err; } int devlink_nl_selftests_get_doit(struct sk_buff *skb, struct genl_info *info) { struct devlink *devlink = info->user_ptr[0]; struct sk_buff *msg; int err; if (!devlink->ops->selftest_check) return -EOPNOTSUPP; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; err = devlink_nl_selftests_fill(msg, devlink, info->snd_portid, info->snd_seq, 0, info->extack); if (err) { nlmsg_free(msg); return err; } return genlmsg_reply(msg, info); } static int devlink_nl_selftests_get_dump_one(struct sk_buff *msg, struct devlink *devlink, struct netlink_callback *cb, int flags) { if (!devlink->ops->selftest_check) return 0; return devlink_nl_selftests_fill(msg, devlink, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, flags, cb->extack); } int devlink_nl_selftests_get_dumpit(struct sk_buff *skb, struct netlink_callback 
*cb) { return devlink_nl_dumpit(skb, cb, devlink_nl_selftests_get_dump_one); } static int devlink_selftest_result_put(struct sk_buff *skb, unsigned int id, enum devlink_selftest_status test_status) { struct nlattr *result_attr; result_attr = nla_nest_start(skb, DEVLINK_ATTR_SELFTEST_RESULT); if (!result_attr) return -EMSGSIZE; if (nla_put_u32(skb, DEVLINK_ATTR_SELFTEST_RESULT_ID, id) || nla_put_u8(skb, DEVLINK_ATTR_SELFTEST_RESULT_STATUS, test_status)) goto nla_put_failure; nla_nest_end(skb, result_attr); return 0; nla_put_failure: nla_nest_cancel(skb, result_attr); return -EMSGSIZE; } static const struct nla_policy devlink_selftest_nl_policy[DEVLINK_ATTR_SELFTEST_ID_MAX + 1] = { [DEVLINK_ATTR_SELFTEST_ID_FLASH] = { .type = NLA_FLAG }, }; int devlink_nl_selftests_run_doit(struct sk_buff *skb, struct genl_info *info) { struct nlattr *tb[DEVLINK_ATTR_SELFTEST_ID_MAX + 1]; struct devlink *devlink = info->user_ptr[0]; struct nlattr *attrs, *selftests; struct sk_buff *msg; void *hdr; int err; int i; if (!devlink->ops->selftest_run || !devlink->ops->selftest_check) return -EOPNOTSUPP; if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_SELFTESTS)) return -EINVAL; attrs = info->attrs[DEVLINK_ATTR_SELFTESTS]; err = nla_parse_nested(tb, DEVLINK_ATTR_SELFTEST_ID_MAX, attrs, devlink_selftest_nl_policy, info->extack); if (err < 0) return err; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; err = -EMSGSIZE; hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq, &devlink_nl_family, 0, DEVLINK_CMD_SELFTESTS_RUN); if (!hdr) goto free_msg; if (devlink_nl_put_handle(msg, devlink)) goto genlmsg_cancel; selftests = nla_nest_start(msg, DEVLINK_ATTR_SELFTESTS); if (!selftests) goto genlmsg_cancel; for (i = DEVLINK_ATTR_SELFTEST_ID_UNSPEC + 1; i <= DEVLINK_ATTR_SELFTEST_ID_MAX; i++) { enum devlink_selftest_status test_status; if (nla_get_flag(tb[i])) { if (!devlink->ops->selftest_check(devlink, i, info->extack)) { if (devlink_selftest_result_put(msg, i, DEVLINK_SELFTEST_STATUS_SKIP)) goto selftests_nest_cancel; continue; } test_status = devlink->ops->selftest_run(devlink, i, info->extack); if (devlink_selftest_result_put(msg, i, test_status)) goto selftests_nest_cancel; } } nla_nest_end(msg, selftests); genlmsg_end(msg, hdr); return genlmsg_reply(msg, info); selftests_nest_cancel: nla_nest_cancel(msg, selftests); genlmsg_cancel: genlmsg_cancel(msg, hdr); free_msg: nlmsg_free(msg); return err; }
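/*
 * Illustrative sketch, not part of the original file: a minimal driver-side
 * wiring of the reload hooks consumed by devlink_reload() above. The
 * "example_*" names are hypothetical; only the devlink_ops fields and the
 * callback signatures are real. The action/limit masks deliberately avoid
 * DEVLINK_RELOAD_ACTION_UNSPEC and the DRIVER_REINIT + NO_RESET pair that
 * devlink_reload_combination_is_invalid() rejects.
 */
static int example_reload_down(struct devlink *devlink, bool netns_change,
			       enum devlink_reload_action action,
			       enum devlink_reload_limit limit,
			       struct netlink_ext_ack *extack)
{
	/* Quiesce the device; devlink_reload() holds the device lock here. */
	return 0;
}

static int example_reload_up(struct devlink *devlink,
			     enum devlink_reload_action action,
			     enum devlink_reload_limit limit,
			     u32 *actions_performed,
			     struct netlink_ext_ack *extack)
{
	/* Report what was actually done; devlink_reload() WARNs if the
	 * requested action bit is missing from this mask.
	 */
	*actions_performed = BIT(action);
	return 0;
}

static const struct devlink_ops example_reload_ops = {
	.reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
			  BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE),
	.reload_limits = BIT(DEVLINK_RELOAD_LIMIT_NO_RESET),
	.reload_down = example_reload_down,
	.reload_up = example_reload_up,
};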
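/*
 * Illustrative sketch, not part of the original file: a minimal ->info_get()
 * built on the devlink_info_* helpers exported above. The serial number and
 * version strings are invented; DEVLINK_INFO_VERSION_GENERIC_FW and
 * DEVLINK_INFO_VERSION_GENERIC_BOARD_ID are real generic version names.
 * Note req->msg may be NULL when the core only walks versions for the flash
 * component lookup, which is why the helpers above check it.
 */
static int example_info_get(struct devlink *devlink,
			    struct devlink_info_req *req,
			    struct netlink_ext_ack *extack)
{
	int err;

	err = devlink_info_serial_number_put(req, "EX0123456789");
	if (err)
		return err;
	err = devlink_info_version_fixed_put(req,
					     DEVLINK_INFO_VERSION_GENERIC_BOARD_ID,
					     "EXAMPLE-BOARD");
	if (err)
		return err;
	/* Stored and running may diverge after a flash that only takes
	 * effect on the next fw_activate reload.
	 */
	err = devlink_info_version_stored_put(req,
					      DEVLINK_INFO_VERSION_GENERIC_FW,
					      "1.2.3");
	if (err)
		return err;
	return devlink_info_version_running_put(req,
						DEVLINK_INFO_VERSION_GENERIC_FW,
						"1.2.2");
}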
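/*
 * Illustrative sketch, not part of the original file: how a ->flash_update()
 * implementation typically drives the status notifiers exported above. The
 * body is a placeholder; the begin/end notifications are sent by
 * devlink_nl_flash_update_doit() itself, so the driver only reports progress.
 */
static int example_flash_update(struct devlink *devlink,
				struct devlink_flash_update_params *params,
				struct netlink_ext_ack *extack)
{
	unsigned long total = params->fw->size;

	devlink_flash_update_status_notify(devlink, "Flashing",
					   params->component, 0, total);
	/* ... write params->fw->data to the device here ... */
	devlink_flash_update_status_notify(devlink, "Flashing done",
					   params->component, total, total);
	return 0;
}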
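/*
 * Illustrative sketch, not part of the original file: the selftest callback
 * pair consumed by devlink_nl_selftests_get/run above. Only the flash
 * selftest ID exists in the current netlink policy; the "run" body here is
 * a made-up placeholder.
 */
static bool example_selftest_check(struct devlink *devlink, unsigned int id,
				   struct netlink_ext_ack *extack)
{
	return id == DEVLINK_ATTR_SELFTEST_ID_FLASH;
}

static enum devlink_selftest_status
example_selftest_run(struct devlink *devlink, unsigned int id,
		     struct netlink_ext_ack *extack)
{
	if (id != DEVLINK_ATTR_SELFTEST_ID_FLASH)
		return DEVLINK_SELFTEST_STATUS_SKIP;
	/* ... exercise the device flash here ... */
	return DEVLINK_SELFTEST_STATUS_PASS;
}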
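/*
 * Illustrative sketch, not part of the original file: the reload statistics
 * read back by devlink_reload_stats_put() live in a flat array indexed by
 * (limit, action) pairs, as the stat_idx arithmetic above shows. This
 * hypothetical accessor just restates that layout.
 */
static u32 example_reload_stat_read(struct devlink *devlink,
				    enum devlink_reload_limit limit,
				    enum devlink_reload_action action)
{
	return devlink->stats.reload_stats[limit * __DEVLINK_RELOAD_ACTION_MAX +
					   action];
}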
// SPDX-License-Identifier: GPL-2.0-or-later /* * USB IBM C-It Video Camera driver * * Supports Xirlink C-It Video Camera, IBM PC Camera, * IBM NetCamera and Veo Stingray. * * Copyright (C) 2010 Hans de Goede <hdegoede@redhat.com> * * This driver is based on earlier work of: * * (C) Copyright 1999 Johannes Erdfelt * (C) Copyright 1999 Randy Dunlap */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define MODULE_NAME "xirlink-cit" #include <linux/input.h> #include "gspca.h" MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>"); MODULE_DESCRIPTION("Xirlink C-IT"); MODULE_LICENSE("GPL"); /* FIXME we should autodetect this */ static int ibm_netcam_pro; module_param(ibm_netcam_pro, int, 0); MODULE_PARM_DESC(ibm_netcam_pro, "Use IBM Netcamera Pro init sequences for Model 3 cams"); /* FIXME this should be handled through the V4L2 input selection API */ static int rca_input; module_param(rca_input, int, 0644); MODULE_PARM_DESC(rca_input, "Use rca input instead of ccd sensor on Model 3 cams"); /* specific webcam descriptor */ struct sd { struct gspca_dev gspca_dev; /* !! must be the first item */ struct v4l2_ctrl *lighting; u8 model; #define CIT_MODEL0 0 /* bcd version 0.01 cams ie the xvp-500 */ #define CIT_MODEL1 1 /* The model 1 - 4 nomenclature comes from the old */ #define CIT_MODEL2 2 /* ibmcam driver */ #define CIT_MODEL3 3 #define CIT_MODEL4 4 #define CIT_IBM_NETCAM_PRO 5 u8 input_index; u8 button_state; u8 stop_on_control_change; u8 sof_read; u8 sof_len; }; static void sd_stop0(struct gspca_dev *gspca_dev); static const struct v4l2_pix_format cif_yuv_mode[] = { {176, 144, V4L2_PIX_FMT_CIT_YYVYUY, V4L2_FIELD_NONE, .bytesperline = 176, .sizeimage = 176 * 144 * 3 / 2 + 4, .colorspace = V4L2_COLORSPACE_SRGB}, {352, 288, V4L2_PIX_FMT_CIT_YYVYUY, V4L2_FIELD_NONE, .bytesperline = 352, .sizeimage = 352 * 288 * 3 / 2 + 4, .colorspace = V4L2_COLORSPACE_SRGB}, }; static const struct v4l2_pix_format vga_yuv_mode[] = { {160, 120, V4L2_PIX_FMT_CIT_YYVYUY, V4L2_FIELD_NONE, .bytesperline = 160, .sizeimage = 160 * 120 * 3 / 2 + 4, .colorspace = V4L2_COLORSPACE_SRGB}, {320, 240, V4L2_PIX_FMT_CIT_YYVYUY, V4L2_FIELD_NONE, .bytesperline = 320, .sizeimage = 320 * 240 * 3 / 2 + 4, .colorspace = V4L2_COLORSPACE_SRGB}, {640, 480, V4L2_PIX_FMT_CIT_YYVYUY, V4L2_FIELD_NONE, .bytesperline = 640, .sizeimage = 640 * 480 * 3 / 2 + 4, .colorspace = V4L2_COLORSPACE_SRGB}, }; static const struct v4l2_pix_format model0_mode[] = { {160, 120, V4L2_PIX_FMT_CIT_YYVYUY, V4L2_FIELD_NONE, .bytesperline = 160, .sizeimage = 160 * 120 * 3 / 2 + 4, .colorspace = V4L2_COLORSPACE_SRGB}, {176, 144, V4L2_PIX_FMT_CIT_YYVYUY, V4L2_FIELD_NONE, .bytesperline = 176, .sizeimage = 176 * 144 * 3 / 2 + 4, .colorspace = V4L2_COLORSPACE_SRGB}, {320, 240, V4L2_PIX_FMT_CIT_YYVYUY, V4L2_FIELD_NONE, .bytesperline = 320, .sizeimage = 320 * 240 * 3 / 2 + 4, .colorspace = V4L2_COLORSPACE_SRGB}, }; static const struct v4l2_pix_format model2_mode[] = { {160, 120, V4L2_PIX_FMT_CIT_YYVYUY, V4L2_FIELD_NONE, .bytesperline = 160, .sizeimage = 160 * 120 * 3 /
2 + 4, .colorspace = V4L2_COLORSPACE_SRGB}, {176, 144, V4L2_PIX_FMT_CIT_YYVYUY, V4L2_FIELD_NONE, .bytesperline = 176, .sizeimage = 176 * 144 * 3 / 2 + 4, .colorspace = V4L2_COLORSPACE_SRGB}, {320, 240, V4L2_PIX_FMT_SGRBG8, V4L2_FIELD_NONE, .bytesperline = 320, .sizeimage = 320 * 240 + 4, .colorspace = V4L2_COLORSPACE_SRGB}, {352, 288, V4L2_PIX_FMT_SGRBG8, V4L2_FIELD_NONE, .bytesperline = 352, .sizeimage = 352 * 288 + 4, .colorspace = V4L2_COLORSPACE_SRGB}, }; /* * 01.01.08 - Added for RCA video in support -LO * This struct is used to init the Model3 cam to use the RCA video in port * instead of the CCD sensor. */ static const u16 rca_initdata[][3] = { {0, 0x0000, 0x010c}, {0, 0x0006, 0x012c}, {0, 0x0078, 0x012d}, {0, 0x0046, 0x012f}, {0, 0xd141, 0x0124}, {0, 0x0000, 0x0127}, {0, 0xfea8, 0x0124}, {1, 0x0000, 0x0116}, {0, 0x0064, 0x0116}, {1, 0x0000, 0x0115}, {0, 0x0003, 0x0115}, {0, 0x0008, 0x0123}, {0, 0x0000, 0x0117}, {0, 0x0000, 0x0112}, {0, 0x0080, 0x0100}, {0, 0x0000, 0x0100}, {1, 0x0000, 0x0116}, {0, 0x0060, 0x0116}, {0, 0x0002, 0x0112}, {0, 0x0000, 0x0123}, {0, 0x0001, 0x0117}, {0, 0x0040, 0x0108}, {0, 0x0019, 0x012c}, {0, 0x0040, 0x0116}, {0, 0x000a, 0x0115}, {0, 0x000b, 0x0115}, {0, 0x0078, 0x012d}, {0, 0x0046, 0x012f}, {0, 0xd141, 0x0124}, {0, 0x0000, 0x0127}, {0, 0xfea8, 0x0124}, {0, 0x0064, 0x0116}, {0, 0x0000, 0x0115}, {0, 0x0001, 0x0115}, {0, 0xffff, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x00aa, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0000, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xffff, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x00f2, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x000f, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xffff, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x00f8, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x00fc, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xffff, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x00f9, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x003c, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xffff, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0027, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0019, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0037, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0000, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0021, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0038, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0006, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0045, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0037, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0001, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x002a, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 
0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0038, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0000, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x000e, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0037, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0001, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x002b, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0038, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0001, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x00f4, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0037, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0001, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x002c, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0038, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0001, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0004, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0037, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0001, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x002d, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0038, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0000, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0014, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0037, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0001, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x002e, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0038, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0003, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0000, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0037, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0001, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x002f, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0038, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0003, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0014, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0037, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0001, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0040, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 
0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0038, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0000, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0040, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0037, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0001, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0053, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0038, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0000, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0038, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0x0000, 0x0101}, {0, 0x00a0, 0x0103}, {0, 0x0078, 0x0105}, {0, 0x0000, 0x010a}, {0, 0x0024, 0x010b}, {0, 0x0028, 0x0119}, {0, 0x0088, 0x011b}, {0, 0x0002, 0x011d}, {0, 0x0003, 0x011e}, {0, 0x0000, 0x0129}, {0, 0x00fc, 0x012b}, {0, 0x0008, 0x0102}, {0, 0x0000, 0x0104}, {0, 0x0008, 0x011a}, {0, 0x0028, 0x011c}, {0, 0x0021, 0x012a}, {0, 0x0000, 0x0118}, {0, 0x0000, 0x0132}, {0, 0x0000, 0x0109}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0037, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0001, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0031, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0038, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0000, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0000, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0037, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0001, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0040, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0038, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0000, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0040, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0037, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0000, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x00dc, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0038, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0000, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0000, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0037, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0001, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0032, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0038, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0001, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0020, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 
0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0037, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0001, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0040, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0038, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0000, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0040, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0037, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0000, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0030, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0038, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0008, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0000, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0x0003, 0x0111}, }; /* TESTME the old ibmcam driver repeats certain commands to Model1 cameras, we do the same for now (testing needed to see if this is really necessary) */ static const int cit_model1_ntries = 5; static const int cit_model1_ntries2 = 2; static int cit_write_reg(struct gspca_dev *gspca_dev, u16 value, u16 index) { struct usb_device *udev = gspca_dev->dev; int err; err = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x00, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT, value, index, NULL, 0, 1000); if (err < 0) pr_err("Failed to write a register (index 0x%04X, value 0x%02X, error %d)\n", index, value, err); return 0; } static int cit_read_reg(struct gspca_dev *gspca_dev, u16 index, int verbose) { struct usb_device *udev = gspca_dev->dev; __u8 *buf = gspca_dev->usb_buf; int res; res = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0x01, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT, 0x00, index, buf, 8, 1000); if (res < 0) { pr_err("Failed to read a register (index 0x%04X, error %d)\n", index, res); return res; } if (verbose) gspca_dbg(gspca_dev, D_PROBE, "Register %04x value: %02x\n", index, buf[0]); return 0; } /* * cit_send_FF_04_02() * * This procedure sends magic 3-command prefix to the camera. * The purpose of this prefix is not known. * * History: * 1/2/00 Created. 
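* * Editor's note (derived from the code below, not from any spec): in
* register terms the prefix is the three writes 0x00FF -> 0x0127,
* 0x0004 -> 0x0124 and 0x0002 -> 0x0124; it is also used to close
* Packet_Format1 command sequences, see cit_Packet_Format1() and the
* model 1 contrast code in cit_set_contrast().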
*/ static void cit_send_FF_04_02(struct gspca_dev *gspca_dev) { cit_write_reg(gspca_dev, 0x00FF, 0x0127); cit_write_reg(gspca_dev, 0x0004, 0x0124); cit_write_reg(gspca_dev, 0x0002, 0x0124); } static void cit_send_00_04_06(struct gspca_dev *gspca_dev) { cit_write_reg(gspca_dev, 0x0000, 0x0127); cit_write_reg(gspca_dev, 0x0004, 0x0124); cit_write_reg(gspca_dev, 0x0006, 0x0124); } static void cit_send_x_00(struct gspca_dev *gspca_dev, unsigned short x) { cit_write_reg(gspca_dev, x, 0x0127); cit_write_reg(gspca_dev, 0x0000, 0x0124); } static void cit_send_x_00_05(struct gspca_dev *gspca_dev, unsigned short x) { cit_send_x_00(gspca_dev, x); cit_write_reg(gspca_dev, 0x0005, 0x0124); } static void cit_send_x_00_05_02(struct gspca_dev *gspca_dev, unsigned short x) { cit_write_reg(gspca_dev, x, 0x0127); cit_write_reg(gspca_dev, 0x0000, 0x0124); cit_write_reg(gspca_dev, 0x0005, 0x0124); cit_write_reg(gspca_dev, 0x0002, 0x0124); } static void cit_send_x_01_00_05(struct gspca_dev *gspca_dev, u16 x) { cit_write_reg(gspca_dev, x, 0x0127); cit_write_reg(gspca_dev, 0x0001, 0x0124); cit_write_reg(gspca_dev, 0x0000, 0x0124); cit_write_reg(gspca_dev, 0x0005, 0x0124); } static void cit_send_x_00_05_02_01(struct gspca_dev *gspca_dev, u16 x) { cit_write_reg(gspca_dev, x, 0x0127); cit_write_reg(gspca_dev, 0x0000, 0x0124); cit_write_reg(gspca_dev, 0x0005, 0x0124); cit_write_reg(gspca_dev, 0x0002, 0x0124); cit_write_reg(gspca_dev, 0x0001, 0x0124); } static void cit_send_x_00_05_02_08_01(struct gspca_dev *gspca_dev, u16 x) { cit_write_reg(gspca_dev, x, 0x0127); cit_write_reg(gspca_dev, 0x0000, 0x0124); cit_write_reg(gspca_dev, 0x0005, 0x0124); cit_write_reg(gspca_dev, 0x0002, 0x0124); cit_write_reg(gspca_dev, 0x0008, 0x0124); cit_write_reg(gspca_dev, 0x0001, 0x0124); } static void cit_Packet_Format1(struct gspca_dev *gspca_dev, u16 fkey, u16 val) { cit_send_x_01_00_05(gspca_dev, 0x0088); cit_send_x_00_05(gspca_dev, fkey); cit_send_x_00_05_02_08_01(gspca_dev, val); cit_send_x_00_05(gspca_dev, 0x0088); cit_send_x_00_05_02_01(gspca_dev, fkey); cit_send_x_00_05(gspca_dev, 0x0089); cit_send_x_00(gspca_dev, fkey); cit_send_00_04_06(gspca_dev); cit_read_reg(gspca_dev, 0x0126, 0); cit_send_FF_04_02(gspca_dev); } static void cit_PacketFormat2(struct gspca_dev *gspca_dev, u16 fkey, u16 val) { cit_send_x_01_00_05(gspca_dev, 0x0088); cit_send_x_00_05(gspca_dev, fkey); cit_send_x_00_05_02(gspca_dev, val); } static void cit_model2_Packet2(struct gspca_dev *gspca_dev) { cit_write_reg(gspca_dev, 0x00ff, 0x012d); cit_write_reg(gspca_dev, 0xfea3, 0x0124); } static void cit_model2_Packet1(struct gspca_dev *gspca_dev, u16 v1, u16 v2) { cit_write_reg(gspca_dev, 0x00aa, 0x012d); cit_write_reg(gspca_dev, 0x00ff, 0x012e); cit_write_reg(gspca_dev, v1, 0x012f); cit_write_reg(gspca_dev, 0x00ff, 0x0130); cit_write_reg(gspca_dev, 0xc719, 0x0124); cit_write_reg(gspca_dev, v2, 0x0127); cit_model2_Packet2(gspca_dev); } /* * cit_model3_Packet1() * * 00_0078_012d * 00_0097_012f * 00_d141_0124 * 00_0096_0127 * 00_fea8_0124 */ static void cit_model3_Packet1(struct gspca_dev *gspca_dev, u16 v1, u16 v2) { cit_write_reg(gspca_dev, 0x0078, 0x012d); cit_write_reg(gspca_dev, v1, 0x012f); cit_write_reg(gspca_dev, 0xd141, 0x0124); cit_write_reg(gspca_dev, v2, 0x0127); cit_write_reg(gspca_dev, 0xfea8, 0x0124); } static void cit_model4_Packet1(struct gspca_dev *gspca_dev, u16 v1, u16 v2) { cit_write_reg(gspca_dev, 0x00aa, 0x012d); cit_write_reg(gspca_dev, v1, 0x012f); cit_write_reg(gspca_dev, 0xd141, 0x0124); cit_write_reg(gspca_dev, v2, 0x0127); 
cit_write_reg(gspca_dev, 0xfea8, 0x0124); } static void cit_model4_BrightnessPacket(struct gspca_dev *gspca_dev, u16 val) { cit_write_reg(gspca_dev, 0x00aa, 0x012d); cit_write_reg(gspca_dev, 0x0026, 0x012f); cit_write_reg(gspca_dev, 0xd141, 0x0124); cit_write_reg(gspca_dev, val, 0x0127); cit_write_reg(gspca_dev, 0x00aa, 0x0130); cit_write_reg(gspca_dev, 0x82a8, 0x0124); cit_write_reg(gspca_dev, 0x0038, 0x012d); cit_write_reg(gspca_dev, 0x0004, 0x012f); cit_write_reg(gspca_dev, 0xd145, 0x0124); cit_write_reg(gspca_dev, 0xfffa, 0x0124); } /* this function is called at probe time */ static int sd_config(struct gspca_dev *gspca_dev, const struct usb_device_id *id) { struct sd *sd = (struct sd *) gspca_dev; struct cam *cam; sd->model = id->driver_info; if (sd->model == CIT_MODEL3 && ibm_netcam_pro) sd->model = CIT_IBM_NETCAM_PRO; cam = &gspca_dev->cam; switch (sd->model) { case CIT_MODEL0: cam->cam_mode = model0_mode; cam->nmodes = ARRAY_SIZE(model0_mode); sd->sof_len = 4; break; case CIT_MODEL1: cam->cam_mode = cif_yuv_mode; cam->nmodes = ARRAY_SIZE(cif_yuv_mode); sd->sof_len = 4; break; case CIT_MODEL2: cam->cam_mode = model2_mode + 1; /* no 160x120 */ cam->nmodes = 3; break; case CIT_MODEL3: cam->cam_mode = vga_yuv_mode; cam->nmodes = ARRAY_SIZE(vga_yuv_mode); sd->stop_on_control_change = 1; sd->sof_len = 4; break; case CIT_MODEL4: cam->cam_mode = model2_mode; cam->nmodes = ARRAY_SIZE(model2_mode); break; case CIT_IBM_NETCAM_PRO: cam->cam_mode = vga_yuv_mode; cam->nmodes = 2; /* no 640 x 480 */ cam->input_flags = V4L2_IN_ST_VFLIP; sd->stop_on_control_change = 1; sd->sof_len = 4; break; } return 0; } static int cit_init_model0(struct gspca_dev *gspca_dev) { cit_write_reg(gspca_dev, 0x0000, 0x0100); /* turn on led */ cit_write_reg(gspca_dev, 0x0001, 0x0112); /* turn on autogain ? 
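(editor's note: register 0x0112 is also written by the other init paths - 0x03 in cit_start_model1(), 0x0002 in cit_start_model2(), cit_start_model3() and cit_init_ibm_netcam_pro() - so it may be a general sensor-control register rather than autogain)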
*/ cit_write_reg(gspca_dev, 0x0000, 0x0400); cit_write_reg(gspca_dev, 0x0001, 0x0400); cit_write_reg(gspca_dev, 0x0000, 0x0420); cit_write_reg(gspca_dev, 0x0001, 0x0420); cit_write_reg(gspca_dev, 0x000d, 0x0409); cit_write_reg(gspca_dev, 0x0002, 0x040a); cit_write_reg(gspca_dev, 0x0018, 0x0405); cit_write_reg(gspca_dev, 0x0008, 0x0435); cit_write_reg(gspca_dev, 0x0026, 0x040b); cit_write_reg(gspca_dev, 0x0007, 0x0437); cit_write_reg(gspca_dev, 0x0015, 0x042f); cit_write_reg(gspca_dev, 0x002b, 0x0439); cit_write_reg(gspca_dev, 0x0026, 0x043a); cit_write_reg(gspca_dev, 0x0008, 0x0438); cit_write_reg(gspca_dev, 0x001e, 0x042b); cit_write_reg(gspca_dev, 0x0041, 0x042c); return 0; } static int cit_init_ibm_netcam_pro(struct gspca_dev *gspca_dev) { cit_read_reg(gspca_dev, 0x128, 1); cit_write_reg(gspca_dev, 0x0003, 0x0133); cit_write_reg(gspca_dev, 0x0000, 0x0117); cit_write_reg(gspca_dev, 0x0008, 0x0123); cit_write_reg(gspca_dev, 0x0000, 0x0100); cit_read_reg(gspca_dev, 0x0116, 0); cit_write_reg(gspca_dev, 0x0060, 0x0116); cit_write_reg(gspca_dev, 0x0002, 0x0112); cit_write_reg(gspca_dev, 0x0000, 0x0133); cit_write_reg(gspca_dev, 0x0000, 0x0123); cit_write_reg(gspca_dev, 0x0001, 0x0117); cit_write_reg(gspca_dev, 0x0040, 0x0108); cit_write_reg(gspca_dev, 0x0019, 0x012c); cit_write_reg(gspca_dev, 0x0060, 0x0116); cit_write_reg(gspca_dev, 0x0002, 0x0115); cit_write_reg(gspca_dev, 0x000b, 0x0115); cit_write_reg(gspca_dev, 0x0078, 0x012d); cit_write_reg(gspca_dev, 0x0001, 0x012f); cit_write_reg(gspca_dev, 0xd141, 0x0124); cit_write_reg(gspca_dev, 0x0079, 0x012d); cit_write_reg(gspca_dev, 0x00ff, 0x0130); cit_write_reg(gspca_dev, 0xcd41, 0x0124); cit_write_reg(gspca_dev, 0xfffa, 0x0124); cit_read_reg(gspca_dev, 0x0126, 1); cit_model3_Packet1(gspca_dev, 0x0000, 0x0000); cit_model3_Packet1(gspca_dev, 0x0000, 0x0001); cit_model3_Packet1(gspca_dev, 0x000b, 0x0000); cit_model3_Packet1(gspca_dev, 0x000c, 0x0008); cit_model3_Packet1(gspca_dev, 0x000d, 0x003a); cit_model3_Packet1(gspca_dev, 0x000e, 0x0060); cit_model3_Packet1(gspca_dev, 0x000f, 0x0060); cit_model3_Packet1(gspca_dev, 0x0010, 0x0008); cit_model3_Packet1(gspca_dev, 0x0011, 0x0004); cit_model3_Packet1(gspca_dev, 0x0012, 0x0028); cit_model3_Packet1(gspca_dev, 0x0013, 0x0002); cit_model3_Packet1(gspca_dev, 0x0014, 0x0000); cit_model3_Packet1(gspca_dev, 0x0015, 0x00fb); cit_model3_Packet1(gspca_dev, 0x0016, 0x0002); cit_model3_Packet1(gspca_dev, 0x0017, 0x0037); cit_model3_Packet1(gspca_dev, 0x0018, 0x0036); cit_model3_Packet1(gspca_dev, 0x001e, 0x0000); cit_model3_Packet1(gspca_dev, 0x001f, 0x0008); cit_model3_Packet1(gspca_dev, 0x0020, 0x00c1); cit_model3_Packet1(gspca_dev, 0x0021, 0x0034); cit_model3_Packet1(gspca_dev, 0x0022, 0x0034); cit_model3_Packet1(gspca_dev, 0x0025, 0x0002); cit_model3_Packet1(gspca_dev, 0x0028, 0x0022); cit_model3_Packet1(gspca_dev, 0x0029, 0x000a); cit_model3_Packet1(gspca_dev, 0x002b, 0x0000); cit_model3_Packet1(gspca_dev, 0x002c, 0x0000); cit_model3_Packet1(gspca_dev, 0x002d, 0x00ff); cit_model3_Packet1(gspca_dev, 0x002e, 0x00ff); cit_model3_Packet1(gspca_dev, 0x002f, 0x00ff); cit_model3_Packet1(gspca_dev, 0x0030, 0x00ff); cit_model3_Packet1(gspca_dev, 0x0031, 0x00ff); cit_model3_Packet1(gspca_dev, 0x0032, 0x0007); cit_model3_Packet1(gspca_dev, 0x0033, 0x0005); cit_model3_Packet1(gspca_dev, 0x0037, 0x0040); cit_model3_Packet1(gspca_dev, 0x0039, 0x0000); cit_model3_Packet1(gspca_dev, 0x003a, 0x0000); cit_model3_Packet1(gspca_dev, 0x003b, 0x0001); cit_model3_Packet1(gspca_dev, 0x003c, 0x0000); 
cit_model3_Packet1(gspca_dev, 0x0040, 0x000c); cit_model3_Packet1(gspca_dev, 0x0041, 0x00fb); cit_model3_Packet1(gspca_dev, 0x0042, 0x0002); cit_model3_Packet1(gspca_dev, 0x0043, 0x0000); cit_model3_Packet1(gspca_dev, 0x0045, 0x0000); cit_model3_Packet1(gspca_dev, 0x0046, 0x0000); cit_model3_Packet1(gspca_dev, 0x0047, 0x0000); cit_model3_Packet1(gspca_dev, 0x0048, 0x0000); cit_model3_Packet1(gspca_dev, 0x0049, 0x0000); cit_model3_Packet1(gspca_dev, 0x004a, 0x00ff); cit_model3_Packet1(gspca_dev, 0x004b, 0x00ff); cit_model3_Packet1(gspca_dev, 0x004c, 0x00ff); cit_model3_Packet1(gspca_dev, 0x004f, 0x0000); cit_model3_Packet1(gspca_dev, 0x0050, 0x0000); cit_model3_Packet1(gspca_dev, 0x0051, 0x0002); cit_model3_Packet1(gspca_dev, 0x0055, 0x0000); cit_model3_Packet1(gspca_dev, 0x0056, 0x0000); cit_model3_Packet1(gspca_dev, 0x0057, 0x0000); cit_model3_Packet1(gspca_dev, 0x0058, 0x0002); cit_model3_Packet1(gspca_dev, 0x0059, 0x0000); cit_model3_Packet1(gspca_dev, 0x005c, 0x0016); cit_model3_Packet1(gspca_dev, 0x005d, 0x0022); cit_model3_Packet1(gspca_dev, 0x005e, 0x003c); cit_model3_Packet1(gspca_dev, 0x005f, 0x0050); cit_model3_Packet1(gspca_dev, 0x0060, 0x0044); cit_model3_Packet1(gspca_dev, 0x0061, 0x0005); cit_model3_Packet1(gspca_dev, 0x006a, 0x007e); cit_model3_Packet1(gspca_dev, 0x006f, 0x0000); cit_model3_Packet1(gspca_dev, 0x0072, 0x001b); cit_model3_Packet1(gspca_dev, 0x0073, 0x0005); cit_model3_Packet1(gspca_dev, 0x0074, 0x000a); cit_model3_Packet1(gspca_dev, 0x0075, 0x001b); cit_model3_Packet1(gspca_dev, 0x0076, 0x002a); cit_model3_Packet1(gspca_dev, 0x0077, 0x003c); cit_model3_Packet1(gspca_dev, 0x0078, 0x0050); cit_model3_Packet1(gspca_dev, 0x007b, 0x0000); cit_model3_Packet1(gspca_dev, 0x007c, 0x0011); cit_model3_Packet1(gspca_dev, 0x007d, 0x0024); cit_model3_Packet1(gspca_dev, 0x007e, 0x0043); cit_model3_Packet1(gspca_dev, 0x007f, 0x005a); cit_model3_Packet1(gspca_dev, 0x0084, 0x0020); cit_model3_Packet1(gspca_dev, 0x0085, 0x0033); cit_model3_Packet1(gspca_dev, 0x0086, 0x000a); cit_model3_Packet1(gspca_dev, 0x0087, 0x0030); cit_model3_Packet1(gspca_dev, 0x0088, 0x0070); cit_model3_Packet1(gspca_dev, 0x008b, 0x0008); cit_model3_Packet1(gspca_dev, 0x008f, 0x0000); cit_model3_Packet1(gspca_dev, 0x0090, 0x0006); cit_model3_Packet1(gspca_dev, 0x0091, 0x0028); cit_model3_Packet1(gspca_dev, 0x0092, 0x005a); cit_model3_Packet1(gspca_dev, 0x0093, 0x0082); cit_model3_Packet1(gspca_dev, 0x0096, 0x0014); cit_model3_Packet1(gspca_dev, 0x0097, 0x0020); cit_model3_Packet1(gspca_dev, 0x0098, 0x0000); cit_model3_Packet1(gspca_dev, 0x00b0, 0x0046); cit_model3_Packet1(gspca_dev, 0x00b1, 0x0000); cit_model3_Packet1(gspca_dev, 0x00b2, 0x0000); cit_model3_Packet1(gspca_dev, 0x00b3, 0x0004); cit_model3_Packet1(gspca_dev, 0x00b4, 0x0007); cit_model3_Packet1(gspca_dev, 0x00b6, 0x0002); cit_model3_Packet1(gspca_dev, 0x00b7, 0x0004); cit_model3_Packet1(gspca_dev, 0x00bb, 0x0000); cit_model3_Packet1(gspca_dev, 0x00bc, 0x0001); cit_model3_Packet1(gspca_dev, 0x00bd, 0x0000); cit_model3_Packet1(gspca_dev, 0x00bf, 0x0000); cit_model3_Packet1(gspca_dev, 0x00c0, 0x00c8); cit_model3_Packet1(gspca_dev, 0x00c1, 0x0014); cit_model3_Packet1(gspca_dev, 0x00c2, 0x0001); cit_model3_Packet1(gspca_dev, 0x00c3, 0x0000); cit_model3_Packet1(gspca_dev, 0x00c4, 0x0004); cit_model3_Packet1(gspca_dev, 0x00cb, 0x00bf); cit_model3_Packet1(gspca_dev, 0x00cc, 0x00bf); cit_model3_Packet1(gspca_dev, 0x00cd, 0x00bf); cit_model3_Packet1(gspca_dev, 0x00ce, 0x0000); cit_model3_Packet1(gspca_dev, 0x00cf, 0x0020); cit_model3_Packet1(gspca_dev, 
0x00d0, 0x0040); cit_model3_Packet1(gspca_dev, 0x00d1, 0x00bf); cit_model3_Packet1(gspca_dev, 0x00d1, 0x00bf); cit_model3_Packet1(gspca_dev, 0x00d2, 0x00bf); cit_model3_Packet1(gspca_dev, 0x00d3, 0x00bf); cit_model3_Packet1(gspca_dev, 0x00ea, 0x0008); cit_model3_Packet1(gspca_dev, 0x00eb, 0x0000); cit_model3_Packet1(gspca_dev, 0x00ec, 0x00e8); cit_model3_Packet1(gspca_dev, 0x00ed, 0x0001); cit_model3_Packet1(gspca_dev, 0x00ef, 0x0022); cit_model3_Packet1(gspca_dev, 0x00f0, 0x0000); cit_model3_Packet1(gspca_dev, 0x00f2, 0x0028); cit_model3_Packet1(gspca_dev, 0x00f4, 0x0002); cit_model3_Packet1(gspca_dev, 0x00f5, 0x0000); cit_model3_Packet1(gspca_dev, 0x00fa, 0x0000); cit_model3_Packet1(gspca_dev, 0x00fb, 0x0001); cit_model3_Packet1(gspca_dev, 0x00fc, 0x0000); cit_model3_Packet1(gspca_dev, 0x00fd, 0x0000); cit_model3_Packet1(gspca_dev, 0x00fe, 0x0000); cit_model3_Packet1(gspca_dev, 0x00ff, 0x0000); cit_model3_Packet1(gspca_dev, 0x00be, 0x0003); cit_model3_Packet1(gspca_dev, 0x00c8, 0x0000); cit_model3_Packet1(gspca_dev, 0x00c9, 0x0020); cit_model3_Packet1(gspca_dev, 0x00ca, 0x0040); cit_model3_Packet1(gspca_dev, 0x0053, 0x0001); cit_model3_Packet1(gspca_dev, 0x0082, 0x000e); cit_model3_Packet1(gspca_dev, 0x0083, 0x0020); cit_model3_Packet1(gspca_dev, 0x0034, 0x003c); cit_model3_Packet1(gspca_dev, 0x006e, 0x0055); cit_model3_Packet1(gspca_dev, 0x0062, 0x0005); cit_model3_Packet1(gspca_dev, 0x0063, 0x0008); cit_model3_Packet1(gspca_dev, 0x0066, 0x000a); cit_model3_Packet1(gspca_dev, 0x0067, 0x0006); cit_model3_Packet1(gspca_dev, 0x006b, 0x0010); cit_model3_Packet1(gspca_dev, 0x005a, 0x0001); cit_model3_Packet1(gspca_dev, 0x005b, 0x000a); cit_model3_Packet1(gspca_dev, 0x0023, 0x0006); cit_model3_Packet1(gspca_dev, 0x0026, 0x0004); cit_model3_Packet1(gspca_dev, 0x0036, 0x0069); cit_model3_Packet1(gspca_dev, 0x0038, 0x0064); cit_model3_Packet1(gspca_dev, 0x003d, 0x0003); cit_model3_Packet1(gspca_dev, 0x003e, 0x0001); cit_model3_Packet1(gspca_dev, 0x00b8, 0x0014); cit_model3_Packet1(gspca_dev, 0x00b9, 0x0014); cit_model3_Packet1(gspca_dev, 0x00e6, 0x0004); cit_model3_Packet1(gspca_dev, 0x00e8, 0x0001); return 0; } /* this function is called at probe and resume time */ static int sd_init(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; switch (sd->model) { case CIT_MODEL0: cit_init_model0(gspca_dev); sd_stop0(gspca_dev); break; case CIT_MODEL1: case CIT_MODEL2: case CIT_MODEL3: case CIT_MODEL4: break; /* All is done in sd_start */ case CIT_IBM_NETCAM_PRO: cit_init_ibm_netcam_pro(gspca_dev); sd_stop0(gspca_dev); break; } return 0; } static int cit_set_brightness(struct gspca_dev *gspca_dev, s32 val) { struct sd *sd = (struct sd *) gspca_dev; int i; switch (sd->model) { case CIT_MODEL0: case CIT_IBM_NETCAM_PRO: /* No (known) brightness control for these */ break; case CIT_MODEL1: /* Model 1: Brightness range 0 - 63 */ cit_Packet_Format1(gspca_dev, 0x0031, val); cit_Packet_Format1(gspca_dev, 0x0032, val); cit_Packet_Format1(gspca_dev, 0x0033, val); break; case CIT_MODEL2: /* Model 2: Brightness range 0x60 - 0xee */ /* Scale 0 - 63 to 0x60 - 0xee */ i = 0x60 + val * 2254 / 1000; cit_model2_Packet1(gspca_dev, 0x001a, i); break; case CIT_MODEL3: /* Model 3: Brightness range 'i' in [0x0C..0x3F] */ i = val; if (i < 0x0c) i = 0x0c; cit_model3_Packet1(gspca_dev, 0x0036, i); break; case CIT_MODEL4: /* Model 4: Brightness range 'i' in [0x04..0xb4] */ /* Scale 0 - 63 to 0x04 - 0xb4 */ i = 0x04 + val * 2794 / 1000; cit_model4_BrightnessPacket(gspca_dev, i); break; } return 0; } static int 
cit_set_contrast(struct gspca_dev *gspca_dev, s32 val) { struct sd *sd = (struct sd *) gspca_dev; switch (sd->model) { case CIT_MODEL0: { int i; /* gain 0-15, 0-20 -> 0-15 */ i = val * 1000 / 1333; cit_write_reg(gspca_dev, i, 0x0422); /* gain 0-31, may not be lower than 0x0422, 0-20 -> 0-31 */ i = val * 2000 / 1333; cit_write_reg(gspca_dev, i, 0x0423); /* gain 0-127, may not be lower than 0x0423, 0-20 -> 0-63 */ i = val * 4000 / 1333; cit_write_reg(gspca_dev, i, 0x0424); /* gain 0-127, may not be lower than 0x0424, 0-20 -> 0-127 */ i = val * 8000 / 1333; cit_write_reg(gspca_dev, i, 0x0425); break; } case CIT_MODEL2: case CIT_MODEL4: /* These models do not have this control. */ break; case CIT_MODEL1: { /* Scale 0 - 20 to 15 - 0 */ int i, new_contrast = (20 - val) * 1000 / 1333; for (i = 0; i < cit_model1_ntries; i++) { cit_Packet_Format1(gspca_dev, 0x0014, new_contrast); cit_send_FF_04_02(gspca_dev); } break; } case CIT_MODEL3: { /* Preset hardware values */ static const struct { unsigned short cv1; unsigned short cv2; unsigned short cv3; } cv[7] = { { 0x05, 0x05, 0x0f }, /* Minimum */ { 0x04, 0x04, 0x16 }, { 0x02, 0x03, 0x16 }, { 0x02, 0x08, 0x16 }, { 0x01, 0x0c, 0x16 }, { 0x01, 0x0e, 0x16 }, { 0x01, 0x10, 0x16 } /* Maximum */ }; int i = val / 3; cit_model3_Packet1(gspca_dev, 0x0067, cv[i].cv1); cit_model3_Packet1(gspca_dev, 0x005b, cv[i].cv2); cit_model3_Packet1(gspca_dev, 0x005c, cv[i].cv3); break; } case CIT_IBM_NETCAM_PRO: cit_model3_Packet1(gspca_dev, 0x005b, val + 1); break; } return 0; } static int cit_set_hue(struct gspca_dev *gspca_dev, s32 val) { struct sd *sd = (struct sd *) gspca_dev; switch (sd->model) { case CIT_MODEL0: case CIT_MODEL1: case CIT_IBM_NETCAM_PRO: /* No hue control for these models */ break; case CIT_MODEL2: cit_model2_Packet1(gspca_dev, 0x0024, val); /* cit_model2_Packet1(gspca_dev, 0x0020, sat); */ break; case CIT_MODEL3: { /* Model 3: Hue range 'i' in [0x05..0x37] */ /* TESTME according to the ibmcam driver this does not work */ if (0) { /* Scale 0 - 127 to 0x05 - 0x37 */ int i = 0x05 + val * 1000 / 2540; cit_model3_Packet1(gspca_dev, 0x007e, i); } break; } case CIT_MODEL4: /* HDG: taken from ibmcam, setting the color gains does not * really belong here. * * I am not sure the r/g/b_gain variables exactly control the * gain of those channels. Most likely they subtly change some * very internal image processing settings in the camera.
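* (Editor's reading of the code below, an assumption rather than
* documented behavior: the write sequence reuses the 0x00aa/0xd141
* handshake of cit_model4_Packet1(), extended with gain bytes in
* 0x012e and 0x0130 and a trailing hue write via 0xf545.)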
* In any case, here is what they do, and feel free to tweak: * * r_gain: seriously affects red gain * g_gain: seriously affects green gain * b_gain: seriously affects blue gain * hue: changes average color from violet (0) to red (0xFF) */ cit_write_reg(gspca_dev, 0x00aa, 0x012d); cit_write_reg(gspca_dev, 0x001e, 0x012f); cit_write_reg(gspca_dev, 0xd141, 0x0124); cit_write_reg(gspca_dev, 160, 0x0127); /* Green gain */ cit_write_reg(gspca_dev, 160, 0x012e); /* Red gain */ cit_write_reg(gspca_dev, 160, 0x0130); /* Blue gain */ cit_write_reg(gspca_dev, 0x8a28, 0x0124); cit_write_reg(gspca_dev, val, 0x012d); /* Hue */ cit_write_reg(gspca_dev, 0xf545, 0x0124); break; } return 0; } static int cit_set_sharpness(struct gspca_dev *gspca_dev, s32 val) { struct sd *sd = (struct sd *) gspca_dev; switch (sd->model) { case CIT_MODEL0: case CIT_MODEL2: case CIT_MODEL4: case CIT_IBM_NETCAM_PRO: /* These models do not have this control */ break; case CIT_MODEL1: { int i; static const unsigned short sa[] = { 0x11, 0x13, 0x16, 0x18, 0x1a, 0x08, 0x0a }; for (i = 0; i < cit_model1_ntries; i++) cit_PacketFormat2(gspca_dev, 0x0013, sa[val]); break; } case CIT_MODEL3: { /* * "Use a table of magic numbers. * This setting doesn't really change much. * But that's how Windows does it." */ static const struct { unsigned short sv1; unsigned short sv2; unsigned short sv3; unsigned short sv4; } sv[7] = { { 0x00, 0x00, 0x05, 0x14 }, /* Smoothest */ { 0x01, 0x04, 0x05, 0x14 }, { 0x02, 0x04, 0x05, 0x14 }, { 0x03, 0x04, 0x05, 0x14 }, { 0x03, 0x05, 0x05, 0x14 }, { 0x03, 0x06, 0x05, 0x14 }, { 0x03, 0x07, 0x05, 0x14 } /* Sharpest */ }; cit_model3_Packet1(gspca_dev, 0x0060, sv[val].sv1); cit_model3_Packet1(gspca_dev, 0x0061, sv[val].sv2); cit_model3_Packet1(gspca_dev, 0x0062, sv[val].sv3); cit_model3_Packet1(gspca_dev, 0x0063, sv[val].sv4); break; } } return 0; } /* * cit_set_lighting() * * Camera model 1: * We have 3 levels of lighting conditions: 0=Bright, 1=Medium, 2=Low. * * Camera model 2: * We have 16 levels of lighting, 0 for bright light and up to 15 for * low light. But values above 5 or so are useless because the camera * is not really capable of producing anything worth viewing in such * light. This setting may be altered only in certain camera states. * * Low lighting forces slower FPS. * * History: * 1/5/00 Created. * 2/20/00 Added support for Model 2 cameras. */ static void cit_set_lighting(struct gspca_dev *gspca_dev, s32 val) { struct sd *sd = (struct sd *) gspca_dev; switch (sd->model) { case CIT_MODEL0: case CIT_MODEL2: case CIT_MODEL3: case CIT_MODEL4: case CIT_IBM_NETCAM_PRO: break; case CIT_MODEL1: { int i; for (i = 0; i < cit_model1_ntries; i++) cit_Packet_Format1(gspca_dev, 0x0027, val); break; } } } static void cit_set_hflip(struct gspca_dev *gspca_dev, s32 val) { struct sd *sd = (struct sd *) gspca_dev; switch (sd->model) { case CIT_MODEL0: if (val) cit_write_reg(gspca_dev, 0x0020, 0x0115); else cit_write_reg(gspca_dev, 0x0040, 0x0115); break; case CIT_MODEL1: case CIT_MODEL2: case CIT_MODEL3: case CIT_MODEL4: case CIT_IBM_NETCAM_PRO: break; } } static int cit_restart_stream(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; switch (sd->model) { case CIT_MODEL0: case CIT_MODEL1: cit_write_reg(gspca_dev, 0x0001, 0x0114); fallthrough; case CIT_MODEL2: case CIT_MODEL4: cit_write_reg(gspca_dev, 0x00c0, 0x010c); /* Go!
*/ usb_clear_halt(gspca_dev->dev, gspca_dev->urb[0]->pipe); break; case CIT_MODEL3: case CIT_IBM_NETCAM_PRO: cit_write_reg(gspca_dev, 0x0001, 0x0114); cit_write_reg(gspca_dev, 0x00c0, 0x010c); /* Go! */ usb_clear_halt(gspca_dev->dev, gspca_dev->urb[0]->pipe); /* Clear button events from while we were not streaming */ cit_write_reg(gspca_dev, 0x0001, 0x0113); break; } sd->sof_read = 0; return 0; } static int cit_get_packet_size(struct gspca_dev *gspca_dev) { struct usb_host_interface *alt; struct usb_interface *intf; intf = usb_ifnum_to_if(gspca_dev->dev, gspca_dev->iface); alt = usb_altnum_to_altsetting(intf, gspca_dev->alt); if (!alt) { pr_err("Couldn't get altsetting\n"); return -EIO; } if (alt->desc.bNumEndpoints < 1) return -ENODEV; return le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize); } /* Calculate the clockdiv giving us max fps given the available bandwidth */ static int cit_get_clock_div(struct gspca_dev *gspca_dev) { int clock_div = 7; /* 0=30 1=25 2=20 3=15 4=12 5=7.5 6=6 7=3fps ?? */ int fps[8] = { 30, 25, 20, 15, 12, 8, 6, 3 }; int packet_size; packet_size = cit_get_packet_size(gspca_dev); if (packet_size < 0) return packet_size; while (clock_div > 3 && 1000 * packet_size > gspca_dev->pixfmt.width * gspca_dev->pixfmt.height * fps[clock_div - 1] * 3 / 2) clock_div--; gspca_dbg(gspca_dev, D_PROBE, "PacketSize: %d, res: %dx%d -> using clockdiv: %d (%d fps)\n", packet_size, gspca_dev->pixfmt.width, gspca_dev->pixfmt.height, clock_div, fps[clock_div]); return clock_div; } static int cit_start_model0(struct gspca_dev *gspca_dev) { const unsigned short compression = 0; /* 0=none, 7=best frame rate */ int clock_div; clock_div = cit_get_clock_div(gspca_dev); if (clock_div < 0) return clock_div; cit_write_reg(gspca_dev, 0x0000, 0x0100); /* turn on led */ cit_write_reg(gspca_dev, 0x0003, 0x0438); cit_write_reg(gspca_dev, 0x001e, 0x042b); cit_write_reg(gspca_dev, 0x0041, 0x042c); cit_write_reg(gspca_dev, 0x0008, 0x0436); cit_write_reg(gspca_dev, 0x0024, 0x0403); cit_write_reg(gspca_dev, 0x002c, 0x0404); cit_write_reg(gspca_dev, 0x0002, 0x0426); cit_write_reg(gspca_dev, 0x0014, 0x0427); switch (gspca_dev->pixfmt.width) { case 160: /* 160x120 */ cit_write_reg(gspca_dev, 0x0004, 0x010b); cit_write_reg(gspca_dev, 0x0001, 0x010a); cit_write_reg(gspca_dev, 0x0010, 0x0102); cit_write_reg(gspca_dev, 0x00a0, 0x0103); cit_write_reg(gspca_dev, 0x0000, 0x0104); cit_write_reg(gspca_dev, 0x0078, 0x0105); break; case 176: /* 176x144 */ cit_write_reg(gspca_dev, 0x0006, 0x010b); cit_write_reg(gspca_dev, 0x0000, 0x010a); cit_write_reg(gspca_dev, 0x0005, 0x0102); cit_write_reg(gspca_dev, 0x00b0, 0x0103); cit_write_reg(gspca_dev, 0x0000, 0x0104); cit_write_reg(gspca_dev, 0x0090, 0x0105); break; case 320: /* 320x240 */ cit_write_reg(gspca_dev, 0x0008, 0x010b); cit_write_reg(gspca_dev, 0x0004, 0x010a); cit_write_reg(gspca_dev, 0x0005, 0x0102); cit_write_reg(gspca_dev, 0x00a0, 0x0103); cit_write_reg(gspca_dev, 0x0010, 0x0104); cit_write_reg(gspca_dev, 0x0078, 0x0105); break; } cit_write_reg(gspca_dev, compression, 0x0109); cit_write_reg(gspca_dev, clock_div, 0x0111); return 0; } static int cit_start_model1(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; int i, clock_div; clock_div = cit_get_clock_div(gspca_dev); if (clock_div < 0) return clock_div; cit_read_reg(gspca_dev, 0x0128, 1); cit_read_reg(gspca_dev, 0x0100, 0); cit_write_reg(gspca_dev, 0x01, 0x0100); /* LED On */ cit_read_reg(gspca_dev, 0x0100, 0); cit_write_reg(gspca_dev, 0x81, 0x0100); /* LED Off */ cit_read_reg(gspca_dev, 
0x0100, 0); cit_write_reg(gspca_dev, 0x01, 0x0100); /* LED On */ cit_write_reg(gspca_dev, 0x01, 0x0108); cit_write_reg(gspca_dev, 0x03, 0x0112); cit_read_reg(gspca_dev, 0x0115, 0); cit_write_reg(gspca_dev, 0x06, 0x0115); cit_read_reg(gspca_dev, 0x0116, 0); cit_write_reg(gspca_dev, 0x44, 0x0116); cit_read_reg(gspca_dev, 0x0116, 0); cit_write_reg(gspca_dev, 0x40, 0x0116); cit_read_reg(gspca_dev, 0x0115, 0); cit_write_reg(gspca_dev, 0x0e, 0x0115); cit_write_reg(gspca_dev, 0x19, 0x012c); cit_Packet_Format1(gspca_dev, 0x00, 0x1e); cit_Packet_Format1(gspca_dev, 0x39, 0x0d); cit_Packet_Format1(gspca_dev, 0x39, 0x09); cit_Packet_Format1(gspca_dev, 0x3b, 0x00); cit_Packet_Format1(gspca_dev, 0x28, 0x22); cit_Packet_Format1(gspca_dev, 0x27, 0x00); cit_Packet_Format1(gspca_dev, 0x2b, 0x1f); cit_Packet_Format1(gspca_dev, 0x39, 0x08); for (i = 0; i < cit_model1_ntries; i++) cit_Packet_Format1(gspca_dev, 0x2c, 0x00); for (i = 0; i < cit_model1_ntries; i++) cit_Packet_Format1(gspca_dev, 0x30, 0x14); cit_PacketFormat2(gspca_dev, 0x39, 0x02); cit_PacketFormat2(gspca_dev, 0x01, 0xe1); cit_PacketFormat2(gspca_dev, 0x02, 0xcd); cit_PacketFormat2(gspca_dev, 0x03, 0xcd); cit_PacketFormat2(gspca_dev, 0x04, 0xfa); cit_PacketFormat2(gspca_dev, 0x3f, 0xff); cit_PacketFormat2(gspca_dev, 0x39, 0x00); cit_PacketFormat2(gspca_dev, 0x39, 0x02); cit_PacketFormat2(gspca_dev, 0x0a, 0x37); cit_PacketFormat2(gspca_dev, 0x0b, 0xb8); cit_PacketFormat2(gspca_dev, 0x0c, 0xf3); cit_PacketFormat2(gspca_dev, 0x0d, 0xe3); cit_PacketFormat2(gspca_dev, 0x0e, 0x0d); cit_PacketFormat2(gspca_dev, 0x0f, 0xf2); cit_PacketFormat2(gspca_dev, 0x10, 0xd5); cit_PacketFormat2(gspca_dev, 0x11, 0xba); cit_PacketFormat2(gspca_dev, 0x12, 0x53); cit_PacketFormat2(gspca_dev, 0x3f, 0xff); cit_PacketFormat2(gspca_dev, 0x39, 0x00); cit_PacketFormat2(gspca_dev, 0x39, 0x02); cit_PacketFormat2(gspca_dev, 0x16, 0x00); cit_PacketFormat2(gspca_dev, 0x17, 0x28); cit_PacketFormat2(gspca_dev, 0x18, 0x7d); cit_PacketFormat2(gspca_dev, 0x19, 0xbe); cit_PacketFormat2(gspca_dev, 0x3f, 0xff); cit_PacketFormat2(gspca_dev, 0x39, 0x00); for (i = 0; i < cit_model1_ntries; i++) cit_Packet_Format1(gspca_dev, 0x00, 0x18); for (i = 0; i < cit_model1_ntries; i++) cit_Packet_Format1(gspca_dev, 0x13, 0x18); for (i = 0; i < cit_model1_ntries; i++) cit_Packet_Format1(gspca_dev, 0x14, 0x06); /* TESTME These are handled through controls KEEP until someone can test leaving this out is ok */ if (0) { /* This is default brightness */ for (i = 0; i < cit_model1_ntries; i++) cit_Packet_Format1(gspca_dev, 0x31, 0x37); for (i = 0; i < cit_model1_ntries; i++) cit_Packet_Format1(gspca_dev, 0x32, 0x46); for (i = 0; i < cit_model1_ntries; i++) cit_Packet_Format1(gspca_dev, 0x33, 0x55); } cit_Packet_Format1(gspca_dev, 0x2e, 0x04); for (i = 0; i < cit_model1_ntries; i++) cit_Packet_Format1(gspca_dev, 0x2d, 0x04); for (i = 0; i < cit_model1_ntries; i++) cit_Packet_Format1(gspca_dev, 0x29, 0x80); cit_Packet_Format1(gspca_dev, 0x2c, 0x01); cit_Packet_Format1(gspca_dev, 0x30, 0x17); cit_Packet_Format1(gspca_dev, 0x39, 0x08); for (i = 0; i < cit_model1_ntries; i++) cit_Packet_Format1(gspca_dev, 0x34, 0x00); cit_write_reg(gspca_dev, 0x00, 0x0101); cit_write_reg(gspca_dev, 0x00, 0x010a); switch (gspca_dev->pixfmt.width) { case 128: /* 128x96 */ cit_write_reg(gspca_dev, 0x80, 0x0103); cit_write_reg(gspca_dev, 0x60, 0x0105); cit_write_reg(gspca_dev, 0x0c, 0x010b); cit_write_reg(gspca_dev, 0x04, 0x011b); /* Same everywhere */ cit_write_reg(gspca_dev, 0x0b, 0x011d); cit_write_reg(gspca_dev, 0x00, 0x011e); 
/* Same everywhere */ cit_write_reg(gspca_dev, 0x00, 0x0129); break; case 176: /* 176x144 */ cit_write_reg(gspca_dev, 0xb0, 0x0103); cit_write_reg(gspca_dev, 0x8f, 0x0105); cit_write_reg(gspca_dev, 0x06, 0x010b); cit_write_reg(gspca_dev, 0x04, 0x011b); /* Same everywhere */ cit_write_reg(gspca_dev, 0x0d, 0x011d); cit_write_reg(gspca_dev, 0x00, 0x011e); /* Same everywhere */ cit_write_reg(gspca_dev, 0x03, 0x0129); break; case 352: /* 352x288 */ cit_write_reg(gspca_dev, 0xb0, 0x0103); cit_write_reg(gspca_dev, 0x90, 0x0105); cit_write_reg(gspca_dev, 0x02, 0x010b); cit_write_reg(gspca_dev, 0x04, 0x011b); /* Same everywhere */ cit_write_reg(gspca_dev, 0x05, 0x011d); cit_write_reg(gspca_dev, 0x00, 0x011e); /* Same everywhere */ cit_write_reg(gspca_dev, 0x00, 0x0129); break; } cit_write_reg(gspca_dev, 0xff, 0x012b); /* TESTME These are handled through controls KEEP until someone can test leaving this out is ok */ if (0) { /* This is another brightness - don't know why */ for (i = 0; i < cit_model1_ntries; i++) cit_Packet_Format1(gspca_dev, 0x31, 0xc3); for (i = 0; i < cit_model1_ntries; i++) cit_Packet_Format1(gspca_dev, 0x32, 0xd2); for (i = 0; i < cit_model1_ntries; i++) cit_Packet_Format1(gspca_dev, 0x33, 0xe1); /* Default contrast */ for (i = 0; i < cit_model1_ntries; i++) cit_Packet_Format1(gspca_dev, 0x14, 0x0a); /* Default sharpness */ for (i = 0; i < cit_model1_ntries2; i++) cit_PacketFormat2(gspca_dev, 0x13, 0x1a); /* Default lighting conditions */ cit_Packet_Format1(gspca_dev, 0x0027, v4l2_ctrl_g_ctrl(sd->lighting)); } /* Assorted init */ switch (gspca_dev->pixfmt.width) { case 128: /* 128x96 */ cit_Packet_Format1(gspca_dev, 0x2b, 0x1e); cit_write_reg(gspca_dev, 0xc9, 0x0119); /* Same everywhere */ cit_write_reg(gspca_dev, 0x80, 0x0109); /* Same everywhere */ cit_write_reg(gspca_dev, 0x36, 0x0102); cit_write_reg(gspca_dev, 0x1a, 0x0104); cit_write_reg(gspca_dev, 0x04, 0x011a); /* Same everywhere */ cit_write_reg(gspca_dev, 0x2b, 0x011c); cit_write_reg(gspca_dev, 0x23, 0x012a); /* Same everywhere */ break; case 176: /* 176x144 */ cit_Packet_Format1(gspca_dev, 0x2b, 0x1e); cit_write_reg(gspca_dev, 0xc9, 0x0119); /* Same everywhere */ cit_write_reg(gspca_dev, 0x80, 0x0109); /* Same everywhere */ cit_write_reg(gspca_dev, 0x04, 0x0102); cit_write_reg(gspca_dev, 0x02, 0x0104); cit_write_reg(gspca_dev, 0x04, 0x011a); /* Same everywhere */ cit_write_reg(gspca_dev, 0x2b, 0x011c); cit_write_reg(gspca_dev, 0x23, 0x012a); /* Same everywhere */ break; case 352: /* 352x288 */ cit_Packet_Format1(gspca_dev, 0x2b, 0x1f); cit_write_reg(gspca_dev, 0xc9, 0x0119); /* Same everywhere */ cit_write_reg(gspca_dev, 0x80, 0x0109); /* Same everywhere */ cit_write_reg(gspca_dev, 0x08, 0x0102); cit_write_reg(gspca_dev, 0x01, 0x0104); cit_write_reg(gspca_dev, 0x04, 0x011a); /* Same everywhere */ cit_write_reg(gspca_dev, 0x2f, 0x011c); cit_write_reg(gspca_dev, 0x23, 0x012a); /* Same everywhere */ break; } cit_write_reg(gspca_dev, 0x01, 0x0100); /* LED On */ cit_write_reg(gspca_dev, clock_div, 0x0111); return 0; } static int cit_start_model2(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; int clock_div = 0; cit_write_reg(gspca_dev, 0x0000, 0x0100); /* LED on */ cit_read_reg(gspca_dev, 0x0116, 0); cit_write_reg(gspca_dev, 0x0060, 0x0116); cit_write_reg(gspca_dev, 0x0002, 0x0112); cit_write_reg(gspca_dev, 0x00bc, 0x012c); cit_write_reg(gspca_dev, 0x0008, 0x012b); cit_write_reg(gspca_dev, 0x0000, 0x0108); cit_write_reg(gspca_dev, 0x0001, 0x0133); cit_write_reg(gspca_dev, 0x0001, 0x0102); switch 
(gspca_dev->pixfmt.width) { case 176: /* 176x144 */ cit_write_reg(gspca_dev, 0x002c, 0x0103); /* All except 320x240 */ cit_write_reg(gspca_dev, 0x0000, 0x0104); /* Same */ cit_write_reg(gspca_dev, 0x0024, 0x0105); /* 176x144, 352x288 */ cit_write_reg(gspca_dev, 0x00b9, 0x010a); /* Unique to this mode */ cit_write_reg(gspca_dev, 0x0038, 0x0119); /* Unique to this mode */ /* TESTME HDG: this does not seem right (it is 2 for all other resolutions) */ sd->sof_len = 10; break; case 320: /* 320x240 */ cit_write_reg(gspca_dev, 0x0028, 0x0103); /* Unique to this mode */ cit_write_reg(gspca_dev, 0x0000, 0x0104); /* Same */ cit_write_reg(gspca_dev, 0x001e, 0x0105); /* 320x240, 352x240 */ cit_write_reg(gspca_dev, 0x0039, 0x010a); /* All except 176x144 */ cit_write_reg(gspca_dev, 0x0070, 0x0119); /* All except 176x144 */ sd->sof_len = 2; break; #if 0 case VIDEOSIZE_352x240: cit_write_reg(gspca_dev, 0x002c, 0x0103); /* All except 320x240 */ cit_write_reg(gspca_dev, 0x0000, 0x0104); /* Same */ cit_write_reg(gspca_dev, 0x001e, 0x0105); /* 320x240, 352x240 */ cit_write_reg(gspca_dev, 0x0039, 0x010a); /* All except 176x144 */ cit_write_reg(gspca_dev, 0x0070, 0x0119); /* All except 176x144 */ sd->sof_len = 2; break; #endif case 352: /* 352x288 */ cit_write_reg(gspca_dev, 0x002c, 0x0103); /* All except 320x240 */ cit_write_reg(gspca_dev, 0x0000, 0x0104); /* Same */ cit_write_reg(gspca_dev, 0x0024, 0x0105); /* 176x144, 352x288 */ cit_write_reg(gspca_dev, 0x0039, 0x010a); /* All except 176x144 */ cit_write_reg(gspca_dev, 0x0070, 0x0119); /* All except 176x144 */ sd->sof_len = 2; break; } cit_write_reg(gspca_dev, 0x0000, 0x0100); /* LED on */ switch (gspca_dev->pixfmt.width) { case 176: /* 176x144 */ cit_write_reg(gspca_dev, 0x0050, 0x0111); cit_write_reg(gspca_dev, 0x00d0, 0x0111); break; case 320: /* 320x240 */ case 352: /* 352x288 */ cit_write_reg(gspca_dev, 0x0040, 0x0111); cit_write_reg(gspca_dev, 0x00c0, 0x0111); break; } cit_write_reg(gspca_dev, 0x009b, 0x010f); cit_write_reg(gspca_dev, 0x00bb, 0x010f); /* * Hardware settings, may affect CMOS sensor; not user controls! * ------------------------------------------------------------- * 0x0004: no effect * 0x0006: hardware effect * 0x0008: no effect * 0x000a: stops video stream, probably important h/w setting * 0x000c: changes color in hardware manner (not user setting) * 0x0012: changes number of colors (does not affect speed) * 0x002a: no effect * 0x002c: hardware setting (related to scan lines) * 0x002e: stops video stream, probably important h/w setting */ cit_model2_Packet1(gspca_dev, 0x000a, 0x005c); cit_model2_Packet1(gspca_dev, 0x0004, 0x0000); cit_model2_Packet1(gspca_dev, 0x0006, 0x00fb); cit_model2_Packet1(gspca_dev, 0x0008, 0x0000); cit_model2_Packet1(gspca_dev, 0x000c, 0x0009); cit_model2_Packet1(gspca_dev, 0x0012, 0x000a); cit_model2_Packet1(gspca_dev, 0x002a, 0x0000); cit_model2_Packet1(gspca_dev, 0x002c, 0x0000); cit_model2_Packet1(gspca_dev, 0x002e, 0x0008); /* * Function 0x0030 pops up all over the place. Apparently * it is a hardware control register, with every bit assigned to * do something. */ cit_model2_Packet1(gspca_dev, 0x0030, 0x0000); /* * Magic control of CMOS sensor. Only lower values like * 0-3 work, and picture shifts left or right. Don't change. 
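* * For reference, the values used in the switch below are:
* 176x144: 0x0014=0x0002, 0x0016=0x0002, 0x0018=0x004a, clock_div=6
* 320x240: 0x0014=0x0009, 0x0016=0x0005, 0x0018=0x0044, clock_div=8
* 352x288: 0x0014=0x0003, 0x0016=0x0002, 0x0018=0x004a, clock_div=16
* (and 0x0009/0x0003/0x0044 with clock_div=10 in the disabled
* 352x240 case).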
*/ switch (gspca_dev->pixfmt.width) { case 176: /* 176x144 */ cit_model2_Packet1(gspca_dev, 0x0014, 0x0002); cit_model2_Packet1(gspca_dev, 0x0016, 0x0002); /* Horizontal shift */ cit_model2_Packet1(gspca_dev, 0x0018, 0x004a); /* Another hardware setting */ clock_div = 6; break; case 320: /* 320x240 */ cit_model2_Packet1(gspca_dev, 0x0014, 0x0009); cit_model2_Packet1(gspca_dev, 0x0016, 0x0005); /* Horizontal shift */ cit_model2_Packet1(gspca_dev, 0x0018, 0x0044); /* Another hardware setting */ clock_div = 8; break; #if 0 case VIDEOSIZE_352x240: /* This mode doesn't work as Windows programs it; changed to work */ cit_model2_Packet1(gspca_dev, 0x0014, 0x0009); /* Windows sets this to 8 */ cit_model2_Packet1(gspca_dev, 0x0016, 0x0003); /* Horizontal shift */ cit_model2_Packet1(gspca_dev, 0x0018, 0x0044); /* Windows sets this to 0x0045 */ clock_div = 10; break; #endif case 352: /* 352x288 */ cit_model2_Packet1(gspca_dev, 0x0014, 0x0003); cit_model2_Packet1(gspca_dev, 0x0016, 0x0002); /* Horizontal shift */ cit_model2_Packet1(gspca_dev, 0x0018, 0x004a); /* Another hardware setting */ clock_div = 16; break; } /* TESTME These are handled through controls KEEP until someone can test leaving this out is ok */ if (0) cit_model2_Packet1(gspca_dev, 0x001a, 0x005a); /* * We have our own frame rate setting varying from 0 (slowest) to 6 * (fastest). The camera model 2 allows frame rate in range [0..0x1F] * where 0 is also the slowest setting. However, for all practical * purposes high settings make no sense because USB is not fast enough * to support high FPS. Be aware that the picture datastream will be * severely disrupted if you ask for a frame rate faster than allowed * for the video size - see below: * * Allowable ranges (obtained experimentally on OHCI, K6-3, 450 MHz): * ----------------------------------------------------------------- * 176x144: [6..31] * 320x240: [8..31] * 352x240: [10..31] * 352x288: [16..31] I have to raise lower threshold for stability... * * As usual, slower FPS provides better sensitivity. */ cit_model2_Packet1(gspca_dev, 0x001c, clock_div); /* * This setting does not visibly affect pictures; left it here * because it was present in Windows USB data stream. This function * does not allow arbitrary values and apparently is a bit mask, to * be activated only at appropriate time. Don't change it randomly!
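* * For reference, the values written below are: 176x144 -> 0x00c2,
* 320x240 -> 0x0044 and 352x288 -> 0x0048 (0x0046 in the disabled
* 352x240 case).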
*/ switch (gspca_dev->pixfmt.width) { case 176: /* 176x144 */ cit_model2_Packet1(gspca_dev, 0x0026, 0x00c2); break; case 320: /* 320x240 */ cit_model2_Packet1(gspca_dev, 0x0026, 0x0044); break; #if 0 case VIDEOSIZE_352x240: cit_model2_Packet1(gspca_dev, 0x0026, 0x0046); break; #endif case 352: /* 352x288 */ cit_model2_Packet1(gspca_dev, 0x0026, 0x0048); break; } cit_model2_Packet1(gspca_dev, 0x0028, v4l2_ctrl_g_ctrl(sd->lighting)); /* model2 cannot change the backlight compensation while streaming */ v4l2_ctrl_grab(sd->lighting, true); /* color balance rg2 */ cit_model2_Packet1(gspca_dev, 0x001e, 0x002f); /* saturation */ cit_model2_Packet1(gspca_dev, 0x0020, 0x0034); /* color balance yb */ cit_model2_Packet1(gspca_dev, 0x0022, 0x00a0); /* Hardware control command */ cit_model2_Packet1(gspca_dev, 0x0030, 0x0004); return 0; } static int cit_start_model3(struct gspca_dev *gspca_dev) { const unsigned short compression = 0; /* 0=none, 7=best frame rate */ int i, clock_div = 0; /* HDG not in ibmcam driver, added to see if it helps with auto-detecting between model3 and ibm netcamera pro */ cit_read_reg(gspca_dev, 0x128, 1); cit_write_reg(gspca_dev, 0x0000, 0x0100); cit_read_reg(gspca_dev, 0x0116, 0); cit_write_reg(gspca_dev, 0x0060, 0x0116); cit_write_reg(gspca_dev, 0x0002, 0x0112); cit_write_reg(gspca_dev, 0x0000, 0x0123); cit_write_reg(gspca_dev, 0x0001, 0x0117); cit_write_reg(gspca_dev, 0x0040, 0x0108); cit_write_reg(gspca_dev, 0x0019, 0x012c); cit_write_reg(gspca_dev, 0x0060, 0x0116); cit_write_reg(gspca_dev, 0x0002, 0x0115); cit_write_reg(gspca_dev, 0x0003, 0x0115); cit_read_reg(gspca_dev, 0x0115, 0); cit_write_reg(gspca_dev, 0x000b, 0x0115); /* TESTME HDG not in ibmcam driver, added to see if it helps with auto-detecting between model3 and ibm netcamera pro */ if (0) { cit_write_reg(gspca_dev, 0x0078, 0x012d); cit_write_reg(gspca_dev, 0x0001, 0x012f); cit_write_reg(gspca_dev, 0xd141, 0x0124); cit_write_reg(gspca_dev, 0x0079, 0x012d); cit_write_reg(gspca_dev, 0x00ff, 0x0130); cit_write_reg(gspca_dev, 0xcd41, 0x0124); cit_write_reg(gspca_dev, 0xfffa, 0x0124); cit_read_reg(gspca_dev, 0x0126, 1); } cit_model3_Packet1(gspca_dev, 0x000a, 0x0040); cit_model3_Packet1(gspca_dev, 0x000b, 0x00f6); cit_model3_Packet1(gspca_dev, 0x000c, 0x0002); cit_model3_Packet1(gspca_dev, 0x000d, 0x0020); cit_model3_Packet1(gspca_dev, 0x000e, 0x0033); cit_model3_Packet1(gspca_dev, 0x000f, 0x0007); cit_model3_Packet1(gspca_dev, 0x0010, 0x0000); cit_model3_Packet1(gspca_dev, 0x0011, 0x0070); cit_model3_Packet1(gspca_dev, 0x0012, 0x0030); cit_model3_Packet1(gspca_dev, 0x0013, 0x0000); cit_model3_Packet1(gspca_dev, 0x0014, 0x0001); cit_model3_Packet1(gspca_dev, 0x0015, 0x0001); cit_model3_Packet1(gspca_dev, 0x0016, 0x0001); cit_model3_Packet1(gspca_dev, 0x0017, 0x0001); cit_model3_Packet1(gspca_dev, 0x0018, 0x0000); cit_model3_Packet1(gspca_dev, 0x001e, 0x00c3); cit_model3_Packet1(gspca_dev, 0x0020, 0x0000); cit_model3_Packet1(gspca_dev, 0x0028, 0x0010); cit_model3_Packet1(gspca_dev, 0x0029, 0x0054); cit_model3_Packet1(gspca_dev, 0x002a, 0x0013); cit_model3_Packet1(gspca_dev, 0x002b, 0x0007); cit_model3_Packet1(gspca_dev, 0x002d, 0x0028); cit_model3_Packet1(gspca_dev, 0x002e, 0x0000); cit_model3_Packet1(gspca_dev, 0x0031, 0x0000); cit_model3_Packet1(gspca_dev, 0x0032, 0x0000); cit_model3_Packet1(gspca_dev, 0x0033, 0x0000); cit_model3_Packet1(gspca_dev, 0x0034, 0x0000); cit_model3_Packet1(gspca_dev, 0x0035, 0x0038); cit_model3_Packet1(gspca_dev, 0x003a, 0x0001); cit_model3_Packet1(gspca_dev, 0x003c, 0x001e); 
cit_model3_Packet1(gspca_dev, 0x003f, 0x000a); cit_model3_Packet1(gspca_dev, 0x0041, 0x0000); cit_model3_Packet1(gspca_dev, 0x0046, 0x003f); cit_model3_Packet1(gspca_dev, 0x0047, 0x0000); cit_model3_Packet1(gspca_dev, 0x0050, 0x0005); cit_model3_Packet1(gspca_dev, 0x0052, 0x001a); cit_model3_Packet1(gspca_dev, 0x0053, 0x0003); cit_model3_Packet1(gspca_dev, 0x005a, 0x006b); cit_model3_Packet1(gspca_dev, 0x005d, 0x001e); cit_model3_Packet1(gspca_dev, 0x005e, 0x0030); cit_model3_Packet1(gspca_dev, 0x005f, 0x0041); cit_model3_Packet1(gspca_dev, 0x0064, 0x0008); cit_model3_Packet1(gspca_dev, 0x0065, 0x0015); cit_model3_Packet1(gspca_dev, 0x0068, 0x000f); cit_model3_Packet1(gspca_dev, 0x0079, 0x0000); cit_model3_Packet1(gspca_dev, 0x007a, 0x0000); cit_model3_Packet1(gspca_dev, 0x007c, 0x003f); cit_model3_Packet1(gspca_dev, 0x0082, 0x000f); cit_model3_Packet1(gspca_dev, 0x0085, 0x0000); cit_model3_Packet1(gspca_dev, 0x0099, 0x0000); cit_model3_Packet1(gspca_dev, 0x009b, 0x0023); cit_model3_Packet1(gspca_dev, 0x009c, 0x0022); cit_model3_Packet1(gspca_dev, 0x009d, 0x0096); cit_model3_Packet1(gspca_dev, 0x009e, 0x0096); cit_model3_Packet1(gspca_dev, 0x009f, 0x000a); switch (gspca_dev->pixfmt.width) { case 160: cit_write_reg(gspca_dev, 0x0000, 0x0101); /* Same on 160x120, 320x240 */ cit_write_reg(gspca_dev, 0x00a0, 0x0103); /* Same on 160x120, 320x240 */ cit_write_reg(gspca_dev, 0x0078, 0x0105); /* Same on 160x120, 320x240 */ cit_write_reg(gspca_dev, 0x0000, 0x010a); /* Same */ cit_write_reg(gspca_dev, 0x0024, 0x010b); /* Differs everywhere */ cit_write_reg(gspca_dev, 0x00a9, 0x0119); cit_write_reg(gspca_dev, 0x0016, 0x011b); cit_write_reg(gspca_dev, 0x0002, 0x011d); /* Same on 160x120, 320x240 */ cit_write_reg(gspca_dev, 0x0003, 0x011e); /* Same on 160x120, 640x480 */ cit_write_reg(gspca_dev, 0x0000, 0x0129); /* Same */ cit_write_reg(gspca_dev, 0x00fc, 0x012b); /* Same */ cit_write_reg(gspca_dev, 0x0018, 0x0102); cit_write_reg(gspca_dev, 0x0004, 0x0104); cit_write_reg(gspca_dev, 0x0004, 0x011a); cit_write_reg(gspca_dev, 0x0028, 0x011c); cit_write_reg(gspca_dev, 0x0022, 0x012a); /* Same */ cit_write_reg(gspca_dev, 0x0000, 0x0118); cit_write_reg(gspca_dev, 0x0000, 0x0132); cit_model3_Packet1(gspca_dev, 0x0021, 0x0001); /* Same */ cit_write_reg(gspca_dev, compression, 0x0109); clock_div = 3; break; case 320: cit_write_reg(gspca_dev, 0x0000, 0x0101); /* Same on 160x120, 320x240 */ cit_write_reg(gspca_dev, 0x00a0, 0x0103); /* Same on 160x120, 320x240 */ cit_write_reg(gspca_dev, 0x0078, 0x0105); /* Same on 160x120, 320x240 */ cit_write_reg(gspca_dev, 0x0000, 0x010a); /* Same */ cit_write_reg(gspca_dev, 0x0028, 0x010b); /* Differs everywhere */ cit_write_reg(gspca_dev, 0x0002, 0x011d); /* Same */ cit_write_reg(gspca_dev, 0x0000, 0x011e); cit_write_reg(gspca_dev, 0x0000, 0x0129); /* Same */ cit_write_reg(gspca_dev, 0x00fc, 0x012b); /* Same */ /* 4 commands from 160x120 skipped */ cit_write_reg(gspca_dev, 0x0022, 0x012a); /* Same */ cit_model3_Packet1(gspca_dev, 0x0021, 0x0001); /* Same */ cit_write_reg(gspca_dev, compression, 0x0109); cit_write_reg(gspca_dev, 0x00d9, 0x0119); cit_write_reg(gspca_dev, 0x0006, 0x011b); cit_write_reg(gspca_dev, 0x0021, 0x0102); /* Same on 320x240, 640x480 */ cit_write_reg(gspca_dev, 0x0010, 0x0104); cit_write_reg(gspca_dev, 0x0004, 0x011a); cit_write_reg(gspca_dev, 0x003f, 0x011c); cit_write_reg(gspca_dev, 0x001c, 0x0118); cit_write_reg(gspca_dev, 0x0000, 0x0132); clock_div = 5; break; case 640: cit_write_reg(gspca_dev, 0x00f0, 0x0105); cit_write_reg(gspca_dev, 0x0000, 
0x010a); /* Same */ cit_write_reg(gspca_dev, 0x0038, 0x010b); /* Differs everywhere */ cit_write_reg(gspca_dev, 0x00d9, 0x0119); /* Same on 320x240, 640x480 */ cit_write_reg(gspca_dev, 0x0006, 0x011b); /* Same on 320x240, 640x480 */ cit_write_reg(gspca_dev, 0x0004, 0x011d); /* NC */ cit_write_reg(gspca_dev, 0x0003, 0x011e); /* Same on 160x120, 640x480 */ cit_write_reg(gspca_dev, 0x0000, 0x0129); /* Same */ cit_write_reg(gspca_dev, 0x00fc, 0x012b); /* Same */ cit_write_reg(gspca_dev, 0x0021, 0x0102); /* Same on 320x240, 640x480 */ cit_write_reg(gspca_dev, 0x0016, 0x0104); /* NC */ cit_write_reg(gspca_dev, 0x0004, 0x011a); /* Same on 320x240, 640x480 */ cit_write_reg(gspca_dev, 0x003f, 0x011c); /* Same on 320x240, 640x480 */ cit_write_reg(gspca_dev, 0x0022, 0x012a); /* Same */ cit_write_reg(gspca_dev, 0x001c, 0x0118); /* Same on 320x240, 640x480 */ cit_model3_Packet1(gspca_dev, 0x0021, 0x0001); /* Same */ cit_write_reg(gspca_dev, compression, 0x0109); cit_write_reg(gspca_dev, 0x0040, 0x0101); cit_write_reg(gspca_dev, 0x0040, 0x0103); cit_write_reg(gspca_dev, 0x0000, 0x0132); /* Same on 320x240, 640x480 */ clock_div = 7; break; } cit_model3_Packet1(gspca_dev, 0x007e, 0x000e); /* Hue */ cit_model3_Packet1(gspca_dev, 0x0036, 0x0011); /* Brightness */ cit_model3_Packet1(gspca_dev, 0x0060, 0x0002); /* Sharpness */ cit_model3_Packet1(gspca_dev, 0x0061, 0x0004); /* Sharpness */ cit_model3_Packet1(gspca_dev, 0x0062, 0x0005); /* Sharpness */ cit_model3_Packet1(gspca_dev, 0x0063, 0x0014); /* Sharpness */ cit_model3_Packet1(gspca_dev, 0x0096, 0x00a0); /* Red sharpness */ cit_model3_Packet1(gspca_dev, 0x0097, 0x0096); /* Blue sharpness */ cit_model3_Packet1(gspca_dev, 0x0067, 0x0001); /* Contrast */ cit_model3_Packet1(gspca_dev, 0x005b, 0x000c); /* Contrast */ cit_model3_Packet1(gspca_dev, 0x005c, 0x0016); /* Contrast */ cit_model3_Packet1(gspca_dev, 0x0098, 0x000b); cit_model3_Packet1(gspca_dev, 0x002c, 0x0003); /* Was 1, broke 640x480 */ cit_model3_Packet1(gspca_dev, 0x002f, 0x002a); cit_model3_Packet1(gspca_dev, 0x0030, 0x0029); cit_model3_Packet1(gspca_dev, 0x0037, 0x0002); cit_model3_Packet1(gspca_dev, 0x0038, 0x0059); cit_model3_Packet1(gspca_dev, 0x003d, 0x002e); cit_model3_Packet1(gspca_dev, 0x003e, 0x0028); cit_model3_Packet1(gspca_dev, 0x0078, 0x0005); cit_model3_Packet1(gspca_dev, 0x007b, 0x0011); cit_model3_Packet1(gspca_dev, 0x007d, 0x004b); cit_model3_Packet1(gspca_dev, 0x007f, 0x0022); cit_model3_Packet1(gspca_dev, 0x0080, 0x000c); cit_model3_Packet1(gspca_dev, 0x0081, 0x000b); cit_model3_Packet1(gspca_dev, 0x0083, 0x00fd); cit_model3_Packet1(gspca_dev, 0x0086, 0x000b); cit_model3_Packet1(gspca_dev, 0x0087, 0x000b); cit_model3_Packet1(gspca_dev, 0x007e, 0x000e); cit_model3_Packet1(gspca_dev, 0x0096, 0x00a0); /* Red sharpness */ cit_model3_Packet1(gspca_dev, 0x0097, 0x0096); /* Blue sharpness */ cit_model3_Packet1(gspca_dev, 0x0098, 0x000b); /* FIXME we should probably use cit_get_clock_div() here (in combination with isoc negotiation using the programmable isoc size) like with the IBM netcam pro.
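   A minimal sketch of that change (editor's assumption, untested),
   mirroring what cit_start_model0() and cit_start_model1() already do:

	clock_div = cit_get_clock_div(gspca_dev);
	if (clock_div < 0)
		return clock_div;

   which would replace the hardcoded per-resolution clock_div values
   chosen in the switch above.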
*/ cit_write_reg(gspca_dev, clock_div, 0x0111); /* Clock Divider */ switch (gspca_dev->pixfmt.width) { case 160: cit_model3_Packet1(gspca_dev, 0x001f, 0x0000); /* Same */ cit_model3_Packet1(gspca_dev, 0x0039, 0x001f); /* Same */ cit_model3_Packet1(gspca_dev, 0x003b, 0x003c); /* Same */ cit_model3_Packet1(gspca_dev, 0x0040, 0x000a); cit_model3_Packet1(gspca_dev, 0x0051, 0x000a); break; case 320: cit_model3_Packet1(gspca_dev, 0x001f, 0x0000); /* Same */ cit_model3_Packet1(gspca_dev, 0x0039, 0x001f); /* Same */ cit_model3_Packet1(gspca_dev, 0x003b, 0x003c); /* Same */ cit_model3_Packet1(gspca_dev, 0x0040, 0x0008); cit_model3_Packet1(gspca_dev, 0x0051, 0x000b); break; case 640: cit_model3_Packet1(gspca_dev, 0x001f, 0x0002); /* !Same */ cit_model3_Packet1(gspca_dev, 0x0039, 0x003e); /* !Same */ cit_model3_Packet1(gspca_dev, 0x0040, 0x0008); cit_model3_Packet1(gspca_dev, 0x0051, 0x000a); break; } /* if (sd->input_index) { */ if (rca_input) { for (i = 0; i < ARRAY_SIZE(rca_initdata); i++) { if (rca_initdata[i][0]) cit_read_reg(gspca_dev, rca_initdata[i][2], 0); else cit_write_reg(gspca_dev, rca_initdata[i][1], rca_initdata[i][2]); } } return 0; } static int cit_start_model4(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; cit_write_reg(gspca_dev, 0x0000, 0x0100); cit_write_reg(gspca_dev, 0x00c0, 0x0111); cit_write_reg(gspca_dev, 0x00bc, 0x012c); cit_write_reg(gspca_dev, 0x0080, 0x012b); cit_write_reg(gspca_dev, 0x0000, 0x0108); cit_write_reg(gspca_dev, 0x0001, 0x0133); cit_write_reg(gspca_dev, 0x009b, 0x010f); cit_write_reg(gspca_dev, 0x00bb, 0x010f); cit_model4_Packet1(gspca_dev, 0x0038, 0x0000); cit_model4_Packet1(gspca_dev, 0x000a, 0x005c); cit_write_reg(gspca_dev, 0x00aa, 0x012d); cit_write_reg(gspca_dev, 0x0004, 0x012f); cit_write_reg(gspca_dev, 0xd141, 0x0124); cit_write_reg(gspca_dev, 0x0000, 0x0127); cit_write_reg(gspca_dev, 0x00fb, 0x012e); cit_write_reg(gspca_dev, 0x0000, 0x0130); cit_write_reg(gspca_dev, 0x8a28, 0x0124); cit_write_reg(gspca_dev, 0x00aa, 0x012f); cit_write_reg(gspca_dev, 0xd055, 0x0124); cit_write_reg(gspca_dev, 0x000c, 0x0127); cit_write_reg(gspca_dev, 0x0009, 0x012e); cit_write_reg(gspca_dev, 0xaa28, 0x0124); cit_write_reg(gspca_dev, 0x00aa, 0x012d); cit_write_reg(gspca_dev, 0x0012, 0x012f); cit_write_reg(gspca_dev, 0xd141, 0x0124); cit_write_reg(gspca_dev, 0x0008, 0x0127); cit_write_reg(gspca_dev, 0x00aa, 0x0130); cit_write_reg(gspca_dev, 0x82a8, 0x0124); cit_write_reg(gspca_dev, 0x002a, 0x012d); cit_write_reg(gspca_dev, 0x0000, 0x012f); cit_write_reg(gspca_dev, 0xd145, 0x0124); cit_write_reg(gspca_dev, 0xfffa, 0x0124); cit_model4_Packet1(gspca_dev, 0x0034, 0x0000); switch (gspca_dev->pixfmt.width) { case 128: /* 128x96 */ cit_write_reg(gspca_dev, 0x0070, 0x0119); cit_write_reg(gspca_dev, 0x00d0, 0x0111); cit_write_reg(gspca_dev, 0x0039, 0x010a); cit_write_reg(gspca_dev, 0x0001, 0x0102); cit_write_reg(gspca_dev, 0x0028, 0x0103); cit_write_reg(gspca_dev, 0x0000, 0x0104); cit_write_reg(gspca_dev, 0x001e, 0x0105); cit_write_reg(gspca_dev, 0x00aa, 0x012d); cit_write_reg(gspca_dev, 0x0016, 0x012f); cit_write_reg(gspca_dev, 0xd141, 0x0124); cit_write_reg(gspca_dev, 0x000a, 0x0127); cit_write_reg(gspca_dev, 0x00aa, 0x0130); cit_write_reg(gspca_dev, 0x82a8, 0x0124); cit_write_reg(gspca_dev, 0x0014, 0x012d); cit_write_reg(gspca_dev, 0x0008, 0x012f); cit_write_reg(gspca_dev, 0xd145, 0x0124); cit_write_reg(gspca_dev, 0x00aa, 0x012e); cit_write_reg(gspca_dev, 0x001a, 0x0130); cit_write_reg(gspca_dev, 0x8a0a, 0x0124); cit_write_reg(gspca_dev, 0x005a, 
0x012d); cit_write_reg(gspca_dev, 0x9545, 0x0124); cit_write_reg(gspca_dev, 0x00aa, 0x0127); cit_write_reg(gspca_dev, 0x0018, 0x012e); cit_write_reg(gspca_dev, 0x0043, 0x0130); cit_write_reg(gspca_dev, 0x8a28, 0x0124); cit_write_reg(gspca_dev, 0x00aa, 0x012f); cit_write_reg(gspca_dev, 0xd055, 0x0124); cit_write_reg(gspca_dev, 0x001c, 0x0127); cit_write_reg(gspca_dev, 0x00eb, 0x012e); cit_write_reg(gspca_dev, 0xaa28, 0x0124); cit_write_reg(gspca_dev, 0x00aa, 0x012d); cit_write_reg(gspca_dev, 0x0032, 0x012f); cit_write_reg(gspca_dev, 0xd141, 0x0124); cit_write_reg(gspca_dev, 0x0000, 0x0127); cit_write_reg(gspca_dev, 0x00aa, 0x0130); cit_write_reg(gspca_dev, 0x82a8, 0x0124); cit_write_reg(gspca_dev, 0x0036, 0x012d); cit_write_reg(gspca_dev, 0x0008, 0x012f); cit_write_reg(gspca_dev, 0xd145, 0x0124); cit_write_reg(gspca_dev, 0xfffa, 0x0124); cit_write_reg(gspca_dev, 0x00aa, 0x012d); cit_write_reg(gspca_dev, 0x001e, 0x012f); cit_write_reg(gspca_dev, 0xd141, 0x0124); cit_write_reg(gspca_dev, 0x0017, 0x0127); cit_write_reg(gspca_dev, 0x0013, 0x012e); cit_write_reg(gspca_dev, 0x0031, 0x0130); cit_write_reg(gspca_dev, 0x8a28, 0x0124); cit_write_reg(gspca_dev, 0x0017, 0x012d); cit_write_reg(gspca_dev, 0x0078, 0x012f); cit_write_reg(gspca_dev, 0xd145, 0x0124); cit_write_reg(gspca_dev, 0x0000, 0x0127); cit_write_reg(gspca_dev, 0xfea8, 0x0124); sd->sof_len = 2; break; case 160: /* 160x120 */ cit_write_reg(gspca_dev, 0x0038, 0x0119); cit_write_reg(gspca_dev, 0x00d0, 0x0111); cit_write_reg(gspca_dev, 0x00b9, 0x010a); cit_write_reg(gspca_dev, 0x0001, 0x0102); cit_write_reg(gspca_dev, 0x0028, 0x0103); cit_write_reg(gspca_dev, 0x0000, 0x0104); cit_write_reg(gspca_dev, 0x001e, 0x0105); cit_write_reg(gspca_dev, 0x00aa, 0x012d); cit_write_reg(gspca_dev, 0x0016, 0x012f); cit_write_reg(gspca_dev, 0xd141, 0x0124); cit_write_reg(gspca_dev, 0x000b, 0x0127); cit_write_reg(gspca_dev, 0x00aa, 0x0130); cit_write_reg(gspca_dev, 0x82a8, 0x0124); cit_write_reg(gspca_dev, 0x0014, 0x012d); cit_write_reg(gspca_dev, 0x0008, 0x012f); cit_write_reg(gspca_dev, 0xd145, 0x0124); cit_write_reg(gspca_dev, 0x00aa, 0x012e); cit_write_reg(gspca_dev, 0x001a, 0x0130); cit_write_reg(gspca_dev, 0x8a0a, 0x0124); cit_write_reg(gspca_dev, 0x005a, 0x012d); cit_write_reg(gspca_dev, 0x9545, 0x0124); cit_write_reg(gspca_dev, 0x00aa, 0x0127); cit_write_reg(gspca_dev, 0x0018, 0x012e); cit_write_reg(gspca_dev, 0x0043, 0x0130); cit_write_reg(gspca_dev, 0x8a28, 0x0124); cit_write_reg(gspca_dev, 0x00aa, 0x012f); cit_write_reg(gspca_dev, 0xd055, 0x0124); cit_write_reg(gspca_dev, 0x001c, 0x0127); cit_write_reg(gspca_dev, 0x00c7, 0x012e); cit_write_reg(gspca_dev, 0xaa28, 0x0124); cit_write_reg(gspca_dev, 0x00aa, 0x012d); cit_write_reg(gspca_dev, 0x0032, 0x012f); cit_write_reg(gspca_dev, 0xd141, 0x0124); cit_write_reg(gspca_dev, 0x0025, 0x0127); cit_write_reg(gspca_dev, 0x00aa, 0x0130); cit_write_reg(gspca_dev, 0x82a8, 0x0124); cit_write_reg(gspca_dev, 0x0036, 0x012d); cit_write_reg(gspca_dev, 0x0008, 0x012f); cit_write_reg(gspca_dev, 0xd145, 0x0124); cit_write_reg(gspca_dev, 0xfffa, 0x0124); cit_write_reg(gspca_dev, 0x00aa, 0x012d); cit_write_reg(gspca_dev, 0x001e, 0x012f); cit_write_reg(gspca_dev, 0xd141, 0x0124); cit_write_reg(gspca_dev, 0x0048, 0x0127); cit_write_reg(gspca_dev, 0x0035, 0x012e); cit_write_reg(gspca_dev, 0x00d0, 0x0130); cit_write_reg(gspca_dev, 0x8a28, 0x0124); cit_write_reg(gspca_dev, 0x0048, 0x012d); cit_write_reg(gspca_dev, 0x0090, 0x012f); cit_write_reg(gspca_dev, 0xd145, 0x0124); cit_write_reg(gspca_dev, 0x0001, 0x0127); 
cit_write_reg(gspca_dev, 0xfea8, 0x0124); sd->sof_len = 2; break; case 176: /* 176x144 */ cit_write_reg(gspca_dev, 0x0038, 0x0119); cit_write_reg(gspca_dev, 0x00d0, 0x0111); cit_write_reg(gspca_dev, 0x00b9, 0x010a); cit_write_reg(gspca_dev, 0x0001, 0x0102); cit_write_reg(gspca_dev, 0x002c, 0x0103); cit_write_reg(gspca_dev, 0x0000, 0x0104); cit_write_reg(gspca_dev, 0x0024, 0x0105); cit_write_reg(gspca_dev, 0x00aa, 0x012d); cit_write_reg(gspca_dev, 0x0016, 0x012f); cit_write_reg(gspca_dev, 0xd141, 0x0124); cit_write_reg(gspca_dev, 0x0007, 0x0127); cit_write_reg(gspca_dev, 0x00aa, 0x0130); cit_write_reg(gspca_dev, 0x82a8, 0x0124); cit_write_reg(gspca_dev, 0x0014, 0x012d); cit_write_reg(gspca_dev, 0x0001, 0x012f); cit_write_reg(gspca_dev, 0xd145, 0x0124); cit_write_reg(gspca_dev, 0x00aa, 0x012e); cit_write_reg(gspca_dev, 0x001a, 0x0130); cit_write_reg(gspca_dev, 0x8a0a, 0x0124); cit_write_reg(gspca_dev, 0x005e, 0x012d); cit_write_reg(gspca_dev, 0x9545, 0x0124); cit_write_reg(gspca_dev, 0x00aa, 0x0127); cit_write_reg(gspca_dev, 0x0018, 0x012e); cit_write_reg(gspca_dev, 0x0049, 0x0130); cit_write_reg(gspca_dev, 0x8a28, 0x0124); cit_write_reg(gspca_dev, 0x00aa, 0x012f); cit_write_reg(gspca_dev, 0xd055, 0x0124); cit_write_reg(gspca_dev, 0x001c, 0x0127); cit_write_reg(gspca_dev, 0x00c7, 0x012e); cit_write_reg(gspca_dev, 0xaa28, 0x0124); cit_write_reg(gspca_dev, 0x00aa, 0x012d); cit_write_reg(gspca_dev, 0x0032, 0x012f); cit_write_reg(gspca_dev, 0xd141, 0x0124); cit_write_reg(gspca_dev, 0x0028, 0x0127); cit_write_reg(gspca_dev, 0x00aa, 0x0130); cit_write_reg(gspca_dev, 0x82a8, 0x0124); cit_write_reg(gspca_dev, 0x0036, 0x012d); cit_write_reg(gspca_dev, 0x0008, 0x012f); cit_write_reg(gspca_dev, 0xd145, 0x0124); cit_write_reg(gspca_dev, 0xfffa, 0x0124); cit_write_reg(gspca_dev, 0x00aa, 0x012d); cit_write_reg(gspca_dev, 0x001e, 0x012f); cit_write_reg(gspca_dev, 0xd141, 0x0124); cit_write_reg(gspca_dev, 0x0010, 0x0127); cit_write_reg(gspca_dev, 0x0013, 0x012e); cit_write_reg(gspca_dev, 0x002a, 0x0130); cit_write_reg(gspca_dev, 0x8a28, 0x0124); cit_write_reg(gspca_dev, 0x0010, 0x012d); cit_write_reg(gspca_dev, 0x006d, 0x012f); cit_write_reg(gspca_dev, 0xd145, 0x0124); cit_write_reg(gspca_dev, 0x0001, 0x0127); cit_write_reg(gspca_dev, 0xfea8, 0x0124); /* TESTME HDG: this does not seem right (it is 2 for all other resolutions) */ sd->sof_len = 10; break; case 320: /* 320x240 */ cit_write_reg(gspca_dev, 0x0070, 0x0119); cit_write_reg(gspca_dev, 0x00d0, 0x0111); cit_write_reg(gspca_dev, 0x0039, 0x010a); cit_write_reg(gspca_dev, 0x0001, 0x0102); cit_write_reg(gspca_dev, 0x0028, 0x0103); cit_write_reg(gspca_dev, 0x0000, 0x0104); cit_write_reg(gspca_dev, 0x001e, 0x0105); cit_write_reg(gspca_dev, 0x00aa, 0x012d); cit_write_reg(gspca_dev, 0x0016, 0x012f); cit_write_reg(gspca_dev, 0xd141, 0x0124); cit_write_reg(gspca_dev, 0x000a, 0x0127); cit_write_reg(gspca_dev, 0x00aa, 0x0130); cit_write_reg(gspca_dev, 0x82a8, 0x0124); cit_write_reg(gspca_dev, 0x0014, 0x012d); cit_write_reg(gspca_dev, 0x0008, 0x012f); cit_write_reg(gspca_dev, 0xd145, 0x0124); cit_write_reg(gspca_dev, 0x00aa, 0x012e); cit_write_reg(gspca_dev, 0x001a, 0x0130); cit_write_reg(gspca_dev, 0x8a0a, 0x0124); cit_write_reg(gspca_dev, 0x005a, 0x012d); cit_write_reg(gspca_dev, 0x9545, 0x0124); cit_write_reg(gspca_dev, 0x00aa, 0x0127); cit_write_reg(gspca_dev, 0x0018, 0x012e); cit_write_reg(gspca_dev, 0x0043, 0x0130); cit_write_reg(gspca_dev, 0x8a28, 0x0124); cit_write_reg(gspca_dev, 0x00aa, 0x012f); cit_write_reg(gspca_dev, 0xd055, 0x0124); 
cit_write_reg(gspca_dev, 0x001c, 0x0127); cit_write_reg(gspca_dev, 0x00eb, 0x012e); cit_write_reg(gspca_dev, 0xaa28, 0x0124); cit_write_reg(gspca_dev, 0x00aa, 0x012d); cit_write_reg(gspca_dev, 0x0032, 0x012f); cit_write_reg(gspca_dev, 0xd141, 0x0124); cit_write_reg(gspca_dev, 0x0000, 0x0127); cit_write_reg(gspca_dev, 0x00aa, 0x0130); cit_write_reg(gspca_dev, 0x82a8, 0x0124); cit_write_reg(gspca_dev, 0x0036, 0x012d); cit_write_reg(gspca_dev, 0x0008, 0x012f); cit_write_reg(gspca_dev, 0xd145, 0x0124); cit_write_reg(gspca_dev, 0xfffa, 0x0124); cit_write_reg(gspca_dev, 0x00aa, 0x012d); cit_write_reg(gspca_dev, 0x001e, 0x012f); cit_write_reg(gspca_dev, 0xd141, 0x0124); cit_write_reg(gspca_dev, 0x0017, 0x0127); cit_write_reg(gspca_dev, 0x0013, 0x012e); cit_write_reg(gspca_dev, 0x0031, 0x0130); cit_write_reg(gspca_dev, 0x8a28, 0x0124); cit_write_reg(gspca_dev, 0x0017, 0x012d); cit_write_reg(gspca_dev, 0x0078, 0x012f); cit_write_reg(gspca_dev, 0xd145, 0x0124); cit_write_reg(gspca_dev, 0x0000, 0x0127); cit_write_reg(gspca_dev, 0xfea8, 0x0124); sd->sof_len = 2; break; case 352: /* 352x288 */ cit_write_reg(gspca_dev, 0x0070, 0x0119); cit_write_reg(gspca_dev, 0x00c0, 0x0111); cit_write_reg(gspca_dev, 0x0039, 0x010a); cit_write_reg(gspca_dev, 0x0001, 0x0102); cit_write_reg(gspca_dev, 0x002c, 0x0103); cit_write_reg(gspca_dev, 0x0000, 0x0104); cit_write_reg(gspca_dev, 0x0024, 0x0105); cit_write_reg(gspca_dev, 0x00aa, 0x012d); cit_write_reg(gspca_dev, 0x0016, 0x012f); cit_write_reg(gspca_dev, 0xd141, 0x0124); cit_write_reg(gspca_dev, 0x0006, 0x0127); cit_write_reg(gspca_dev, 0x00aa, 0x0130); cit_write_reg(gspca_dev, 0x82a8, 0x0124); cit_write_reg(gspca_dev, 0x0014, 0x012d); cit_write_reg(gspca_dev, 0x0002, 0x012f); cit_write_reg(gspca_dev, 0xd145, 0x0124); cit_write_reg(gspca_dev, 0x00aa, 0x012e); cit_write_reg(gspca_dev, 0x001a, 0x0130); cit_write_reg(gspca_dev, 0x8a0a, 0x0124); cit_write_reg(gspca_dev, 0x005e, 0x012d); cit_write_reg(gspca_dev, 0x9545, 0x0124); cit_write_reg(gspca_dev, 0x00aa, 0x0127); cit_write_reg(gspca_dev, 0x0018, 0x012e); cit_write_reg(gspca_dev, 0x0049, 0x0130); cit_write_reg(gspca_dev, 0x8a28, 0x0124); cit_write_reg(gspca_dev, 0x00aa, 0x012f); cit_write_reg(gspca_dev, 0xd055, 0x0124); cit_write_reg(gspca_dev, 0x001c, 0x0127); cit_write_reg(gspca_dev, 0x00cf, 0x012e); cit_write_reg(gspca_dev, 0xaa28, 0x0124); cit_write_reg(gspca_dev, 0x00aa, 0x012d); cit_write_reg(gspca_dev, 0x0032, 0x012f); cit_write_reg(gspca_dev, 0xd141, 0x0124); cit_write_reg(gspca_dev, 0x0000, 0x0127); cit_write_reg(gspca_dev, 0x00aa, 0x0130); cit_write_reg(gspca_dev, 0x82a8, 0x0124); cit_write_reg(gspca_dev, 0x0036, 0x012d); cit_write_reg(gspca_dev, 0x0008, 0x012f); cit_write_reg(gspca_dev, 0xd145, 0x0124); cit_write_reg(gspca_dev, 0xfffa, 0x0124); cit_write_reg(gspca_dev, 0x00aa, 0x012d); cit_write_reg(gspca_dev, 0x001e, 0x012f); cit_write_reg(gspca_dev, 0xd141, 0x0124); cit_write_reg(gspca_dev, 0x0010, 0x0127); cit_write_reg(gspca_dev, 0x0013, 0x012e); cit_write_reg(gspca_dev, 0x0025, 0x0130); cit_write_reg(gspca_dev, 0x8a28, 0x0124); cit_write_reg(gspca_dev, 0x0010, 0x012d); cit_write_reg(gspca_dev, 0x0048, 0x012f); cit_write_reg(gspca_dev, 0xd145, 0x0124); cit_write_reg(gspca_dev, 0x0000, 0x0127); cit_write_reg(gspca_dev, 0xfea8, 0x0124); sd->sof_len = 2; break; } cit_model4_Packet1(gspca_dev, 0x0038, 0x0004); return 0; } static int cit_start_ibm_netcam_pro(struct gspca_dev *gspca_dev) { const unsigned short compression = 0; /* 0=none, 7=best frame rate */ int i, clock_div; clock_div = 
cit_get_clock_div(gspca_dev); if (clock_div < 0) return clock_div; cit_write_reg(gspca_dev, 0x0003, 0x0133); cit_write_reg(gspca_dev, 0x0000, 0x0117); cit_write_reg(gspca_dev, 0x0008, 0x0123); cit_write_reg(gspca_dev, 0x0000, 0x0100); cit_write_reg(gspca_dev, 0x0060, 0x0116); /* cit_write_reg(gspca_dev, 0x0002, 0x0112); see sd_stop0 */ cit_write_reg(gspca_dev, 0x0000, 0x0133); cit_write_reg(gspca_dev, 0x0000, 0x0123); cit_write_reg(gspca_dev, 0x0001, 0x0117); cit_write_reg(gspca_dev, 0x0040, 0x0108); cit_write_reg(gspca_dev, 0x0019, 0x012c); cit_write_reg(gspca_dev, 0x0060, 0x0116); /* cit_write_reg(gspca_dev, 0x000b, 0x0115); see sd_stop0 */ cit_model3_Packet1(gspca_dev, 0x0049, 0x0000); cit_write_reg(gspca_dev, 0x0000, 0x0101); /* Same on 160x120, 320x240 */ cit_write_reg(gspca_dev, 0x003a, 0x0102); /* Hstart */ cit_write_reg(gspca_dev, 0x00a0, 0x0103); /* Same on 160x120, 320x240 */ cit_write_reg(gspca_dev, 0x0078, 0x0105); /* Same on 160x120, 320x240 */ cit_write_reg(gspca_dev, 0x0000, 0x010a); /* Same */ cit_write_reg(gspca_dev, 0x0002, 0x011d); /* Same on 160x120, 320x240 */ cit_write_reg(gspca_dev, 0x0000, 0x0129); /* Same */ cit_write_reg(gspca_dev, 0x00fc, 0x012b); /* Same */ cit_write_reg(gspca_dev, 0x0022, 0x012a); /* Same */ switch (gspca_dev->pixfmt.width) { case 160: /* 160x120 */ cit_write_reg(gspca_dev, 0x0024, 0x010b); cit_write_reg(gspca_dev, 0x0089, 0x0119); cit_write_reg(gspca_dev, 0x000a, 0x011b); cit_write_reg(gspca_dev, 0x0003, 0x011e); cit_write_reg(gspca_dev, 0x0007, 0x0104); cit_write_reg(gspca_dev, 0x0009, 0x011a); cit_write_reg(gspca_dev, 0x008b, 0x011c); cit_write_reg(gspca_dev, 0x0008, 0x0118); cit_write_reg(gspca_dev, 0x0000, 0x0132); break; case 320: /* 320x240 */ cit_write_reg(gspca_dev, 0x0028, 0x010b); cit_write_reg(gspca_dev, 0x00d9, 0x0119); cit_write_reg(gspca_dev, 0x0006, 0x011b); cit_write_reg(gspca_dev, 0x0000, 0x011e); cit_write_reg(gspca_dev, 0x000e, 0x0104); cit_write_reg(gspca_dev, 0x0004, 0x011a); cit_write_reg(gspca_dev, 0x003f, 0x011c); cit_write_reg(gspca_dev, 0x000c, 0x0118); cit_write_reg(gspca_dev, 0x0000, 0x0132); break; } cit_model3_Packet1(gspca_dev, 0x0019, 0x0031); cit_model3_Packet1(gspca_dev, 0x001a, 0x0003); cit_model3_Packet1(gspca_dev, 0x001b, 0x0038); cit_model3_Packet1(gspca_dev, 0x001c, 0x0000); cit_model3_Packet1(gspca_dev, 0x0024, 0x0001); cit_model3_Packet1(gspca_dev, 0x0027, 0x0001); cit_model3_Packet1(gspca_dev, 0x002a, 0x0004); cit_model3_Packet1(gspca_dev, 0x0035, 0x000b); cit_model3_Packet1(gspca_dev, 0x003f, 0x0001); cit_model3_Packet1(gspca_dev, 0x0044, 0x0000); cit_model3_Packet1(gspca_dev, 0x0054, 0x0000); cit_model3_Packet1(gspca_dev, 0x00c4, 0x0000); cit_model3_Packet1(gspca_dev, 0x00e7, 0x0001); cit_model3_Packet1(gspca_dev, 0x00e9, 0x0001); cit_model3_Packet1(gspca_dev, 0x00ee, 0x0000); cit_model3_Packet1(gspca_dev, 0x00f3, 0x00c0); cit_write_reg(gspca_dev, compression, 0x0109); cit_write_reg(gspca_dev, clock_div, 0x0111); /* if (sd->input_index) { */ if (rca_input) { for (i = 0; i < ARRAY_SIZE(rca_initdata); i++) { if (rca_initdata[i][0]) cit_read_reg(gspca_dev, rca_initdata[i][2], 0); else cit_write_reg(gspca_dev, rca_initdata[i][1], rca_initdata[i][2]); } } return 0; } /* -- start the camera -- */ static int sd_start(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; int packet_size; packet_size = cit_get_packet_size(gspca_dev); if (packet_size < 0) return packet_size; switch (sd->model) { case CIT_MODEL0: cit_start_model0(gspca_dev); break; case CIT_MODEL1: cit_start_model1(gspca_dev); 
break; case CIT_MODEL2: cit_start_model2(gspca_dev); break; case CIT_MODEL3: cit_start_model3(gspca_dev); break; case CIT_MODEL4: cit_start_model4(gspca_dev); break; case CIT_IBM_NETCAM_PRO: cit_start_ibm_netcam_pro(gspca_dev); break; } /* Program max isoc packet size */ cit_write_reg(gspca_dev, packet_size >> 8, 0x0106); cit_write_reg(gspca_dev, packet_size & 0xff, 0x0107); cit_restart_stream(gspca_dev); return 0; } static int sd_isoc_init(struct gspca_dev *gspca_dev) { struct usb_interface_cache *intfc; struct usb_host_interface *alt; int max_packet_size; switch (gspca_dev->pixfmt.width) { case 160: max_packet_size = 450; break; case 176: max_packet_size = 600; break; default: max_packet_size = 1022; break; } intfc = gspca_dev->dev->actconfig->intf_cache[0]; if (intfc->num_altsetting < 2) return -ENODEV; alt = &intfc->altsetting[1]; if (alt->desc.bNumEndpoints < 1) return -ENODEV; /* Start isoc bandwidth "negotiation" at max isoc bandwidth */ alt->endpoint[0].desc.wMaxPacketSize = cpu_to_le16(max_packet_size); return 0; } static int sd_isoc_nego(struct gspca_dev *gspca_dev) { int ret, packet_size, min_packet_size; struct usb_host_interface *alt; switch (gspca_dev->pixfmt.width) { case 160: min_packet_size = 200; break; case 176: min_packet_size = 266; break; default: min_packet_size = 400; break; } /* * Existence of altsetting and endpoint was verified in sd_isoc_init() */ alt = &gspca_dev->dev->actconfig->intf_cache[0]->altsetting[1]; packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize); if (packet_size <= min_packet_size) return -EIO; packet_size -= 100; if (packet_size < min_packet_size) packet_size = min_packet_size; alt->endpoint[0].desc.wMaxPacketSize = cpu_to_le16(packet_size); ret = usb_set_interface(gspca_dev->dev, gspca_dev->iface, 1); if (ret < 0) pr_err("set alt 1 err %d\n", ret); return ret; } static void sd_stopN(struct gspca_dev *gspca_dev) { cit_write_reg(gspca_dev, 0x0000, 0x010c); } static void sd_stop0(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; if (!gspca_dev->present) return; switch (sd->model) { case CIT_MODEL0: /* HDG windows does this, but it causes the cams autogain to restart from a gain of 0, which does not look good when changing resolutions. 
*/ /* cit_write_reg(gspca_dev, 0x0000, 0x0112); */ cit_write_reg(gspca_dev, 0x00c0, 0x0100); /* LED Off */ break; case CIT_MODEL1: cit_send_FF_04_02(gspca_dev); cit_read_reg(gspca_dev, 0x0100, 0); cit_write_reg(gspca_dev, 0x81, 0x0100); /* LED Off */ break; case CIT_MODEL2: v4l2_ctrl_grab(sd->lighting, false); fallthrough; case CIT_MODEL4: cit_model2_Packet1(gspca_dev, 0x0030, 0x0004); cit_write_reg(gspca_dev, 0x0080, 0x0100); /* LED Off */ cit_write_reg(gspca_dev, 0x0020, 0x0111); cit_write_reg(gspca_dev, 0x00a0, 0x0111); cit_model2_Packet1(gspca_dev, 0x0030, 0x0002); cit_write_reg(gspca_dev, 0x0020, 0x0111); cit_write_reg(gspca_dev, 0x0000, 0x0112); break; case CIT_MODEL3: cit_write_reg(gspca_dev, 0x0006, 0x012c); cit_model3_Packet1(gspca_dev, 0x0046, 0x0000); cit_read_reg(gspca_dev, 0x0116, 0); cit_write_reg(gspca_dev, 0x0064, 0x0116); cit_read_reg(gspca_dev, 0x0115, 0); cit_write_reg(gspca_dev, 0x0003, 0x0115); cit_write_reg(gspca_dev, 0x0008, 0x0123); cit_write_reg(gspca_dev, 0x0000, 0x0117); cit_write_reg(gspca_dev, 0x0000, 0x0112); cit_write_reg(gspca_dev, 0x0080, 0x0100); break; case CIT_IBM_NETCAM_PRO: cit_model3_Packet1(gspca_dev, 0x0049, 0x00ff); cit_write_reg(gspca_dev, 0x0006, 0x012c); cit_write_reg(gspca_dev, 0x0000, 0x0116); /* HDG windows does this, but I cannot get the camera to restart with this without redoing the entire init sequence which makes switching modes really slow */ /* cit_write_reg(gspca_dev, 0x0006, 0x0115); */ cit_write_reg(gspca_dev, 0x0008, 0x0123); cit_write_reg(gspca_dev, 0x0000, 0x0117); cit_write_reg(gspca_dev, 0x0003, 0x0133); cit_write_reg(gspca_dev, 0x0000, 0x0111); /* HDG windows does this, but I get a green picture when restarting the stream after this */ /* cit_write_reg(gspca_dev, 0x0000, 0x0112); */ cit_write_reg(gspca_dev, 0x00c0, 0x0100); break; } #if IS_ENABLED(CONFIG_INPUT) /* If the last button state is pressed, release it now! 
*/ if (sd->button_state) { input_report_key(gspca_dev->input_dev, KEY_CAMERA, 0); input_sync(gspca_dev->input_dev); sd->button_state = 0; } #endif } static u8 *cit_find_sof(struct gspca_dev *gspca_dev, u8 *data, int len) { struct sd *sd = (struct sd *) gspca_dev; u8 byte3 = 0, byte4 = 0; int i; switch (sd->model) { case CIT_MODEL0: case CIT_MODEL1: case CIT_MODEL3: case CIT_IBM_NETCAM_PRO: switch (gspca_dev->pixfmt.width) { case 160: /* 160x120 */ byte3 = 0x02; byte4 = 0x0a; break; case 176: /* 176x144 */ byte3 = 0x02; byte4 = 0x0e; break; case 320: /* 320x240 */ byte3 = 0x02; byte4 = 0x08; break; case 352: /* 352x288 */ byte3 = 0x02; byte4 = 0x00; break; case 640: byte3 = 0x03; byte4 = 0x08; break; } /* These have a different byte3 */ if (sd->model <= CIT_MODEL1) byte3 = 0x00; for (i = 0; i < len; i++) { /* For this model the SOF always starts at offset 0 so no need to search the entire frame */ if (sd->model == CIT_MODEL0 && sd->sof_read != i) break; switch (sd->sof_read) { case 0: if (data[i] == 0x00) sd->sof_read++; break; case 1: if (data[i] == 0xff) sd->sof_read++; else if (data[i] == 0x00) sd->sof_read = 1; else sd->sof_read = 0; break; case 2: if (data[i] == byte3) sd->sof_read++; else if (data[i] == 0x00) sd->sof_read = 1; else sd->sof_read = 0; break; case 3: if (data[i] == byte4) { sd->sof_read = 0; return data + i + (sd->sof_len - 3); } if (byte3 == 0x00 && data[i] == 0xff) sd->sof_read = 2; else if (data[i] == 0x00) sd->sof_read = 1; else sd->sof_read = 0; break; } } break; case CIT_MODEL2: case CIT_MODEL4: /* TESTME we need to find a longer sof signature to avoid false positives */ for (i = 0; i < len; i++) { switch (sd->sof_read) { case 0: if (data[i] == 0x00) sd->sof_read++; break; case 1: sd->sof_read = 0; if (data[i] == 0xff) { if (i >= 4) gspca_dbg(gspca_dev, D_FRAM, "header found at offset: %d: %02x %02x 00 %3ph\n\n", i - 1, data[i - 4], data[i - 3], &data[i]); else gspca_dbg(gspca_dev, D_FRAM, "header found at offset: %d: 00 %3ph\n\n", i - 1, &data[i]); return data + i + (sd->sof_len - 1); } break; } } break; } return NULL; } static void sd_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, int len) { struct sd *sd = (struct sd *) gspca_dev; unsigned char *sof; sof = cit_find_sof(gspca_dev, data, len); if (sof) { int n; /* finish decoding current frame */ n = sof - data; if (n > sd->sof_len) n -= sd->sof_len; else n = 0; gspca_frame_add(gspca_dev, LAST_PACKET, data, n); gspca_frame_add(gspca_dev, FIRST_PACKET, NULL, 0); len -= sof - data; data = sof; } gspca_frame_add(gspca_dev, INTER_PACKET, data, len); } #if IS_ENABLED(CONFIG_INPUT) static void cit_check_button(struct gspca_dev *gspca_dev) { int new_button_state; struct sd *sd = (struct sd *)gspca_dev; switch (sd->model) { case CIT_MODEL3: case CIT_IBM_NETCAM_PRO: break; default: /* TEST ME unknown if this works on other models too */ return; } /* Read the button state */ cit_read_reg(gspca_dev, 0x0113, 0); new_button_state = !gspca_dev->usb_buf[0]; /* Tell the cam we've seen the button press, notice that this is a nop (iow the cam keeps reporting pressed) until the button is actually released. 
*/ if (new_button_state) cit_write_reg(gspca_dev, 0x01, 0x0113); if (sd->button_state != new_button_state) { input_report_key(gspca_dev->input_dev, KEY_CAMERA, new_button_state); input_sync(gspca_dev->input_dev); sd->button_state = new_button_state; } } #endif static int sd_s_ctrl(struct v4l2_ctrl *ctrl) { struct gspca_dev *gspca_dev = container_of(ctrl->handler, struct gspca_dev, ctrl_handler); struct sd *sd = (struct sd *)gspca_dev; gspca_dev->usb_err = 0; if (!gspca_dev->streaming) return 0; if (sd->stop_on_control_change) sd_stopN(gspca_dev); switch (ctrl->id) { case V4L2_CID_BRIGHTNESS: cit_set_brightness(gspca_dev, ctrl->val); break; case V4L2_CID_CONTRAST: cit_set_contrast(gspca_dev, ctrl->val); break; case V4L2_CID_HUE: cit_set_hue(gspca_dev, ctrl->val); break; case V4L2_CID_HFLIP: cit_set_hflip(gspca_dev, ctrl->val); break; case V4L2_CID_SHARPNESS: cit_set_sharpness(gspca_dev, ctrl->val); break; case V4L2_CID_BACKLIGHT_COMPENSATION: cit_set_lighting(gspca_dev, ctrl->val); break; } if (sd->stop_on_control_change) cit_restart_stream(gspca_dev); return gspca_dev->usb_err; } static const struct v4l2_ctrl_ops sd_ctrl_ops = { .s_ctrl = sd_s_ctrl, }; static int sd_init_controls(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *)gspca_dev; struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler; bool has_brightness; bool has_contrast; bool has_hue; bool has_sharpness; bool has_lighting; bool has_hflip; has_brightness = has_contrast = has_hue = has_sharpness = has_hflip = has_lighting = false; switch (sd->model) { case CIT_MODEL0: has_contrast = has_hflip = true; break; case CIT_MODEL1: has_brightness = has_contrast = has_sharpness = has_lighting = true; break; case CIT_MODEL2: has_brightness = has_hue = has_lighting = true; break; case CIT_MODEL3: has_brightness = has_contrast = has_sharpness = true; break; case CIT_MODEL4: has_brightness = has_hue = true; break; case CIT_IBM_NETCAM_PRO: has_brightness = has_hue = has_sharpness = has_hflip = has_lighting = true; break; } gspca_dev->vdev.ctrl_handler = hdl; v4l2_ctrl_handler_init(hdl, 5); if (has_brightness) v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_BRIGHTNESS, 0, 63, 1, 32); if (has_contrast) v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_CONTRAST, 0, 20, 1, 10); if (has_hue) v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_HUE, 0, 127, 1, 63); if (has_sharpness) v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_SHARPNESS, 0, 6, 1, 3); if (has_lighting) sd->lighting = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_BACKLIGHT_COMPENSATION, 0, 2, 1, 1); if (has_hflip) v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_HFLIP, 0, 1, 1, 0); if (hdl->error) { pr_err("Could not initialize controls\n"); return hdl->error; } return 0; } /* sub-driver description */ static const struct sd_desc sd_desc = { .name = MODULE_NAME, .config = sd_config, .init = sd_init, .init_controls = sd_init_controls, .start = sd_start, .stopN = sd_stopN, .stop0 = sd_stop0, .pkt_scan = sd_pkt_scan, #if IS_ENABLED(CONFIG_INPUT) .dq_callback = cit_check_button, .other_input = 1, #endif }; static const struct sd_desc sd_desc_isoc_nego = { .name = MODULE_NAME, .config = sd_config, .init = sd_init, .init_controls = sd_init_controls, .start = sd_start, .isoc_init = sd_isoc_init, .isoc_nego = sd_isoc_nego, .stopN = sd_stopN, .stop0 = sd_stop0, .pkt_scan = sd_pkt_scan, #if IS_ENABLED(CONFIG_INPUT) .dq_callback = cit_check_button, .other_input = 1, #endif }; /* -- module initialisation -- */ static const struct usb_device_id device_table[] = { { USB_DEVICE_VER(0x0545, 0x8080, 
0x0001, 0x0001), .driver_info = CIT_MODEL0 }, { USB_DEVICE_VER(0x0545, 0x8080, 0x0002, 0x0002), .driver_info = CIT_MODEL1 }, { USB_DEVICE_VER(0x0545, 0x8080, 0x030a, 0x030a), .driver_info = CIT_MODEL2 }, { USB_DEVICE_VER(0x0545, 0x8080, 0x0301, 0x0301), .driver_info = CIT_MODEL3 }, { USB_DEVICE_VER(0x0545, 0x8002, 0x030a, 0x030a), .driver_info = CIT_MODEL4 }, { USB_DEVICE_VER(0x0545, 0x800c, 0x030a, 0x030a), .driver_info = CIT_MODEL2 }, { USB_DEVICE_VER(0x0545, 0x800d, 0x030a, 0x030a), .driver_info = CIT_MODEL4 }, {} }; MODULE_DEVICE_TABLE(usb, device_table); /* -- device connect -- */ static int sd_probe(struct usb_interface *intf, const struct usb_device_id *id) { const struct sd_desc *desc = &sd_desc; switch (id->driver_info) { case CIT_MODEL0: case CIT_MODEL1: if (intf->cur_altsetting->desc.bInterfaceNumber != 2) return -ENODEV; break; case CIT_MODEL2: case CIT_MODEL4: if (intf->cur_altsetting->desc.bInterfaceNumber != 0) return -ENODEV; break; case CIT_MODEL3: if (intf->cur_altsetting->desc.bInterfaceNumber != 0) return -ENODEV; /* FIXME this likely applies to all model3 cams and probably to other models too. */ if (ibm_netcam_pro) desc = &sd_desc_isoc_nego; break; } return gspca_dev_probe2(intf, id, desc, sizeof(struct sd), THIS_MODULE); } static struct usb_driver sd_driver = { .name = MODULE_NAME, .id_table = device_table, .probe = sd_probe, .disconnect = gspca_disconnect, #ifdef CONFIG_PM .suspend = gspca_suspend, .resume = gspca_resume, .reset_resume = gspca_resume, #endif }; module_usb_driver(sd_driver);
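/*
 * Illustrative user-space sketch (not part of the driver) of the isoc
 * bandwidth negotiation strategy implemented by sd_isoc_init() and
 * sd_isoc_nego() above: start at the largest wMaxPacketSize the mode
 * allows and, whenever the USB core rejects the bandwidth, step down
 * by 100 bytes, clamping at the mode's minimum.  nego_step() is a
 * made-up helper name; 1022/400 are the driver's max/min packet sizes
 * for the default (non-160/176) modes.
 */
#include <stdio.h>

/* Return the next packet size to try, or -1 when negotiation fails. */
static int nego_step(int cur, int min)
{
	if (cur <= min)
		return -1;	/* mirrors the -EIO case in sd_isoc_nego() */
	cur -= 100;
	return cur < min ? min : cur;
}

int main(void)
{
	for (int sz = 1022; sz > 0; sz = nego_step(sz, 400))
		printf("try wMaxPacketSize = %d\n", sz);
	return 0;
}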
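/*
 * Minimal stand-alone sketch of the start-of-frame scanner used by
 * cit_find_sof() above for models 0/1/3 and the NetCam Pro: a 4-state
 * matcher for the signature 00 ff <byte3> <byte4> that keeps its state
 * across calls, so a signature split over two USB packets is still
 * found.  The names (sof_state, find_sof) are invented for this
 * illustration, and the in-driver version additionally rewinds state
 * on repeated 0x00/0xff bytes and handles the byte3 == 0x00 overlap.
 */
#include <stdint.h>
#include <stdio.h>

struct sof_state {
	int matched;	/* how many signature bytes matched so far */
};

/* Return the offset just past the signature, or -1 if not (yet) found. */
static int find_sof(struct sof_state *s, const uint8_t *data, int len,
		    uint8_t byte3, uint8_t byte4)
{
	const uint8_t sig[4] = { 0x00, 0xff, byte3, byte4 };

	for (int i = 0; i < len; i++) {
		if (data[i] == sig[s->matched]) {
			if (++s->matched == 4) {
				s->matched = 0;
				return i + 1;
			}
		} else {
			/* restart; a 0x00 may begin a new signature */
			s->matched = (data[i] == 0x00) ? 1 : 0;
		}
	}
	return -1;
}

int main(void)
{
	/* a 320x240 signature (00 ff 02 08) split across two "packets" */
	const uint8_t pkt1[] = { 0x12, 0x00, 0xff };
	const uint8_t pkt2[] = { 0x02, 0x08, 0x99 };
	struct sof_state s = { 0 };

	printf("pkt1: %d\n", find_sof(&s, pkt1, sizeof(pkt1), 0x02, 0x08));
	printf("pkt2: %d\n", find_sof(&s, pkt2, sizeof(pkt2), 0x02, 0x08));
	return 0;
}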
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _NET_IP6_ROUTE_H
#define _NET_IP6_ROUTE_H

#include <net/addrconf.h>
#include <net/flow.h>
#include <net/ip6_fib.h>
#include <net/sock.h>
#include <net/lwtunnel.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/route.h>
#include <net/nexthop.h>

struct route_info {
	__u8			type;
	__u8			length;
	__u8			prefix_len;
#if defined(__BIG_ENDIAN_BITFIELD)
	__u8			reserved_h:3,
				route_pref:2,
				reserved_l:3;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	__u8			reserved_l:3,
				route_pref:2,
				reserved_h:3;
#endif
	__be32			lifetime;
	__u8			prefix[];	/* 0,8 or 16 */
};

#define RT6_LOOKUP_F_IFACE		0x00000001
#define RT6_LOOKUP_F_REACHABLE		0x00000002
#define RT6_LOOKUP_F_HAS_SADDR		0x00000004
#define RT6_LOOKUP_F_SRCPREF_TMP	0x00000008
#define RT6_LOOKUP_F_SRCPREF_PUBLIC	0x00000010
#define RT6_LOOKUP_F_SRCPREF_COA	0x00000020
#define RT6_LOOKUP_F_IGNORE_LINKSTATE	0x00000040
#define RT6_LOOKUP_F_DST_NOREF		0x00000080

/* We do not (yet ?) support IPv6 jumbograms (RFC 2675)
 * Unlike IPv4, hdr->seg_len doesn't include the IPv6 header
 */
#define IP6_MAX_MTU	(0xFFFF + sizeof(struct ipv6hdr))

/*
 * rt6_srcprefs2flags() and rt6_flags2srcprefs() translate
 * between IPV6_ADDR_PREFERENCES socket option values
 *	IPV6_PREFER_SRC_TMP    = 0x1
 *	IPV6_PREFER_SRC_PUBLIC = 0x2
 *	IPV6_PREFER_SRC_COA    = 0x4
 * and above RT6_LOOKUP_F_SRCPREF_xxx flags.
*/ static inline int rt6_srcprefs2flags(unsigned int srcprefs) { return (srcprefs & IPV6_PREFER_SRC_MASK) << 3; } static inline unsigned int rt6_flags2srcprefs(int flags) { return (flags >> 3) & IPV6_PREFER_SRC_MASK; } static inline bool rt6_need_strict(const struct in6_addr *daddr) { return ipv6_addr_type(daddr) & (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK); } /* fib entries using a nexthop object can not be coalesced into * a multipath route */ static inline bool rt6_qualify_for_ecmp(const struct fib6_info *f6i) { /* the RTF_ADDRCONF flag filters out RA's */ return !(f6i->fib6_flags & RTF_ADDRCONF) && !f6i->nh && f6i->fib6_nh->fib_nh_gw_family; } void ip6_route_input(struct sk_buff *skb); struct dst_entry *ip6_route_input_lookup(struct net *net, struct net_device *dev, struct flowi6 *fl6, const struct sk_buff *skb, int flags); struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk, struct flowi6 *fl6, int flags); static inline struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk, struct flowi6 *fl6) { return ip6_route_output_flags(net, sk, fl6, 0); } /* Only conditionally release dst if flags indicates * !RT6_LOOKUP_F_DST_NOREF or dst is in uncached_list. */ static inline void ip6_rt_put_flags(struct rt6_info *rt, int flags) { if (!(flags & RT6_LOOKUP_F_DST_NOREF) || !list_empty(&rt->dst.rt_uncached)) ip6_rt_put(rt); } struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6, const struct sk_buff *skb, int flags); struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table, int ifindex, struct flowi6 *fl6, const struct sk_buff *skb, int flags); void ip6_route_init_special_entries(void); int ip6_route_init(void); void ip6_route_cleanup(void); int ipv6_route_ioctl(struct net *net, unsigned int cmd, struct in6_rtmsg *rtmsg); int ip6_route_add(struct fib6_config *cfg, gfp_t gfp_flags, struct netlink_ext_ack *extack); int ip6_ins_rt(struct net *net, struct fib6_info *f6i); int ip6_del_rt(struct net *net, struct fib6_info *f6i, bool skip_notify); void rt6_flush_exceptions(struct fib6_info *f6i); void rt6_age_exceptions(struct fib6_info *f6i, struct fib6_gc_args *gc_args, unsigned long now); static inline int ip6_route_get_saddr(struct net *net, struct fib6_info *f6i, const struct in6_addr *daddr, unsigned int prefs, int l3mdev_index, struct in6_addr *saddr) { struct net_device *l3mdev; struct net_device *dev; bool same_vrf; int err = 0; rcu_read_lock(); l3mdev = dev_get_by_index_rcu(net, l3mdev_index); if (!f6i || !f6i->fib6_prefsrc.plen || l3mdev) dev = f6i ? fib6_info_nh_dev(f6i) : NULL; same_vrf = !l3mdev || l3mdev_master_dev_rcu(dev) == l3mdev; if (f6i && f6i->fib6_prefsrc.plen && same_vrf) *saddr = f6i->fib6_prefsrc.addr; else err = ipv6_dev_get_saddr(net, same_vrf ? 
dev : l3mdev, daddr, prefs, saddr); rcu_read_unlock(); return err; } struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr, const struct in6_addr *saddr, int oif, const struct sk_buff *skb, int flags); u32 rt6_multipath_hash(const struct net *net, const struct flowi6 *fl6, const struct sk_buff *skb, struct flow_keys *hkeys); struct dst_entry *icmp6_dst_alloc(struct net_device *dev, struct flowi6 *fl6); void fib6_force_start_gc(struct net *net); struct fib6_info *addrconf_f6i_alloc(struct net *net, struct inet6_dev *idev, const struct in6_addr *addr, bool anycast, gfp_t gfp_flags, struct netlink_ext_ack *extack); struct rt6_info *ip6_dst_alloc(struct net *net, struct net_device *dev, int flags); /* * support functions for ND * */ struct fib6_info *rt6_get_dflt_router(struct net *net, const struct in6_addr *addr, struct net_device *dev); struct fib6_info *rt6_add_dflt_router(struct net *net, const struct in6_addr *gwaddr, struct net_device *dev, unsigned int pref, u32 defrtr_usr_metric, int lifetime); void rt6_purge_dflt_routers(struct net *net); int rt6_route_rcv(struct net_device *dev, u8 *opt, int len, const struct in6_addr *gwaddr); void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu, int oif, u32 mark, kuid_t uid); void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu); void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark, kuid_t uid); void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif); void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk); struct netlink_callback; struct rt6_rtnl_dump_arg { struct sk_buff *skb; struct netlink_callback *cb; struct net *net; struct fib_dump_filter filter; }; int rt6_dump_route(struct fib6_info *f6i, void *p_arg, unsigned int skip); void rt6_mtu_change(struct net_device *dev, unsigned int mtu); void rt6_remove_prefsrc(struct inet6_ifaddr *ifp); void rt6_clean_tohost(struct net *net, struct in6_addr *gateway); void rt6_sync_up(struct net_device *dev, unsigned char nh_flags); void rt6_disable_ip(struct net_device *dev, unsigned long event); void rt6_sync_down_dev(struct net_device *dev, unsigned long event); void rt6_multipath_rebalance(struct fib6_info *f6i); void rt6_uncached_list_add(struct rt6_info *rt); void rt6_uncached_list_del(struct rt6_info *rt); static inline const struct rt6_info *skb_rt6_info(const struct sk_buff *skb) { const struct dst_entry *dst = skb_dst(skb); if (dst) return dst_rt6_info(dst); return NULL; } /* * Store a destination cache entry in a socket */ static inline void ip6_dst_store(struct sock *sk, struct dst_entry *dst, const struct in6_addr *daddr, const struct in6_addr *saddr) { struct ipv6_pinfo *np = inet6_sk(sk); np->dst_cookie = rt6_get_cookie(dst_rt6_info(dst)); sk_setup_caps(sk, dst); np->daddr_cache = daddr; #ifdef CONFIG_IPV6_SUBTREES np->saddr_cache = saddr; #endif } void ip6_sk_dst_store_flow(struct sock *sk, struct dst_entry *dst, const struct flowi6 *fl6); static inline bool ipv6_unicast_destination(const struct sk_buff *skb) { const struct rt6_info *rt = dst_rt6_info(skb_dst(skb)); return rt->rt6i_flags & RTF_LOCAL; } static inline bool ipv6_anycast_destination(const struct dst_entry *dst, const struct in6_addr *daddr) { const struct rt6_info *rt = dst_rt6_info(dst); return rt->rt6i_flags & RTF_ANYCAST || (rt->rt6i_dst.plen < 127 && !(rt->rt6i_flags & (RTF_GATEWAY | RTF_NONEXTHOP)) && ipv6_addr_equal(&rt->rt6i_dst.addr, daddr)); } int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, int 
(*output)(struct net *, struct sock *, struct sk_buff *)); static inline unsigned int ip6_skb_dst_mtu(const struct sk_buff *skb) { const struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ? inet6_sk(skb->sk) : NULL; const struct dst_entry *dst = skb_dst(skb); unsigned int mtu; if (np && READ_ONCE(np->pmtudisc) >= IPV6_PMTUDISC_PROBE) { mtu = READ_ONCE(dst_dev(dst)->mtu); mtu -= lwtunnel_headroom(dst->lwtstate, mtu); } else { mtu = dst_mtu(dst); } return mtu; } static inline bool ip6_sk_accept_pmtu(const struct sock *sk) { u8 pmtudisc = READ_ONCE(inet6_sk(sk)->pmtudisc); return pmtudisc != IPV6_PMTUDISC_INTERFACE && pmtudisc != IPV6_PMTUDISC_OMIT; } static inline bool ip6_sk_ignore_df(const struct sock *sk) { u8 pmtudisc = READ_ONCE(inet6_sk(sk)->pmtudisc); return pmtudisc < IPV6_PMTUDISC_DO || pmtudisc == IPV6_PMTUDISC_OMIT; } static inline const struct in6_addr *rt6_nexthop(const struct rt6_info *rt, const struct in6_addr *daddr) { if (rt->rt6i_flags & RTF_GATEWAY) return &rt->rt6i_gateway; else if (unlikely(rt->rt6i_flags & RTF_CACHE)) return &rt->rt6i_dst.addr; else return daddr; } static inline bool rt6_duplicate_nexthop(struct fib6_info *a, struct fib6_info *b) { struct fib6_nh *nha, *nhb; if (a->nh || b->nh) return nexthop_cmp(a->nh, b->nh); nha = a->fib6_nh; nhb = b->fib6_nh; return nha->fib_nh_dev == nhb->fib_nh_dev && ipv6_addr_equal(&nha->fib_nh_gw6, &nhb->fib_nh_gw6) && !lwtunnel_cmp_encap(nha->fib_nh_lws, nhb->fib_nh_lws); } static inline unsigned int ip6_dst_mtu_maybe_forward(const struct dst_entry *dst, bool forwarding) { struct inet6_dev *idev; unsigned int mtu; if (!forwarding || dst_metric_locked(dst, RTAX_MTU)) { mtu = dst_metric_raw(dst, RTAX_MTU); if (mtu) goto out; } mtu = IPV6_MIN_MTU; rcu_read_lock(); idev = __in6_dev_get(dst_dev(dst)); if (idev) mtu = READ_ONCE(idev->cnf.mtu6); rcu_read_unlock(); out: return mtu - lwtunnel_headroom(dst->lwtstate, mtu); } u32 ip6_mtu_from_fib6(const struct fib6_result *res, const struct in6_addr *daddr, const struct in6_addr *saddr); struct neighbour *ip6_neigh_lookup(const struct in6_addr *gw, struct net_device *dev, struct sk_buff *skb, const void *daddr); #endif
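/*
 * Small stand-alone sketch (not kernel code) of the shift-based
 * mapping that rt6_srcprefs2flags()/rt6_flags2srcprefs() above rely
 * on: the IPV6_PREFER_SRC_* option bits (0x1/0x2/0x4) are exactly the
 * RT6_LOOKUP_F_SRCPREF_* lookup flags (0x8/0x10/0x20) shifted right by
 * three, so each conversion is a mask and a shift.  The flag values
 * are copied from the header; IPV6_PREFER_SRC_MASK is assumed here to
 * be the OR of the three preference bits listed in the comment above.
 */
#include <assert.h>

#define IPV6_PREFER_SRC_TMP		0x0001
#define IPV6_PREFER_SRC_PUBLIC		0x0002
#define IPV6_PREFER_SRC_COA		0x0004
#define IPV6_PREFER_SRC_MASK		(IPV6_PREFER_SRC_TMP | \
					 IPV6_PREFER_SRC_PUBLIC | \
					 IPV6_PREFER_SRC_COA)

#define RT6_LOOKUP_F_SRCPREF_TMP	0x00000008
#define RT6_LOOKUP_F_SRCPREF_PUBLIC	0x00000010
#define RT6_LOOKUP_F_SRCPREF_COA	0x00000020

static int srcprefs2flags(unsigned int srcprefs)
{
	return (srcprefs & IPV6_PREFER_SRC_MASK) << 3;
}

static unsigned int flags2srcprefs(int flags)
{
	return (flags >> 3) & IPV6_PREFER_SRC_MASK;
}

int main(void)
{
	/* each preference bit lines up with its lookup flag */
	assert(srcprefs2flags(IPV6_PREFER_SRC_TMP) == RT6_LOOKUP_F_SRCPREF_TMP);
	assert(srcprefs2flags(IPV6_PREFER_SRC_COA) == RT6_LOOKUP_F_SRCPREF_COA);
	assert(flags2srcprefs(RT6_LOOKUP_F_SRCPREF_PUBLIC) ==
	       IPV6_PREFER_SRC_PUBLIC);
	return 0;
}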
/* SPDX-License-Identifier: GPL-2.0+ */
#ifndef _LINUX_OF_H
#define _LINUX_OF_H
/*
 * Definitions for talking to the Open Firmware PROM on
 * Power Macintosh and other computers.
 *
 * Copyright (C) 1996-2005 Paul Mackerras.
 *
 * Updates for PPC64 by Peter Bergner & David Engebretsen, IBM Corp.
 * Updates for SPARC64 by David S. Miller
 * Derived from PowerPC and Sparc prom.h files by Stephen Rothwell, IBM Corp.
 */
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/kobject.h>
#include <linux/mod_devicetable.h>
#include <linux/property.h>
#include <linux/list.h>

#include <asm/byteorder.h>

typedef u32 phandle;
typedef u32 ihandle;

struct property {
	char	*name;
	int	length;
	void	*value;
	struct property *next;
#if defined(CONFIG_OF_DYNAMIC) || defined(CONFIG_SPARC)
	unsigned long _flags;
#endif
#if defined(CONFIG_OF_PROMTREE)
	unsigned int unique_id;
#endif
#if defined(CONFIG_OF_KOBJ)
	struct bin_attribute attr;
#endif
};

#if defined(CONFIG_SPARC)
struct of_irq_controller;
#endif

struct device_node {
	const char *name;
	phandle phandle;
	const char *full_name;
	struct fwnode_handle fwnode;

	struct	property *properties;
	struct	property *deadprops;	/* removed properties */
	struct	device_node *parent;
	struct	device_node *child;
	struct	device_node *sibling;
#if defined(CONFIG_OF_KOBJ)
	struct	kobject kobj;
#endif
	unsigned long _flags;
	void	*data;
#if defined(CONFIG_SPARC)
	unsigned int unique_id;
	struct of_irq_controller *irq_trans;
#endif
};

#define MAX_PHANDLE_ARGS NR_FWNODE_REFERENCE_ARGS
struct of_phandle_args {
	struct device_node *np;
	int args_count;
	uint32_t args[MAX_PHANDLE_ARGS];
};

struct of_phandle_iterator {
	/* Common iterator information */
	const char *cells_name;
	int cell_count;
	const struct device_node *parent;

	/* List size information */
	const __be32 *list_end;
	const __be32 *phandle_end;

	/* Current position state */
	const __be32 *cur;
	uint32_t cur_count;
	phandle phandle;
	struct device_node *node;
};

struct of_reconfig_data {
	struct device_node	*dn;
	struct property		*prop;
	struct property		*old_prop;
};

extern const struct kobj_type of_node_ktype;
extern const struct fwnode_operations of_fwnode_ops;

/**
 * of_node_init - initialize a devicetree node
 * @node: Pointer to device node that has been created by kzalloc()
 *
 * On return the device_node refcount is set to one.  Use of_node_put()
 * on @node when done to free the memory allocated for it.  If the node
 * is NOT a dynamic node the memory will not be freed. The decision of
 * whether to free the memory will be done by node->release(), which is
 * of_node_release().
*/ static inline void of_node_init(struct device_node *node) { #if defined(CONFIG_OF_KOBJ) kobject_init(&node->kobj, &of_node_ktype); #endif fwnode_init(&node->fwnode, &of_fwnode_ops); } #if defined(CONFIG_OF_KOBJ) #define of_node_kobj(n) (&(n)->kobj) #else #define of_node_kobj(n) NULL #endif #ifdef CONFIG_OF_DYNAMIC extern struct device_node *of_node_get(struct device_node *node); extern void of_node_put(struct device_node *node); #else /* CONFIG_OF_DYNAMIC */ /* Dummy ref counting routines - to be implemented later */ static inline struct device_node *of_node_get(struct device_node *node) { return node; } static inline void of_node_put(struct device_node *node) { } #endif /* !CONFIG_OF_DYNAMIC */ DEFINE_FREE(device_node, struct device_node *, if (_T) of_node_put(_T)) /* Pointer for first entry in chain of all nodes. */ extern struct device_node *of_root; extern struct device_node *of_chosen; extern struct device_node *of_aliases; extern struct device_node *of_stdout; /* * struct device_node flag descriptions * (need to be visible even when !CONFIG_OF) */ #define OF_DYNAMIC 1 /* (and properties) allocated via kmalloc */ #define OF_DETACHED 2 /* detached from the device tree */ #define OF_POPULATED 3 /* device already created */ #define OF_POPULATED_BUS 4 /* platform bus created for children */ #define OF_OVERLAY 5 /* allocated for an overlay */ #define OF_OVERLAY_FREE_CSET 6 /* in overlay cset being freed */ #define OF_BAD_ADDR ((u64)-1) #ifdef CONFIG_OF void of_core_init(void); static inline bool is_of_node(const struct fwnode_handle *fwnode) { return !IS_ERR_OR_NULL(fwnode) && fwnode->ops == &of_fwnode_ops; } #define to_of_node(__fwnode) \ ({ \ typeof(__fwnode) __to_of_node_fwnode = (__fwnode); \ \ is_of_node(__to_of_node_fwnode) ? \ container_of(__to_of_node_fwnode, \ struct device_node, fwnode) : \ NULL; \ }) #define of_fwnode_handle(node) \ ({ \ typeof(node) __of_fwnode_handle_node = (node); \ \ __of_fwnode_handle_node ? 
\ &__of_fwnode_handle_node->fwnode : NULL; \ }) static inline bool of_node_is_root(const struct device_node *node) { return node && (node->parent == NULL); } static inline int of_node_check_flag(const struct device_node *n, unsigned long flag) { return test_bit(flag, &n->_flags); } static inline int of_node_test_and_set_flag(struct device_node *n, unsigned long flag) { return test_and_set_bit(flag, &n->_flags); } static inline void of_node_set_flag(struct device_node *n, unsigned long flag) { set_bit(flag, &n->_flags); } static inline void of_node_clear_flag(struct device_node *n, unsigned long flag) { clear_bit(flag, &n->_flags); } #if defined(CONFIG_OF_DYNAMIC) || defined(CONFIG_SPARC) static inline int of_property_check_flag(const struct property *p, unsigned long flag) { return test_bit(flag, &p->_flags); } static inline void of_property_set_flag(struct property *p, unsigned long flag) { set_bit(flag, &p->_flags); } static inline void of_property_clear_flag(struct property *p, unsigned long flag) { clear_bit(flag, &p->_flags); } #endif extern struct device_node *__of_find_all_nodes(struct device_node *prev); extern struct device_node *of_find_all_nodes(struct device_node *prev); /* * OF address retrieval & translation */ /* Helper to read a big number; size is in cells (not bytes) */ static inline u64 of_read_number(const __be32 *cell, int size) { u64 r = 0; for (; size--; cell++) r = (r << 32) | be32_to_cpu(*cell); return r; } /* Like of_read_number, but we want an unsigned long result */ static inline unsigned long of_read_ulong(const __be32 *cell, int size) { /* toss away upper bits if unsigned long is smaller than u64 */ return of_read_number(cell, size); } #if defined(CONFIG_SPARC) #include <asm/prom.h> #endif #define OF_IS_DYNAMIC(x) test_bit(OF_DYNAMIC, &x->_flags) #define OF_MARK_DYNAMIC(x) set_bit(OF_DYNAMIC, &x->_flags) extern bool of_node_name_eq(const struct device_node *np, const char *name); extern bool of_node_name_prefix(const struct device_node *np, const char *prefix); static inline const char *of_node_full_name(const struct device_node *np) { return np ? 
np->full_name : "<no-node>"; } #define for_each_of_allnodes_from(from, dn) \ for (dn = __of_find_all_nodes(from); dn; dn = __of_find_all_nodes(dn)) #define for_each_of_allnodes(dn) for_each_of_allnodes_from(NULL, dn) extern struct device_node *of_find_node_by_name(struct device_node *from, const char *name); extern struct device_node *of_find_node_by_type(struct device_node *from, const char *type); extern struct device_node *of_find_compatible_node(struct device_node *from, const char *type, const char *compat); extern struct device_node *of_find_matching_node_and_match( struct device_node *from, const struct of_device_id *matches, const struct of_device_id **match); extern struct device_node *of_find_node_opts_by_path(const char *path, const char **opts); static inline struct device_node *of_find_node_by_path(const char *path) { return of_find_node_opts_by_path(path, NULL); } extern struct device_node *of_find_node_by_phandle(phandle handle); extern struct device_node *of_get_parent(const struct device_node *node); extern struct device_node *of_get_next_parent(struct device_node *node); extern struct device_node *of_get_next_child(const struct device_node *node, struct device_node *prev); extern struct device_node *of_get_next_child_with_prefix(const struct device_node *node, struct device_node *prev, const char *prefix); extern struct device_node *of_get_next_available_child( const struct device_node *node, struct device_node *prev); extern struct device_node *of_get_next_reserved_child( const struct device_node *node, struct device_node *prev); extern struct device_node *of_get_compatible_child(const struct device_node *parent, const char *compatible); extern struct device_node *of_get_child_by_name(const struct device_node *node, const char *name); extern struct device_node *of_get_available_child_by_name(const struct device_node *node, const char *name); /* cache lookup */ extern struct device_node *of_find_next_cache_node(const struct device_node *); extern int of_find_last_cache_level(unsigned int cpu); extern struct device_node *of_find_node_with_property( struct device_node *from, const char *prop_name); extern struct property *of_find_property(const struct device_node *np, const char *name, int *lenp); extern bool of_property_read_bool(const struct device_node *np, const char *propname); extern int of_property_count_elems_of_size(const struct device_node *np, const char *propname, int elem_size); extern int of_property_read_u16_index(const struct device_node *np, const char *propname, u32 index, u16 *out_value); extern int of_property_read_u32_index(const struct device_node *np, const char *propname, u32 index, u32 *out_value); extern int of_property_read_u64_index(const struct device_node *np, const char *propname, u32 index, u64 *out_value); extern int of_property_read_variable_u8_array(const struct device_node *np, const char *propname, u8 *out_values, size_t sz_min, size_t sz_max); extern int of_property_read_variable_u16_array(const struct device_node *np, const char *propname, u16 *out_values, size_t sz_min, size_t sz_max); extern int of_property_read_variable_u32_array(const struct device_node *np, const char *propname, u32 *out_values, size_t sz_min, size_t sz_max); extern int of_property_read_u64(const struct device_node *np, const char *propname, u64 *out_value); extern int of_property_read_variable_u64_array(const struct device_node *np, const char *propname, u64 *out_values, size_t sz_min, size_t sz_max); extern int of_property_read_string(const struct device_node 
*np, const char *propname, const char **out_string); extern int of_property_match_string(const struct device_node *np, const char *propname, const char *string); extern int of_property_read_string_helper(const struct device_node *np, const char *propname, const char **out_strs, size_t sz, int index); extern int of_device_is_compatible(const struct device_node *device, const char *); extern int of_device_compatible_match(const struct device_node *device, const char *const *compat); extern bool of_device_is_available(const struct device_node *device); extern bool of_device_is_big_endian(const struct device_node *device); extern const void *of_get_property(const struct device_node *node, const char *name, int *lenp); extern struct device_node *of_get_cpu_node(int cpu, unsigned int *thread); extern struct device_node *of_cpu_device_node_get(int cpu); extern int of_cpu_node_to_id(struct device_node *np); extern struct device_node *of_get_next_cpu_node(struct device_node *prev); extern struct device_node *of_get_cpu_state_node(const struct device_node *cpu_node, int index); extern u64 of_get_cpu_hwid(struct device_node *cpun, unsigned int thread); extern int of_n_addr_cells(struct device_node *np); extern int of_n_size_cells(struct device_node *np); extern const struct of_device_id *of_match_node( const struct of_device_id *matches, const struct device_node *node); extern const void *of_device_get_match_data(const struct device *dev); extern int of_alias_from_compatible(const struct device_node *node, char *alias, int len); extern void of_print_phandle_args(const char *msg, const struct of_phandle_args *args); extern int __of_parse_phandle_with_args(const struct device_node *np, const char *list_name, const char *cells_name, int cell_count, int index, struct of_phandle_args *out_args); extern int of_parse_phandle_with_args_map(const struct device_node *np, const char *list_name, const char *stem_name, int index, struct of_phandle_args *out_args); extern int of_count_phandle_with_args(const struct device_node *np, const char *list_name, const char *cells_name); /* module functions */ extern ssize_t of_modalias(const struct device_node *np, char *str, ssize_t len); extern int of_request_module(const struct device_node *np); /* phandle iterator functions */ extern int of_phandle_iterator_init(struct of_phandle_iterator *it, const struct device_node *np, const char *list_name, const char *cells_name, int cell_count); extern int of_phandle_iterator_next(struct of_phandle_iterator *it); extern int of_phandle_iterator_args(struct of_phandle_iterator *it, uint32_t *args, int size); extern int of_alias_get_id(const struct device_node *np, const char *stem); extern int of_alias_get_highest_id(const char *stem); bool of_machine_compatible_match(const char *const *compats); /** * of_machine_is_compatible - Test root of device tree for a given compatible value * @compat: compatible string to look for in root node's compatible property. * * Return: true if the root node has the given value in its compatible property. 
*/ static inline bool of_machine_is_compatible(const char *compat) { const char *compats[] = { compat, NULL }; return of_machine_compatible_match(compats); } extern int of_add_property(struct device_node *np, struct property *prop); extern int of_remove_property(struct device_node *np, struct property *prop); extern int of_update_property(struct device_node *np, struct property *newprop); /* For updating the device tree at runtime */ #define OF_RECONFIG_ATTACH_NODE 0x0001 #define OF_RECONFIG_DETACH_NODE 0x0002 #define OF_RECONFIG_ADD_PROPERTY 0x0003 #define OF_RECONFIG_REMOVE_PROPERTY 0x0004 #define OF_RECONFIG_UPDATE_PROPERTY 0x0005 extern int of_attach_node(struct device_node *); extern int of_detach_node(struct device_node *); #define of_match_ptr(_ptr) (_ptr) /* * u32 u; * * of_property_for_each_u32(np, "propname", u) * printk("U32 value: %x\n", u); */ const __be32 *of_prop_next_u32(const struct property *prop, const __be32 *cur, u32 *pu); /* * struct property *prop; * const char *s; * * of_property_for_each_string(np, "propname", prop, s) * printk("String value: %s\n", s); */ const char *of_prop_next_string(const struct property *prop, const char *cur); bool of_console_check(const struct device_node *dn, char *name, int index); int of_map_id(const struct device_node *np, u32 id, const char *map_name, const char *map_mask_name, struct device_node **target, u32 *id_out); phys_addr_t of_dma_get_max_cpu_address(struct device_node *np); struct kimage; void *of_kexec_alloc_and_setup_fdt(const struct kimage *image, unsigned long initrd_load_addr, unsigned long initrd_len, const char *cmdline, size_t extra_fdt_size); #else /* CONFIG_OF */ static inline void of_core_init(void) { } static inline bool is_of_node(const struct fwnode_handle *fwnode) { return false; } static inline struct device_node *to_of_node(const struct fwnode_handle *fwnode) { return NULL; } static inline bool of_node_name_eq(const struct device_node *np, const char *name) { return false; } static inline bool of_node_name_prefix(const struct device_node *np, const char *prefix) { return false; } static inline const char* of_node_full_name(const struct device_node *np) { return "<no-node>"; } static inline struct device_node *of_find_node_by_name(struct device_node *from, const char *name) { return NULL; } static inline struct device_node *of_find_node_by_type(struct device_node *from, const char *type) { return NULL; } static inline struct device_node *of_find_matching_node_and_match( struct device_node *from, const struct of_device_id *matches, const struct of_device_id **match) { return NULL; } static inline struct device_node *of_find_node_by_path(const char *path) { return NULL; } static inline struct device_node *of_find_node_opts_by_path(const char *path, const char **opts) { return NULL; } static inline struct device_node *of_find_node_by_phandle(phandle handle) { return NULL; } static inline struct device_node *of_get_parent(const struct device_node *node) { return NULL; } static inline struct device_node *of_get_next_parent(struct device_node *node) { return NULL; } static inline struct device_node *of_get_next_child( const struct device_node *node, struct device_node *prev) { return NULL; } static inline struct device_node *of_get_next_available_child( const struct device_node *node, struct device_node *prev) { return NULL; } static inline struct device_node *of_get_next_reserved_child( const struct device_node *node, struct device_node *prev) { return NULL; } static inline struct device_node 
*of_find_node_with_property( struct device_node *from, const char *prop_name) { return NULL; } #define of_fwnode_handle(node) NULL static inline struct device_node *of_get_compatible_child(const struct device_node *parent, const char *compatible) { return NULL; } static inline struct device_node *of_get_child_by_name( const struct device_node *node, const char *name) { return NULL; } static inline struct device_node *of_get_available_child_by_name( const struct device_node *node, const char *name) { return NULL; } static inline int of_device_is_compatible(const struct device_node *device, const char *name) { return 0; } static inline int of_device_compatible_match(const struct device_node *device, const char *const *compat) { return 0; } static inline bool of_device_is_available(const struct device_node *device) { return false; } static inline bool of_device_is_big_endian(const struct device_node *device) { return false; } static inline struct property *of_find_property(const struct device_node *np, const char *name, int *lenp) { return NULL; } static inline struct device_node *of_find_compatible_node( struct device_node *from, const char *type, const char *compat) { return NULL; } static inline bool of_property_read_bool(const struct device_node *np, const char *propname) { return false; } static inline int of_property_count_elems_of_size(const struct device_node *np, const char *propname, int elem_size) { return -ENOSYS; } static inline int of_property_read_u16_index(const struct device_node *np, const char *propname, u32 index, u16 *out_value) { return -ENOSYS; } static inline int of_property_read_u32_index(const struct device_node *np, const char *propname, u32 index, u32 *out_value) { return -ENOSYS; } static inline int of_property_read_u64_index(const struct device_node *np, const char *propname, u32 index, u64 *out_value) { return -ENOSYS; } static inline const void *of_get_property(const struct device_node *node, const char *name, int *lenp) { return NULL; } static inline struct device_node *of_get_cpu_node(int cpu, unsigned int *thread) { return NULL; } static inline struct device_node *of_cpu_device_node_get(int cpu) { return NULL; } static inline int of_cpu_node_to_id(struct device_node *np) { return -ENODEV; } static inline struct device_node *of_get_next_cpu_node(struct device_node *prev) { return NULL; } static inline struct device_node *of_get_cpu_state_node(struct device_node *cpu_node, int index) { return NULL; } static inline int of_n_addr_cells(struct device_node *np) { return 0; } static inline int of_n_size_cells(struct device_node *np) { return 0; } static inline int of_property_read_variable_u8_array(const struct device_node *np, const char *propname, u8 *out_values, size_t sz_min, size_t sz_max) { return -ENOSYS; } static inline int of_property_read_variable_u16_array(const struct device_node *np, const char *propname, u16 *out_values, size_t sz_min, size_t sz_max) { return -ENOSYS; } static inline int of_property_read_variable_u32_array(const struct device_node *np, const char *propname, u32 *out_values, size_t sz_min, size_t sz_max) { return -ENOSYS; } static inline int of_property_read_u64(const struct device_node *np, const char *propname, u64 *out_value) { return -ENOSYS; } static inline int of_property_read_variable_u64_array(const struct device_node *np, const char *propname, u64 *out_values, size_t sz_min, size_t sz_max) { return -ENOSYS; } static inline int of_property_read_string(const struct device_node *np, const char *propname, const char **out_string) 
{ return -ENOSYS; } static inline int of_property_match_string(const struct device_node *np, const char *propname, const char *string) { return -ENOSYS; } static inline int of_property_read_string_helper(const struct device_node *np, const char *propname, const char **out_strs, size_t sz, int index) { return -ENOSYS; } static inline int __of_parse_phandle_with_args(const struct device_node *np, const char *list_name, const char *cells_name, int cell_count, int index, struct of_phandle_args *out_args) { return -ENOSYS; } static inline int of_parse_phandle_with_args_map(const struct device_node *np, const char *list_name, const char *stem_name, int index, struct of_phandle_args *out_args) { return -ENOSYS; } static inline int of_count_phandle_with_args(const struct device_node *np, const char *list_name, const char *cells_name) { return -ENOSYS; } static inline ssize_t of_modalias(const struct device_node *np, char *str, ssize_t len) { return -ENODEV; } static inline int of_request_module(const struct device_node *np) { return -ENODEV; } static inline int of_phandle_iterator_init(struct of_phandle_iterator *it, const struct device_node *np, const char *list_name, const char *cells_name, int cell_count) { return -ENOSYS; } static inline int of_phandle_iterator_next(struct of_phandle_iterator *it) { return -ENOSYS; } static inline int of_phandle_iterator_args(struct of_phandle_iterator *it, uint32_t *args, int size) { return 0; } static inline int of_alias_get_id(struct device_node *np, const char *stem) { return -ENOSYS; } static inline int of_alias_get_highest_id(const char *stem) { return -ENOSYS; } static inline int of_machine_is_compatible(const char *compat) { return 0; } static inline int of_add_property(struct device_node *np, struct property *prop) { return 0; } static inline int of_remove_property(struct device_node *np, struct property *prop) { return 0; } static inline bool of_machine_compatible_match(const char *const *compats) { return false; } static inline bool of_console_check(const struct device_node *dn, const char *name, int index) { return false; } static inline const __be32 *of_prop_next_u32(const struct property *prop, const __be32 *cur, u32 *pu) { return NULL; } static inline const char *of_prop_next_string(const struct property *prop, const char *cur) { return NULL; } static inline int of_node_check_flag(struct device_node *n, unsigned long flag) { return 0; } static inline int of_node_test_and_set_flag(struct device_node *n, unsigned long flag) { return 0; } static inline void of_node_set_flag(struct device_node *n, unsigned long flag) { } static inline void of_node_clear_flag(struct device_node *n, unsigned long flag) { } static inline int of_property_check_flag(const struct property *p, unsigned long flag) { return 0; } static inline void of_property_set_flag(struct property *p, unsigned long flag) { } static inline void of_property_clear_flag(struct property *p, unsigned long flag) { } static inline int of_map_id(const struct device_node *np, u32 id, const char *map_name, const char *map_mask_name, struct device_node **target, u32 *id_out) { return -EINVAL; } static inline phys_addr_t of_dma_get_max_cpu_address(struct device_node *np) { return PHYS_ADDR_MAX; } static inline const void *of_device_get_match_data(const struct device *dev) { return NULL; } #define of_match_ptr(_ptr) NULL #define of_match_node(_matches, _node) NULL #endif /* CONFIG_OF */ /* Default string compare functions, Allow arch asm/prom.h to override */ #if !defined(of_compat_cmp) #define 
of_compat_cmp(s1, s2, l) strcasecmp((s1), (s2)) #define of_prop_cmp(s1, s2) strcmp((s1), (s2)) #define of_node_cmp(s1, s2) strcasecmp((s1), (s2)) #endif #define for_each_property_of_node(dn, pp) \ for (pp = dn->properties; pp != NULL; pp = pp->next) #if defined(CONFIG_OF) && defined(CONFIG_NUMA) extern int of_node_to_nid(struct device_node *np); #else static inline int of_node_to_nid(struct device_node *device) { return NUMA_NO_NODE; } #endif #ifdef CONFIG_OF_NUMA extern int of_numa_init(void); #else static inline int of_numa_init(void) { return -ENOSYS; } #endif static inline struct device_node *of_find_matching_node( struct device_node *from, const struct of_device_id *matches) { return of_find_matching_node_and_match(from, matches, NULL); } static inline const char *of_node_get_device_type(const struct device_node *np) { return of_get_property(np, "device_type", NULL); } static inline bool of_node_is_type(const struct device_node *np, const char *type) { const char *match = of_node_get_device_type(np); return np && match && type && !strcmp(match, type); } /** * of_parse_phandle - Resolve a phandle property to a device_node pointer * @np: Pointer to device node holding phandle property * @phandle_name: Name of property holding a phandle value * @index: For properties holding a table of phandles, this is the index into * the table * * Return: The device_node pointer with refcount incremented. Use * of_node_put() on it when done. */ static inline struct device_node *of_parse_phandle(const struct device_node *np, const char *phandle_name, int index) { struct of_phandle_args args; if (__of_parse_phandle_with_args(np, phandle_name, NULL, 0, index, &args)) return NULL; return args.np; } /** * of_parse_phandle_with_args() - Find a node pointed to by a phandle in a list * @np: pointer to a device tree node containing a list * @list_name: property name that contains a list * @cells_name: property name that specifies phandles' arguments count * @index: index of a phandle to parse out * @out_args: optional pointer to output arguments structure (will be filled) * * This function is useful for parsing lists of phandles and their arguments. * Returns 0 on success and fills out_args; on error, returns an appropriate * errno value. * * The caller is responsible for calling of_node_put() on the returned * out_args->np pointer. * * Example:: * * phandle1: node1 { * #list-cells = <2>; * }; * * phandle2: node2 { * #list-cells = <1>; * }; * * node3 { * list = <&phandle1 1 2 &phandle2 3>; * }; * * To get a device_node for the ``node2`` node, you may call this: * of_parse_phandle_with_args(node3, "list", "#list-cells", 1, &args); */ static inline int of_parse_phandle_with_args(const struct device_node *np, const char *list_name, const char *cells_name, int index, struct of_phandle_args *out_args) { int cell_count = -1; /* If cells_name is NULL we assume a cell count of 0 */ if (!cells_name) cell_count = 0; return __of_parse_phandle_with_args(np, list_name, cells_name, cell_count, index, out_args); } /** * of_parse_phandle_with_fixed_args() - Find a node pointed to by a phandle in a list * @np: pointer to a device tree node containing a list * @list_name: property name that contains a list * @cell_count: number of argument cells following the phandle * @index: index of a phandle to parse out * @out_args: optional pointer to output arguments structure (will be filled) * * This function is useful for parsing lists of phandles and their arguments. * Returns 0 on success and fills out_args; on error, returns an appropriate * errno value.
* * The caller is responsible for calling of_node_put() on the returned * out_args->np pointer. * * Example:: * * phandle1: node1 { * }; * * phandle2: node2 { * }; * * node3 { * list = <&phandle1 0 2 &phandle2 2 3>; * }; * * To get a device_node for the ``node2`` node, you may call this: * of_parse_phandle_with_fixed_args(node3, "list", 2, 1, &args); */ static inline int of_parse_phandle_with_fixed_args(const struct device_node *np, const char *list_name, int cell_count, int index, struct of_phandle_args *out_args) { return __of_parse_phandle_with_args(np, list_name, NULL, cell_count, index, out_args); } /** * of_parse_phandle_with_optional_args() - Find a node pointed to by a phandle in a list * @np: pointer to a device tree node containing a list * @list_name: property name that contains a list * @cells_name: property name that specifies phandles' arguments count * @index: index of a phandle to parse out * @out_args: optional pointer to output arguments structure (will be filled) * * Same as of_parse_phandle_with_args() except that if the cells_name property * is not found, a cell_count of 0 is assumed. * * This is useful if you have a phandle which didn't have arguments * before and thus doesn't have a '#*-cells' property, but is now migrated to * having arguments while retaining backwards compatibility. */ static inline int of_parse_phandle_with_optional_args(const struct device_node *np, const char *list_name, const char *cells_name, int index, struct of_phandle_args *out_args) { return __of_parse_phandle_with_args(np, list_name, cells_name, 0, index, out_args); } /** * of_phandle_args_equal() - Compare two of_phandle_args * @a1: First of_phandle_args to compare * @a2: Second of_phandle_args to compare * * Return: True if a1 and a2 are the same (same node pointer, same phandle * args), false otherwise. */ static inline bool of_phandle_args_equal(const struct of_phandle_args *a1, const struct of_phandle_args *a2) { return a1->np == a2->np && a1->args_count == a2->args_count && !memcmp(a1->args, a2->args, sizeof(a1->args[0]) * a1->args_count); } /** * of_property_count_u8_elems - Count the number of u8 elements in a property * * @np: device node from which the property value is to be read. * @propname: name of the property to be searched. * * Search for a property in a device node and count the number of u8 elements * in it. * * Return: The number of elements on success, -EINVAL if the property does * not exist or its length does not match a multiple of u8, and -ENODATA if the * property does not have a value. */ static inline int of_property_count_u8_elems(const struct device_node *np, const char *propname) { return of_property_count_elems_of_size(np, propname, sizeof(u8)); } /** * of_property_count_u16_elems - Count the number of u16 elements in a property * * @np: device node from which the property value is to be read. * @propname: name of the property to be searched. * * Search for a property in a device node and count the number of u16 elements * in it. * * Return: The number of elements on success, -EINVAL if the property does * not exist or its length does not match a multiple of u16, and -ENODATA if the * property does not have a value. */ static inline int of_property_count_u16_elems(const struct device_node *np, const char *propname) { return of_property_count_elems_of_size(np, propname, sizeof(u16)); } /** * of_property_count_u32_elems - Count the number of u32 elements in a property * * @np: device node from which the property value is to be read.
* @propname: name of the property to be searched. * * Search for a property in a device node and count the number of u32 elements * in it. * * Return: The number of elements on success, -EINVAL if the property does * not exist or its length does not match a multiple of u32, and -ENODATA if the * property does not have a value. */ static inline int of_property_count_u32_elems(const struct device_node *np, const char *propname) { return of_property_count_elems_of_size(np, propname, sizeof(u32)); } /** * of_property_count_u64_elems - Count the number of u64 elements in a property * * @np: device node from which the property value is to be read. * @propname: name of the property to be searched. * * Search for a property in a device node and count the number of u64 elements * in it. * * Return: The number of elements on success, -EINVAL if the property does * not exist or its length does not match a multiple of u64, and -ENODATA if the * property does not have a value. */ static inline int of_property_count_u64_elems(const struct device_node *np, const char *propname) { return of_property_count_elems_of_size(np, propname, sizeof(u64)); } /** * of_property_read_string_array() - Read an array of strings from a multiple * strings property. * @np: device node from which the property value is to be read. * @propname: name of the property to be searched. * @out_strs: output array of string pointers. * @sz: number of array elements to read. * * Search for a property in a device tree node and retrieve a list of * null terminated string values (pointer to data, not a copy) in that property. * * Return: If @out_strs is NULL, the number of strings in the property is returned. */ static inline int of_property_read_string_array(const struct device_node *np, const char *propname, const char **out_strs, size_t sz) { return of_property_read_string_helper(np, propname, out_strs, sz, 0); } /** * of_property_count_strings() - Find and return the number of strings from a * multiple strings property. * @np: device node from which the property value is to be read. * @propname: name of the property to be searched. * * Search for a property in a device tree node and retrieve the number of null * terminated strings contained in it. * * Return: The number of strings on success, -EINVAL if the property does not * exist, -ENODATA if property does not have a value, and -EILSEQ if the string * is not null-terminated within the length of the property data. */ static inline int of_property_count_strings(const struct device_node *np, const char *propname) { return of_property_read_string_helper(np, propname, NULL, 0, 0); } /** * of_property_read_string_index() - Find and read a string from a multiple * strings property. * @np: device node from which the property value is to be read. * @propname: name of the property to be searched. * @index: index of the string in the list of strings * @output: pointer to null terminated return string, modified only if * return value is 0. * * Search for a property in a device tree node and retrieve a null * terminated string value (pointer to data, not a copy) in the list of strings * contained in that property. * * Return: 0 on success, -EINVAL if the property does not exist, -ENODATA if * property does not have a value, and -EILSEQ if the string is not * null-terminated within the length of the property data. * * The out_string pointer is modified only if a valid string can be decoded.
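 *
 * Example (editorial sketch; assumes a node carrying a hypothetical
 * ``labels = "red", "green";`` property)::
 *
 *	const char *color;
 *
 *	if (!of_property_read_string_index(np, "labels", 1, &color))
 *		pr_info("second label: %s\n", color);
 *
 * On success, color points at the "green" string inside the property data.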
*/ static inline int of_property_read_string_index(const struct device_node *np, const char *propname, int index, const char **output) { int rc = of_property_read_string_helper(np, propname, output, 1, index); return rc < 0 ? rc : 0; } /** * of_property_present - Test if a property is present in a node * @np: device node to search for the property. * @propname: name of the property to be searched. * * Test for a property present in a device node. * * Return: true if the property exists, false otherwise. */ static inline bool of_property_present(const struct device_node *np, const char *propname) { struct property *prop = of_find_property(np, propname, NULL); return prop ? true : false; } /** * of_property_read_u8_array - Find and read an array of u8 from a property. * * @np: device node from which the property value is to be read. * @propname: name of the property to be searched. * @out_values: pointer to return value, modified only if return value is 0. * @sz: number of array elements to read * * Search for a property in a device node and read 8-bit value(s) from * it. * * The dts entry for the array should look like: * ``property = /bits/ 8 <0x50 0x60 0x70>;`` * * Return: 0 on success, -EINVAL if the property does not exist, * -ENODATA if property does not have a value, and -EOVERFLOW if the * property data isn't large enough. * * The out_values is modified only if a valid u8 value can be decoded. */ static inline int of_property_read_u8_array(const struct device_node *np, const char *propname, u8 *out_values, size_t sz) { int ret = of_property_read_variable_u8_array(np, propname, out_values, sz, 0); if (ret >= 0) return 0; else return ret; } /** * of_property_read_u16_array - Find and read an array of u16 from a property. * * @np: device node from which the property value is to be read. * @propname: name of the property to be searched. * @out_values: pointer to return value, modified only if return value is 0. * @sz: number of array elements to read * * Search for a property in a device node and read 16-bit value(s) from * it. * * The dts entry for the array should look like: * ``property = /bits/ 16 <0x5000 0x6000 0x7000>;`` * * Return: 0 on success, -EINVAL if the property does not exist, * -ENODATA if property does not have a value, and -EOVERFLOW if the * property data isn't large enough. * * The out_values is modified only if a valid u16 value can be decoded. */ static inline int of_property_read_u16_array(const struct device_node *np, const char *propname, u16 *out_values, size_t sz) { int ret = of_property_read_variable_u16_array(np, propname, out_values, sz, 0); if (ret >= 0) return 0; else return ret; } /** * of_property_read_u32_array - Find and read an array of 32 bit integers * from a property. * * @np: device node from which the property value is to be read. * @propname: name of the property to be searched. * @out_values: pointer to return value, modified only if return value is 0. * @sz: number of array elements to read * * Search for a property in a device node and read 32-bit value(s) from * it. * * Return: 0 on success, -EINVAL if the property does not exist, * -ENODATA if property does not have a value, and -EOVERFLOW if the * property data isn't large enough. * * The out_values is modified only if a valid u32 value can be decoded.
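 *
 * Example (editorial sketch; ``vendor,rates = <100000 400000>;`` is a
 * hypothetical property)::
 *
 *	u32 rates[2];
 *
 *	if (!of_property_read_u32_array(np, "vendor,rates", rates, 2))
 *		pr_info("rates: %u %u\n", rates[0], rates[1]);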
*/ static inline int of_property_read_u32_array(const struct device_node *np, const char *propname, u32 *out_values, size_t sz) { int ret = of_property_read_variable_u32_array(np, propname, out_values, sz, 0); if (ret >= 0) return 0; else return ret; } /** * of_property_read_u64_array - Find and read an array of 64 bit integers * from a property. * * @np: device node from which the property value is to be read. * @propname: name of the property to be searched. * @out_values: pointer to return value, modified only if return value is 0. * @sz: number of array elements to read * * Search for a property in a device node and read 64-bit value(s) from * it. * * Return: 0 on success, -EINVAL if the property does not exist, * -ENODATA if property does not have a value, and -EOVERFLOW if the * property data isn't large enough. * * The out_values is modified only if a valid u64 value can be decoded. */ static inline int of_property_read_u64_array(const struct device_node *np, const char *propname, u64 *out_values, size_t sz) { int ret = of_property_read_variable_u64_array(np, propname, out_values, sz, 0); if (ret >= 0) return 0; else return ret; } static inline int of_property_read_u8(const struct device_node *np, const char *propname, u8 *out_value) { return of_property_read_u8_array(np, propname, out_value, 1); } static inline int of_property_read_u16(const struct device_node *np, const char *propname, u16 *out_value) { return of_property_read_u16_array(np, propname, out_value, 1); } static inline int of_property_read_u32(const struct device_node *np, const char *propname, u32 *out_value) { return of_property_read_u32_array(np, propname, out_value, 1); } static inline int of_property_read_s32(const struct device_node *np, const char *propname, s32 *out_value) { return of_property_read_u32(np, propname, (u32*) out_value); } #define of_for_each_phandle(it, err, np, ln, cn, cc) \ for (of_phandle_iterator_init((it), (np), (ln), (cn), (cc)), \ err = of_phandle_iterator_next(it); \ err == 0; \ err = of_phandle_iterator_next(it)) #define of_property_for_each_u32(np, propname, u) \ for (struct {const struct property *prop; const __be32 *item; } _it = \ {of_find_property(np, propname, NULL), \ of_prop_next_u32(_it.prop, NULL, &u)}; \ _it.item; \ _it.item = of_prop_next_u32(_it.prop, _it.item, &u)) #define of_property_for_each_string(np, propname, prop, s) \ for (prop = of_find_property(np, propname, NULL), \ s = of_prop_next_string(prop, NULL); \ s; \ s = of_prop_next_string(prop, s)) #define for_each_node_by_name(dn, name) \ for (dn = of_find_node_by_name(NULL, name); dn; \ dn = of_find_node_by_name(dn, name)) #define for_each_node_by_type(dn, type) \ for (dn = of_find_node_by_type(NULL, type); dn; \ dn = of_find_node_by_type(dn, type)) #define for_each_compatible_node(dn, type, compatible) \ for (dn = of_find_compatible_node(NULL, type, compatible); dn; \ dn = of_find_compatible_node(dn, type, compatible)) #define for_each_matching_node(dn, matches) \ for (dn = of_find_matching_node(NULL, matches); dn; \ dn = of_find_matching_node(dn, matches)) #define for_each_matching_node_and_match(dn, matches, match) \ for (dn = of_find_matching_node_and_match(NULL, matches, match); \ dn; dn = of_find_matching_node_and_match(dn, matches, match)) #define for_each_child_of_node(parent, child) \ for (child = of_get_next_child(parent, NULL); child != NULL; \ child = of_get_next_child(parent, child)) #define for_each_child_of_node_scoped(parent, child) \ for (struct device_node *child __free(device_node) = \ 
of_get_next_child(parent, NULL); \ child != NULL; \ child = of_get_next_child(parent, child)) #define for_each_child_of_node_with_prefix(parent, child, prefix) \ for (struct device_node *child __free(device_node) = \ of_get_next_child_with_prefix(parent, NULL, prefix); \ child != NULL; \ child = of_get_next_child_with_prefix(parent, child, prefix)) #define for_each_available_child_of_node(parent, child) \ for (child = of_get_next_available_child(parent, NULL); child != NULL; \ child = of_get_next_available_child(parent, child)) #define for_each_reserved_child_of_node(parent, child) \ for (child = of_get_next_reserved_child(parent, NULL); child != NULL; \ child = of_get_next_reserved_child(parent, child)) #define for_each_available_child_of_node_scoped(parent, child) \ for (struct device_node *child __free(device_node) = \ of_get_next_available_child(parent, NULL); \ child != NULL; \ child = of_get_next_available_child(parent, child)) #define for_each_of_cpu_node(cpu) \ for (cpu = of_get_next_cpu_node(NULL); cpu != NULL; \ cpu = of_get_next_cpu_node(cpu)) #define for_each_node_with_property(dn, prop_name) \ for (dn = of_find_node_with_property(NULL, prop_name); dn; \ dn = of_find_node_with_property(dn, prop_name)) static inline int of_get_child_count(const struct device_node *np) { struct device_node *child; int num = 0; for_each_child_of_node(np, child) num++; return num; } static inline int of_get_available_child_count(const struct device_node *np) { struct device_node *child; int num = 0; for_each_available_child_of_node(np, child) num++; return num; } #define _OF_DECLARE_STUB(table, name, compat, fn, fn_type) \ static const struct of_device_id __of_table_##name \ __attribute__((unused)) \ = { .compatible = compat, \ .data = (fn == (fn_type)NULL) ? fn : fn } #if defined(CONFIG_OF) && !defined(MODULE) #define _OF_DECLARE(table, name, compat, fn, fn_type) \ static const struct of_device_id __of_table_##name \ __used __section("__" #table "_of_table") \ __aligned(__alignof__(struct of_device_id)) \ = { .compatible = compat, \ .data = (fn == (fn_type)NULL) ? fn : fn } #else #define _OF_DECLARE(table, name, compat, fn, fn_type) \ _OF_DECLARE_STUB(table, name, compat, fn, fn_type) #endif typedef int (*of_init_fn_2)(struct device_node *, struct device_node *); typedef int (*of_init_fn_1_ret)(struct device_node *); typedef void (*of_init_fn_1)(struct device_node *); #define OF_DECLARE_1(table, name, compat, fn) \ _OF_DECLARE(table, name, compat, fn, of_init_fn_1) #define OF_DECLARE_1_RET(table, name, compat, fn) \ _OF_DECLARE(table, name, compat, fn, of_init_fn_1_ret) #define OF_DECLARE_2(table, name, compat, fn) \ _OF_DECLARE(table, name, compat, fn, of_init_fn_2) /** * struct of_changeset_entry - Holds a changeset entry * * @node: list_head for the log list * @action: notifier action * @np: pointer to the device node affected * @prop: pointer to the property affected * @old_prop: hold a pointer to the original property * * Every modification of the device tree during a changeset * is held in a list of of_changeset_entry structures. * That way we can recover from a partial application, or we can * revert the changeset */ struct of_changeset_entry { struct list_head node; unsigned long action; struct device_node *np; struct property *prop; struct property *old_prop; }; /** * struct of_changeset - changeset tracker structure * * @entries: list_head for the changeset entries * * changesets are a convenient way to apply bulk changes to the * live tree. In case of an error, changes are rolled-back. 
* changesets live on after initial application, and if not * destroyed after use, they can be reverted in a single call. */ struct of_changeset { struct list_head entries; }; enum of_reconfig_change { OF_RECONFIG_NO_CHANGE = 0, OF_RECONFIG_CHANGE_ADD, OF_RECONFIG_CHANGE_REMOVE, }; struct notifier_block; #ifdef CONFIG_OF_DYNAMIC extern int of_reconfig_notifier_register(struct notifier_block *); extern int of_reconfig_notifier_unregister(struct notifier_block *); extern int of_reconfig_notify(unsigned long, struct of_reconfig_data *rd); extern int of_reconfig_get_state_change(unsigned long action, struct of_reconfig_data *arg); extern void of_changeset_init(struct of_changeset *ocs); extern void of_changeset_destroy(struct of_changeset *ocs); extern int of_changeset_apply(struct of_changeset *ocs); extern int of_changeset_revert(struct of_changeset *ocs); extern int of_changeset_action(struct of_changeset *ocs, unsigned long action, struct device_node *np, struct property *prop); static inline int of_changeset_attach_node(struct of_changeset *ocs, struct device_node *np) { return of_changeset_action(ocs, OF_RECONFIG_ATTACH_NODE, np, NULL); } static inline int of_changeset_detach_node(struct of_changeset *ocs, struct device_node *np) { return of_changeset_action(ocs, OF_RECONFIG_DETACH_NODE, np, NULL); } static inline int of_changeset_add_property(struct of_changeset *ocs, struct device_node *np, struct property *prop) { return of_changeset_action(ocs, OF_RECONFIG_ADD_PROPERTY, np, prop); } static inline int of_changeset_remove_property(struct of_changeset *ocs, struct device_node *np, struct property *prop) { return of_changeset_action(ocs, OF_RECONFIG_REMOVE_PROPERTY, np, prop); } static inline int of_changeset_update_property(struct of_changeset *ocs, struct device_node *np, struct property *prop) { return of_changeset_action(ocs, OF_RECONFIG_UPDATE_PROPERTY, np, prop); } struct device_node *of_changeset_create_node(struct of_changeset *ocs, struct device_node *parent, const char *full_name); int of_changeset_add_prop_string(struct of_changeset *ocs, struct device_node *np, const char *prop_name, const char *str); int of_changeset_add_prop_string_array(struct of_changeset *ocs, struct device_node *np, const char *prop_name, const char * const *str_array, size_t sz); int of_changeset_add_prop_u32_array(struct of_changeset *ocs, struct device_node *np, const char *prop_name, const u32 *array, size_t sz); static inline int of_changeset_add_prop_u32(struct of_changeset *ocs, struct device_node *np, const char *prop_name, const u32 val) { return of_changeset_add_prop_u32_array(ocs, np, prop_name, &val, 1); } int of_changeset_update_prop_string(struct of_changeset *ocs, struct device_node *np, const char *prop_name, const char *str); int of_changeset_add_prop_bool(struct of_changeset *ocs, struct device_node *np, const char *prop_name); #else /* CONFIG_OF_DYNAMIC */ static inline int of_reconfig_notifier_register(struct notifier_block *nb) { return -EINVAL; } static inline int of_reconfig_notifier_unregister(struct notifier_block *nb) { return -EINVAL; } static inline int of_reconfig_notify(unsigned long action, struct of_reconfig_data *arg) { return -EINVAL; } static inline int of_reconfig_get_state_change(unsigned long action, struct of_reconfig_data *arg) { return -EINVAL; } #endif /* CONFIG_OF_DYNAMIC */ /** * of_device_is_system_power_controller - Tells if a system-power-controller property is found for the device_node * @np: Pointer to the given device_node * * Return: true if present, false otherwise
*/ static inline bool of_device_is_system_power_controller(const struct device_node *np) { return of_property_read_bool(np, "system-power-controller"); } /** * of_have_populated_dt() - Has DT been populated by bootloader * * Return: True if a DTB has been populated by the bootloader and it isn't the * empty builtin one. False otherwise. */ static inline bool of_have_populated_dt(void) { #ifdef CONFIG_OF return of_property_present(of_root, "compatible"); #else return false; #endif } /* * Overlay support */ enum of_overlay_notify_action { OF_OVERLAY_INIT = 0, /* kzalloc() of ovcs sets this value */ OF_OVERLAY_PRE_APPLY, OF_OVERLAY_POST_APPLY, OF_OVERLAY_PRE_REMOVE, OF_OVERLAY_POST_REMOVE, }; static inline const char *of_overlay_action_name(enum of_overlay_notify_action action) { static const char *const of_overlay_action_name[] = { "init", "pre-apply", "post-apply", "pre-remove", "post-remove", }; return of_overlay_action_name[action]; } struct of_overlay_notify_data { struct device_node *overlay; struct device_node *target; }; #ifdef CONFIG_OF_OVERLAY int of_overlay_fdt_apply(const void *overlay_fdt, u32 overlay_fdt_size, int *ovcs_id, const struct device_node *target_base); int of_overlay_remove(int *ovcs_id); int of_overlay_remove_all(void); int of_overlay_notifier_register(struct notifier_block *nb); int of_overlay_notifier_unregister(struct notifier_block *nb); #else static inline int of_overlay_fdt_apply(const void *overlay_fdt, u32 overlay_fdt_size, int *ovcs_id, const struct device_node *target_base) { return -ENOTSUPP; } static inline int of_overlay_remove(int *ovcs_id) { return -ENOTSUPP; } static inline int of_overlay_remove_all(void) { return -ENOTSUPP; } static inline int of_overlay_notifier_register(struct notifier_block *nb) { return 0; } static inline int of_overlay_notifier_unregister(struct notifier_block *nb) { return 0; } #endif #endif /* _LINUX_OF_H */
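/*
 * Usage sketch (editorial addition, not part of <linux/of.h>): a minimal
 * consumer of the child-iteration and property helpers declared above. The
 * node layout and count_addressable_children() are illustrative only.
 */
#include <linux/of.h>

static int count_addressable_children(struct device_node *parent)
{
	struct device_node *child;
	int n = 0;

	/* visits only children whose status property is "okay"/"ok" or absent */
	for_each_available_child_of_node(parent, child) {
		u32 reg;

		/* of_property_read_u32() returns 0 on success */
		if (!of_property_read_u32(child, "reg", &reg))
			n++;
	}

	/* the iterator drops each child's reference itself on normal exit */
	return n;
}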
// SPDX-License-Identifier: GPL-2.0 /* * quota.c - CephFS quota * * Copyright (C) 2017-2018 SUSE */ #include <linux/statfs.h> #include "super.h" #include "mds_client.h" void ceph_adjust_quota_realms_count(struct inode *inode, bool inc) { struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb); if (inc) atomic64_inc(&mdsc->quotarealms_count); else atomic64_dec(&mdsc->quotarealms_count); } static inline bool ceph_has_realms_with_quotas(struct inode *inode) { struct super_block *sb = inode->i_sb; struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(sb); struct inode *root = d_inode(sb->s_root); if (atomic64_read(&mdsc->quotarealms_count) > 0) return true; /* if root is the real CephFS root, we don't have quota realms */ if (root && ceph_ino(root) == CEPH_INO_ROOT) return false; /* MDS stray dirs have no quota realms */ if (ceph_vino_is_reserved(ceph_inode(inode)->i_vino)) return false; /* otherwise, we can't know for sure */ return true; } void ceph_handle_quota(struct ceph_mds_client *mdsc, struct ceph_mds_session *session, struct ceph_msg *msg) { struct super_block *sb = mdsc->fsc->sb; struct ceph_mds_quota *h = msg->front.iov_base; struct ceph_client *cl = mdsc->fsc->client; struct ceph_vino vino; struct inode *inode; struct ceph_inode_info *ci; if (!ceph_inc_mds_stopping_blocker(mdsc, session)) return; if (msg->front.iov_len < sizeof(*h)) { pr_err_client(cl, "corrupt message mds%d len %d\n", session->s_mds, (int)msg->front.iov_len);
ceph_msg_dump(msg); goto out; } /* lookup inode */ vino.ino = le64_to_cpu(h->ino); vino.snap = CEPH_NOSNAP; inode = ceph_find_inode(sb, vino); if (!inode) { pr_warn_client(cl, "failed to find inode %llx\n", vino.ino); goto out; } ci = ceph_inode(inode); spin_lock(&ci->i_ceph_lock); ci->i_rbytes = le64_to_cpu(h->rbytes); ci->i_rfiles = le64_to_cpu(h->rfiles); ci->i_rsubdirs = le64_to_cpu(h->rsubdirs); __ceph_update_quota(ci, le64_to_cpu(h->max_bytes), le64_to_cpu(h->max_files)); spin_unlock(&ci->i_ceph_lock); iput(inode); out: ceph_dec_mds_stopping_blocker(mdsc); } static struct ceph_quotarealm_inode * find_quotarealm_inode(struct ceph_mds_client *mdsc, u64 ino) { struct ceph_quotarealm_inode *qri = NULL; struct rb_node **node, *parent = NULL; struct ceph_client *cl = mdsc->fsc->client; mutex_lock(&mdsc->quotarealms_inodes_mutex); node = &(mdsc->quotarealms_inodes.rb_node); while (*node) { parent = *node; qri = container_of(*node, struct ceph_quotarealm_inode, node); if (ino < qri->ino) node = &((*node)->rb_left); else if (ino > qri->ino) node = &((*node)->rb_right); else break; } if (!qri || (qri->ino != ino)) { /* Not found, create a new one and insert it */ qri = kmalloc(sizeof(*qri), GFP_KERNEL); if (qri) { qri->ino = ino; qri->inode = NULL; qri->timeout = 0; mutex_init(&qri->mutex); rb_link_node(&qri->node, parent, node); rb_insert_color(&qri->node, &mdsc->quotarealms_inodes); } else pr_warn_client(cl, "Failed to alloc quotarealms_inode\n"); } mutex_unlock(&mdsc->quotarealms_inodes_mutex); return qri; } /* * This function will try to look up a realm inode which isn't visible in the * filesystem mountpoint. A list of these kinds of inodes (not visible) is * maintained in the mdsc and freed only when the filesystem is unmounted. * * Note that these inodes are kept in this list even if the lookup fails, which * allows us to prevent useless lookup requests. */ static struct inode *lookup_quotarealm_inode(struct ceph_mds_client *mdsc, struct super_block *sb, struct ceph_snap_realm *realm) { struct ceph_client *cl = mdsc->fsc->client; struct ceph_quotarealm_inode *qri; struct inode *in; qri = find_quotarealm_inode(mdsc, realm->ino); if (!qri) return NULL; mutex_lock(&qri->mutex); if (qri->inode && ceph_is_any_caps(qri->inode)) { /* A request has already returned the inode */ mutex_unlock(&qri->mutex); return qri->inode; } /* Check if this inode lookup has failed recently */ if (qri->timeout && time_before_eq(jiffies, qri->timeout)) { mutex_unlock(&qri->mutex); return NULL; } if (qri->inode) { /* get caps */ int ret = __ceph_do_getattr(qri->inode, NULL, CEPH_STAT_CAP_INODE, true); if (ret >= 0) in = qri->inode; else in = ERR_PTR(ret); } else { in = ceph_lookup_inode(sb, realm->ino); } if (IS_ERR(in)) { doutc(cl, "Can't lookup inode %llx (err: %ld)\n", realm->ino, PTR_ERR(in)); qri->timeout = jiffies + secs_to_jiffies(60); /* XXX */ } else { qri->timeout = 0; qri->inode = in; } mutex_unlock(&qri->mutex); return in; } void ceph_cleanup_quotarealms_inodes(struct ceph_mds_client *mdsc) { struct ceph_quotarealm_inode *qri; struct rb_node *node; /* * It should now be safe to clean the quotarealms_inodes tree without holding * mdsc->quotarealms_inodes_mutex...
*/ mutex_lock(&mdsc->quotarealms_inodes_mutex); while (!RB_EMPTY_ROOT(&mdsc->quotarealms_inodes)) { node = rb_first(&mdsc->quotarealms_inodes); qri = rb_entry(node, struct ceph_quotarealm_inode, node); rb_erase(node, &mdsc->quotarealms_inodes); iput(qri->inode); kfree(qri); } mutex_unlock(&mdsc->quotarealms_inodes_mutex); } /* * This function walks through the snaprealm for an inode and sets *realmp to * the first snaprealm that has quotas set (max_files, * max_bytes, or any, depending on the 'which_quota' argument). If the root is * reached, *realmp is set to the root ceph_snap_realm instead. * * Note that the caller is responsible for calling ceph_put_snap_realm() on the * returned realm. * * Callers of this function need to hold mdsc->snap_rwsem. However, if there's * a need to do an inode lookup, this rwsem will be temporarily dropped. Hence * the 'retry' argument: if the rwsem needs to be dropped and 'retry' is 'false', * this function will return -EAGAIN; otherwise, the snaprealms walk-through * will be restarted. */ static int get_quota_realm(struct ceph_mds_client *mdsc, struct inode *inode, enum quota_get_realm which_quota, struct ceph_snap_realm **realmp, bool retry) { struct ceph_client *cl = mdsc->fsc->client; struct ceph_inode_info *ci = NULL; struct ceph_snap_realm *realm, *next; struct inode *in; bool has_quota; if (realmp) *realmp = NULL; if (ceph_snap(inode) != CEPH_NOSNAP) return 0; restart: realm = ceph_inode(inode)->i_snap_realm; if (realm) ceph_get_snap_realm(mdsc, realm); else pr_err_ratelimited_client(cl, "%p %llx.%llx null i_snap_realm\n", inode, ceph_vinop(inode)); while (realm) { bool has_inode; spin_lock(&realm->inodes_with_caps_lock); has_inode = realm->inode; in = has_inode ? igrab(realm->inode) : NULL; spin_unlock(&realm->inodes_with_caps_lock); if (has_inode && !in) break; if (!in) { up_read(&mdsc->snap_rwsem); in = lookup_quotarealm_inode(mdsc, inode->i_sb, realm); down_read(&mdsc->snap_rwsem); if (IS_ERR_OR_NULL(in)) break; ceph_put_snap_realm(mdsc, realm); if (!retry) return -EAGAIN; goto restart; } ci = ceph_inode(in); has_quota = __ceph_has_quota(ci, which_quota); iput(in); next = realm->parent; if (has_quota || !next) { if (realmp) *realmp = realm; return 0; } ceph_get_snap_realm(mdsc, next); ceph_put_snap_realm(mdsc, realm); realm = next; } if (realm) ceph_put_snap_realm(mdsc, realm); return 0; } bool ceph_quota_is_same_realm(struct inode *old, struct inode *new) { struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(old->i_sb); struct ceph_snap_realm *old_realm, *new_realm; bool is_same; int ret; restart: /* * We need to look up 2 quota realms atomically, i.e. with snap_rwsem. * However, get_quota_realm may drop it temporarily. By setting the * 'retry' parameter to 'false', we'll get -EAGAIN if the rwsem was * dropped and we can then restart the whole operation.
*/ down_read(&mdsc->snap_rwsem); get_quota_realm(mdsc, old, QUOTA_GET_ANY, &old_realm, true); ret = get_quota_realm(mdsc, new, QUOTA_GET_ANY, &new_realm, false); if (ret == -EAGAIN) { up_read(&mdsc->snap_rwsem); if (old_realm) ceph_put_snap_realm(mdsc, old_realm); goto restart; } is_same = (old_realm == new_realm); up_read(&mdsc->snap_rwsem); if (old_realm) ceph_put_snap_realm(mdsc, old_realm); if (new_realm) ceph_put_snap_realm(mdsc, new_realm); return is_same; } enum quota_check_op { QUOTA_CHECK_MAX_FILES_OP, /* check quota max_files limit */ QUOTA_CHECK_MAX_BYTES_OP, /* check quota max_bytes limit */ QUOTA_CHECK_MAX_BYTES_APPROACHING_OP /* check if quota max_bytes limit is approaching */ }; /* * check_quota_exceeded() will walk up the snaprealm hierarchy and, for each * realm, it will execute the quota check operation defined by the 'op' parameter. * The snaprealm walk is interrupted if the quota check detects that the quota * is exceeded or if the root inode is reached. */ static bool check_quota_exceeded(struct inode *inode, enum quota_check_op op, loff_t delta) { struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb); struct ceph_client *cl = mdsc->fsc->client; struct ceph_inode_info *ci; struct ceph_snap_realm *realm, *next; struct inode *in; u64 max, rvalue; bool exceeded = false; if (ceph_snap(inode) != CEPH_NOSNAP) return false; down_read(&mdsc->snap_rwsem); restart: realm = ceph_inode(inode)->i_snap_realm; if (realm) ceph_get_snap_realm(mdsc, realm); else pr_err_ratelimited_client(cl, "%p %llx.%llx null i_snap_realm\n", inode, ceph_vinop(inode)); while (realm) { bool has_inode; spin_lock(&realm->inodes_with_caps_lock); has_inode = realm->inode; in = has_inode ? igrab(realm->inode) : NULL; spin_unlock(&realm->inodes_with_caps_lock); if (has_inode && !in) break; if (!in) { up_read(&mdsc->snap_rwsem); in = lookup_quotarealm_inode(mdsc, inode->i_sb, realm); down_read(&mdsc->snap_rwsem); if (IS_ERR_OR_NULL(in)) break; ceph_put_snap_realm(mdsc, realm); goto restart; } ci = ceph_inode(in); spin_lock(&ci->i_ceph_lock); if (op == QUOTA_CHECK_MAX_FILES_OP) { max = ci->i_max_files; rvalue = ci->i_rfiles + ci->i_rsubdirs; } else { max = ci->i_max_bytes; rvalue = ci->i_rbytes; } spin_unlock(&ci->i_ceph_lock); switch (op) { case QUOTA_CHECK_MAX_FILES_OP: case QUOTA_CHECK_MAX_BYTES_OP: exceeded = (max && (rvalue + delta > max)); break; case QUOTA_CHECK_MAX_BYTES_APPROACHING_OP: if (max) { if (rvalue >= max) exceeded = true; else { /* * when we're writing more than 1/16th * of the available space */ exceeded = (((max - rvalue) >> 4) < delta); } } break; default: /* Shouldn't happen */ pr_warn_client(cl, "Invalid quota check op (%d)\n", op); exceeded = true; /* Just break the loop */ } iput(in); next = realm->parent; if (exceeded || !next) break; ceph_get_snap_realm(mdsc, next); ceph_put_snap_realm(mdsc, realm); realm = next; } if (realm) ceph_put_snap_realm(mdsc, realm); up_read(&mdsc->snap_rwsem); return exceeded; } /* * ceph_quota_is_max_files_exceeded - check if we can create a new file * @inode: directory where a new file is being created * * This function returns true if the max_files quota does not allow a new file * to be created. It is necessary to walk through the snaprealm hierarchy (until the * FS root) to check all realms with quotas set.
*/ bool ceph_quota_is_max_files_exceeded(struct inode *inode) { if (!ceph_has_realms_with_quotas(inode)) return false; WARN_ON(!S_ISDIR(inode->i_mode)); return check_quota_exceeded(inode, QUOTA_CHECK_MAX_FILES_OP, 1); } /* * ceph_quota_is_max_bytes_exceeded - check if we can write to a file * @inode: inode being written * @newsize: new size if write succeeds * * This function returns true if the max_bytes quota does not allow the file * size to reach @newsize; it returns false otherwise. */ bool ceph_quota_is_max_bytes_exceeded(struct inode *inode, loff_t newsize) { loff_t size = i_size_read(inode); if (!ceph_has_realms_with_quotas(inode)) return false; /* return immediately if we're decreasing file size */ if (newsize <= size) return false; return check_quota_exceeded(inode, QUOTA_CHECK_MAX_BYTES_OP, (newsize - size)); } /* * ceph_quota_is_max_bytes_approaching - check if we're reaching max_bytes * @inode: inode being written * @newsize: new size if write succeeds * * This function returns true if the new file size @newsize will consume * more than 1/16th of the available quota space; it returns false otherwise. */ bool ceph_quota_is_max_bytes_approaching(struct inode *inode, loff_t newsize) { loff_t size = ceph_inode(inode)->i_reported_size; if (!ceph_has_realms_with_quotas(inode)) return false; /* return immediately if we're decreasing file size */ if (newsize <= size) return false; return check_quota_exceeded(inode, QUOTA_CHECK_MAX_BYTES_APPROACHING_OP, (newsize - size)); } /* * ceph_quota_update_statfs - if the root has a quota, update statfs with the quota status * @fsc: filesystem client instance * @buf: statfs to update * * If the mounted filesystem root has max_bytes quota set, update the filesystem * statistics with the quota status. * * This function returns true if the stats have been updated, false otherwise. */ bool ceph_quota_update_statfs(struct ceph_fs_client *fsc, struct kstatfs *buf) { struct ceph_mds_client *mdsc = fsc->mdsc; struct ceph_inode_info *ci; struct ceph_snap_realm *realm; struct inode *in; u64 total = 0, used, free; bool is_updated = false; down_read(&mdsc->snap_rwsem); get_quota_realm(mdsc, d_inode(fsc->sb->s_root), QUOTA_GET_MAX_BYTES, &realm, true); up_read(&mdsc->snap_rwsem); if (!realm) return false; spin_lock(&realm->inodes_with_caps_lock); in = realm->inode ? igrab(realm->inode) : NULL; spin_unlock(&realm->inodes_with_caps_lock); if (in) { ci = ceph_inode(in); spin_lock(&ci->i_ceph_lock); if (ci->i_max_bytes) { total = ci->i_max_bytes >> CEPH_BLOCK_SHIFT; used = ci->i_rbytes >> CEPH_BLOCK_SHIFT; /* For quota size less than 4MB, use 4KB block size */ if (!total) { total = ci->i_max_bytes >> CEPH_4K_BLOCK_SHIFT; used = ci->i_rbytes >> CEPH_4K_BLOCK_SHIFT; buf->f_frsize = 1 << CEPH_4K_BLOCK_SHIFT; } /* It is possible for a quota to be exceeded. * Report 'zero' in that case */ free = total > used ? total - used : 0; /* For quota size less than 4KB, report the * total=used=4KB,free=0 when quota is full * and total=free=4KB, used=0 otherwise */ if (!total) { total = 1; free = ci->i_max_bytes > ci->i_rbytes ? 1 : 0; buf->f_frsize = 1 << CEPH_4K_BLOCK_SHIFT; } } spin_unlock(&ci->i_ceph_lock); if (total) { buf->f_blocks = total; buf->f_bfree = free; buf->f_bavail = free; is_updated = true; } iput(in); } ceph_put_snap_realm(mdsc, realm); return is_updated; }
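/*
 * Usage sketch (editorial addition, not part of this file): how a write path
 * might consult the quota helpers above before extending a file. The real
 * callers live in the CephFS write paths (e.g. fs/ceph/file.c);
 * example_check_write_quota() is hypothetical.
 */
static int example_check_write_quota(struct inode *inode, loff_t pos,
				     size_t count)
{
	/* refuse the write up front if it would blow past max_bytes */
	if (ceph_quota_is_max_bytes_exceeded(inode, pos + count))
		return -EDQUOT;
	return 0;
}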
/* * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * Copyright (c) 2009-2010, Code Aurora Forum. * Copyright 2016 Intel Corp. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. */ #ifndef _DRM_DRV_H_ #define _DRM_DRV_H_ #include <linux/list.h> #include <linux/irqreturn.h> #include <video/nomodeset.h> #include <drm/drm_device.h> struct dmem_cgroup_region; struct drm_fb_helper; struct drm_fb_helper_surface_size; struct drm_file; struct drm_gem_object; struct drm_master; struct drm_minor; struct dma_buf; struct dma_buf_attachment; struct drm_display_mode; struct drm_mode_create_dumb; struct drm_printer; struct sg_table; /** * enum drm_driver_feature - feature flags * * See &drm_driver.driver_features, drm_device.driver_features and * drm_core_check_feature(). */ enum drm_driver_feature { /** * @DRIVER_GEM: * * Driver uses the GEM memory manager. This should be set for all modern * drivers. */ DRIVER_GEM = BIT(0), /** * @DRIVER_MODESET: * * Driver supports mode setting interfaces (KMS). */ DRIVER_MODESET = BIT(1), /** * @DRIVER_RENDER: * * Driver supports dedicated render nodes. See also the :ref:`section on * render nodes <drm_render_node>` for details. */ DRIVER_RENDER = BIT(3), /** * @DRIVER_ATOMIC: * * Driver supports the full atomic modesetting userspace API. Drivers * which only use atomic internally, but do not support the full * userspace API (e.g. not all properties converted to atomic, or * multi-plane updates are not guaranteed to be tear-free) should not * set this flag. */ DRIVER_ATOMIC = BIT(4), /** * @DRIVER_SYNCOBJ: * * Driver supports &drm_syncobj for explicit synchronization of command * submission. */ DRIVER_SYNCOBJ = BIT(5), /** * @DRIVER_SYNCOBJ_TIMELINE: * * Driver supports the timeline flavor of &drm_syncobj for explicit * synchronization of command submission. */ DRIVER_SYNCOBJ_TIMELINE = BIT(6), /** * @DRIVER_COMPUTE_ACCEL: * * Driver supports compute acceleration devices. This flag is mutually exclusive with * @DRIVER_RENDER and @DRIVER_MODESET. Devices that support both graphics and compute * acceleration should be handled by two drivers that are connected using the auxiliary bus. */ DRIVER_COMPUTE_ACCEL = BIT(7), /** * @DRIVER_GEM_GPUVA: * * Driver supports user defined GPU VA bindings for GEM objects. */ DRIVER_GEM_GPUVA = BIT(8), /** * @DRIVER_CURSOR_HOTSPOT: * * Driver supports and requires cursor hotspot information in the * cursor plane (e.g. cursor plane has to actually track the mouse * cursor and the clients are required to set the hotspot in order for * the cursor planes to work correctly). */ DRIVER_CURSOR_HOTSPOT = BIT(9), /* IMPORTANT: Below are all the legacy flags, add new ones above. */ /** * @DRIVER_USE_AGP: * * Set up DRM AGP support, see drm_agp_init(); the DRM core will manage * AGP resources. New drivers don't need this. */ DRIVER_USE_AGP = BIT(25), /** * @DRIVER_LEGACY: * * Denote a legacy driver using shadow attach. Do not use. */ DRIVER_LEGACY = BIT(26), /** * @DRIVER_PCI_DMA: * * Driver is capable of PCI DMA; mapping of PCI DMA buffers to userspace * will be enabled. Only for legacy drivers. Do not use. */ DRIVER_PCI_DMA = BIT(27), /** * @DRIVER_SG: * * Driver can perform scatter/gather DMA; allocation and mapping of * scatter/gather buffers will be enabled. Only for legacy drivers. Do * not use. */ DRIVER_SG = BIT(28), /** * @DRIVER_HAVE_DMA: * * Driver supports DMA; the userspace DMA API will be supported. Only * for legacy drivers. Do not use.
*/ DRIVER_HAVE_DMA = BIT(29), /** * @DRIVER_HAVE_IRQ: * * Legacy irq support. Only for legacy drivers. Do not use. */ DRIVER_HAVE_IRQ = BIT(30), }; /** * struct drm_driver - DRM driver structure * * This structure represents the common code for a family of cards. There will be * one &struct drm_device for each card present in this family. It contains lots * of vfunc entries, and a pile of those probably should be moved to more * appropriate places like &drm_mode_config_funcs or into a new operations * structure for GEM drivers. */ struct drm_driver { /** * @load: * * Backward-compatible driver callback to complete initialization steps * after the driver is registered. For this reason, it may suffer from * race conditions, and its use is deprecated for new drivers. It is * therefore only supported for existing drivers not yet converted to * the new scheme. See devm_drm_dev_alloc() and drm_dev_register() for * the proper and race-free way to set up a &struct drm_device. * * This is deprecated, do not use! * * Returns: * * Zero on success, non-zero value on failure. */ int (*load) (struct drm_device *, unsigned long flags); /** * @open: * * Driver callback when a new &struct drm_file is opened. Useful for * setting up driver-private data structures like buffer allocators, * execution contexts or similar things. Such driver-private resources * must be released again in @postclose. * * Since the display/modeset side of DRM can only be owned by exactly * one &struct drm_file (see &drm_file.is_master and &drm_device.master) * there should never be a need to set up any modeset related resources * in this callback. Doing so would be a driver design bug. * * Returns: * * 0 on success, a negative error code on failure, which will be * promoted to userspace as the result of the open() system call. */ int (*open) (struct drm_device *, struct drm_file *); /** * @postclose: * * One of the driver callbacks when a new &struct drm_file is closed. * Useful for tearing down driver-private data structures allocated in * @open like buffer allocators, execution contexts or similar things. * * Since the display/modeset side of DRM can only be owned by exactly * one &struct drm_file (see &drm_file.is_master and &drm_device.master) * there should never be a need to tear down any modeset related * resources in this callback. Doing so would be a driver design bug. */ void (*postclose) (struct drm_device *, struct drm_file *); /** * @unload: * * Reverse the effects of the driver load callback. Ideally, * the clean up performed by the driver should happen in the * reverse order of the initialization. Similarly to the load * hook, this handler is deprecated and its usage should be * dropped in favor of an open-coded teardown function at the * driver layer. See drm_dev_unregister() and drm_dev_put() * for the proper way to remove a &struct drm_device. * * The unload() hook is called right after unregistering * the device. * */ void (*unload) (struct drm_device *); /** * @release: * * Optional callback for destroying device data after the final * reference is released, i.e. the device is being destroyed. * * This is deprecated; clean up all memory allocations associated with a * &drm_device using drmm_add_action(), drmm_kmalloc() and related * managed resources functions. */ void (*release) (struct drm_device *); /** * @master_set: * * Called whenever the minor master is set. Only used by vmwgfx.
*/ void (*master_set)(struct drm_device *dev, struct drm_file *file_priv, bool from_open); /** * @master_drop: * * Called whenever the minor master is dropped. Only used by vmwgfx. */ void (*master_drop)(struct drm_device *dev, struct drm_file *file_priv); /** * @debugfs_init: * * Allows drivers to create driver-specific debugfs files. */ void (*debugfs_init)(struct drm_minor *minor); /** * @gem_create_object: constructor for gem objects * * Hook for allocating the GEM object struct, for use by the CMA * and SHMEM GEM helpers. Returns a GEM object on success, or an * ERR_PTR()-encoded error code otherwise. */ struct drm_gem_object *(*gem_create_object)(struct drm_device *dev, size_t size); /** * @prime_handle_to_fd: * * PRIME export function. Only used by vmwgfx. */ int (*prime_handle_to_fd)(struct drm_device *dev, struct drm_file *file_priv, uint32_t handle, uint32_t flags, int *prime_fd); /** * @prime_fd_to_handle: * * PRIME import function. Only used by vmwgfx. */ int (*prime_fd_to_handle)(struct drm_device *dev, struct drm_file *file_priv, int prime_fd, uint32_t *handle); /** * @gem_prime_import: * * Import hook for GEM drivers. * * This defaults to drm_gem_prime_import() if not set. */ struct drm_gem_object * (*gem_prime_import)(struct drm_device *dev, struct dma_buf *dma_buf); /** * @gem_prime_import_sg_table: * * Optional hook used by the PRIME helper functions * drm_gem_prime_import() respectively drm_gem_prime_import_dev(). */ struct drm_gem_object *(*gem_prime_import_sg_table)( struct drm_device *dev, struct dma_buf_attachment *attach, struct sg_table *sgt); /** * @dumb_create: * * This creates a new dumb buffer in the driver's backing storage manager (GEM, * TTM or something else entirely) and returns the resulting buffer handle. This * handle can then be wrapped up into a framebuffer modeset object. * * Note that userspace is not allowed to use such objects for render * acceleration - drivers must create their own private ioctls for such a use * case. * * Width, height and depth are specified in the &drm_mode_create_dumb * argument. The callback needs to fill the handle, pitch and size for * the created buffer. * * Called by the user via ioctl. * * Returns: * * Zero on success, negative errno on failure. */ int (*dumb_create)(struct drm_file *file_priv, struct drm_device *dev, struct drm_mode_create_dumb *args); /** * @dumb_map_offset: * * Allocate an offset in the drm device node's address space to be able to * memory map a dumb buffer. * * The default implementation is drm_gem_create_mmap_offset(). GEM based * drivers must not overwrite this. * * Called by the user via ioctl. * * Returns: * * Zero on success, negative errno on failure. */ int (*dumb_map_offset)(struct drm_file *file_priv, struct drm_device *dev, uint32_t handle, uint64_t *offset); /** * @fbdev_probe: * * Allocates and initialize the fb_info structure for fbdev emulation. * Furthermore it also needs to allocate the DRM framebuffer used to * back the fbdev. * * This callback is mandatory for fbdev support. * * Returns: * * 0 on success ot a negative error code otherwise. */ int (*fbdev_probe)(struct drm_fb_helper *fbdev_helper, struct drm_fb_helper_surface_size *sizes); /** * @show_fdinfo: * * Print device specific fdinfo. See Documentation/gpu/drm-usage-stats.rst. 
*/ void (*show_fdinfo)(struct drm_printer *p, struct drm_file *f); /** @major: driver major number */ int major; /** @minor: driver minor number */ int minor; /** @patchlevel: driver patch level */ int patchlevel; /** @name: driver name */ char *name; /** @desc: driver description */ char *desc; /** * @driver_features: * Driver features, see &enum drm_driver_feature. Drivers can disable * some features on a per-instance basis using * &drm_device.driver_features. */ u32 driver_features; /** * @ioctls: * * Array of driver-private IOCTL description entries. See the chapter on * :ref:`IOCTL support in the userland interfaces * chapter<drm_driver_ioctl>` for the full details. */ const struct drm_ioctl_desc *ioctls; /** @num_ioctls: Number of entries in @ioctls. */ int num_ioctls; /** * @fops: * * File operations for the DRM device node. See the discussion in * :ref:`file operations<drm_driver_fops>` for in-depth coverage and * some examples. */ const struct file_operations *fops; }; void *__devm_drm_dev_alloc(struct device *parent, const struct drm_driver *driver, size_t size, size_t offset); struct dmem_cgroup_region * drmm_cgroup_register_region(struct drm_device *dev, const char *region_name, u64 size); /** * devm_drm_dev_alloc - Resource managed allocation of a &drm_device instance * @parent: Parent device object * @driver: DRM driver * @type: the type of the struct which contains struct &drm_device * @member: the name of the &drm_device within @type. * * This allocates and initialize a new DRM device. No device registration is done. * Call drm_dev_register() to advertice the device to user space and register it * with other core subsystems. This should be done last in the device * initialization sequence to make sure userspace can't access an inconsistent * state. * * The initial ref-count of the object is 1. Use drm_dev_get() and * drm_dev_put() to take and drop further ref-counts. * * It is recommended that drivers embed &struct drm_device into their own device * structure. * * Note that this manages the lifetime of the resulting &drm_device * automatically using devres. The DRM device initialized with this function is * automatically put on driver detach using drm_dev_put(). * * RETURNS: * Pointer to new DRM device, or ERR_PTR on failure. */ #define devm_drm_dev_alloc(parent, driver, type, member) \ ((type *) __devm_drm_dev_alloc(parent, driver, sizeof(type), \ offsetof(type, member))) struct drm_device *drm_dev_alloc(const struct drm_driver *driver, struct device *parent); void *__drm_dev_alloc(struct device *parent, const struct drm_driver *driver, size_t size, size_t offset); int drm_dev_register(struct drm_device *dev, unsigned long flags); void drm_dev_unregister(struct drm_device *dev); void drm_dev_get(struct drm_device *dev); void drm_dev_put(struct drm_device *dev); void drm_put_dev(struct drm_device *dev); bool drm_dev_enter(struct drm_device *dev, int *idx); void drm_dev_exit(int idx); void drm_dev_unplug(struct drm_device *dev); int drm_dev_wedged_event(struct drm_device *dev, unsigned long method, struct drm_wedge_task_info *info); /** * drm_dev_is_unplugged - is a DRM device unplugged * @dev: DRM device * * This function can be called to check whether a hotpluggable is unplugged. * Unplugging itself is singalled through drm_dev_unplug(). If a device is * unplugged, these two functions guarantee that any store before calling * drm_dev_unplug() is visible to callers of this function after it completes * * WARNING: This function fundamentally races against drm_dev_unplug(). 
It is * recommended that drivers instead use the underlying drm_dev_enter() and * drm_dev_exit() function pairs. */ static inline bool drm_dev_is_unplugged(struct drm_device *dev) { int idx; if (drm_dev_enter(dev, &idx)) { drm_dev_exit(idx); return false; } return true; } /** * drm_core_check_all_features - check driver feature flags mask * @dev: DRM device to check * @features: feature flag(s) mask * * This checks @dev for driver features, see &drm_driver.driver_features, * &drm_device.driver_features, and the various &enum drm_driver_feature flags. * * Returns true if all features in the @features mask are supported, false * otherwise. */ static inline bool drm_core_check_all_features(const struct drm_device *dev, u32 features) { u32 supported = dev->driver->driver_features & dev->driver_features; return features && (supported & features) == features; } /** * drm_core_check_feature - check driver feature flags * @dev: DRM device to check * @feature: feature flag * * This checks @dev for driver features, see &drm_driver.driver_features, * &drm_device.driver_features, and the various &enum drm_driver_feature flags. * * Returns true if the @feature is supported, false otherwise. */ static inline bool drm_core_check_feature(const struct drm_device *dev, enum drm_driver_feature feature) { return drm_core_check_all_features(dev, feature); } /** * drm_drv_uses_atomic_modeset - check if the driver implements * atomic_commit() * @dev: DRM device * * This check is useful if drivers do not have DRIVER_ATOMIC set but * have atomic modesetting internally implemented. */ static inline bool drm_drv_uses_atomic_modeset(struct drm_device *dev) { return drm_core_check_feature(dev, DRIVER_ATOMIC) || (dev->mode_config.funcs && dev->mode_config.funcs->atomic_commit != NULL); } /* TODO: Inline drm_firmware_drivers_only() in all its callers. */ static inline bool drm_firmware_drivers_only(void) { return video_firmware_drivers_only(); } #if defined(CONFIG_DEBUG_FS) void drm_debugfs_dev_init(struct drm_device *dev); void drm_debugfs_init_root(void); void drm_debugfs_remove_root(void); void drm_debugfs_bridge_params(void); #else static inline void drm_debugfs_dev_init(struct drm_device *dev) { } static inline void drm_debugfs_init_root(void) { } static inline void drm_debugfs_remove_root(void) { } static inline void drm_debugfs_bridge_params(void) { } #endif #endif
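A minimal usage sketch of the allocation and feature-check APIs declared above may help. Everything named foo_* below is hypothetical and not part of the header; a real driver would also initialize its mode-setting state before calling drm_dev_register().

/*
 * Usage sketch (hypothetical "foo" driver): embed struct drm_device,
 * allocate it with devm_drm_dev_alloc(), register last, and gate
 * optional paths with drm_core_check_feature().
 */
#include <linux/device.h>
#include <linux/err.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem.h>

struct foo_device {
	struct drm_device drm;	/* must be embedded in the driver struct */
};

DEFINE_DRM_GEM_FOPS(foo_fops);

static const struct drm_driver foo_drm_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
	.fops	= &foo_fops,
	.name	= "foo",
	.desc	= "Hypothetical example driver",
	.major	= 1,
	.minor	= 0,
};

static int foo_probe(struct device *parent)
{
	struct foo_device *foo;
	int ret;

	/* Managed allocation: drm_dev_put() is called on driver detach. */
	foo = devm_drm_dev_alloc(parent, &foo_drm_driver,
				 struct foo_device, drm);
	if (IS_ERR(foo))
		return PTR_ERR(foo);

	/* ... hardware and mode-setting setup would go here ... */

	/* Register last, once the device state is consistent. */
	ret = drm_dev_register(&foo->drm, 0);
	if (ret)
		return ret;

	if (drm_core_check_feature(&foo->drm, DRIVER_ATOMIC))
		dev_info(parent, "atomic modesetting enabled\n");
	return 0;
}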
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGETLB_INLINE_H
#define _LINUX_HUGETLB_INLINE_H

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mm.h>

static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma)
{
	return !!(vma->vm_flags & VM_HUGETLB);
}

#else

static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma)
{
	return false;
}

#endif

#endif
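As a quick illustration of how this predicate is typically used, here is a hypothetical helper (not from the header above); it assumes the caller holds mmap_read_lock(mm) across the walk.

/*
 * Hypothetical usage sketch: count the non-hugetlb VMAs of an mm.
 * Assumes the caller holds mmap_read_lock(mm) for the iteration.
 */
#include <linux/mm.h>
#include <linux/hugetlb_inline.h>

static unsigned long count_small_page_vmas(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	unsigned long n = 0;
	VMA_ITERATOR(vmi, mm, 0);

	for_each_vma(vmi, vma)
		if (!is_vm_hugetlb_page(vma))
			n++;
	return n;
}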
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/fs/hpfs/super.c
 *
 *  Mikulas Patocka (mikulas@artax.karlin.mff.cuni.cz), 1998-1999
 *
 *  mounting, unmounting, error handling
 */

#include "hpfs_fn.h"
#include <linux/module.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/init.h>
#include <linux/statfs.h>
#include <linux/magic.h>
#include <linux/sched.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
/* Mark the filesystem dirty, so that chkdsk checks it when OS/2 is booted */

static void mark_dirty(struct super_block *s, int remount)
{
	if (hpfs_sb(s)->sb_chkdsk && (remount || !sb_rdonly(s))) {
		struct buffer_head *bh;
		struct hpfs_spare_block *sb;
		if ((sb = hpfs_map_sector(s, 17, &bh, 0))) {
			sb->dirty = 1;
			sb->old_wrote = 0;
			mark_buffer_dirty(bh);
			sync_dirty_buffer(bh);
			brelse(bh);
		}
	}
}

/*
 * Mark the filesystem clean (mark it dirty for chkdsk if chkdsk==2 or if
 * there were errors)
 */

static void unmark_dirty(struct super_block *s)
{
	struct buffer_head *bh;
	struct hpfs_spare_block *sb;
	if (sb_rdonly(s))
		return;
	sync_blockdev(s->s_bdev);
	if ((sb = hpfs_map_sector(s, 17, &bh, 0))) {
		sb->dirty = hpfs_sb(s)->sb_chkdsk > 1 - hpfs_sb(s)->sb_was_error;
		sb->old_wrote = hpfs_sb(s)->sb_chkdsk >= 2 && !hpfs_sb(s)->sb_was_error;
		mark_buffer_dirty(bh);
		sync_dirty_buffer(bh);
		brelse(bh);
	}
}

/* Filesystem error... */
void hpfs_error(struct super_block *s, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	pr_err("filesystem error: %pV", &vaf);

	va_end(args);

	if (!hpfs_sb(s)->sb_was_error) {
		if (hpfs_sb(s)->sb_err == 2) {
			pr_cont("; crashing the system because you wanted it\n");
			mark_dirty(s, 0);
			panic("HPFS panic");
		} else if (hpfs_sb(s)->sb_err == 1) {
			if (sb_rdonly(s))
				pr_cont("; already mounted read-only\n");
			else {
				pr_cont("; remounting read-only\n");
				mark_dirty(s, 0);
				s->s_flags |= SB_RDONLY;
			}
		} else if (sb_rdonly(s))
			pr_cont("; going on - but anything won't be destroyed because it's read-only\n");
		else
			pr_cont("; corrupted filesystem mounted read/write - your computer will explode within 20 seconds ... but you wanted it so!\n");
	} else
		pr_cont("\n");
	hpfs_sb(s)->sb_was_error = 1;
}

/*
 * A little trick to detect cycles in many hpfs structures and not let the
 * kernel crash on a corrupted filesystem. When first called, set c2 to 0.
 *
 * BTW. chkdsk doesn't detect cycles correctly. When I had 2 lost directories
 * nested in each other, chkdsk locked up happily.
*/ int hpfs_stop_cycles(struct super_block *s, int key, int *c1, int *c2, char *msg) { if (*c2 && *c1 == key) { hpfs_error(s, "cycle detected on key %08x in %s", key, msg); return 1; } (*c2)++; if (!((*c2 - 1) & *c2)) *c1 = key; return 0; } static void free_sbi(struct hpfs_sb_info *sbi) { kfree(sbi->sb_cp_table); kfree(sbi->sb_bmp_dir); kfree(sbi); } static void lazy_free_sbi(struct rcu_head *rcu) { free_sbi(container_of(rcu, struct hpfs_sb_info, rcu)); } static void hpfs_put_super(struct super_block *s) { hpfs_lock(s); unmark_dirty(s); hpfs_unlock(s); call_rcu(&hpfs_sb(s)->rcu, lazy_free_sbi); } static unsigned hpfs_count_one_bitmap(struct super_block *s, secno secno) { struct quad_buffer_head qbh; unsigned long *bits; unsigned count; bits = hpfs_map_4sectors(s, secno, &qbh, 0); if (!bits) return (unsigned)-1; count = bitmap_weight(bits, 2048 * BITS_PER_BYTE); hpfs_brelse4(&qbh); return count; } static unsigned count_bitmaps(struct super_block *s) { unsigned n, count, n_bands; n_bands = (hpfs_sb(s)->sb_fs_size + 0x3fff) >> 14; count = 0; for (n = 0; n < COUNT_RD_AHEAD; n++) { hpfs_prefetch_bitmap(s, n); } for (n = 0; n < n_bands; n++) { unsigned c; hpfs_prefetch_bitmap(s, n + COUNT_RD_AHEAD); c = hpfs_count_one_bitmap(s, le32_to_cpu(hpfs_sb(s)->sb_bmp_dir[n])); if (c != (unsigned)-1) count += c; } return count; } unsigned hpfs_get_free_dnodes(struct super_block *s) { struct hpfs_sb_info *sbi = hpfs_sb(s); if (sbi->sb_n_free_dnodes == (unsigned)-1) { unsigned c = hpfs_count_one_bitmap(s, sbi->sb_dmap); if (c == (unsigned)-1) return 0; sbi->sb_n_free_dnodes = c; } return sbi->sb_n_free_dnodes; } static int hpfs_statfs(struct dentry *dentry, struct kstatfs *buf) { struct super_block *s = dentry->d_sb; struct hpfs_sb_info *sbi = hpfs_sb(s); u64 id = huge_encode_dev(s->s_bdev->bd_dev); hpfs_lock(s); if (sbi->sb_n_free == (unsigned)-1) sbi->sb_n_free = count_bitmaps(s); buf->f_type = s->s_magic; buf->f_bsize = 512; buf->f_blocks = sbi->sb_fs_size; buf->f_bfree = sbi->sb_n_free; buf->f_bavail = sbi->sb_n_free; buf->f_files = sbi->sb_dirband_size / 4; buf->f_ffree = hpfs_get_free_dnodes(s); buf->f_fsid = u64_to_fsid(id); buf->f_namelen = 254; hpfs_unlock(s); return 0; } long hpfs_ioctl(struct file *file, unsigned cmd, unsigned long arg) { switch (cmd) { case FITRIM: { struct fstrim_range range; secno n_trimmed; int r; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (copy_from_user(&range, (struct fstrim_range __user *)arg, sizeof(range))) return -EFAULT; r = hpfs_trim_fs(file_inode(file)->i_sb, range.start >> 9, (range.start + range.len) >> 9, (range.minlen + 511) >> 9, &n_trimmed); if (r) return r; range.len = (u64)n_trimmed << 9; if (copy_to_user((struct fstrim_range __user *)arg, &range, sizeof(range))) return -EFAULT; return 0; } default: { return -ENOIOCTLCMD; } } } static struct kmem_cache * hpfs_inode_cachep; static struct inode *hpfs_alloc_inode(struct super_block *sb) { struct hpfs_inode_info *ei; ei = alloc_inode_sb(sb, hpfs_inode_cachep, GFP_NOFS); if (!ei) return NULL; return &ei->vfs_inode; } static void hpfs_free_inode(struct inode *inode) { kmem_cache_free(hpfs_inode_cachep, hpfs_i(inode)); } static void init_once(void *foo) { struct hpfs_inode_info *ei = (struct hpfs_inode_info *) foo; inode_init_once(&ei->vfs_inode); } static int init_inodecache(void) { hpfs_inode_cachep = kmem_cache_create("hpfs_inode_cache", sizeof(struct hpfs_inode_info), 0, (SLAB_RECLAIM_ACCOUNT| SLAB_ACCOUNT), init_once); if (hpfs_inode_cachep == NULL) return -ENOMEM; return 0; } static void 
destroy_inodecache(void) { /* * Make sure all delayed rcu free inodes are flushed before we * destroy cache. */ rcu_barrier(); kmem_cache_destroy(hpfs_inode_cachep); } enum { Opt_help, Opt_uid, Opt_gid, Opt_umask, Opt_case, Opt_check, Opt_err, Opt_eas, Opt_chkdsk, Opt_timeshift, }; static const struct constant_table hpfs_param_case[] = { {"asis", 0}, {"lower", 1}, {} }; static const struct constant_table hpfs_param_check[] = { {"none", 0}, {"normal", 1}, {"strict", 2}, {} }; static const struct constant_table hpfs_param_err[] = { {"continue", 0}, {"remount-ro", 1}, {"panic", 2}, {} }; static const struct constant_table hpfs_param_eas[] = { {"no", 0}, {"ro", 1}, {"rw", 2}, {} }; static const struct constant_table hpfs_param_chkdsk[] = { {"no", 0}, {"errors", 1}, {"always", 2}, {} }; static const struct fs_parameter_spec hpfs_param_spec[] = { fsparam_flag ("help", Opt_help), fsparam_uid ("uid", Opt_uid), fsparam_gid ("gid", Opt_gid), fsparam_u32oct ("umask", Opt_umask), fsparam_enum ("case", Opt_case, hpfs_param_case), fsparam_enum ("check", Opt_check, hpfs_param_check), fsparam_enum ("errors", Opt_err, hpfs_param_err), fsparam_enum ("eas", Opt_eas, hpfs_param_eas), fsparam_enum ("chkdsk", Opt_chkdsk, hpfs_param_chkdsk), fsparam_s32 ("timeshift", Opt_timeshift), {} }; struct hpfs_fc_context { kuid_t uid; kgid_t gid; umode_t umask; int lowercase; int eas; int chk; int errs; int chkdsk; int timeshift; }; static inline void hpfs_help(void) { pr_info("\n\ HPFS filesystem options:\n\ help do not mount and display this text\n\ uid=xxx set uid of files that don't have uid specified in eas\n\ gid=xxx set gid of files that don't have gid specified in eas\n\ umask=xxx set mode of files that don't have mode specified in eas\n\ case=lower lowercase all files\n\ case=asis do not lowercase files (default)\n\ check=none no fs checks - kernel may crash on corrupted filesystem\n\ check=normal do some checks - it should not crash (default)\n\ check=strict do extra time-consuming checks, used for debugging\n\ errors=continue continue on errors\n\ errors=remount-ro remount read-only if errors found (default)\n\ errors=panic panic on errors\n\ chkdsk=no do not mark fs for chkdsking even if there were errors\n\ chkdsk=errors mark fs dirty if errors found (default)\n\ chkdsk=always always mark fs dirty - used for debugging\n\ eas=no ignore extended attributes\n\ eas=ro read but do not write extended attributes\n\ eas=rw r/w eas => enables chmod, chown, mknod, ln -s (default)\n\ timeshift=nnn add nnn seconds to file times\n\ \n"); } static int hpfs_parse_param(struct fs_context *fc, struct fs_parameter *param) { struct hpfs_fc_context *ctx = fc->fs_private; struct fs_parse_result result; int opt; opt = fs_parse(fc, hpfs_param_spec, param, &result); if (opt < 0) return opt; switch (opt) { case Opt_help: hpfs_help(); return -EINVAL; case Opt_uid: ctx->uid = result.uid; break; case Opt_gid: ctx->gid = result.gid; break; case Opt_umask: ctx->umask = result.uint_32; break; case Opt_case: ctx->lowercase = result.uint_32; break; case Opt_check: ctx->chk = result.uint_32; break; case Opt_err: ctx->errs = result.uint_32; break; case Opt_eas: ctx->eas = result.uint_32; break; case Opt_chkdsk: ctx->chkdsk = result.uint_32; break; case Opt_timeshift: { int m = 1; char *rhs = param->string; int timeshift; if (*rhs == '-') m = -1; if (*rhs == '+' || *rhs == '-') rhs++; timeshift = simple_strtoul(rhs, &rhs, 0) * m; if (*rhs) return -EINVAL; ctx->timeshift = timeshift; break; } default: return -EINVAL; } return 0; } static int 
hpfs_reconfigure(struct fs_context *fc) { struct hpfs_fc_context *ctx = fc->fs_private; struct super_block *s = fc->root->d_sb; struct hpfs_sb_info *sbi = hpfs_sb(s); sync_filesystem(s); fc->sb_flags |= SB_NOATIME; hpfs_lock(s); if (ctx->timeshift != sbi->sb_timeshift) { pr_err("timeshift can't be changed using remount.\n"); goto out_err; } unmark_dirty(s); sbi->sb_uid = ctx->uid; sbi->sb_gid = ctx->gid; sbi->sb_mode = 0777 & ~ctx->umask; sbi->sb_lowercase = ctx->lowercase; sbi->sb_eas = ctx->eas; sbi->sb_chk = ctx->chk; sbi->sb_chkdsk = ctx->chkdsk; sbi->sb_err = ctx->errs; sbi->sb_timeshift = ctx->timeshift; if (!(fc->sb_flags & SB_RDONLY)) mark_dirty(s, 1); hpfs_unlock(s); return 0; out_err: hpfs_unlock(s); return -EINVAL; } static int hpfs_show_options(struct seq_file *seq, struct dentry *root) { struct hpfs_sb_info *sbi = hpfs_sb(root->d_sb); seq_printf(seq, ",uid=%u", from_kuid_munged(&init_user_ns, sbi->sb_uid)); seq_printf(seq, ",gid=%u", from_kgid_munged(&init_user_ns, sbi->sb_gid)); seq_printf(seq, ",umask=%03o", (~sbi->sb_mode & 0777)); if (sbi->sb_lowercase) seq_printf(seq, ",case=lower"); if (!sbi->sb_chk) seq_printf(seq, ",check=none"); if (sbi->sb_chk == 2) seq_printf(seq, ",check=strict"); if (!sbi->sb_err) seq_printf(seq, ",errors=continue"); if (sbi->sb_err == 2) seq_printf(seq, ",errors=panic"); if (!sbi->sb_chkdsk) seq_printf(seq, ",chkdsk=no"); if (sbi->sb_chkdsk == 2) seq_printf(seq, ",chkdsk=always"); if (!sbi->sb_eas) seq_printf(seq, ",eas=no"); if (sbi->sb_eas == 1) seq_printf(seq, ",eas=ro"); if (sbi->sb_timeshift) seq_printf(seq, ",timeshift=%d", sbi->sb_timeshift); return 0; } /* Super operations */ static const struct super_operations hpfs_sops = { .alloc_inode = hpfs_alloc_inode, .free_inode = hpfs_free_inode, .evict_inode = hpfs_evict_inode, .put_super = hpfs_put_super, .statfs = hpfs_statfs, .show_options = hpfs_show_options, }; static int hpfs_fill_super(struct super_block *s, struct fs_context *fc) { struct hpfs_fc_context *ctx = fc->fs_private; struct buffer_head *bh0, *bh1, *bh2; struct hpfs_boot_block *bootblock; struct hpfs_super_block *superblock; struct hpfs_spare_block *spareblock; struct hpfs_sb_info *sbi; struct inode *root; int silent = fc->sb_flags & SB_SILENT; dnode_secno root_dno; struct hpfs_dirent *de = NULL; struct quad_buffer_head qbh; sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); if (!sbi) { return -ENOMEM; } s->s_fs_info = sbi; mutex_init(&sbi->hpfs_mutex); hpfs_lock(s); /*sbi->sb_mounting = 1;*/ sb_set_blocksize(s, 512); sbi->sb_fs_size = -1; if (!(bootblock = hpfs_map_sector(s, 0, &bh0, 0))) goto bail1; if (!(superblock = hpfs_map_sector(s, 16, &bh1, 1))) goto bail2; if (!(spareblock = hpfs_map_sector(s, 17, &bh2, 0))) goto bail3; /* Check magics */ if (/*le16_to_cpu(bootblock->magic) != BB_MAGIC ||*/ le32_to_cpu(superblock->magic) != SB_MAGIC || le32_to_cpu(spareblock->magic) != SP_MAGIC) { if (!silent) pr_err("Bad magic ... probably not HPFS\n"); goto bail4; } /* Check version */ if (!sb_rdonly(s) && superblock->funcversion != 2 && superblock->funcversion != 3) { pr_err("Bad version %d,%d. 
Mount readonly to go around\n", (int)superblock->version, (int)superblock->funcversion); pr_err("please try recent version of HPFS driver at http://artax.karlin.mff.cuni.cz/~mikulas/vyplody/hpfs/index-e.cgi and if it still can't understand this format, contact author - mikulas@artax.karlin.mff.cuni.cz\n"); goto bail4; } s->s_flags |= SB_NOATIME; /* Fill superblock stuff */ s->s_magic = HPFS_SUPER_MAGIC; s->s_op = &hpfs_sops; set_default_d_op(s, &hpfs_dentry_operations); s->s_time_min = local_to_gmt(s, 0); s->s_time_max = local_to_gmt(s, U32_MAX); sbi->sb_root = le32_to_cpu(superblock->root); sbi->sb_fs_size = le32_to_cpu(superblock->n_sectors); sbi->sb_bitmaps = le32_to_cpu(superblock->bitmaps); sbi->sb_dirband_start = le32_to_cpu(superblock->dir_band_start); sbi->sb_dirband_size = le32_to_cpu(superblock->n_dir_band); sbi->sb_dmap = le32_to_cpu(superblock->dir_band_bitmap); sbi->sb_uid = ctx->uid; sbi->sb_gid = ctx->gid; sbi->sb_mode = 0777 & ~ctx->umask; sbi->sb_n_free = -1; sbi->sb_n_free_dnodes = -1; sbi->sb_lowercase = ctx->lowercase; sbi->sb_eas = ctx->eas; sbi->sb_chk = ctx->chk; sbi->sb_chkdsk = ctx->chkdsk; sbi->sb_err = ctx->errs; sbi->sb_timeshift = ctx->timeshift; sbi->sb_was_error = 0; sbi->sb_cp_table = NULL; sbi->sb_c_bitmap = -1; sbi->sb_max_fwd_alloc = 0xffffff; if (sbi->sb_fs_size >= 0x80000000) { hpfs_error(s, "invalid size in superblock: %08x", (unsigned)sbi->sb_fs_size); goto bail4; } if (spareblock->n_spares_used) hpfs_load_hotfix_map(s, spareblock); /* Load bitmap directory */ if (!(sbi->sb_bmp_dir = hpfs_load_bitmap_directory(s, le32_to_cpu(superblock->bitmaps)))) goto bail4; /* Check for general fs errors*/ if (spareblock->dirty && !spareblock->old_wrote) { if (sbi->sb_err == 2) { pr_err("Improperly stopped, not mounted\n"); goto bail4; } hpfs_error(s, "improperly stopped"); } if (!sb_rdonly(s)) { spareblock->dirty = 1; spareblock->old_wrote = 0; mark_buffer_dirty(bh2); } if (le32_to_cpu(spareblock->n_dnode_spares) != le32_to_cpu(spareblock->n_dnode_spares_free)) { if (sbi->sb_err >= 2) { pr_err("Spare dnodes used, try chkdsk\n"); mark_dirty(s, 0); goto bail4; } hpfs_error(s, "warning: spare dnodes used, try chkdsk"); if (sbi->sb_err == 0) pr_err("Proceeding, but your filesystem could be corrupted if you delete files or directories\n"); } if (sbi->sb_chk) { unsigned a; if (le32_to_cpu(superblock->dir_band_end) - le32_to_cpu(superblock->dir_band_start) + 1 != le32_to_cpu(superblock->n_dir_band) || le32_to_cpu(superblock->dir_band_end) < le32_to_cpu(superblock->dir_band_start) || le32_to_cpu(superblock->n_dir_band) > 0x4000) { hpfs_error(s, "dir band size mismatch: dir_band_start==%08x, dir_band_end==%08x, n_dir_band==%08x", le32_to_cpu(superblock->dir_band_start), le32_to_cpu(superblock->dir_band_end), le32_to_cpu(superblock->n_dir_band)); goto bail4; } a = sbi->sb_dirband_size; sbi->sb_dirband_size = 0; if (hpfs_chk_sectors(s, le32_to_cpu(superblock->dir_band_start), le32_to_cpu(superblock->n_dir_band), "dir_band") || hpfs_chk_sectors(s, le32_to_cpu(superblock->dir_band_bitmap), 4, "dir_band_bitmap") || hpfs_chk_sectors(s, le32_to_cpu(superblock->bitmaps), 4, "bitmaps")) { mark_dirty(s, 0); goto bail4; } sbi->sb_dirband_size = a; } else pr_err("You really don't want any checks? 
You are crazy...\n"); /* Load code page table */ if (le32_to_cpu(spareblock->n_code_pages)) if (!(sbi->sb_cp_table = hpfs_load_code_page(s, le32_to_cpu(spareblock->code_page_dir)))) pr_err("code page support is disabled\n"); brelse(bh2); brelse(bh1); brelse(bh0); root = iget_locked(s, sbi->sb_root); if (!root) goto bail0; hpfs_init_inode(root); hpfs_read_inode(root); unlock_new_inode(root); s->s_root = d_make_root(root); if (!s->s_root) goto bail0; /* * find the root directory's . pointer & finish filling in the inode */ root_dno = hpfs_fnode_dno(s, sbi->sb_root); if (root_dno) de = map_dirent(root, root_dno, "\001\001", 2, NULL, &qbh); if (!de) hpfs_error(s, "unable to find root dir"); else { inode_set_atime(root, local_to_gmt(s, le32_to_cpu(de->read_date)), 0); inode_set_mtime(root, local_to_gmt(s, le32_to_cpu(de->write_date)), 0); inode_set_ctime(root, local_to_gmt(s, le32_to_cpu(de->creation_date)), 0); hpfs_i(root)->i_ea_size = le32_to_cpu(de->ea_size); hpfs_i(root)->i_parent_dir = root->i_ino; if (root->i_size == -1) root->i_size = 2048; if (root->i_blocks == -1) root->i_blocks = 5; hpfs_brelse4(&qbh); } hpfs_unlock(s); return 0; bail4: brelse(bh2); bail3: brelse(bh1); bail2: brelse(bh0); bail1: bail0: hpfs_unlock(s); free_sbi(sbi); return -EINVAL; } static int hpfs_get_tree(struct fs_context *fc) { return get_tree_bdev(fc, hpfs_fill_super); } static void hpfs_free_fc(struct fs_context *fc) { kfree(fc->fs_private); } static const struct fs_context_operations hpfs_fc_context_ops = { .parse_param = hpfs_parse_param, .get_tree = hpfs_get_tree, .reconfigure = hpfs_reconfigure, .free = hpfs_free_fc, }; static int hpfs_init_fs_context(struct fs_context *fc) { struct hpfs_fc_context *ctx; ctx = kzalloc(sizeof(struct hpfs_fc_context), GFP_KERNEL); if (!ctx) return -ENOMEM; if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE) { struct super_block *sb = fc->root->d_sb; struct hpfs_sb_info *sbi = hpfs_sb(sb); ctx->uid = sbi->sb_uid; ctx->gid = sbi->sb_gid; ctx->umask = 0777 & ~sbi->sb_mode; ctx->lowercase = sbi->sb_lowercase; ctx->eas = sbi->sb_eas; ctx->chk = sbi->sb_chk; ctx->chkdsk = sbi->sb_chkdsk; ctx->errs = sbi->sb_err; ctx->timeshift = sbi->sb_timeshift; } else { ctx->uid = current_uid(); ctx->gid = current_gid(); ctx->umask = current_umask(); ctx->lowercase = 0; ctx->eas = 2; ctx->chk = 1; ctx->errs = 1; ctx->chkdsk = 1; ctx->timeshift = 0; } fc->fs_private = ctx; fc->ops = &hpfs_fc_context_ops; return 0; }; static struct file_system_type hpfs_fs_type = { .owner = THIS_MODULE, .name = "hpfs", .kill_sb = kill_block_super, .fs_flags = FS_REQUIRES_DEV, .init_fs_context = hpfs_init_fs_context, .parameters = hpfs_param_spec, }; MODULE_ALIAS_FS("hpfs"); static int __init init_hpfs_fs(void) { int err = init_inodecache(); if (err) goto out1; err = register_filesystem(&hpfs_fs_type); if (err) goto out; return 0; out: destroy_inodecache(); out1: return err; } static void __exit exit_hpfs_fs(void) { unregister_filesystem(&hpfs_fs_type); destroy_inodecache(); } module_init(init_hpfs_fs) module_exit(exit_hpfs_fs) MODULE_DESCRIPTION("OS/2 HPFS file system support"); MODULE_LICENSE("GPL");
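The hpfs_stop_cycles() helper above uses a Brent-style cycle check: it re-anchors c1 to the current key whenever the step counter c2 reaches a power of two ((c2 - 1) & c2 == 0), so any loop is reported within a small constant factor of its length using only two integers of state. The standalone sketch below (plain C, not kernel code) shows the same idea on a toy linked list; it assumes keys are unique, as disk sector numbers are, so a repeated key really means a cycle.

/*
 * Sketch of the cycle-detection trick used by hpfs_stop_cycles():
 * remember the current key whenever the step counter hits a power of
 * two, and report a cycle when the remembered key comes around again.
 */
#include <stdbool.h>

struct node {
	int key;			/* unique id, e.g. a sector number */
	const struct node *next;
};

static bool list_has_cycle(const struct node *n)
{
	int c1 = 0, c2 = 0;		/* same roles as in hpfs_stop_cycles() */

	for (; n; n = n->next) {
		if (c2 && n->key == c1)
			return true;		/* anchor key seen again */
		c2++;
		if (!((c2 - 1) & c2))		/* c2 is a power of two */
			c1 = n->key;		/* move the anchor forward */
	}
	return false;
}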
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Handle bridge arp/nd proxy/suppress
 *
 *  Copyright (C) 2017 Cumulus Networks
 *  Copyright (c) 2017 Roopa Prabhu <roopa@cumulusnetworks.com>
 *
 *  Authors:
 *	Roopa Prabhu <roopa@cumulusnetworks.com>
 */

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/neighbour.h>
#include <net/arp.h>
#include <linux/if_vlan.h>
#include <linux/inetdevice.h>
#include <net/addrconf.h>
#include <net/ipv6_stubs.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ip6_checksum.h>
#endif

#include "br_private.h"

void br_recalculate_neigh_suppress_enabled(struct net_bridge *br)
{
	struct net_bridge_port *p;
	bool neigh_suppress = false;

	list_for_each_entry(p, &br->port_list, list) {
		if (p->flags & (BR_NEIGH_SUPPRESS | BR_NEIGH_VLAN_SUPPRESS)) {
			neigh_suppress = true;
			break;
		}
	}

	br_opt_toggle(br, BROPT_NEIGH_SUPPRESS_ENABLED, neigh_suppress);
}

#if IS_ENABLED(CONFIG_INET)
static void br_arp_send(struct net_bridge *br, struct net_bridge_port *p,
			struct net_device *dev, __be32 dest_ip, __be32 src_ip,
			const unsigned char *dest_hw,
			const unsigned char *src_hw,
			const unsigned char *target_hw,
			__be16 vlan_proto, u16 vlan_tci)
{
	struct net_bridge_vlan_group *vg;
	struct sk_buff *skb;
	u16 pvid;

	netdev_dbg(dev, "arp send dev %s dst %pI4 dst_hw %pM src %pI4 src_hw %pM\n",
		   dev->name, &dest_ip, dest_hw, &src_ip, src_hw);

	if (!vlan_tci) {
		arp_send(ARPOP_REPLY, ETH_P_ARP, dest_ip, dev, src_ip,
			 dest_hw, src_hw, target_hw);
		return;
	}

	skb = arp_create(ARPOP_REPLY, ETH_P_ARP, dest_ip, dev, src_ip,
			 dest_hw, src_hw, target_hw);
	if (!skb)
return; if (p) vg = nbp_vlan_group_rcu(p); else vg = br_vlan_group_rcu(br); pvid = br_get_pvid(vg); if (pvid == (vlan_tci & VLAN_VID_MASK)) vlan_tci = 0; if (vlan_tci) __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); if (p) { arp_xmit(skb); } else { skb_reset_mac_header(skb); __skb_pull(skb, skb_network_offset(skb)); skb->ip_summed = CHECKSUM_UNNECESSARY; skb->pkt_type = PACKET_HOST; netif_rx(skb); } } static int br_chk_addr_ip(struct net_device *dev, struct netdev_nested_priv *priv) { __be32 ip = *(__be32 *)priv->data; struct in_device *in_dev; __be32 addr = 0; in_dev = __in_dev_get_rcu(dev); if (in_dev) addr = inet_confirm_addr(dev_net(dev), in_dev, 0, ip, RT_SCOPE_HOST); if (addr == ip) return 1; return 0; } static bool br_is_local_ip(struct net_device *dev, __be32 ip) { struct netdev_nested_priv priv = { .data = (void *)&ip, }; if (br_chk_addr_ip(dev, &priv)) return true; /* check if ip is configured on upper dev */ if (netdev_walk_all_upper_dev_rcu(dev, br_chk_addr_ip, &priv)) return true; return false; } void br_do_proxy_suppress_arp(struct sk_buff *skb, struct net_bridge *br, u16 vid, struct net_bridge_port *p) { struct net_device *dev = br->dev; struct net_device *vlandev = dev; struct neighbour *n; struct arphdr *parp; u8 *arpptr, *sha; __be32 sip, tip; BR_INPUT_SKB_CB(skb)->proxyarp_replied = 0; if ((dev->flags & IFF_NOARP) || !pskb_may_pull(skb, arp_hdr_len(dev))) return; parp = arp_hdr(skb); if (parp->ar_pro != htons(ETH_P_IP) || parp->ar_hln != dev->addr_len || parp->ar_pln != 4) return; arpptr = (u8 *)parp + sizeof(struct arphdr); sha = arpptr; arpptr += dev->addr_len; /* sha */ memcpy(&sip, arpptr, sizeof(sip)); arpptr += sizeof(sip); arpptr += dev->addr_len; /* tha */ memcpy(&tip, arpptr, sizeof(tip)); if (ipv4_is_loopback(tip) || ipv4_is_multicast(tip)) return; if (br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED)) { if (br_is_neigh_suppress_enabled(p, vid)) return; if (is_unicast_ether_addr(eth_hdr(skb)->h_dest) && parp->ar_op == htons(ARPOP_REQUEST)) return; if (parp->ar_op != htons(ARPOP_RREQUEST) && parp->ar_op != htons(ARPOP_RREPLY) && (ipv4_is_zeronet(sip) || sip == tip)) { /* prevent flooding to neigh suppress ports */ BR_INPUT_SKB_CB(skb)->proxyarp_replied = 1; return; } } if (parp->ar_op != htons(ARPOP_REQUEST)) return; if (vid != 0) { vlandev = __vlan_find_dev_deep_rcu(br->dev, skb->vlan_proto, vid); if (!vlandev) return; } if (br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED) && br_is_local_ip(vlandev, tip)) { /* its our local ip, so don't proxy reply * and don't forward to neigh suppress ports */ BR_INPUT_SKB_CB(skb)->proxyarp_replied = 1; return; } n = neigh_lookup(&arp_tbl, &tip, vlandev); if (n) { struct net_bridge_fdb_entry *f; if (!(READ_ONCE(n->nud_state) & NUD_VALID)) { neigh_release(n); return; } f = br_fdb_find_rcu(br, n->ha, vid); if (f) { bool replied = false; if ((p && (p->flags & BR_PROXYARP)) || (f->dst && (f->dst->flags & BR_PROXYARP_WIFI)) || br_is_neigh_suppress_enabled(f->dst, vid)) { if (!vid) br_arp_send(br, p, skb->dev, sip, tip, sha, n->ha, sha, 0, 0); else br_arp_send(br, p, skb->dev, sip, tip, sha, n->ha, sha, skb->vlan_proto, skb_vlan_tag_get(skb)); replied = true; } /* If we have replied or as long as we know the * mac, indicate to arp replied */ if (replied || br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED)) BR_INPUT_SKB_CB(skb)->proxyarp_replied = 1; } neigh_release(n); } } #endif #if IS_ENABLED(CONFIG_IPV6) struct nd_msg *br_is_nd_neigh_msg(const struct sk_buff *skb, struct nd_msg *msg) { struct nd_msg *m; m = skb_header_pointer(skb, 
skb_network_offset(skb) + sizeof(struct ipv6hdr), sizeof(*msg), msg); if (!m) return NULL; if (m->icmph.icmp6_code != 0 || (m->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION && m->icmph.icmp6_type != NDISC_NEIGHBOUR_ADVERTISEMENT)) return NULL; return m; } static void br_nd_send(struct net_bridge *br, struct net_bridge_port *p, struct sk_buff *request, struct neighbour *n, __be16 vlan_proto, u16 vlan_tci, struct nd_msg *ns) { struct net_device *dev = request->dev; struct net_bridge_vlan_group *vg; struct sk_buff *reply; struct nd_msg *na; struct ipv6hdr *pip6; int na_olen = 8; /* opt hdr + ETH_ALEN for target */ int ns_olen; int i, len; u8 *daddr; u16 pvid; if (!dev) return; len = LL_RESERVED_SPACE(dev) + sizeof(struct ipv6hdr) + sizeof(*na) + na_olen + dev->needed_tailroom; reply = alloc_skb(len, GFP_ATOMIC); if (!reply) return; reply->protocol = htons(ETH_P_IPV6); reply->dev = dev; skb_reserve(reply, LL_RESERVED_SPACE(dev)); skb_push(reply, sizeof(struct ethhdr)); skb_set_mac_header(reply, 0); daddr = eth_hdr(request)->h_source; /* Do we need option processing ? */ ns_olen = request->len - (skb_network_offset(request) + sizeof(struct ipv6hdr)) - sizeof(*ns); for (i = 0; i < ns_olen - 1; i += (ns->opt[i + 1] << 3)) { if (!ns->opt[i + 1]) { kfree_skb(reply); return; } if (ns->opt[i] == ND_OPT_SOURCE_LL_ADDR) { daddr = ns->opt + i + sizeof(struct nd_opt_hdr); break; } } /* Ethernet header */ ether_addr_copy(eth_hdr(reply)->h_dest, daddr); ether_addr_copy(eth_hdr(reply)->h_source, n->ha); eth_hdr(reply)->h_proto = htons(ETH_P_IPV6); reply->protocol = htons(ETH_P_IPV6); skb_pull(reply, sizeof(struct ethhdr)); skb_set_network_header(reply, 0); skb_put(reply, sizeof(struct ipv6hdr)); /* IPv6 header */ pip6 = ipv6_hdr(reply); memset(pip6, 0, sizeof(struct ipv6hdr)); pip6->version = 6; pip6->priority = ipv6_hdr(request)->priority; pip6->nexthdr = IPPROTO_ICMPV6; pip6->hop_limit = 255; pip6->daddr = ipv6_hdr(request)->saddr; pip6->saddr = *(struct in6_addr *)n->primary_key; skb_pull(reply, sizeof(struct ipv6hdr)); skb_set_transport_header(reply, 0); na = (struct nd_msg *)skb_put(reply, sizeof(*na) + na_olen); /* Neighbor Advertisement */ memset(na, 0, sizeof(*na) + na_olen); na->icmph.icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT; na->icmph.icmp6_router = (n->flags & NTF_ROUTER) ? 
1 : 0; na->icmph.icmp6_override = 1; na->icmph.icmp6_solicited = 1; na->target = ns->target; ether_addr_copy(&na->opt[2], n->ha); na->opt[0] = ND_OPT_TARGET_LL_ADDR; na->opt[1] = na_olen >> 3; na->icmph.icmp6_cksum = csum_ipv6_magic(&pip6->saddr, &pip6->daddr, sizeof(*na) + na_olen, IPPROTO_ICMPV6, csum_partial(na, sizeof(*na) + na_olen, 0)); pip6->payload_len = htons(sizeof(*na) + na_olen); skb_push(reply, sizeof(struct ipv6hdr)); skb_push(reply, sizeof(struct ethhdr)); reply->ip_summed = CHECKSUM_UNNECESSARY; if (p) vg = nbp_vlan_group_rcu(p); else vg = br_vlan_group_rcu(br); pvid = br_get_pvid(vg); if (pvid == (vlan_tci & VLAN_VID_MASK)) vlan_tci = 0; if (vlan_tci) __vlan_hwaccel_put_tag(reply, vlan_proto, vlan_tci); netdev_dbg(dev, "nd send dev %s dst %pI6 dst_hw %pM src %pI6 src_hw %pM\n", dev->name, &pip6->daddr, daddr, &pip6->saddr, n->ha); if (p) { dev_queue_xmit(reply); } else { skb_reset_mac_header(reply); __skb_pull(reply, skb_network_offset(reply)); reply->ip_summed = CHECKSUM_UNNECESSARY; reply->pkt_type = PACKET_HOST; netif_rx(reply); } } static int br_chk_addr_ip6(struct net_device *dev, struct netdev_nested_priv *priv) { struct in6_addr *addr = (struct in6_addr *)priv->data; if (ipv6_chk_addr(dev_net(dev), addr, dev, 0)) return 1; return 0; } static bool br_is_local_ip6(struct net_device *dev, struct in6_addr *addr) { struct netdev_nested_priv priv = { .data = (void *)addr, }; if (br_chk_addr_ip6(dev, &priv)) return true; /* check if ip is configured on upper dev */ if (netdev_walk_all_upper_dev_rcu(dev, br_chk_addr_ip6, &priv)) return true; return false; } void br_do_suppress_nd(struct sk_buff *skb, struct net_bridge *br, u16 vid, struct net_bridge_port *p, struct nd_msg *msg) { struct net_device *dev = br->dev; struct net_device *vlandev = NULL; struct in6_addr *saddr, *daddr; struct ipv6hdr *iphdr; struct neighbour *n; BR_INPUT_SKB_CB(skb)->proxyarp_replied = 0; if (br_is_neigh_suppress_enabled(p, vid)) return; if (is_unicast_ether_addr(eth_hdr(skb)->h_dest) && msg->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) return; if (msg->icmph.icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT && !msg->icmph.icmp6_solicited) { /* prevent flooding to neigh suppress ports */ BR_INPUT_SKB_CB(skb)->proxyarp_replied = 1; return; } if (msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION) return; iphdr = ipv6_hdr(skb); saddr = &iphdr->saddr; daddr = &iphdr->daddr; if (ipv6_addr_any(saddr) || !ipv6_addr_cmp(saddr, daddr)) { /* prevent flooding to neigh suppress ports */ BR_INPUT_SKB_CB(skb)->proxyarp_replied = 1; return; } if (vid != 0) { /* build neigh table lookup on the vlan device */ vlandev = __vlan_find_dev_deep_rcu(br->dev, skb->vlan_proto, vid); if (!vlandev) return; } else { vlandev = dev; } if (br_is_local_ip6(vlandev, &msg->target)) { /* its our own ip, so don't proxy reply * and don't forward to arp suppress ports */ BR_INPUT_SKB_CB(skb)->proxyarp_replied = 1; return; } n = neigh_lookup(ipv6_stub->nd_tbl, &msg->target, vlandev); if (n) { struct net_bridge_fdb_entry *f; if (!(READ_ONCE(n->nud_state) & NUD_VALID)) { neigh_release(n); return; } f = br_fdb_find_rcu(br, n->ha, vid); if (f) { bool replied = false; if (br_is_neigh_suppress_enabled(f->dst, vid)) { if (vid != 0) br_nd_send(br, p, skb, n, skb->vlan_proto, skb_vlan_tag_get(skb), msg); else br_nd_send(br, p, skb, n, 0, 0, msg); replied = true; } /* If we have replied or as long as we know the * mac, indicate to NEIGH_SUPPRESS ports that we * have replied */ if (replied || br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED)) 
BR_INPUT_SKB_CB(skb)->proxyarp_replied = 1; } neigh_release(n); } } #endif bool br_is_neigh_suppress_enabled(const struct net_bridge_port *p, u16 vid) { if (!p) return false; if (!vid) return !!(p->flags & BR_NEIGH_SUPPRESS); if (p->flags & BR_NEIGH_VLAN_SUPPRESS) { struct net_bridge_vlan_group *vg = nbp_vlan_group_rcu(p); struct net_bridge_vlan *v; v = br_vlan_find(vg, vid); if (!v) return false; return !!(v->priv_flags & BR_VLFLAG_NEIGH_SUPPRESS_ENABLED); } else { return !!(p->flags & BR_NEIGH_SUPPRESS); } }
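br_is_neigh_suppress_enabled() above encodes a precedence rule: with no VLAN context (vid 0) the port-wide flag decides, and the port-wide flag also decides unless BR_NEIGH_VLAN_SUPPRESS switches the lookup to the per-VLAN flag. The toy model below condenses that decision in plain C; the toy_* types and PORT_* flags are hypothetical stand-ins for the kernel structures.

/*
 * Condensed model of the suppress-decision precedence in
 * br_is_neigh_suppress_enabled() (plain C, hypothetical types).
 */
#include <stdbool.h>
#include <stdint.h>

#define PORT_SUPPRESS		0x1	/* models BR_NEIGH_SUPPRESS */
#define PORT_VLAN_SUPPRESS	0x2	/* models BR_NEIGH_VLAN_SUPPRESS */

struct toy_vlan { uint16_t vid; bool suppress; };
struct toy_port {
	unsigned int flags;
	const struct toy_vlan *vlans;
	int n_vlans;
};

static bool suppress_enabled(const struct toy_port *p, uint16_t vid)
{
	int i;

	if (!p)
		return false;
	/* vid 0 (no VLAN context) always falls back to the port flag,
	 * and so does a port without per-VLAN mode enabled.
	 */
	if (!vid || !(p->flags & PORT_VLAN_SUPPRESS))
		return p->flags & PORT_SUPPRESS;
	for (i = 0; i < p->n_vlans; i++)
		if (p->vlans[i].vid == vid)
			return p->vlans[i].suppress;
	return false;	/* VLAN not configured on the port */
}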
// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/usb/core/driver.c - most of the driver model stuff for usb
 *
 * (C) Copyright 2005 Greg Kroah-Hartman <gregkh@suse.de>
 *
 * based on drivers/usb/usb.c which had the following copyrights:
 *	(C) Copyright Linus Torvalds 1999
 *	(C) Copyright Johannes Erdfelt 1999-2001
 *	(C) Copyright Andreas Gal 1999
 *	(C) Copyright Gregory P. Smith 1999
 *	(C) Copyright Deti Fliegl 1999 (new USB architecture)
 *	(C) Copyright Randy Dunlap 2000
 *	(C) Copyright David Brownell 2000-2004
 *	(C) Copyright Yggdrasil Computing, Inc. 2000
 *		(usb_device_id matching changes by Adam J. Richter)
 *	(C) Copyright Greg Kroah-Hartman 2002-2003
 *
 * Released under the GPLv2 only.
 *
 * NOTE!
This is not actually a driver at all, rather this is * just a collection of helper routines that implement the * matching, probing, releasing, suspending and resuming for * real drivers. * */ #include <linux/device.h> #include <linux/slab.h> #include <linux/export.h> #include <linux/usb.h> #include <linux/usb/quirks.h> #include <linux/usb/hcd.h> #include "usb.h" /* * Adds a new dynamic USBdevice ID to this driver, * and cause the driver to probe for all devices again. */ ssize_t usb_store_new_id(struct usb_dynids *dynids, const struct usb_device_id *id_table, struct device_driver *driver, const char *buf, size_t count) { struct usb_dynid *dynid; u32 idVendor = 0; u32 idProduct = 0; unsigned int bInterfaceClass = 0; u32 refVendor, refProduct; int fields = 0; int retval = 0; fields = sscanf(buf, "%x %x %x %x %x", &idVendor, &idProduct, &bInterfaceClass, &refVendor, &refProduct); if (fields < 2) return -EINVAL; dynid = kzalloc(sizeof(*dynid), GFP_KERNEL); if (!dynid) return -ENOMEM; INIT_LIST_HEAD(&dynid->node); dynid->id.idVendor = idVendor; dynid->id.idProduct = idProduct; dynid->id.match_flags = USB_DEVICE_ID_MATCH_DEVICE; if (fields > 2 && bInterfaceClass) { if (bInterfaceClass > 255) { retval = -EINVAL; goto fail; } dynid->id.bInterfaceClass = (u8)bInterfaceClass; dynid->id.match_flags |= USB_DEVICE_ID_MATCH_INT_CLASS; } if (fields > 4) { const struct usb_device_id *id = id_table; if (!id) { retval = -ENODEV; goto fail; } for (; id->match_flags; id++) if (id->idVendor == refVendor && id->idProduct == refProduct) break; if (id->match_flags) { dynid->id.driver_info = id->driver_info; } else { retval = -ENODEV; goto fail; } } mutex_lock(&usb_dynids_lock); list_add_tail(&dynid->node, &dynids->list); mutex_unlock(&usb_dynids_lock); retval = driver_attach(driver); if (retval) return retval; return count; fail: kfree(dynid); return retval; } EXPORT_SYMBOL_GPL(usb_store_new_id); ssize_t usb_show_dynids(struct usb_dynids *dynids, char *buf) { struct usb_dynid *dynid; size_t count = 0; guard(mutex)(&usb_dynids_lock); list_for_each_entry(dynid, &dynids->list, node) if (dynid->id.bInterfaceClass != 0) count += sysfs_emit_at(&buf[count], count, "%04x %04x %02x\n", dynid->id.idVendor, dynid->id.idProduct, dynid->id.bInterfaceClass); else count += sysfs_emit_at(&buf[count], count, "%04x %04x\n", dynid->id.idVendor, dynid->id.idProduct); return count; } EXPORT_SYMBOL_GPL(usb_show_dynids); static ssize_t new_id_show(struct device_driver *driver, char *buf) { struct usb_driver *usb_drv = to_usb_driver(driver); return usb_show_dynids(&usb_drv->dynids, buf); } static ssize_t new_id_store(struct device_driver *driver, const char *buf, size_t count) { struct usb_driver *usb_drv = to_usb_driver(driver); return usb_store_new_id(&usb_drv->dynids, usb_drv->id_table, driver, buf, count); } static DRIVER_ATTR_RW(new_id); /* * Remove a USB device ID from this driver */ static ssize_t remove_id_store(struct device_driver *driver, const char *buf, size_t count) { struct usb_dynid *dynid, *n; struct usb_driver *usb_driver = to_usb_driver(driver); u32 idVendor; u32 idProduct; int fields; fields = sscanf(buf, "%x %x", &idVendor, &idProduct); if (fields < 2) return -EINVAL; guard(mutex)(&usb_dynids_lock); list_for_each_entry_safe(dynid, n, &usb_driver->dynids.list, node) { struct usb_device_id *id = &dynid->id; if ((id->idVendor == idVendor) && (id->idProduct == idProduct)) { list_del(&dynid->node); kfree(dynid); break; } } return count; } static ssize_t remove_id_show(struct device_driver *driver, char *buf) { return 
new_id_show(driver, buf); } static DRIVER_ATTR_RW(remove_id); static int usb_create_newid_files(struct usb_driver *usb_drv) { int error = 0; if (usb_drv->no_dynamic_id) goto exit; if (usb_drv->probe != NULL) { error = driver_create_file(&usb_drv->driver, &driver_attr_new_id); if (error == 0) { error = driver_create_file(&usb_drv->driver, &driver_attr_remove_id); if (error) driver_remove_file(&usb_drv->driver, &driver_attr_new_id); } } exit: return error; } static void usb_remove_newid_files(struct usb_driver *usb_drv) { if (usb_drv->no_dynamic_id) return; if (usb_drv->probe != NULL) { driver_remove_file(&usb_drv->driver, &driver_attr_remove_id); driver_remove_file(&usb_drv->driver, &driver_attr_new_id); } } static void usb_free_dynids(struct usb_driver *usb_drv) { struct usb_dynid *dynid, *n; guard(mutex)(&usb_dynids_lock); list_for_each_entry_safe(dynid, n, &usb_drv->dynids.list, node) { list_del(&dynid->node); kfree(dynid); } } static const struct usb_device_id *usb_match_dynamic_id(struct usb_interface *intf, const struct usb_driver *drv) { struct usb_dynid *dynid; guard(mutex)(&usb_dynids_lock); list_for_each_entry(dynid, &drv->dynids.list, node) { if (usb_match_one_id(intf, &dynid->id)) { return &dynid->id; } } return NULL; } /* called from driver core with dev locked */ static int usb_probe_device(struct device *dev) { struct usb_device_driver *udriver = to_usb_device_driver(dev->driver); struct usb_device *udev = to_usb_device(dev); int error = 0; dev_dbg(dev, "%s\n", __func__); /* TODO: Add real matching code */ /* The device should always appear to be in use * unless the driver supports autosuspend. */ if (!udriver->supports_autosuspend) error = usb_autoresume_device(udev); if (error) return error; if (udriver->generic_subclass) error = usb_generic_driver_probe(udev); if (error) return error; /* Probe the USB device with the driver in hand, but only * defer to a generic driver in case the current USB * device driver has an id_table or a match function; i.e., * when the device driver was explicitly matched against * a device. * * If the device driver does not have either of these, * then we assume that it can bind to any device and is * not truly a more specialized/non-generic driver, so a * return value of -ENODEV should not force the device * to be handled by the generic USB driver, as there * can still be another, more specialized, device driver. * * This accommodates the usbip driver. * * TODO: What if, in the future, there are multiple * specialized USB device drivers for a particular device? * In such cases, there is a need to try all matching * specialised device drivers prior to setting the * use_generic_driver bit. 
*/ if (udriver->probe) error = udriver->probe(udev); else if (!udriver->generic_subclass) error = -EINVAL; if (error == -ENODEV && udriver != &usb_generic_driver && (udriver->id_table || udriver->match)) { udev->use_generic_driver = 1; return -EPROBE_DEFER; } return error; } /* called from driver core with dev locked */ static int usb_unbind_device(struct device *dev) { struct usb_device *udev = to_usb_device(dev); struct usb_device_driver *udriver = to_usb_device_driver(dev->driver); if (udriver->disconnect) udriver->disconnect(udev); if (udriver->generic_subclass) usb_generic_driver_disconnect(udev); if (!udriver->supports_autosuspend) usb_autosuspend_device(udev); return 0; } /* called from driver core with dev locked */ static int usb_probe_interface(struct device *dev) { struct usb_driver *driver = to_usb_driver(dev->driver); struct usb_interface *intf = to_usb_interface(dev); struct usb_device *udev = interface_to_usbdev(intf); const struct usb_device_id *id; int error = -ENODEV; int lpm_disable_error = -ENODEV; dev_dbg(dev, "%s\n", __func__); intf->needs_binding = 0; if (usb_device_is_owned(udev)) return error; if (udev->authorized == 0) { dev_err(&intf->dev, "Device is not authorized for usage\n"); return error; } else if (intf->authorized == 0) { dev_err(&intf->dev, "Interface %d is not authorized for usage\n", intf->altsetting->desc.bInterfaceNumber); return error; } id = usb_match_dynamic_id(intf, driver); if (!id) id = usb_match_id(intf, driver->id_table); if (!id) return error; dev_dbg(dev, "%s - got id\n", __func__); error = usb_autoresume_device(udev); if (error) return error; intf->condition = USB_INTERFACE_BINDING; /* Probed interfaces are initially active. They are * runtime-PM-enabled only if the driver has autosuspend support. * They are sensitive to their children's power states. */ pm_runtime_set_active(dev); pm_suspend_ignore_children(dev, false); if (driver->supports_autosuspend) pm_runtime_enable(dev); /* If the new driver doesn't allow hub-initiated LPM, and we can't * disable hub-initiated LPM, then fail the probe. * * Otherwise, leaving LPM enabled should be harmless, because the * endpoint intervals should remain the same, and the U1/U2 timeouts * should remain the same. * * If we need to install alt setting 0 before probe, or another alt * setting during probe, that should also be fine. usb_set_interface() * will attempt to disable LPM, and fail if it can't disable it. */ if (driver->disable_hub_initiated_lpm) { lpm_disable_error = usb_unlocked_disable_lpm(udev); if (lpm_disable_error) { dev_err(&intf->dev, "%s Failed to disable LPM for driver %s\n", __func__, driver->name); error = lpm_disable_error; goto err; } } /* Carry out a deferred switch to altsetting 0 */ if (intf->needs_altsetting0) { error = usb_set_interface(udev, intf->altsetting[0]. desc.bInterfaceNumber, 0); if (error < 0) goto err; intf->needs_altsetting0 = 0; } error = driver->probe(intf, id); if (error) goto err; intf->condition = USB_INTERFACE_BOUND; /* If the LPM disable succeeded, balance the ref counts. */ if (!lpm_disable_error) usb_unlocked_enable_lpm(udev); usb_autosuspend_device(udev); return error; err: usb_set_intfdata(intf, NULL); intf->needs_remote_wakeup = 0; intf->condition = USB_INTERFACE_UNBOUND; /* If the LPM disable succeeded, balance the ref counts. 
*/ if (!lpm_disable_error) usb_unlocked_enable_lpm(udev); /* Unbound interfaces are always runtime-PM-disabled and -suspended */ if (driver->supports_autosuspend) pm_runtime_disable(dev); pm_runtime_set_suspended(dev); usb_autosuspend_device(udev); return error; } /* called from driver core with dev locked */ static int usb_unbind_interface(struct device *dev) { struct usb_driver *driver = to_usb_driver(dev->driver); struct usb_interface *intf = to_usb_interface(dev); struct usb_host_endpoint *ep, **eps = NULL; struct usb_device *udev; int i, j, error, r; int lpm_disable_error = -ENODEV; intf->condition = USB_INTERFACE_UNBINDING; /* Autoresume for set_interface call below */ udev = interface_to_usbdev(intf); error = usb_autoresume_device(udev); /* If hub-initiated LPM policy may change, attempt to disable LPM until * the driver is unbound. If LPM isn't disabled, that's fine because it * wouldn't be enabled unless all the bound interfaces supported * hub-initiated LPM. */ if (driver->disable_hub_initiated_lpm) lpm_disable_error = usb_unlocked_disable_lpm(udev); /* * Terminate all URBs for this interface unless the driver * supports "soft" unbinding and the device is still present. */ if (!driver->soft_unbind || udev->state == USB_STATE_NOTATTACHED) usb_disable_interface(udev, intf, false); driver->disconnect(intf); /* Free streams */ for (i = 0, j = 0; i < intf->cur_altsetting->desc.bNumEndpoints; i++) { ep = &intf->cur_altsetting->endpoint[i]; if (ep->streams == 0) continue; if (j == 0) { eps = kmalloc_array(USB_MAXENDPOINTS, sizeof(void *), GFP_KERNEL); if (!eps) break; } eps[j++] = ep; } if (j) { usb_free_streams(intf, eps, j, GFP_KERNEL); kfree(eps); } /* Reset other interface state. * We cannot do a Set-Interface if the device is suspended or * if it is prepared for a system sleep (since installing a new * altsetting means creating new endpoint device entries). * When either of these happens, defer the Set-Interface. */ if (intf->cur_altsetting->desc.bAlternateSetting == 0) { /* Already in altsetting 0 so skip Set-Interface. * Just re-enable it without affecting the endpoint toggles. */ usb_enable_interface(udev, intf, false); } else if (!error && !intf->dev.power.is_prepared) { r = usb_set_interface(udev, intf->altsetting[0]. desc.bInterfaceNumber, 0); if (r < 0) intf->needs_altsetting0 = 1; } else { intf->needs_altsetting0 = 1; } usb_set_intfdata(intf, NULL); intf->condition = USB_INTERFACE_UNBOUND; intf->needs_remote_wakeup = 0; /* Attempt to re-enable USB3 LPM, if the disable succeeded. */ if (!lpm_disable_error) usb_unlocked_enable_lpm(udev); /* Unbound interfaces are always runtime-PM-disabled and -suspended */ if (driver->supports_autosuspend) pm_runtime_disable(dev); pm_runtime_set_suspended(dev); if (!error) usb_autosuspend_device(udev); return 0; } static void usb_shutdown_interface(struct device *dev) { struct usb_interface *intf = to_usb_interface(dev); struct usb_driver *driver; if (!dev->driver) return; driver = to_usb_driver(dev->driver); if (driver->shutdown) driver->shutdown(intf); } /** * usb_driver_claim_interface - bind a driver to an interface * @driver: the driver to be bound * @iface: the interface to which it will be bound; must be in the * usb device's active configuration * @data: driver data associated with that interface * * This is used by usb device drivers that need to claim more than one * interface on a device when probing (audio and acm are current examples). 
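 *
 * As an illustrative sketch only (my_driver, sibling and priv are
 * hypothetical names, not from any in-tree driver), a probe() routine
 * that is bound to one interface might claim a companion interface:
 *
 *	struct usb_device *udev = interface_to_usbdev(intf);
 *	struct usb_interface *sibling = usb_ifnum_to_if(udev, 1);
 *	int err;
 *
 *	if (sibling) {
 *		err = usb_driver_claim_interface(&my_driver, sibling, priv);
 *		if (err)
 *			return err;
 *	}
 *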
* No device driver should directly modify internal usb_interface or * usb_device structure members. * * Callers must own the device lock, so driver probe() entries don't need * extra locking, but other call contexts may need to explicitly claim that * lock. * * Return: 0 on success. */ int usb_driver_claim_interface(struct usb_driver *driver, struct usb_interface *iface, void *data) { struct device *dev; int retval = 0; if (!iface) return -ENODEV; dev = &iface->dev; if (dev->driver) return -EBUSY; /* reject claim if interface is not authorized */ if (!iface->authorized) return -ENODEV; dev->driver = &driver->driver; usb_set_intfdata(iface, data); iface->needs_binding = 0; iface->condition = USB_INTERFACE_BOUND; /* Claimed interfaces are initially inactive (suspended) and * runtime-PM-enabled, but only if the driver has autosuspend * support. Otherwise they are marked active, to prevent the * device from being autosuspended, but left disabled. In either * case they are sensitive to their children's power states. */ pm_suspend_ignore_children(dev, false); if (driver->supports_autosuspend) pm_runtime_enable(dev); else pm_runtime_set_active(dev); /* if interface was already added, bind now; else let * the future device_add() bind it, bypassing probe() */ if (device_is_registered(dev)) retval = device_bind_driver(dev); if (retval) { dev->driver = NULL; usb_set_intfdata(iface, NULL); iface->needs_remote_wakeup = 0; iface->condition = USB_INTERFACE_UNBOUND; /* * Unbound interfaces are always runtime-PM-disabled * and runtime-PM-suspended */ if (driver->supports_autosuspend) pm_runtime_disable(dev); pm_runtime_set_suspended(dev); } return retval; } EXPORT_SYMBOL_GPL(usb_driver_claim_interface); /** * usb_driver_release_interface - unbind a driver from an interface * @driver: the driver to be unbound * @iface: the interface from which it will be unbound * * This can be used by drivers to release an interface without waiting * for their disconnect() methods to be called. In typical cases this * also causes the driver disconnect() method to be called. * * This call is synchronous, and may not be used in an interrupt context. * Callers must own the device lock, so driver disconnect() entries don't * need extra locking, but other call contexts may need to explicitly claim * that lock. */ void usb_driver_release_interface(struct usb_driver *driver, struct usb_interface *iface) { struct device *dev = &iface->dev; /* this should never happen, don't release something that's not ours */ if (!dev->driver || dev->driver != &driver->driver) return; /* don't release from within disconnect() */ if (iface->condition != USB_INTERFACE_BOUND) return; iface->condition = USB_INTERFACE_UNBINDING; /* Release via the driver core only if the interface * has already been registered */ if (device_is_registered(dev)) { device_release_driver(dev); } else { device_lock(dev); usb_unbind_interface(dev); dev->driver = NULL; device_unlock(dev); } } EXPORT_SYMBOL_GPL(usb_driver_release_interface); /* returns 0 if no match, 1 if match */ int usb_match_device(struct usb_device *dev, const struct usb_device_id *id) { if ((id->match_flags & USB_DEVICE_ID_MATCH_VENDOR) && id->idVendor != le16_to_cpu(dev->descriptor.idVendor)) return 0; if ((id->match_flags & USB_DEVICE_ID_MATCH_PRODUCT) && id->idProduct != le16_to_cpu(dev->descriptor.idProduct)) return 0; /* No need to test id->bcdDevice_lo != 0, since 0 is never greater than any unsigned number. 
*/ if ((id->match_flags & USB_DEVICE_ID_MATCH_DEV_LO) && (id->bcdDevice_lo > le16_to_cpu(dev->descriptor.bcdDevice))) return 0; if ((id->match_flags & USB_DEVICE_ID_MATCH_DEV_HI) && (id->bcdDevice_hi < le16_to_cpu(dev->descriptor.bcdDevice))) return 0; if ((id->match_flags & USB_DEVICE_ID_MATCH_DEV_CLASS) && (id->bDeviceClass != dev->descriptor.bDeviceClass)) return 0; if ((id->match_flags & USB_DEVICE_ID_MATCH_DEV_SUBCLASS) && (id->bDeviceSubClass != dev->descriptor.bDeviceSubClass)) return 0; if ((id->match_flags & USB_DEVICE_ID_MATCH_DEV_PROTOCOL) && (id->bDeviceProtocol != dev->descriptor.bDeviceProtocol)) return 0; return 1; } /* returns 0 if no match, 1 if match */ int usb_match_one_id_intf(struct usb_device *dev, struct usb_host_interface *intf, const struct usb_device_id *id) { /* The interface class, subclass, protocol and number should never be * checked for a match if the device class is Vendor Specific, * unless the match record specifies the Vendor ID. */ if (dev->descriptor.bDeviceClass == USB_CLASS_VENDOR_SPEC && !(id->match_flags & USB_DEVICE_ID_MATCH_VENDOR) && (id->match_flags & (USB_DEVICE_ID_MATCH_INT_CLASS | USB_DEVICE_ID_MATCH_INT_SUBCLASS | USB_DEVICE_ID_MATCH_INT_PROTOCOL | USB_DEVICE_ID_MATCH_INT_NUMBER))) return 0; if ((id->match_flags & USB_DEVICE_ID_MATCH_INT_CLASS) && (id->bInterfaceClass != intf->desc.bInterfaceClass)) return 0; if ((id->match_flags & USB_DEVICE_ID_MATCH_INT_SUBCLASS) && (id->bInterfaceSubClass != intf->desc.bInterfaceSubClass)) return 0; if ((id->match_flags & USB_DEVICE_ID_MATCH_INT_PROTOCOL) && (id->bInterfaceProtocol != intf->desc.bInterfaceProtocol)) return 0; if ((id->match_flags & USB_DEVICE_ID_MATCH_INT_NUMBER) && (id->bInterfaceNumber != intf->desc.bInterfaceNumber)) return 0; return 1; } /* returns 0 if no match, 1 if match */ int usb_match_one_id(struct usb_interface *interface, const struct usb_device_id *id) { struct usb_host_interface *intf; struct usb_device *dev; /* proc_connectinfo in devio.c may call us with id == NULL. */ if (id == NULL) return 0; intf = interface->cur_altsetting; dev = interface_to_usbdev(interface); if (!usb_match_device(dev, id)) return 0; return usb_match_one_id_intf(dev, intf, id); } EXPORT_SYMBOL_GPL(usb_match_one_id); /** * usb_match_id - find first usb_device_id matching device or interface * @interface: the interface of interest * @id: array of usb_device_id structures, terminated by zero entry * * usb_match_id searches an array of usb_device_id's and returns * the first one matching the device or interface, or null. * This is used when binding (or rebinding) a driver to an interface. * Most USB device drivers will use this indirectly, through the usb core, * but some layered driver frameworks use it directly. * These device tables are exported with MODULE_DEVICE_TABLE, through * modutils, to support the driver loading functionality of USB hotplugging. * * Return: The first matching usb_device_id, or %NULL. * * What Matches: * * The "match_flags" element in a usb_device_id controls which * members are used. If the corresponding bit is set, the * value in the device_id must match its corresponding member * in the device or interface descriptor, or else the device_id * does not match. * * "driver_info" is normally used only by device drivers, * but you can create a wildcard "matches anything" usb_device_id * as a driver's "modules.usbmap" entry if you provide an id with * only a nonzero "driver_info" field. 
If you do this, the USB device * driver's probe() routine should use additional intelligence to * decide whether to bind to the specified interface. * * What Makes Good usb_device_id Tables: * * The match algorithm is very simple, so that intelligence in * driver selection must come from smart driver id records. * Unless you have good reasons to use another selection policy, * provide match elements only in related groups, and order match * specifiers from specific to general. Use the macros provided * for that purpose if you can. * * The most specific match specifiers use device descriptor * data. These are commonly used with product-specific matches; * the USB_DEVICE macro lets you provide vendor and product IDs, * and you can also match against ranges of product revisions. * These are widely used for devices with application or vendor * specific bDeviceClass values. * * Matches based on device class/subclass/protocol specifications * are slightly more general; use the USB_DEVICE_INFO macro, or * its siblings. These are used with single-function devices * where bDeviceClass doesn't specify that each interface has * its own class. * * Matches based on interface class/subclass/protocol are the * most general; they let drivers bind to any interface on a * multiple-function device. Use the USB_INTERFACE_INFO * macro, or its siblings, to match class-per-interface style * devices (as recorded in bInterfaceClass). * * Note that an entry created by USB_INTERFACE_INFO won't match * any interface if the device class is set to Vendor-Specific. * This is deliberate; according to the USB spec the meanings of * the interface class/subclass/protocol for these devices are also * vendor-specific, and hence matching against a standard product * class wouldn't work anyway. If you really want to use an * interface-based match for such a device, create a match record * that also specifies the vendor ID. (Unfortunately there isn't a * standard macro for creating records like this.) * * Within those groups, remember that not all combinations are * meaningful. For example, don't give a product version range * without vendor and product IDs; or specify a protocol without * its associated class and subclass. */ const struct usb_device_id *usb_match_id(struct usb_interface *interface, const struct usb_device_id *id) { /* proc_connectinfo in devio.c may call us with id == NULL. */ if (id == NULL) return NULL; /* It is important to check that id->driver_info is nonzero, since an entry that is all zeroes except for a nonzero id->driver_info is the way to create an entry that indicates that the driver wants to examine every device and interface.
*/ for (; id->idVendor || id->idProduct || id->bDeviceClass || id->bInterfaceClass || id->driver_info; id++) { if (usb_match_one_id(interface, id)) return id; } return NULL; } EXPORT_SYMBOL_GPL(usb_match_id); const struct usb_device_id *usb_device_match_id(struct usb_device *udev, const struct usb_device_id *id) { if (!id) return NULL; for (; id->idVendor || id->idProduct ; id++) { if (usb_match_device(udev, id)) return id; } return NULL; } EXPORT_SYMBOL_GPL(usb_device_match_id); bool usb_driver_applicable(struct usb_device *udev, const struct usb_device_driver *udrv) { if (udrv->id_table && udrv->match) return usb_device_match_id(udev, udrv->id_table) != NULL && udrv->match(udev); if (udrv->id_table) return usb_device_match_id(udev, udrv->id_table) != NULL; if (udrv->match) return udrv->match(udev); return false; } static int usb_device_match(struct device *dev, const struct device_driver *drv) { /* devices and interfaces are handled separately */ if (is_usb_device(dev)) { struct usb_device *udev; const struct usb_device_driver *udrv; /* interface drivers never match devices */ if (!is_usb_device_driver(drv)) return 0; udev = to_usb_device(dev); udrv = to_usb_device_driver(drv); /* If the device driver under consideration does not have a * id_table or a match function, then let the driver's probe * function decide. */ if (!udrv->id_table && !udrv->match) return 1; return usb_driver_applicable(udev, udrv); } else if (is_usb_interface(dev)) { struct usb_interface *intf; const struct usb_driver *usb_drv; const struct usb_device_id *id; /* device drivers never match interfaces */ if (is_usb_device_driver(drv)) return 0; intf = to_usb_interface(dev); usb_drv = to_usb_driver(drv); id = usb_match_id(intf, usb_drv->id_table); if (id) return 1; id = usb_match_dynamic_id(intf, usb_drv); if (id) return 1; } return 0; } static int usb_uevent(const struct device *dev, struct kobj_uevent_env *env) { const struct usb_device *usb_dev; if (is_usb_device(dev)) { usb_dev = to_usb_device(dev); } else if (is_usb_interface(dev)) { const struct usb_interface *intf = to_usb_interface(dev); usb_dev = interface_to_usbdev(intf); } else { return 0; } if (usb_dev->devnum < 0) { /* driver is often null here; dev_dbg() would oops */ pr_debug("usb %s: already deleted?\n", dev_name(dev)); return -ENODEV; } if (!usb_dev->bus) { pr_debug("usb %s: bus removed?\n", dev_name(dev)); return -ENODEV; } /* per-device configurations are common */ if (add_uevent_var(env, "PRODUCT=%x/%x/%x", le16_to_cpu(usb_dev->descriptor.idVendor), le16_to_cpu(usb_dev->descriptor.idProduct), le16_to_cpu(usb_dev->descriptor.bcdDevice))) return -ENOMEM; /* class-based driver binding models */ if (add_uevent_var(env, "TYPE=%d/%d/%d", usb_dev->descriptor.bDeviceClass, usb_dev->descriptor.bDeviceSubClass, usb_dev->descriptor.bDeviceProtocol)) return -ENOMEM; return 0; } static int __usb_bus_reprobe_drivers(struct device *dev, void *data) { struct usb_device_driver *new_udriver = data; struct usb_device *udev; int ret; /* Don't reprobe if current driver isn't usb_generic_driver */ if (dev->driver != &usb_generic_driver.driver) return 0; udev = to_usb_device(dev); if (!usb_driver_applicable(udev, new_udriver)) return 0; ret = device_reprobe(dev); if (ret && ret != -EPROBE_DEFER) dev_err(dev, "Failed to reprobe device (error %d)\n", ret); return 0; } bool is_usb_device_driver(const struct device_driver *drv) { return drv->probe == usb_probe_device; } /** * usb_register_device_driver - register a USB device (not interface) driver * @new_udriver: USB 
operations for the device driver * @owner: module owner of this driver. * * Registers a USB device driver with the USB core. The list of * unattached devices will be rescanned whenever a new driver is * added, allowing the new driver to attach to any recognized devices. * * Return: A negative error code on failure and 0 on success. */ int usb_register_device_driver(struct usb_device_driver *new_udriver, struct module *owner) { int retval = 0; if (usb_disabled()) return -ENODEV; new_udriver->driver.name = new_udriver->name; new_udriver->driver.bus = &usb_bus_type; new_udriver->driver.probe = usb_probe_device; new_udriver->driver.remove = usb_unbind_device; new_udriver->driver.owner = owner; new_udriver->driver.dev_groups = new_udriver->dev_groups; retval = driver_register(&new_udriver->driver); if (!retval) { pr_info("%s: registered new device driver %s\n", usbcore_name, new_udriver->name); /* * Check whether any device could be better served with * this new driver */ bus_for_each_dev(&usb_bus_type, NULL, new_udriver, __usb_bus_reprobe_drivers); } else { pr_err("%s: error %d registering device driver %s\n", usbcore_name, retval, new_udriver->name); } return retval; } EXPORT_SYMBOL_GPL(usb_register_device_driver); /** * usb_deregister_device_driver - unregister a USB device (not interface) driver * @udriver: USB operations of the device driver to unregister * Context: must be able to sleep * * Unlinks the specified driver from the internal USB driver list. */ void usb_deregister_device_driver(struct usb_device_driver *udriver) { pr_info("%s: deregistering device driver %s\n", usbcore_name, udriver->name); driver_unregister(&udriver->driver); } EXPORT_SYMBOL_GPL(usb_deregister_device_driver); /** * usb_register_driver - register a USB interface driver * @new_driver: USB operations for the interface driver * @owner: module owner of this driver. * @mod_name: module name string * * Registers a USB interface driver with the USB core. The list of * unattached interfaces will be rescanned whenever a new driver is * added, allowing the new driver to attach to any recognized interfaces. * * Return: A negative error code on failure and 0 on success. * * NOTE: if you want your driver to use the USB major number, you must call * usb_register_dev() to enable that functionality. This function no longer * takes care of that. 
*/ int usb_register_driver(struct usb_driver *new_driver, struct module *owner, const char *mod_name) { int retval = 0; if (usb_disabled()) return -ENODEV; new_driver->driver.name = new_driver->name; new_driver->driver.bus = &usb_bus_type; new_driver->driver.probe = usb_probe_interface; new_driver->driver.remove = usb_unbind_interface; new_driver->driver.shutdown = usb_shutdown_interface; new_driver->driver.owner = owner; new_driver->driver.mod_name = mod_name; new_driver->driver.dev_groups = new_driver->dev_groups; INIT_LIST_HEAD(&new_driver->dynids.list); retval = driver_register(&new_driver->driver); if (retval) goto out; retval = usb_create_newid_files(new_driver); if (retval) goto out_newid; pr_info("%s: registered new interface driver %s\n", usbcore_name, new_driver->name); return 0; out_newid: driver_unregister(&new_driver->driver); out: pr_err("%s: error %d registering interface driver %s\n", usbcore_name, retval, new_driver->name); return retval; } EXPORT_SYMBOL_GPL(usb_register_driver); /** * usb_deregister - unregister a USB interface driver * @driver: USB operations of the interface driver to unregister * Context: must be able to sleep * * Unlinks the specified driver from the internal USB driver list. * * NOTE: If you called usb_register_dev(), you still need to call * usb_deregister_dev() to clean up your driver's allocated minor numbers, * this * call will no longer do it for you. */ void usb_deregister(struct usb_driver *driver) { pr_info("%s: deregistering interface driver %s\n", usbcore_name, driver->name); usb_remove_newid_files(driver); driver_unregister(&driver->driver); usb_free_dynids(driver); } EXPORT_SYMBOL_GPL(usb_deregister); /* Forced unbinding of a USB interface driver, either because * it doesn't support pre_reset/post_reset/reset_resume or * because it doesn't support suspend/resume. * * The caller must hold @intf's device's lock, but not @intf's lock. */ void usb_forced_unbind_intf(struct usb_interface *intf) { struct usb_driver *driver = to_usb_driver(intf->dev.driver); dev_dbg(&intf->dev, "forced unbind\n"); usb_driver_release_interface(driver, intf); /* Mark the interface for later rebinding */ intf->needs_binding = 1; } /* * Unbind drivers for @udev's marked interfaces. These interfaces have * the needs_binding flag set, for example by usb_resume_interface(). * * The caller must hold @udev's device lock. */ static void unbind_marked_interfaces(struct usb_device *udev) { struct usb_host_config *config; int i; struct usb_interface *intf; config = udev->actconfig; if (config) { for (i = 0; i < config->desc.bNumInterfaces; ++i) { intf = config->interface[i]; if (intf->dev.driver && intf->needs_binding) usb_forced_unbind_intf(intf); } } } /* Delayed forced unbinding of a USB interface driver and scan * for rebinding. * * The caller must hold @intf's device's lock, but not @intf's lock. * * Note: Rebinds will be skipped if a system sleep transition is in * progress and the PM "complete" callback hasn't occurred yet. */ static void usb_rebind_intf(struct usb_interface *intf) { int rc; /* Delayed unbind of an existing driver */ if (intf->dev.driver) usb_forced_unbind_intf(intf); /* Try to rebind the interface */ if (!intf->dev.power.is_prepared) { intf->needs_binding = 0; rc = device_attach(&intf->dev); if (rc < 0 && rc != -EPROBE_DEFER) dev_warn(&intf->dev, "rebind failed: %d\n", rc); } } /* * Rebind drivers to @udev's marked interfaces. These interfaces have * the needs_binding flag set. * * The caller must hold @udev's device lock. 
*/ static void rebind_marked_interfaces(struct usb_device *udev) { struct usb_host_config *config; int i; struct usb_interface *intf; config = udev->actconfig; if (config) { for (i = 0; i < config->desc.bNumInterfaces; ++i) { intf = config->interface[i]; if (intf->needs_binding) usb_rebind_intf(intf); } } } /* * Unbind all of @udev's marked interfaces and then rebind all of them. * This ordering is necessary because some drivers claim several interfaces * when they are first probed. * * The caller must hold @udev's device lock. */ void usb_unbind_and_rebind_marked_interfaces(struct usb_device *udev) { unbind_marked_interfaces(udev); rebind_marked_interfaces(udev); } #ifdef CONFIG_PM /* Unbind drivers for @udev's interfaces that don't support suspend/resume * There is no check for reset_resume here because it can be determined * only during resume whether reset_resume is needed. * * The caller must hold @udev's device lock. */ static void unbind_no_pm_drivers_interfaces(struct usb_device *udev) { struct usb_host_config *config; int i; struct usb_interface *intf; struct usb_driver *drv; config = udev->actconfig; if (config) { for (i = 0; i < config->desc.bNumInterfaces; ++i) { intf = config->interface[i]; if (intf->dev.driver) { drv = to_usb_driver(intf->dev.driver); if (!drv->suspend || !drv->resume) usb_forced_unbind_intf(intf); } } } } static int usb_suspend_device(struct usb_device *udev, pm_message_t msg) { struct usb_device_driver *udriver; int status = 0; if (udev->state == USB_STATE_NOTATTACHED || udev->state == USB_STATE_SUSPENDED) goto done; /* For devices that don't have a driver, we do a generic suspend. */ if (udev->dev.driver) udriver = to_usb_device_driver(udev->dev.driver); else { udev->do_remote_wakeup = 0; udriver = &usb_generic_driver; } if (udriver->suspend) status = udriver->suspend(udev, msg); if (status == 0 && udriver->generic_subclass) status = usb_generic_driver_suspend(udev, msg); done: dev_vdbg(&udev->dev, "%s: status %d\n", __func__, status); return status; } static int usb_resume_device(struct usb_device *udev, pm_message_t msg) { struct usb_device_driver *udriver; int status = 0; if (udev->state == USB_STATE_NOTATTACHED) goto done; /* Can't resume it if it doesn't have a driver. */ if (udev->dev.driver == NULL) { status = -ENOTCONN; goto done; } /* Non-root devices on a full/low-speed bus must wait for their * companion high-speed root hub, in case a handoff is needed. 
*/ if (!PMSG_IS_AUTO(msg) && udev->parent && udev->bus->hs_companion) device_pm_wait_for_dev(&udev->dev, &udev->bus->hs_companion->root_hub->dev); if (udev->quirks & USB_QUIRK_RESET_RESUME) udev->reset_resume = 1; udriver = to_usb_device_driver(udev->dev.driver); if (udriver->generic_subclass) status = usb_generic_driver_resume(udev, msg); if (status == 0 && udriver->resume) status = udriver->resume(udev, msg); done: dev_vdbg(&udev->dev, "%s: status %d\n", __func__, status); return status; } static int usb_suspend_interface(struct usb_device *udev, struct usb_interface *intf, pm_message_t msg) { struct usb_driver *driver; int status = 0; if (udev->state == USB_STATE_NOTATTACHED || intf->condition == USB_INTERFACE_UNBOUND) goto done; driver = to_usb_driver(intf->dev.driver); /* at this time we know the driver supports suspend */ status = driver->suspend(intf, msg); if (status && !PMSG_IS_AUTO(msg)) dev_err(&intf->dev, "suspend error %d\n", status); done: dev_vdbg(&intf->dev, "%s: status %d\n", __func__, status); return status; } static int usb_resume_interface(struct usb_device *udev, struct usb_interface *intf, pm_message_t msg, int reset_resume) { struct usb_driver *driver; int status = 0; if (udev->state == USB_STATE_NOTATTACHED) goto done; /* Don't let autoresume interfere with unbinding */ if (intf->condition == USB_INTERFACE_UNBINDING) goto done; /* Can't resume it if it doesn't have a driver. */ if (intf->condition == USB_INTERFACE_UNBOUND) { /* Carry out a deferred switch to altsetting 0 */ if (intf->needs_altsetting0 && !intf->dev.power.is_prepared) { usb_set_interface(udev, intf->altsetting[0]. desc.bInterfaceNumber, 0); intf->needs_altsetting0 = 0; } goto done; } /* Don't resume if the interface is marked for rebinding */ if (intf->needs_binding) goto done; driver = to_usb_driver(intf->dev.driver); if (reset_resume) { if (driver->reset_resume) { status = driver->reset_resume(intf); if (status) dev_err(&intf->dev, "%s error %d\n", "reset_resume", status); } else { intf->needs_binding = 1; dev_dbg(&intf->dev, "no reset_resume for driver %s?\n", driver->name); } } else { status = driver->resume(intf); if (status) dev_err(&intf->dev, "resume error %d\n", status); } done: dev_vdbg(&intf->dev, "%s: status %d\n", __func__, status); /* Later we will unbind the driver and/or reprobe, if necessary */ return status; } /** * usb_suspend_both - suspend a USB device and its interfaces * @udev: the usb_device to suspend * @msg: Power Management message describing this state transition * * This is the central routine for suspending USB devices. It calls the * suspend methods for all the interface drivers in @udev and then calls * the suspend method for @udev itself. When the routine is called in * autosuspend, if an error occurs at any stage, all the interfaces * which were suspended are resumed so that they remain in the same * state as the device, but when called from system sleep, all error * from suspend methods of interfaces and the non-root-hub device itself * are simply ignored, so all suspended interfaces are only resumed * to the device's state when @udev is root-hub and its suspend method * returns failure. * * Autosuspend requests originating from a child device or an interface * driver may be made without the protection of @udev's device lock, but * all other suspend calls will hold the lock. Usbcore will insure that * method calls do not arrive during bind, unbind, or reset operations. * However drivers must be prepared to handle suspend calls arriving at * unpredictable times. 
* * This routine can run only in process context. * * Return: 0 if the suspend succeeded. */ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg) { int status = 0; int i = 0, n = 0; struct usb_interface *intf; if (udev->state == USB_STATE_NOTATTACHED || udev->state == USB_STATE_SUSPENDED) goto done; /* Suspend all the interfaces and then udev itself */ if (udev->actconfig) { n = udev->actconfig->desc.bNumInterfaces; for (i = n - 1; i >= 0; --i) { intf = udev->actconfig->interface[i]; status = usb_suspend_interface(udev, intf, msg); /* Ignore errors during system sleep transitions */ if (!PMSG_IS_AUTO(msg)) status = 0; if (status != 0) break; } } if (status == 0) { status = usb_suspend_device(udev, msg); /* * Ignore errors from non-root-hub devices during * system sleep transitions. For the most part, * these devices should go to low power anyway when * the entire bus is suspended. */ if (udev->parent && !PMSG_IS_AUTO(msg)) status = 0; /* * If the device is inaccessible, don't try to resume * suspended interfaces and just return the error. */ if (status && status != -EBUSY) { int err; u16 devstat; err = usb_get_std_status(udev, USB_RECIP_DEVICE, 0, &devstat); if (err) { dev_err(&udev->dev, "Failed to suspend device, error %d\n", status); goto done; } } } /* If the suspend failed, resume interfaces that did get suspended */ if (status != 0) { if (udev->actconfig) { msg.event ^= (PM_EVENT_SUSPEND | PM_EVENT_RESUME); while (++i < n) { intf = udev->actconfig->interface[i]; usb_resume_interface(udev, intf, msg, 0); } } /* If the suspend succeeded then prevent any more URB submissions * and flush any outstanding URBs. */ } else { udev->can_submit = 0; for (i = 0; i < 16; ++i) { usb_hcd_flush_endpoint(udev, udev->ep_out[i]); usb_hcd_flush_endpoint(udev, udev->ep_in[i]); } } done: dev_vdbg(&udev->dev, "%s: status %d\n", __func__, status); return status; } /** * usb_resume_both - resume a USB device and its interfaces * @udev: the usb_device to resume * @msg: Power Management message describing this state transition * * This is the central routine for resuming USB devices. It calls the * resume method for @udev and then calls the resume methods for all * the interface drivers in @udev. * * Autoresume requests originating from a child device or an interface * driver may be made without the protection of @udev's device lock, but * all other resume calls will hold the lock. Usbcore will insure that * method calls do not arrive during bind, unbind, or reset operations. * However drivers must be prepared to handle resume calls arriving at * unpredictable times. * * This routine can run only in process context. * * Return: 0 on success. 
*/ static int usb_resume_both(struct usb_device *udev, pm_message_t msg) { int status = 0; int i; struct usb_interface *intf; if (udev->state == USB_STATE_NOTATTACHED) { status = -ENODEV; goto done; } udev->can_submit = 1; /* Resume the device */ if (udev->state == USB_STATE_SUSPENDED || udev->reset_resume) status = usb_resume_device(udev, msg); /* Resume the interfaces */ if (status == 0 && udev->actconfig) { for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) { intf = udev->actconfig->interface[i]; usb_resume_interface(udev, intf, msg, udev->reset_resume); } } usb_mark_last_busy(udev); done: dev_vdbg(&udev->dev, "%s: status %d\n", __func__, status); if (!status) udev->reset_resume = 0; return status; } static void choose_wakeup(struct usb_device *udev, pm_message_t msg) { int w; /* * For FREEZE/QUIESCE, disable remote wakeups so no interrupts get * generated. */ if (msg.event == PM_EVENT_FREEZE || msg.event == PM_EVENT_QUIESCE) { w = 0; } else { /* * Enable remote wakeup if it is allowed, even if no interface * drivers actually want it. */ w = device_may_wakeup(&udev->dev); } /* * If the device is autosuspended with the wrong wakeup setting, * autoresume now so the setting can be changed. */ if (udev->state == USB_STATE_SUSPENDED && w != udev->do_remote_wakeup) pm_runtime_resume(&udev->dev); udev->do_remote_wakeup = w; } /* The device lock is held by the PM core */ int usb_suspend(struct device *dev, pm_message_t msg) { struct usb_device *udev = to_usb_device(dev); int r; unbind_no_pm_drivers_interfaces(udev); /* From now on we are sure all drivers support suspend/resume * but not necessarily reset_resume() * so we may still need to unbind and rebind upon resume */ choose_wakeup(udev, msg); r = usb_suspend_both(udev, msg); if (r) return r; if (udev->quirks & USB_QUIRK_DISCONNECT_SUSPEND) usb_port_disable(udev); return 0; } /* The device lock is held by the PM core */ int usb_resume_complete(struct device *dev) { struct usb_device *udev = to_usb_device(dev); /* For PM complete calls, all we do is rebind interfaces * whose needs_binding flag is set */ if (udev->state != USB_STATE_NOTATTACHED) rebind_marked_interfaces(udev); return 0; } /* The device lock is held by the PM core */ int usb_resume(struct device *dev, pm_message_t msg) { struct usb_device *udev = to_usb_device(dev); int status; /* For all calls, take the device back to full power and * tell the PM core in case it was autosuspended previously. * Unbind the interfaces that will need rebinding later, * because they fail to support reset_resume. * (This can't be done in usb_resume_interface() * above because it doesn't own the right set of locks.) */ status = usb_resume_both(udev, msg); if (status == 0) { pm_runtime_disable(dev); pm_runtime_set_active(dev); pm_runtime_enable(dev); unbind_marked_interfaces(udev); } /* Avoid PM error messages for devices disconnected while suspended * as we'll display regular disconnect messages just a bit later. */ if (status == -ENODEV || status == -ESHUTDOWN) status = 0; return status; } /** * usb_enable_autosuspend - allow a USB device to be autosuspended * @udev: the USB device which may be autosuspended * * This routine allows @udev to be autosuspended. An autosuspend won't * take place until the autosuspend_delay has elapsed and all the other * necessary conditions are satisfied. * * The caller must hold @udev's device lock. 
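 *
 * As a sketch (not from an in-tree driver): a driver whose hardware is
 * known to suspend safely might opt in from its probe() routine, where
 * the device lock is already held:
 *
 *	usb_enable_autosuspend(interface_to_usbdev(intf));
 *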
*/ void usb_enable_autosuspend(struct usb_device *udev) { pm_runtime_allow(&udev->dev); } EXPORT_SYMBOL_GPL(usb_enable_autosuspend); /** * usb_disable_autosuspend - prevent a USB device from being autosuspended * @udev: the USB device which may not be autosuspended * * This routine prevents @udev from being autosuspended and wakes it up * if it is already autosuspended. * * The caller must hold @udev's device lock. */ void usb_disable_autosuspend(struct usb_device *udev) { pm_runtime_forbid(&udev->dev); } EXPORT_SYMBOL_GPL(usb_disable_autosuspend); /** * usb_autosuspend_device - delayed autosuspend of a USB device and its interfaces * @udev: the usb_device to autosuspend * * This routine should be called when a core subsystem is finished using * @udev and wants to allow it to autosuspend. Examples would be when * @udev's device file in usbfs is closed or after a configuration change. * * @udev's usage counter is decremented; if it drops to 0 and all the * interfaces are inactive then a delayed autosuspend will be attempted. * The attempt may fail (see autosuspend_check()). * * The caller must hold @udev's device lock. * * This routine can run only in process context. */ void usb_autosuspend_device(struct usb_device *udev) { int status; usb_mark_last_busy(udev); status = pm_runtime_put_sync_autosuspend(&udev->dev); dev_vdbg(&udev->dev, "%s: cnt %d -> %d\n", __func__, atomic_read(&udev->dev.power.usage_count), status); } /** * usb_autoresume_device - immediately autoresume a USB device and its interfaces * @udev: the usb_device to autoresume * * This routine should be called when a core subsystem wants to use @udev * and needs to guarantee that it is not suspended. No autosuspend will * occur until usb_autosuspend_device() is called. (Note that this will * not prevent suspend events originating in the PM core.) Examples would * be when @udev's device file in usbfs is opened or when a remote-wakeup * request is received. * * @udev's usage counter is incremented to prevent subsequent autosuspends. * However if the autoresume fails then the usage counter is re-decremented. * * The caller must hold @udev's device lock. * * This routine can run only in process context. * * Return: 0 on success. A negative error code otherwise. */ int usb_autoresume_device(struct usb_device *udev) { int status; status = pm_runtime_resume_and_get(&udev->dev); dev_vdbg(&udev->dev, "%s: cnt %d -> %d\n", __func__, atomic_read(&udev->dev.power.usage_count), status); if (status > 0) status = 0; return status; } /** * usb_autopm_put_interface - decrement a USB interface's PM-usage counter * @intf: the usb_interface whose counter should be decremented * * This routine should be called by an interface driver when it is * finished using @intf and wants to allow it to autosuspend. A typical * example would be a character-device driver when its device file is * closed. * * The routine decrements @intf's usage counter. When the counter reaches * 0, a delayed autosuspend request for @intf's device is attempted. The * attempt may fail (see autosuspend_check()). * * This routine can run only in process context. 
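 *
 * A typical pairing, sketched after the usb-skeleton example (the
 * skel_* and dev->interface names are placeholders): open() takes the
 * PM reference, release() drops it:
 *
 *	static int skel_open(struct inode *inode, struct file *file)
 *	{
 *		...
 *		retval = usb_autopm_get_interface(dev->interface);
 *		if (retval)
 *			goto exit;
 *		...
 *	}
 *
 *	static int skel_release(struct inode *inode, struct file *file)
 *	{
 *		...
 *		usb_autopm_put_interface(dev->interface);
 *		return 0;
 *	}
 *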
*/ void usb_autopm_put_interface(struct usb_interface *intf) { struct usb_device *udev = interface_to_usbdev(intf); int status; usb_mark_last_busy(udev); status = pm_runtime_put_sync(&intf->dev); dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n", __func__, atomic_read(&intf->dev.power.usage_count), status); } EXPORT_SYMBOL_GPL(usb_autopm_put_interface); /** * usb_autopm_put_interface_async - decrement a USB interface's PM-usage counter * @intf: the usb_interface whose counter should be decremented * * This routine does much the same thing as usb_autopm_put_interface(): * It decrements @intf's usage counter and schedules a delayed * autosuspend request if the counter is <= 0. The difference is that it * does not perform any synchronization; callers should hold a private * lock and handle all synchronization issues themselves. * * Typically a driver would call this routine during an URB's completion * handler, if no more URBs were pending. * * This routine can run in atomic context. */ void usb_autopm_put_interface_async(struct usb_interface *intf) { struct usb_device *udev = interface_to_usbdev(intf); int status; usb_mark_last_busy(udev); status = pm_runtime_put(&intf->dev); dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n", __func__, atomic_read(&intf->dev.power.usage_count), status); } EXPORT_SYMBOL_GPL(usb_autopm_put_interface_async); /** * usb_autopm_put_interface_no_suspend - decrement a USB interface's PM-usage counter * @intf: the usb_interface whose counter should be decremented * * This routine decrements @intf's usage counter but does not carry out an * autosuspend. * * This routine can run in atomic context. */ void usb_autopm_put_interface_no_suspend(struct usb_interface *intf) { struct usb_device *udev = interface_to_usbdev(intf); usb_mark_last_busy(udev); pm_runtime_put_noidle(&intf->dev); } EXPORT_SYMBOL_GPL(usb_autopm_put_interface_no_suspend); /** * usb_autopm_get_interface - increment a USB interface's PM-usage counter * @intf: the usb_interface whose counter should be incremented * * This routine should be called by an interface driver when it wants to * use @intf and needs to guarantee that it is not suspended. In addition, * the routine prevents @intf from being autosuspended subsequently. (Note * that this will not prevent suspend events originating in the PM core.) * This prevention will persist until usb_autopm_put_interface() is called * or @intf is unbound. A typical example would be a character-device * driver when its device file is opened. * * @intf's usage counter is incremented to prevent subsequent autosuspends. * However if the autoresume fails then the counter is re-decremented. * * This routine can run only in process context. * * Return: 0 on success. */ int usb_autopm_get_interface(struct usb_interface *intf) { int status; status = pm_runtime_resume_and_get(&intf->dev); dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n", __func__, atomic_read(&intf->dev.power.usage_count), status); if (status > 0) status = 0; return status; } EXPORT_SYMBOL_GPL(usb_autopm_get_interface); /** * usb_autopm_get_interface_async - increment a USB interface's PM-usage counter * @intf: the usb_interface whose counter should be incremented * * This routine does much the same thing as * usb_autopm_get_interface(): It increments @intf's usage counter and * queues an autoresume request if the device is suspended. 
The * differences are that it does not perform any synchronization (callers * should hold a private lock and handle all synchronization issues * themselves), and it does not autoresume the device directly (it only * queues a request). After a successful call, the device may not yet be * resumed. * * This routine can run in atomic context. * * Return: 0 on success. A negative error code otherwise. */ int usb_autopm_get_interface_async(struct usb_interface *intf) { int status; status = pm_runtime_get(&intf->dev); if (status < 0 && status != -EINPROGRESS) pm_runtime_put_noidle(&intf->dev); dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n", __func__, atomic_read(&intf->dev.power.usage_count), status); if (status > 0 || status == -EINPROGRESS) status = 0; return status; } EXPORT_SYMBOL_GPL(usb_autopm_get_interface_async); /** * usb_autopm_get_interface_no_resume - increment a USB interface's PM-usage counter * @intf: the usb_interface whose counter should be incremented * * This routine increments @intf's usage counter but does not carry out an * autoresume. * * This routine can run in atomic context. */ void usb_autopm_get_interface_no_resume(struct usb_interface *intf) { struct usb_device *udev = interface_to_usbdev(intf); usb_mark_last_busy(udev); pm_runtime_get_noresume(&intf->dev); } EXPORT_SYMBOL_GPL(usb_autopm_get_interface_no_resume); /* Internal routine to check whether we may autosuspend a device. */ static int autosuspend_check(struct usb_device *udev) { int w, i; struct usb_interface *intf; if (udev->state == USB_STATE_NOTATTACHED) return -ENODEV; /* Fail if autosuspend is disabled, or any interfaces are in use, or * any interface drivers require remote wakeup but it isn't available. */ w = 0; if (udev->actconfig) { for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) { intf = udev->actconfig->interface[i]; /* We don't need to check interfaces that are * disabled for runtime PM. Either they are unbound * or else their drivers don't support autosuspend * and so they are permanently active. */ if (intf->dev.power.disable_depth) continue; if (atomic_read(&intf->dev.power.usage_count) > 0) return -EBUSY; w |= intf->needs_remote_wakeup; /* Don't allow autosuspend if the device will need * a reset-resume and any of its interface drivers * doesn't include support or needs remote wakeup. */ if (udev->quirks & USB_QUIRK_RESET_RESUME) { struct usb_driver *driver; driver = to_usb_driver(intf->dev.driver); if (!driver->reset_resume || intf->needs_remote_wakeup) return -EOPNOTSUPP; } } } if (w && !device_can_wakeup(&udev->dev)) { dev_dbg(&udev->dev, "remote wakeup needed for autosuspend\n"); return -EOPNOTSUPP; } /* * If the device is a direct child of the root hub and the HCD * doesn't handle wakeup requests, don't allow autosuspend when * wakeup is needed. */ if (w && udev->parent == udev->bus->root_hub && bus_to_hcd(udev->bus)->cant_recv_wakeups) { dev_dbg(&udev->dev, "HCD doesn't handle wakeup requests\n"); return -EOPNOTSUPP; } udev->do_remote_wakeup = w; return 0; } int usb_runtime_suspend(struct device *dev) { struct usb_device *udev = to_usb_device(dev); int status; /* A USB device can be suspended if it passes the various autosuspend * checks. Runtime suspend for a USB device means suspending all the * interfaces and then the device itself. 
*/ if (autosuspend_check(udev) != 0) return -EAGAIN; status = usb_suspend_both(udev, PMSG_AUTO_SUSPEND); /* Allow a retry if autosuspend failed temporarily */ if (status == -EAGAIN || status == -EBUSY) usb_mark_last_busy(udev); /* * The PM core reacts badly unless the return code is 0, * -EAGAIN, or -EBUSY, so always return -EBUSY on an error * (except for root hubs, because they don't suspend through * an upstream port like other USB devices). */ if (status != 0 && udev->parent) return -EBUSY; return status; } int usb_runtime_resume(struct device *dev) { struct usb_device *udev = to_usb_device(dev); int status; /* Runtime resume for a USB device means resuming both the device * and all its interfaces. */ status = usb_resume_both(udev, PMSG_AUTO_RESUME); return status; } int usb_runtime_idle(struct device *dev) { struct usb_device *udev = to_usb_device(dev); /* An idle USB device can be suspended if it passes the various * autosuspend checks. */ if (autosuspend_check(udev) == 0) pm_runtime_autosuspend(dev); /* Tell the core not to suspend it, though. */ return -EBUSY; } static int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable) { struct usb_hcd *hcd = bus_to_hcd(udev->bus); int ret = -EPERM; if (hcd->driver->set_usb2_hw_lpm) { ret = hcd->driver->set_usb2_hw_lpm(hcd, udev, enable); if (!ret) udev->usb2_hw_lpm_enabled = enable; } return ret; } int usb_enable_usb2_hardware_lpm(struct usb_device *udev) { if (!udev->usb2_hw_lpm_capable || !udev->usb2_hw_lpm_allowed || udev->usb2_hw_lpm_enabled) return 0; return usb_set_usb2_hardware_lpm(udev, 1); } int usb_disable_usb2_hardware_lpm(struct usb_device *udev) { if (!udev->usb2_hw_lpm_enabled) return 0; return usb_set_usb2_hardware_lpm(udev, 0); } #endif /* CONFIG_PM */ const struct bus_type usb_bus_type = { .name = "usb", .match = usb_device_match, .uevent = usb_uevent, .need_parent_lock = true, };
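The registration, matching and probe helpers above are the scaffolding a minimal interface driver stands on. The sketch below pulls those pieces together; it is illustrative only, with hypothetical skel_* names and VID/PID values. module_usb_driver() simply expands to module_init()/module_exit() wrappers around usb_register_driver() and usb_deregister().

/*
 * Minimal sketch of an interface driver using the helpers above.
 * All skel_* identifiers and the 1234:5678 VID:PID are hypothetical.
 */
#include <linux/module.h>
#include <linux/usb.h>

static const struct usb_device_id skel_id_table[] = {
	{ USB_DEVICE(0x1234, 0x5678) },	/* hypothetical device */
	{ }				/* terminating all-zero entry */
};
MODULE_DEVICE_TABLE(usb, skel_id_table);

static int skel_probe(struct usb_interface *intf,
		      const struct usb_device_id *id)
{
	/* @id is whichever entry matched, static or dynamic */
	dev_info(&intf->dev, "skel bound\n");
	return 0;
}

static void skel_disconnect(struct usb_interface *intf)
{
	dev_info(&intf->dev, "skel unbound\n");
}

static struct usb_driver skel_driver = {
	.name		= "skel",
	.probe		= skel_probe,
	.disconnect	= skel_disconnect,
	.id_table	= skel_id_table,
};
module_usb_driver(skel_driver);

MODULE_LICENSE("GPL");

Once such a driver is loaded, an ID missing from its static table can still be handed to usb_store_new_id() through sysfs, e.g. "echo 1234 5679 > /sys/bus/usb/drivers/skel/new_id" (values hypothetical again), after which driver_attach() reprobes any unbound interfaces.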
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2007-2012 Nicira, Inc. */ #include <linux/netdevice.h> #include <net/genetlink.h> #include <net/netns/generic.h> #include "datapath.h" #include "vport-internal_dev.h" #include "vport-netdev.h" static void dp_detach_port_notify(struct vport *vport) { struct sk_buff *notify; struct datapath *dp; dp = vport->dp; notify = ovs_vport_cmd_build_info(vport, ovs_dp_get_net(dp), 0, 0, OVS_VPORT_CMD_DEL); ovs_dp_detach_port(vport); if (IS_ERR(notify)) { genl_set_err(&dp_vport_genl_family, ovs_dp_get_net(dp), 0, 0, PTR_ERR(notify)); return; } genlmsg_multicast_netns(&dp_vport_genl_family, ovs_dp_get_net(dp), notify, 0, 0, GFP_KERNEL); } void ovs_dp_notify_wq(struct work_struct *work) { struct ovs_net *ovs_net = container_of(work, struct ovs_net, dp_notify_work); struct datapath *dp; ovs_lock(); list_for_each_entry(dp, &ovs_net->dps, list_node) { int i; for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) { struct vport *vport; struct hlist_node *n; hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node) { if (vport->ops->type == OVS_VPORT_TYPE_INTERNAL) continue; if (!(netif_is_ovs_port(vport->dev))) dp_detach_port_notify(vport); } } } ovs_unlock(); } static int dp_device_event(struct notifier_block *unused, unsigned long event, void *ptr) { struct ovs_net *ovs_net; struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct vport *vport = NULL; if (!ovs_is_internal_dev(dev)) vport = ovs_netdev_get_vport(dev); if (!vport) return NOTIFY_DONE; if (event == NETDEV_UNREGISTER) { /* upper_dev_unlink and decrement promisc immediately */ ovs_netdev_detach_dev(vport); /* schedule vport destroy, dev_put and genl notification */ ovs_net = net_generic(dev_net(dev), ovs_net_id); queue_work(system_wq, &ovs_net->dp_notify_work); } return NOTIFY_DONE; } struct notifier_block ovs_dp_device_notifier = { .notifier_call = dp_device_event };
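ovs_dp_device_notifier above is a stock netdevice notifier. As a self-contained sketch of the same pattern (demo_* names hypothetical, not part of openvswitch), a module can watch for NETDEV_UNREGISTER events like this:

#include <linux/module.h>
#include <linux/netdevice.h>

static int demo_device_event(struct notifier_block *unused,
			     unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	/* react only to devices being torn down, as dp_device_event does */
	if (event == NETDEV_UNREGISTER)
		pr_info("demo: %s is going away\n", dev->name);
	return NOTIFY_DONE;
}

static struct notifier_block demo_notifier = {
	.notifier_call = demo_device_event,
};

static int __init demo_init(void)
{
	return register_netdevice_notifier(&demo_notifier);
}

static void __exit demo_exit(void)
{
	unregister_netdevice_notifier(&demo_notifier);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");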
// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/nfs/fs_context.c
 *
 * Copyright (C) 1992 Rick Sladkey
 * Conversion to new mount api Copyright (C) David Howells
 *
 * NFS mount handling.
 *
 * Split from fs/nfs/super.c by David Howells <dhowells@redhat.com>
 */

#include <linux/compat.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/nfs4_mount.h>

#include <net/handshake.h>

#include "nfs.h"
#include "internal.h"

#include "nfstrace.h"

#define NFSDBG_FACILITY		NFSDBG_MOUNT

#if IS_ENABLED(CONFIG_NFS_V3)
#define NFS_DEFAULT_VERSION 3
#else
#define NFS_DEFAULT_VERSION 2
#endif

#define NFS_MAX_CONNECTIONS 16

enum nfs_param {
	Opt_ac,
	Opt_acdirmax,
	Opt_acdirmin,
	Opt_acl,
	Opt_acregmax,
	Opt_acregmin,
	Opt_actimeo,
	Opt_addr,
	Opt_bg,
	Opt_bsize,
	Opt_clientaddr,
	Opt_cto,
	Opt_alignwrite,
	Opt_fatal_neterrors,
	Opt_fg,
	Opt_fscache,
	Opt_fscache_flag,
	Opt_hard,
	Opt_intr,
	Opt_local_lock,
	Opt_lock,
	Opt_lookupcache,
	Opt_migration,
	Opt_minorversion,
	Opt_mountaddr,
	Opt_mounthost,
	Opt_mountport,
	Opt_mountproto,
	Opt_mountvers,
	Opt_namelen,
	Opt_nconnect,
	Opt_max_connect,
	Opt_port,
	Opt_posix,
	Opt_proto,
	Opt_rdirplus,
	Opt_rdirplus_none,
	Opt_rdirplus_force,
	Opt_rdma,
	Opt_resvport,
	Opt_retrans,
	Opt_retry,
	Opt_rsize,
	Opt_sec,
	Opt_sharecache,
	Opt_sloppy,
	Opt_soft,
	Opt_softerr,
	Opt_softreval,
	Opt_source,
	Opt_tcp,
	Opt_timeo,
	Opt_trunkdiscovery,
	Opt_udp,
	Opt_v,
	Opt_vers,
	Opt_wsize,
	Opt_write,
	Opt_xprtsec,
	Opt_cert_serial,
	Opt_privkey_serial,
};

enum {
	Opt_fatal_neterrors_default,
	Opt_fatal_neterrors_enetunreach,
	Opt_fatal_neterrors_none,
};

static const struct constant_table nfs_param_enums_fatal_neterrors[] = {
	{ "default",		Opt_fatal_neterrors_default },
	{ "ENETDOWN:ENETUNREACH", Opt_fatal_neterrors_enetunreach },
	{ "ENETUNREACH:ENETDOWN", Opt_fatal_neterrors_enetunreach },
	{ "none",		Opt_fatal_neterrors_none },
	{}
};

enum {
	Opt_local_lock_all,
	Opt_local_lock_flock,
	Opt_local_lock_none,
	Opt_local_lock_posix,
};

static const struct constant_table nfs_param_enums_local_lock[] = {
	{ "all",	Opt_local_lock_all },
	{ "flock",	Opt_local_lock_flock },
	{ "posix",	Opt_local_lock_posix },
	{ "none",	Opt_local_lock_none },
	{}
};

enum {
	Opt_lookupcache_all,
	Opt_lookupcache_none,
	Opt_lookupcache_positive,
};

static const struct constant_table nfs_param_enums_lookupcache[] = {
	{ "all",	Opt_lookupcache_all },
	{ "none",	Opt_lookupcache_none },
	{ "pos",	Opt_lookupcache_positive },
	{ "positive",	Opt_lookupcache_positive },
	{}
};

enum {
	Opt_write_lazy,
	Opt_write_eager,
	Opt_write_wait,
};

static const struct constant_table nfs_param_enums_write[] = {
	{ "lazy",	Opt_write_lazy },
	{ "eager",	Opt_write_eager },
	{ "wait",	Opt_write_wait },
	{}
};

static const struct fs_parameter_spec nfs_fs_parameters[] = {
	fsparam_flag_no("ac",		Opt_ac),
	fsparam_u32   ("acdirmax",	Opt_acdirmax),
	fsparam_u32   ("acdirmin",	Opt_acdirmin),
	fsparam_flag_no("acl",		Opt_acl),
	fsparam_u32   ("acregmax",	Opt_acregmax),
	fsparam_u32   ("acregmin",	Opt_acregmin),
	fsparam_u32   ("actimeo",	Opt_actimeo),
	fsparam_string("addr",		Opt_addr),
	fsparam_flag  ("bg",		Opt_bg),
	fsparam_u32   ("bsize",		Opt_bsize),
	fsparam_string("clientaddr",	Opt_clientaddr),
	fsparam_flag_no("cto",		Opt_cto),
	fsparam_flag_no("alignwrite",	Opt_alignwrite),
	fsparam_enum("fatal_neterrors", Opt_fatal_neterrors,
		     nfs_param_enums_fatal_neterrors),
	fsparam_flag  ("fg",		Opt_fg),
	fsparam_flag_no("fsc",		Opt_fscache_flag),
	fsparam_string("fsc",		Opt_fscache),
	fsparam_flag  ("hard",		Opt_hard),
	__fsparam(NULL, "intr",		Opt_intr,
		  fs_param_neg_with_no|fs_param_deprecated, NULL),
	fsparam_enum  ("local_lock",	Opt_local_lock, nfs_param_enums_local_lock),
	fsparam_flag_no("lock",		Opt_lock),
	fsparam_enum  ("lookupcache",	Opt_lookupcache, nfs_param_enums_lookupcache),
	fsparam_flag_no("migration",	Opt_migration),
	fsparam_u32   ("minorversion",	Opt_minorversion),
	fsparam_string("mountaddr",	Opt_mountaddr),
	fsparam_string("mounthost",	Opt_mounthost),
	fsparam_u32   ("mountport",	Opt_mountport),
	fsparam_string("mountproto",	Opt_mountproto),
	fsparam_u32   ("mountvers",	Opt_mountvers),
	fsparam_u32   ("namlen",	Opt_namelen),
	fsparam_u32   ("nconnect",	Opt_nconnect),
	fsparam_u32   ("max_connect",	Opt_max_connect),
	fsparam_string("nfsvers",	Opt_vers),
	fsparam_u32   ("port",		Opt_port),
	fsparam_flag_no("posix",	Opt_posix),
	fsparam_string("proto",		Opt_proto),
	fsparam_flag_no("rdirplus",	Opt_rdirplus), // rdirplus|nordirplus
	fsparam_string("rdirplus",	Opt_rdirplus), // rdirplus=...
	fsparam_flag  ("rdma",		Opt_rdma),
	fsparam_flag_no("resvport",	Opt_resvport),
	fsparam_u32   ("retrans",	Opt_retrans),
	fsparam_string("retry",		Opt_retry),
	fsparam_u32   ("rsize",		Opt_rsize),
	fsparam_string("sec",		Opt_sec),
	fsparam_flag_no("sharecache",	Opt_sharecache),
	fsparam_flag  ("sloppy",	Opt_sloppy),
	fsparam_flag  ("soft",		Opt_soft),
	fsparam_flag  ("softerr",	Opt_softerr),
	fsparam_flag  ("softreval",	Opt_softreval),
	fsparam_string("source",	Opt_source),
	fsparam_flag  ("tcp",		Opt_tcp),
	fsparam_u32   ("timeo",		Opt_timeo),
	fsparam_flag_no("trunkdiscovery", Opt_trunkdiscovery),
	fsparam_flag  ("udp",		Opt_udp),
	fsparam_flag  ("v2",		Opt_v),
	fsparam_flag  ("v3",		Opt_v),
	fsparam_flag  ("v4",		Opt_v),
	fsparam_flag  ("v4.0",		Opt_v),
	fsparam_flag  ("v4.1",		Opt_v),
	fsparam_flag  ("v4.2",		Opt_v),
	fsparam_string("vers",		Opt_vers),
	fsparam_enum  ("write",		Opt_write, nfs_param_enums_write),
	fsparam_u32   ("wsize",		Opt_wsize),
	fsparam_string("xprtsec",	Opt_xprtsec),
	fsparam_s32("cert_serial",	Opt_cert_serial),
	fsparam_s32("privkey_serial",	Opt_privkey_serial),
	{}
};
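/*
 * Illustrative example (not in the original source): a text option string
 * such as "vers=4.1,proto=tcp6,sec=krb5i,nconnect=4" is split by the VFS
 * into one fs_parameter per key; fs_parse() matches each key against the
 * table above and dispatches it to nfs_fs_context_parse_param() below.
 */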
fsparam_flag ("rdma", Opt_rdma), fsparam_flag_no("resvport", Opt_resvport), fsparam_u32 ("retrans", Opt_retrans), fsparam_string("retry", Opt_retry), fsparam_u32 ("rsize", Opt_rsize), fsparam_string("sec", Opt_sec), fsparam_flag_no("sharecache", Opt_sharecache), fsparam_flag ("sloppy", Opt_sloppy), fsparam_flag ("soft", Opt_soft), fsparam_flag ("softerr", Opt_softerr), fsparam_flag ("softreval", Opt_softreval), fsparam_string("source", Opt_source), fsparam_flag ("tcp", Opt_tcp), fsparam_u32 ("timeo", Opt_timeo), fsparam_flag_no("trunkdiscovery", Opt_trunkdiscovery), fsparam_flag ("udp", Opt_udp), fsparam_flag ("v2", Opt_v), fsparam_flag ("v3", Opt_v), fsparam_flag ("v4", Opt_v), fsparam_flag ("v4.0", Opt_v), fsparam_flag ("v4.1", Opt_v), fsparam_flag ("v4.2", Opt_v), fsparam_string("vers", Opt_vers), fsparam_enum ("write", Opt_write, nfs_param_enums_write), fsparam_u32 ("wsize", Opt_wsize), fsparam_string("xprtsec", Opt_xprtsec), fsparam_s32("cert_serial", Opt_cert_serial), fsparam_s32("privkey_serial", Opt_privkey_serial), {} }; enum { Opt_vers_2, Opt_vers_3, Opt_vers_4, Opt_vers_4_0, Opt_vers_4_1, Opt_vers_4_2, }; static const struct constant_table nfs_vers_tokens[] = { { "2", Opt_vers_2 }, { "3", Opt_vers_3 }, { "4", Opt_vers_4 }, { "4.0", Opt_vers_4_0 }, { "4.1", Opt_vers_4_1 }, { "4.2", Opt_vers_4_2 }, {} }; enum { Opt_xprt_rdma, Opt_xprt_rdma6, Opt_xprt_tcp, Opt_xprt_tcp6, Opt_xprt_udp, Opt_xprt_udp6, nr__Opt_xprt }; static const struct constant_table nfs_xprt_protocol_tokens[] = { { "rdma", Opt_xprt_rdma }, { "rdma6", Opt_xprt_rdma6 }, { "tcp", Opt_xprt_tcp }, { "tcp6", Opt_xprt_tcp6 }, { "udp", Opt_xprt_udp }, { "udp6", Opt_xprt_udp6 }, {} }; enum { Opt_sec_krb5, Opt_sec_krb5i, Opt_sec_krb5p, Opt_sec_lkey, Opt_sec_lkeyi, Opt_sec_lkeyp, Opt_sec_none, Opt_sec_spkm, Opt_sec_spkmi, Opt_sec_spkmp, Opt_sec_sys, nr__Opt_sec }; static const struct constant_table nfs_secflavor_tokens[] = { { "krb5", Opt_sec_krb5 }, { "krb5i", Opt_sec_krb5i }, { "krb5p", Opt_sec_krb5p }, { "lkey", Opt_sec_lkey }, { "lkeyi", Opt_sec_lkeyi }, { "lkeyp", Opt_sec_lkeyp }, { "none", Opt_sec_none }, { "null", Opt_sec_none }, { "spkm3", Opt_sec_spkm }, { "spkm3i", Opt_sec_spkmi }, { "spkm3p", Opt_sec_spkmp }, { "sys", Opt_sec_sys }, {} }; enum { Opt_xprtsec_none, Opt_xprtsec_tls, Opt_xprtsec_mtls, nr__Opt_xprtsec }; static const struct constant_table nfs_xprtsec_policies[] = { { "none", Opt_xprtsec_none }, { "tls", Opt_xprtsec_tls }, { "mtls", Opt_xprtsec_mtls }, {} }; static const struct constant_table nfs_rdirplus_tokens[] = { { "none", Opt_rdirplus_none }, { "force", Opt_rdirplus_force }, {} }; /* * Sanity-check a server address provided by the mount command. * * Address family must be initialized, and address must not be * the ANY address for that family. */ static int nfs_verify_server_address(struct sockaddr_storage *addr) { switch (addr->ss_family) { case AF_INET: { struct sockaddr_in *sa = (struct sockaddr_in *)addr; return sa->sin_addr.s_addr != htonl(INADDR_ANY); } case AF_INET6: { struct in6_addr *sa = &((struct sockaddr_in6 *)addr)->sin6_addr; return !ipv6_addr_any(sa); } } return 0; } #ifdef CONFIG_NFS_DISABLE_UDP_SUPPORT static bool nfs_server_transport_udp_invalid(const struct nfs_fs_context *ctx) { return true; } #else static bool nfs_server_transport_udp_invalid(const struct nfs_fs_context *ctx) { if (ctx->version == 4) return true; return false; } #endif /* * Sanity check the NFS transport protocol. 
/*
 * Sanity check the NFS transport protocol.
 */
static int nfs_validate_transport_protocol(struct fs_context *fc,
					   struct nfs_fs_context *ctx)
{
	switch (ctx->nfs_server.protocol) {
	case XPRT_TRANSPORT_UDP:
		if (nfs_server_transport_udp_invalid(ctx))
			goto out_invalid_transport_udp;
		break;
	case XPRT_TRANSPORT_TCP:
	case XPRT_TRANSPORT_RDMA:
		break;
	default:
		ctx->nfs_server.protocol = XPRT_TRANSPORT_TCP;
	}

	if (ctx->xprtsec.policy != RPC_XPRTSEC_NONE)
		switch (ctx->nfs_server.protocol) {
		case XPRT_TRANSPORT_TCP:
			ctx->nfs_server.protocol = XPRT_TRANSPORT_TCP_TLS;
			break;
		default:
			goto out_invalid_xprtsec_policy;
		}

	return 0;
out_invalid_transport_udp:
	return nfs_invalf(fc, "NFS: Unsupported transport protocol udp");
out_invalid_xprtsec_policy:
	return nfs_invalf(fc, "NFS: Transport does not support xprtsec");
}

/*
 * For text based NFSv2/v3 mounts, the mount protocol transport default
 * settings should depend upon the specified NFS transport.
 */
static void nfs_set_mount_transport_protocol(struct nfs_fs_context *ctx)
{
	if (ctx->mount_server.protocol == XPRT_TRANSPORT_UDP ||
	    ctx->mount_server.protocol == XPRT_TRANSPORT_TCP)
		return;
	switch (ctx->nfs_server.protocol) {
	case XPRT_TRANSPORT_UDP:
		ctx->mount_server.protocol = XPRT_TRANSPORT_UDP;
		break;
	case XPRT_TRANSPORT_TCP:
	case XPRT_TRANSPORT_RDMA:
		ctx->mount_server.protocol = XPRT_TRANSPORT_TCP;
	}
}

/*
 * Add 'flavor' to 'auth_info' if not already present.
 * Returns 0 if 'flavor' ends up in the list, or an error if the list is
 * already full.
 */
static int nfs_auth_info_add(struct fs_context *fc,
			     struct nfs_auth_info *auth_info,
			     rpc_authflavor_t flavor)
{
	unsigned int i;
	unsigned int max_flavor_len = ARRAY_SIZE(auth_info->flavors);

	/* make sure this flavor isn't already in the list */
	for (i = 0; i < auth_info->flavor_len; i++) {
		if (flavor == auth_info->flavors[i])
			return 0;
	}

	if (auth_info->flavor_len + 1 >= max_flavor_len)
		return nfs_invalf(fc, "NFS: too many sec= flavors");

	auth_info->flavors[auth_info->flavor_len++] = flavor;
	return 0;
}
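/*
 * Illustrative example (not in the original source): parsing
 * "sec=sys:krb5:sys" calls nfs_auth_info_add() three times, but the
 * duplicate "sys" is skipped, leaving flavors = { RPC_AUTH_UNIX,
 * RPC_AUTH_GSS_KRB5 }.
 */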
/*
 * Parse the value of the 'sec=' option.
 */
static int nfs_parse_security_flavors(struct fs_context *fc,
				      struct fs_parameter *param)
{
	struct nfs_fs_context *ctx = nfs_fc2context(fc);
	rpc_authflavor_t pseudoflavor;
	char *string = param->string, *p;
	int ret;

	trace_nfs_mount_assign(param->key, string);

	while ((p = strsep(&string, ":")) != NULL) {
		if (!*p)
			continue;
		switch (lookup_constant(nfs_secflavor_tokens, p, -1)) {
		case Opt_sec_none:
			pseudoflavor = RPC_AUTH_NULL;
			break;
		case Opt_sec_sys:
			pseudoflavor = RPC_AUTH_UNIX;
			break;
		case Opt_sec_krb5:
			pseudoflavor = RPC_AUTH_GSS_KRB5;
			break;
		case Opt_sec_krb5i:
			pseudoflavor = RPC_AUTH_GSS_KRB5I;
			break;
		case Opt_sec_krb5p:
			pseudoflavor = RPC_AUTH_GSS_KRB5P;
			break;
		case Opt_sec_lkey:
			pseudoflavor = RPC_AUTH_GSS_LKEY;
			break;
		case Opt_sec_lkeyi:
			pseudoflavor = RPC_AUTH_GSS_LKEYI;
			break;
		case Opt_sec_lkeyp:
			pseudoflavor = RPC_AUTH_GSS_LKEYP;
			break;
		case Opt_sec_spkm:
			pseudoflavor = RPC_AUTH_GSS_SPKM;
			break;
		case Opt_sec_spkmi:
			pseudoflavor = RPC_AUTH_GSS_SPKMI;
			break;
		case Opt_sec_spkmp:
			pseudoflavor = RPC_AUTH_GSS_SPKMP;
			break;
		default:
			return nfs_invalf(fc, "NFS: sec=%s option not recognized", p);
		}

		ret = nfs_auth_info_add(fc, &ctx->auth_info, pseudoflavor);
		if (ret < 0)
			return ret;
	}

	return 0;
}

static int nfs_parse_xprtsec_policy(struct fs_context *fc,
				    struct fs_parameter *param)
{
	struct nfs_fs_context *ctx = nfs_fc2context(fc);

	trace_nfs_mount_assign(param->key, param->string);

	switch (lookup_constant(nfs_xprtsec_policies, param->string, -1)) {
	case Opt_xprtsec_none:
		ctx->xprtsec.policy = RPC_XPRTSEC_NONE;
		break;
	case Opt_xprtsec_tls:
		ctx->xprtsec.policy = RPC_XPRTSEC_TLS_ANON;
		break;
	case Opt_xprtsec_mtls:
		ctx->xprtsec.policy = RPC_XPRTSEC_TLS_X509;
		break;
	default:
		return nfs_invalf(fc, "NFS: Unrecognized transport security policy");
	}
	return 0;
}

static int nfs_parse_version_string(struct fs_context *fc,
				    const char *string)
{
	struct nfs_fs_context *ctx = nfs_fc2context(fc);

	ctx->flags &= ~NFS_MOUNT_VER3;
	switch (lookup_constant(nfs_vers_tokens, string, -1)) {
	case Opt_vers_2:
		ctx->version = 2;
		break;
	case Opt_vers_3:
		ctx->flags |= NFS_MOUNT_VER3;
		ctx->version = 3;
		break;
	case Opt_vers_4:
		/* Backward compatibility option. In future,
		 * the mount program should always supply
		 * a NFSv4 minor version number.
		 */
		ctx->version = 4;
		break;
	case Opt_vers_4_0:
		ctx->version = 4;
		ctx->minorversion = 0;
		break;
	case Opt_vers_4_1:
		ctx->version = 4;
		ctx->minorversion = 1;
		break;
	case Opt_vers_4_2:
		ctx->version = 4;
		ctx->minorversion = 2;
		break;
	default:
		return nfs_invalf(fc, "NFS: Unsupported NFS version");
	}
	return 0;
}

#ifdef CONFIG_KEYS
static int nfs_tls_key_verify(key_serial_t key_id)
{
	struct key *key = key_lookup(key_id);
	int error = 0;

	if (IS_ERR(key)) {
		pr_err("key id %08x not found\n", key_id);
		return PTR_ERR(key);
	}
	if (test_bit(KEY_FLAG_REVOKED, &key->flags) ||
	    test_bit(KEY_FLAG_INVALIDATED, &key->flags)) {
		pr_err("key id %08x revoked\n", key_id);
		error = -EKEYREVOKED;
	}

	key_put(key);
	return error;
}
#else
static inline int nfs_tls_key_verify(key_serial_t key_id)
{
	return -ENOENT;
}
#endif /* CONFIG_KEYS */
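/*
 * Illustrative example (not in the original source): "vers=4.1" reaches
 * nfs_parse_version_string() above and yields ctx->version = 4,
 * ctx->minorversion = 1, while a bare "vers=4" keeps the previously set
 * minor version (0 by default).
 */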
/*
 * Parse a single mount parameter.
 */
static int nfs_fs_context_parse_param(struct fs_context *fc,
				      struct fs_parameter *param)
{
	struct fs_parse_result result;
	struct nfs_fs_context *ctx = nfs_fc2context(fc);
	unsigned short protofamily, mountfamily;
	unsigned int len;
	int ret, opt;

	trace_nfs_mount_option(param);

	opt = fs_parse(fc, nfs_fs_parameters, param, &result);
	if (opt < 0)
		return (opt == -ENOPARAM && ctx->sloppy) ? 1 : opt;

	if (fc->security)
		ctx->has_sec_mnt_opts = 1;

	switch (opt) {
	case Opt_source:
		if (fc->source)
			return nfs_invalf(fc, "NFS: Multiple sources not supported");
		fc->source = param->string;
		param->string = NULL;
		break;

		/*
		 * boolean options:  foo/nofoo
		 */
	case Opt_soft:
		ctx->flags |= NFS_MOUNT_SOFT;
		ctx->flags &= ~NFS_MOUNT_SOFTERR;
		break;
	case Opt_softerr:
		ctx->flags |= NFS_MOUNT_SOFTERR | NFS_MOUNT_SOFTREVAL;
		ctx->flags &= ~NFS_MOUNT_SOFT;
		break;
	case Opt_hard:
		ctx->flags &= ~(NFS_MOUNT_SOFT |
				NFS_MOUNT_SOFTERR |
				NFS_MOUNT_SOFTREVAL);
		break;
	case Opt_softreval:
		if (result.negated)
			ctx->flags &= ~NFS_MOUNT_SOFTREVAL;
		else
			ctx->flags |= NFS_MOUNT_SOFTREVAL;
		break;
	case Opt_posix:
		if (result.negated)
			ctx->flags &= ~NFS_MOUNT_POSIX;
		else
			ctx->flags |= NFS_MOUNT_POSIX;
		break;
	case Opt_cto:
		if (result.negated)
			ctx->flags |= NFS_MOUNT_NOCTO;
		else
			ctx->flags &= ~NFS_MOUNT_NOCTO;
		break;
	case Opt_trunkdiscovery:
		if (result.negated)
			ctx->flags &= ~NFS_MOUNT_TRUNK_DISCOVERY;
		else
			ctx->flags |= NFS_MOUNT_TRUNK_DISCOVERY;
		break;
	case Opt_alignwrite:
		if (result.negated)
			ctx->flags |= NFS_MOUNT_NO_ALIGNWRITE;
		else
			ctx->flags &= ~NFS_MOUNT_NO_ALIGNWRITE;
		break;
	case Opt_ac:
		if (result.negated)
			ctx->flags |= NFS_MOUNT_NOAC;
		else
			ctx->flags &= ~NFS_MOUNT_NOAC;
		break;
	case Opt_lock:
		if (result.negated) {
			ctx->lock_status = NFS_LOCK_NOLOCK;
			ctx->flags |= NFS_MOUNT_NONLM;
			ctx->flags |= (NFS_MOUNT_LOCAL_FLOCK | NFS_MOUNT_LOCAL_FCNTL);
		} else {
			ctx->lock_status = NFS_LOCK_LOCK;
			ctx->flags &= ~NFS_MOUNT_NONLM;
			ctx->flags &= ~(NFS_MOUNT_LOCAL_FLOCK | NFS_MOUNT_LOCAL_FCNTL);
		}
		break;
	case Opt_udp:
		ctx->flags &= ~NFS_MOUNT_TCP;
		ctx->nfs_server.protocol = XPRT_TRANSPORT_UDP;
		break;
	case Opt_tcp:
	case Opt_rdma:
		ctx->flags |= NFS_MOUNT_TCP; /* for side protocols */
		ret = xprt_find_transport_ident(param->key);
		if (ret < 0)
			goto out_bad_transport;
		ctx->nfs_server.protocol = ret;
		break;
	case Opt_acl:
		if (result.negated)
			ctx->flags |= NFS_MOUNT_NOACL;
		else
			ctx->flags &= ~NFS_MOUNT_NOACL;
		break;
	case Opt_rdirplus:
		if (result.negated) {
			ctx->flags &= ~NFS_MOUNT_FORCE_RDIRPLUS;
			ctx->flags |= NFS_MOUNT_NORDIRPLUS;
		} else if (!param->string) {
			ctx->flags &= ~(NFS_MOUNT_NORDIRPLUS | NFS_MOUNT_FORCE_RDIRPLUS);
		} else {
			switch (lookup_constant(nfs_rdirplus_tokens, param->string, -1)) {
			case Opt_rdirplus_none:
				ctx->flags &= ~NFS_MOUNT_FORCE_RDIRPLUS;
				ctx->flags |= NFS_MOUNT_NORDIRPLUS;
				break;
			case Opt_rdirplus_force:
				ctx->flags &= ~NFS_MOUNT_NORDIRPLUS;
				ctx->flags |= NFS_MOUNT_FORCE_RDIRPLUS;
				break;
			default:
				goto out_invalid_value;
			}
		}
		break;
	case Opt_sharecache:
		if (result.negated)
			ctx->flags |= NFS_MOUNT_UNSHARED;
		else
			ctx->flags &= ~NFS_MOUNT_UNSHARED;
		break;
	case Opt_resvport:
		if (result.negated)
			ctx->flags |= NFS_MOUNT_NORESVPORT;
		else
			ctx->flags &= ~NFS_MOUNT_NORESVPORT;
		break;
	case Opt_fscache_flag:
		if (result.negated)
			ctx->options &= ~NFS_OPTION_FSCACHE;
		else
			ctx->options |= NFS_OPTION_FSCACHE;
		kfree(ctx->fscache_uniq);
		ctx->fscache_uniq = NULL;
		break;
	case Opt_fscache:
		trace_nfs_mount_assign(param->key, param->string);
		ctx->options |= NFS_OPTION_FSCACHE;
		kfree(ctx->fscache_uniq);
		ctx->fscache_uniq = param->string;
		param->string = NULL;
		break;
	case Opt_migration:
		if (result.negated)
			ctx->options &= ~NFS_OPTION_MIGRATION;
		else
			ctx->options |= NFS_OPTION_MIGRATION;
		break;

		/*
		 * options that take numeric values
		 */
	case Opt_port:
		if (result.uint_32 > USHRT_MAX)
			goto out_of_bounds;
		ctx->nfs_server.port = result.uint_32;
		break;
	case Opt_rsize:
		ctx->rsize = result.uint_32;
		break;
	case Opt_wsize:
		ctx->wsize = result.uint_32;
		break;
	case Opt_bsize:
		ctx->bsize = result.uint_32;
		break;
	case Opt_timeo:
		if (result.uint_32 < 1 || result.uint_32 > INT_MAX)
			goto out_of_bounds;
		ctx->timeo = result.uint_32;
		break;
	case Opt_retrans:
		if (result.uint_32 > INT_MAX)
			goto out_of_bounds;
		ctx->retrans = result.uint_32;
		break;
	case Opt_acregmin:
		ctx->acregmin = result.uint_32;
		break;
	case Opt_acregmax:
		ctx->acregmax = result.uint_32;
		break;
	case Opt_acdirmin:
		ctx->acdirmin = result.uint_32;
		break;
	case Opt_acdirmax:
		ctx->acdirmax = result.uint_32;
		break;
	case Opt_actimeo:
		ctx->acregmin = result.uint_32;
		ctx->acregmax = result.uint_32;
		ctx->acdirmin = result.uint_32;
		ctx->acdirmax = result.uint_32;
		break;
	case Opt_namelen:
		ctx->namlen = result.uint_32;
		break;
	case Opt_mountport:
		if (result.uint_32 > USHRT_MAX)
			goto out_of_bounds;
		ctx->mount_server.port = result.uint_32;
		break;
	case Opt_mountvers:
		if (result.uint_32 < NFS_MNT_VERSION ||
		    result.uint_32 > NFS_MNT3_VERSION)
			goto out_of_bounds;
		ctx->mount_server.version = result.uint_32;
		break;
	case Opt_minorversion:
		if (result.uint_32 > NFS4_MAX_MINOR_VERSION)
			goto out_of_bounds;
		ctx->minorversion = result.uint_32;
		break;

		/*
		 * options that take text values
		 */
	case Opt_v:
		ret = nfs_parse_version_string(fc, param->key + 1);
		if (ret < 0)
			return ret;
		break;
	case Opt_vers:
		if (!param->string)
			goto out_invalid_value;
		trace_nfs_mount_assign(param->key, param->string);
		ret = nfs_parse_version_string(fc, param->string);
		if (ret < 0)
			return ret;
		break;
	case Opt_sec:
		ret = nfs_parse_security_flavors(fc, param);
		if (ret < 0)
			return ret;
		break;
	case Opt_xprtsec:
		ret = nfs_parse_xprtsec_policy(fc, param);
		if (ret < 0)
			return ret;
		break;
	case Opt_cert_serial:
		ret = nfs_tls_key_verify(result.int_32);
		if (ret < 0)
			return ret;
		ctx->xprtsec.cert_serial = result.int_32;
		break;
	case Opt_privkey_serial:
		ret = nfs_tls_key_verify(result.int_32);
		if (ret < 0)
			return ret;
		ctx->xprtsec.privkey_serial = result.int_32;
		break;

	case Opt_proto:
		if (!param->string)
			goto out_invalid_value;
		trace_nfs_mount_assign(param->key, param->string);
		protofamily = AF_INET;
		switch (lookup_constant(nfs_xprt_protocol_tokens, param->string, -1)) {
		case Opt_xprt_udp6:
			protofamily = AF_INET6;
			fallthrough;
		case Opt_xprt_udp:
			ctx->flags &= ~NFS_MOUNT_TCP;
			ctx->nfs_server.protocol = XPRT_TRANSPORT_UDP;
			break;
		case Opt_xprt_tcp6:
			protofamily = AF_INET6;
			fallthrough;
		case Opt_xprt_tcp:
			ctx->flags |= NFS_MOUNT_TCP;
			ctx->nfs_server.protocol = XPRT_TRANSPORT_TCP;
			break;
		case Opt_xprt_rdma6:
			protofamily = AF_INET6;
			fallthrough;
		case Opt_xprt_rdma:
			/* vector side protocols to TCP */
			ctx->flags |= NFS_MOUNT_TCP;
			ret = xprt_find_transport_ident(param->string);
			if (ret < 0)
				goto out_bad_transport;
			ctx->nfs_server.protocol = ret;
			break;
		default:
			goto out_bad_transport;
		}
		ctx->protofamily = protofamily;
		break;

	case Opt_mountproto:
		if (!param->string)
			goto out_invalid_value;
		trace_nfs_mount_assign(param->key, param->string);
		mountfamily = AF_INET;
		switch (lookup_constant(nfs_xprt_protocol_tokens, param->string, -1)) {
		case Opt_xprt_udp6:
			mountfamily = AF_INET6;
			fallthrough;
		case Opt_xprt_udp:
			ctx->mount_server.protocol = XPRT_TRANSPORT_UDP;
			break;
		case Opt_xprt_tcp6:
			mountfamily = AF_INET6;
			fallthrough;
		case Opt_xprt_tcp:
			ctx->mount_server.protocol = XPRT_TRANSPORT_TCP;
			break;
		case Opt_xprt_rdma: /* not used for side protocols */
		default:
			goto out_bad_transport;
		}
		ctx->mountfamily = mountfamily;
		break;

	case Opt_addr:
		trace_nfs_mount_assign(param->key, param->string);
		len = rpc_pton(fc->net_ns, param->string, param->size,
			       &ctx->nfs_server.address,
			       sizeof(ctx->nfs_server._address));
		if (len == 0)
			goto out_invalid_address;
		ctx->nfs_server.addrlen = len;
		break;
	case Opt_clientaddr:
		trace_nfs_mount_assign(param->key, param->string);
		kfree(ctx->client_address);
		ctx->client_address = param->string;
		param->string = NULL;
		break;
	case Opt_mounthost:
		trace_nfs_mount_assign(param->key, param->string);
		kfree(ctx->mount_server.hostname);
		ctx->mount_server.hostname = param->string;
		param->string = NULL;
		break;
	case Opt_mountaddr:
		trace_nfs_mount_assign(param->key, param->string);
		len = rpc_pton(fc->net_ns, param->string, param->size,
			       &ctx->mount_server.address,
			       sizeof(ctx->mount_server._address));
		if (len == 0)
			goto out_invalid_address;
		ctx->mount_server.addrlen = len;
		break;
	case Opt_nconnect:
		trace_nfs_mount_assign(param->key, param->string);
		if (result.uint_32 < 1 || result.uint_32 > NFS_MAX_CONNECTIONS)
			goto out_of_bounds;
		ctx->nfs_server.nconnect = result.uint_32;
		break;
	case Opt_max_connect:
		trace_nfs_mount_assign(param->key, param->string);
		if (result.uint_32 < 1 || result.uint_32 > NFS_MAX_TRANSPORTS)
			goto out_of_bounds;
		ctx->nfs_server.max_connect = result.uint_32;
		break;
	case Opt_fatal_neterrors:
		trace_nfs_mount_assign(param->key, param->string);
		switch (result.uint_32) {
		case Opt_fatal_neterrors_default:
			if (fc->net_ns != &init_net)
				ctx->flags |= NFS_MOUNT_NETUNREACH_FATAL;
			else
				ctx->flags &= ~NFS_MOUNT_NETUNREACH_FATAL;
			break;
		case Opt_fatal_neterrors_enetunreach:
			ctx->flags |= NFS_MOUNT_NETUNREACH_FATAL;
			break;
		case Opt_fatal_neterrors_none:
			ctx->flags &= ~NFS_MOUNT_NETUNREACH_FATAL;
			break;
		default:
			goto out_invalid_value;
		}
		break;
	case Opt_lookupcache:
		trace_nfs_mount_assign(param->key, param->string);
		switch (result.uint_32) {
		case Opt_lookupcache_all:
			ctx->flags &= ~(NFS_MOUNT_LOOKUP_CACHE_NONEG |
					NFS_MOUNT_LOOKUP_CACHE_NONE);
			break;
		case Opt_lookupcache_positive:
			ctx->flags &= ~NFS_MOUNT_LOOKUP_CACHE_NONE;
			ctx->flags |= NFS_MOUNT_LOOKUP_CACHE_NONEG;
			break;
		case Opt_lookupcache_none:
			ctx->flags |= NFS_MOUNT_LOOKUP_CACHE_NONEG |
				      NFS_MOUNT_LOOKUP_CACHE_NONE;
			break;
		default:
			goto out_invalid_value;
		}
		break;
	case Opt_local_lock:
		trace_nfs_mount_assign(param->key, param->string);
		switch (result.uint_32) {
		case Opt_local_lock_all:
			ctx->flags |= (NFS_MOUNT_LOCAL_FLOCK |
				       NFS_MOUNT_LOCAL_FCNTL);
			break;
		case Opt_local_lock_flock:
			ctx->flags |= NFS_MOUNT_LOCAL_FLOCK;
			break;
		case Opt_local_lock_posix:
			ctx->flags |= NFS_MOUNT_LOCAL_FCNTL;
			break;
		case Opt_local_lock_none:
			ctx->flags &= ~(NFS_MOUNT_LOCAL_FLOCK |
					NFS_MOUNT_LOCAL_FCNTL);
			break;
		default:
			goto out_invalid_value;
		}
		break;
	case Opt_write:
		trace_nfs_mount_assign(param->key, param->string);
		switch (result.uint_32) {
		case Opt_write_lazy:
			ctx->flags &= ~(NFS_MOUNT_WRITE_EAGER |
					NFS_MOUNT_WRITE_WAIT);
			break;
		case Opt_write_eager:
			ctx->flags |= NFS_MOUNT_WRITE_EAGER;
			ctx->flags &= ~NFS_MOUNT_WRITE_WAIT;
			break;
		case Opt_write_wait:
			ctx->flags |= NFS_MOUNT_WRITE_EAGER |
				      NFS_MOUNT_WRITE_WAIT;
			break;
		default:
			goto out_invalid_value;
		}
		break;

		/*
		 * Special options
		 */
	case Opt_sloppy:
		ctx->sloppy = true;
		break;
	}

	return 0;

out_invalid_value:
	return nfs_invalf(fc, "NFS: Bad mount option value specified");
out_invalid_address:
	return nfs_invalf(fc, "NFS: Bad IP address specified");
out_of_bounds:
	return nfs_invalf(fc, "NFS: Value for '%s' out of range", param->key);
out_bad_transport:
	return nfs_invalf(fc, "NFS: Unrecognized transport protocol");
}
/*
 * Split fc->source into "hostname:export_path".
 *
 * The leftmost colon demarks the split between the server's hostname
 * and the export path.  If the hostname starts with a left square
 * bracket, then it may contain colons.
 *
 * Note: caller frees hostname and export path, even on error.
 */
static int nfs_parse_source(struct fs_context *fc,
			    size_t maxnamlen, size_t maxpathlen)
{
	struct nfs_fs_context *ctx = nfs_fc2context(fc);
	const char *dev_name = fc->source;
	size_t len;
	const char *end;

	if (unlikely(!dev_name || !*dev_name))
		return -EINVAL;

	/* Is the host name protected with square brackets? */
	if (*dev_name == '[') {
		end = strchr(++dev_name, ']');
		if (end == NULL || end[1] != ':')
			goto out_bad_devname;

		len = end - dev_name;
		end++;
	} else {
		const char *comma;

		end = strchr(dev_name, ':');
		if (end == NULL)
			goto out_bad_devname;
		len = end - dev_name;

		/* kill possible hostname list: not supported */
		comma = memchr(dev_name, ',', len);
		if (comma)
			len = comma - dev_name;
	}

	if (len > maxnamlen)
		goto out_hostname;

	kfree(ctx->nfs_server.hostname);

	/* N.B. caller will free nfs_server.hostname in all cases */
	ctx->nfs_server.hostname = kmemdup_nul(dev_name, len, GFP_KERNEL);
	if (!ctx->nfs_server.hostname)
		goto out_nomem;
	len = strlen(++end);
	if (len > maxpathlen)
		goto out_path;
	ctx->nfs_server.export_path = kmemdup_nul(end, len, GFP_KERNEL);
	if (!ctx->nfs_server.export_path)
		goto out_nomem;

	trace_nfs_mount_path(ctx->nfs_server.export_path);
	return 0;

out_bad_devname:
	return nfs_invalf(fc, "NFS: device name not in host:path format");
out_nomem:
	nfs_errorf(fc, "NFS: not enough memory to parse device name");
	return -ENOMEM;
out_hostname:
	nfs_errorf(fc, "NFS: server hostname too long");
	return -ENAMETOOLONG;
out_path:
	nfs_errorf(fc, "NFS: export pathname too long");
	return -ENAMETOOLONG;
}
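/*
 * Illustrative examples (not in the original source):
 *   "server1:/export"         -> hostname "server1", export_path "/export"
 *   "[fe80::1]:/export"       -> hostname "fe80::1", export_path "/export"
 *   "server1,server2:/export" -> hostname "server1" (host lists are not
 *                                supported; everything after the comma is
 *                                dropped)
 */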
static inline bool is_remount_fc(struct fs_context *fc)
{
	return fc->root != NULL;
}

/*
 * Parse monolithic NFS2/NFS3 mount data
 * - fills in the mount root filehandle
 *
 * For option strings, user space handles the following behaviors:
 *
 * + DNS: mapping server host name to IP address ("addr=" option)
 *
 * + failure mode: how to behave if a mount request can't be handled
 *   immediately ("fg/bg" option)
 *
 * + retry: how often to retry a mount request ("retry=" option)
 *
 * + breaking back: trying proto=udp after proto=tcp, v2 after v3,
 *   mountproto=tcp after mountproto=udp, and so on
 */
static int nfs23_parse_monolithic(struct fs_context *fc,
				  struct nfs_mount_data *data)
{
	struct nfs_fs_context *ctx = nfs_fc2context(fc);
	struct nfs_fh *mntfh = ctx->mntfh;
	struct sockaddr_storage *sap = &ctx->nfs_server._address;
	int extra_flags = NFS_MOUNT_LEGACY_INTERFACE;
	int ret;

	if (data == NULL)
		goto out_no_data;

	ctx->version = NFS_DEFAULT_VERSION;
	switch (data->version) {
	case 1:
		data->namlen = 0;
		fallthrough;
	case 2:
		data->bsize = 0;
		fallthrough;
	case 3:
		if (data->flags & NFS_MOUNT_VER3)
			goto out_no_v3;
		data->root.size = NFS2_FHSIZE;
		memcpy(data->root.data, data->old_root.data, NFS2_FHSIZE);
		/* Turn off security negotiation */
		extra_flags |= NFS_MOUNT_SECFLAVOUR;
		fallthrough;
	case 4:
		if (data->flags & NFS_MOUNT_SECFLAVOUR)
			goto out_no_sec;
		fallthrough;
	case 5:
		memset(data->context, 0, sizeof(data->context));
		fallthrough;
	case 6:
		if (data->flags & NFS_MOUNT_VER3) {
			if (data->root.size > NFS3_FHSIZE || data->root.size == 0)
				goto out_invalid_fh;
			mntfh->size = data->root.size;
			ctx->version = 3;
		} else {
			mntfh->size = NFS2_FHSIZE;
			ctx->version = 2;
		}

		memcpy(mntfh->data, data->root.data, mntfh->size);
		if (mntfh->size < sizeof(mntfh->data))
			memset(mntfh->data + mntfh->size, 0,
			       sizeof(mntfh->data) - mntfh->size);

		/*
		 * for proto == XPRT_TRANSPORT_UDP, which is what uses
		 * to_exponential, implying shift: limit the shift value
		 * to BITS_PER_LONG (majortimeo is unsigned long)
		 */
		if (!(data->flags & NFS_MOUNT_TCP)) /* this will be UDP */
			if (data->retrans >= 64) /* shift value is too large */
				goto out_invalid_data;

		/*
		 * Translate to nfs_fs_context, which nfs_fill_super
		 * can deal with.
		 */
		ctx->flags	= data->flags & NFS_MOUNT_FLAGMASK;
		ctx->flags	|= extra_flags;
		ctx->rsize	= data->rsize;
		ctx->wsize	= data->wsize;
		ctx->timeo	= data->timeo;
		ctx->retrans	= data->retrans;
		ctx->acregmin	= data->acregmin;
		ctx->acregmax	= data->acregmax;
		ctx->acdirmin	= data->acdirmin;
		ctx->acdirmax	= data->acdirmax;
		ctx->need_mount	= false;

		if (!is_remount_fc(fc)) {
			memcpy(sap, &data->addr, sizeof(data->addr));
			ctx->nfs_server.addrlen = sizeof(data->addr);
			ctx->nfs_server.port = ntohs(data->addr.sin_port);
		}

		if (sap->ss_family != AF_INET ||
		    !nfs_verify_server_address(sap))
			goto out_no_address;

		if (!(data->flags & NFS_MOUNT_TCP))
			ctx->nfs_server.protocol = XPRT_TRANSPORT_UDP;
		/* N.B. caller will free nfs_server.hostname in all cases */
		ctx->nfs_server.hostname = kstrdup(data->hostname, GFP_KERNEL);
		if (!ctx->nfs_server.hostname)
			goto out_nomem;
		ctx->namlen = data->namlen;
		ctx->bsize = data->bsize;

		if (data->flags & NFS_MOUNT_SECFLAVOUR)
			ctx->selected_flavor = data->pseudoflavor;
		else
			ctx->selected_flavor = RPC_AUTH_UNIX;

		if (!(data->flags & NFS_MOUNT_NONLM))
			ctx->flags &= ~(NFS_MOUNT_LOCAL_FLOCK |
					NFS_MOUNT_LOCAL_FCNTL);
		else
			ctx->flags |= (NFS_MOUNT_LOCAL_FLOCK |
				       NFS_MOUNT_LOCAL_FCNTL);

		/*
		 * The legacy version 6 binary mount data from userspace has a
		 * field used only to transport selinux information into the
		 * kernel.  To continue to support that functionality we
		 * have a touch of selinux knowledge here in the NFS code.  The
		 * userspace code converted context=blah to just blah so we are
		 * converting back to the full string selinux understands.
		 */
		if (data->context[0]) {
#ifdef CONFIG_SECURITY_SELINUX
			int ret;

			data->context[NFS_MAX_CONTEXT_LEN] = '\0';
			ret = vfs_parse_fs_string(fc, "context",
						  data->context,
						  strlen(data->context));
			if (ret < 0)
				return ret;
#else
			return -EINVAL;
#endif
		}

		break;
	default:
		goto generic;
	}

	ret = nfs_validate_transport_protocol(fc, ctx);
	if (ret)
		return ret;

	ctx->skip_reconfig_option_check = true;
	return 0;

generic:
	return generic_parse_monolithic(fc, data);

out_no_data:
	if (is_remount_fc(fc)) {
		ctx->skip_reconfig_option_check = true;
		return 0;
	}
	return nfs_invalf(fc, "NFS: mount program didn't pass any mount data");

out_no_v3:
	return nfs_invalf(fc, "NFS: nfs_mount_data version does not support v3");

out_no_sec:
	return nfs_invalf(fc, "NFS: nfs_mount_data version supports only AUTH_SYS");

out_nomem:
	return -ENOMEM;

out_no_address:
	return nfs_invalf(fc, "NFS: mount program didn't pass remote address");

out_invalid_fh:
	return nfs_invalf(fc, "NFS: invalid root filehandle");

out_invalid_data:
	return nfs_invalf(fc, "NFS: invalid binary mount data");
}

#if IS_ENABLED(CONFIG_NFS_V4)
struct compat_nfs_string {
	compat_uint_t len;
	compat_uptr_t data;
};

static inline void compat_nfs_string(struct nfs_string *dst,
				     struct compat_nfs_string *src)
{
	dst->data = compat_ptr(src->data);
	dst->len = src->len;
}

struct compat_nfs4_mount_data_v1 {
	compat_int_t version;
	compat_int_t flags;
	compat_int_t rsize;
	compat_int_t wsize;
	compat_int_t timeo;
	compat_int_t retrans;
	compat_int_t acregmin;
	compat_int_t acregmax;
	compat_int_t acdirmin;
	compat_int_t acdirmax;
	struct compat_nfs_string client_addr;
	struct compat_nfs_string mnt_path;
	struct compat_nfs_string hostname;
	compat_uint_t host_addrlen;
	compat_uptr_t host_addr;
	compat_int_t proto;
	compat_int_t auth_flavourlen;
	compat_uptr_t auth_flavours;
};

static void nfs4_compat_mount_data_conv(struct nfs4_mount_data *data)
{
	struct compat_nfs4_mount_data_v1 *compat =
			(struct compat_nfs4_mount_data_v1 *)data;

	/* copy the fields backwards */
	data->auth_flavours = compat_ptr(compat->auth_flavours);
	data->auth_flavourlen = compat->auth_flavourlen;
	data->proto = compat->proto;
	data->host_addr = compat_ptr(compat->host_addr);
	data->host_addrlen = compat->host_addrlen;
	compat_nfs_string(&data->hostname, &compat->hostname);
	compat_nfs_string(&data->mnt_path, &compat->mnt_path);
	compat_nfs_string(&data->client_addr, &compat->client_addr);
	data->acdirmax = compat->acdirmax;
	data->acdirmin = compat->acdirmin;
	data->acregmax = compat->acregmax;
	data->acregmin = compat->acregmin;
	data->retrans = compat->retrans;
	data->timeo = compat->timeo;
	data->wsize = compat->wsize;
	data->rsize = compat->rsize;
	data->flags = compat->flags;
	data->version = compat->version;
}

/*
 * Validate NFSv4 mount options
 */
static int nfs4_parse_monolithic(struct fs_context *fc,
				 struct nfs4_mount_data *data)
{
	struct nfs_fs_context *ctx = nfs_fc2context(fc);
	struct sockaddr_storage *sap = &ctx->nfs_server._address;
	int ret;
	char *c;

	if (!data) {
		if (is_remount_fc(fc))
			goto done;
		return nfs_invalf(fc,
				  "NFS4: mount program didn't pass any mount data");
	}

	ctx->version = 4;

	if (data->version != 1)
		return generic_parse_monolithic(fc, data);

	if (in_compat_syscall())
		nfs4_compat_mount_data_conv(data);

	if (data->host_addrlen > sizeof(ctx->nfs_server.address))
		goto out_no_address;
	if (data->host_addrlen == 0)
		goto out_no_address;
	ctx->nfs_server.addrlen = data->host_addrlen;
	if (copy_from_user(sap, data->host_addr, data->host_addrlen))
		return -EFAULT;
	if (!nfs_verify_server_address(sap))
		goto out_no_address;
	ctx->nfs_server.port =
		ntohs(((struct sockaddr_in *)sap)->sin_port);

	if (data->auth_flavourlen) {
		rpc_authflavor_t pseudoflavor;

		if (data->auth_flavourlen > 1)
			goto out_inval_auth;
		if (copy_from_user(&pseudoflavor, data->auth_flavours,
				   sizeof(pseudoflavor)))
			return -EFAULT;
		ctx->selected_flavor = pseudoflavor;
	} else {
		ctx->selected_flavor = RPC_AUTH_UNIX;
	}

	c = strndup_user(data->hostname.data, NFS4_MAXNAMLEN);
	if (IS_ERR(c))
		return PTR_ERR(c);
	ctx->nfs_server.hostname = c;

	c = strndup_user(data->mnt_path.data, NFS4_MAXPATHLEN);
	if (IS_ERR(c))
		return PTR_ERR(c);
	ctx->nfs_server.export_path = c;
	trace_nfs_mount_path(c);

	c = strndup_user(data->client_addr.data, 16);
	if (IS_ERR(c))
		return PTR_ERR(c);
	ctx->client_address = c;

	/*
	 * Translate to nfs_fs_context, which nfs_fill_super
	 * can deal with.
	 */
	ctx->flags	= data->flags & NFS4_MOUNT_FLAGMASK;
	ctx->rsize	= data->rsize;
	ctx->wsize	= data->wsize;
	ctx->timeo	= data->timeo;
	ctx->retrans	= data->retrans;
	ctx->acregmin	= data->acregmin;
	ctx->acregmax	= data->acregmax;
	ctx->acdirmin	= data->acdirmin;
	ctx->acdirmax	= data->acdirmax;
	ctx->nfs_server.protocol = data->proto;
	ret = nfs_validate_transport_protocol(fc, ctx);
	if (ret)
		return ret;
done:
	ctx->skip_reconfig_option_check = true;
	return 0;

out_inval_auth:
	return nfs_invalf(fc, "NFS4: Invalid number of RPC auth flavours %d",
			  data->auth_flavourlen);

out_no_address:
	return nfs_invalf(fc, "NFS4: mount program didn't pass remote address");
}
#endif

/*
 * Parse a monolithic block of data from sys_mount().
 */
static int nfs_fs_context_parse_monolithic(struct fs_context *fc,
					   void *data)
{
	if (fc->fs_type == &nfs_fs_type)
		return nfs23_parse_monolithic(fc, data);

#if IS_ENABLED(CONFIG_NFS_V4)
	if (fc->fs_type == &nfs4_fs_type)
		return nfs4_parse_monolithic(fc, data);
#endif

	return nfs_invalf(fc, "NFS: Unsupported monolithic data version");
}

/*
 * Validate the preparsed information in the config.
 */
static int nfs_fs_context_validate(struct fs_context *fc)
{
	struct nfs_fs_context *ctx = nfs_fc2context(fc);
	struct nfs_subversion *nfs_mod;
	struct sockaddr_storage *sap = &ctx->nfs_server._address;
	int max_namelen = PAGE_SIZE;
	int max_pathlen = NFS_MAXPATHLEN;
	int port = 0;
	int ret;

	if (!fc->source)
		goto out_no_device_name;

	/* Check for sanity first. */
	if (ctx->minorversion && ctx->version != 4)
		goto out_minorversion_mismatch;

	if (ctx->options & NFS_OPTION_MIGRATION &&
	    (ctx->version != 4 || ctx->minorversion != 0))
		goto out_migration_misuse;

	/* Verify that any proto=/mountproto= options match the address
	 * families in the addr=/mountaddr= options.
	 */
	if (ctx->protofamily != AF_UNSPEC &&
	    ctx->protofamily != ctx->nfs_server.address.sa_family)
		goto out_proto_mismatch;

	if (ctx->mountfamily != AF_UNSPEC) {
		if (ctx->mount_server.addrlen) {
			if (ctx->mountfamily != ctx->mount_server.address.sa_family)
				goto out_mountproto_mismatch;
		} else {
			if (ctx->mountfamily != ctx->nfs_server.address.sa_family)
				goto out_mountproto_mismatch;
		}
	}

	if (!nfs_verify_server_address(sap))
		goto out_no_address;

	ret = nfs_validate_transport_protocol(fc, ctx);
	if (ret)
		return ret;

	if (ctx->version == 4) {
		if (IS_ENABLED(CONFIG_NFS_V4)) {
			if (ctx->nfs_server.protocol == XPRT_TRANSPORT_RDMA)
				port = NFS_RDMA_PORT;
			else
				port = NFS_PORT;
			max_namelen = NFS4_MAXNAMLEN;
			max_pathlen = NFS4_MAXPATHLEN;
			ctx->flags &= ~(NFS_MOUNT_NONLM | NFS_MOUNT_NOACL |
					NFS_MOUNT_VER3 | NFS_MOUNT_LOCAL_FLOCK |
					NFS_MOUNT_LOCAL_FCNTL);
		} else {
			goto out_v4_not_compiled;
		}
	} else {
		nfs_set_mount_transport_protocol(ctx);
		if (ctx->nfs_server.protocol == XPRT_TRANSPORT_RDMA)
			port = NFS_RDMA_PORT;
	}

	nfs_set_port(sap, &ctx->nfs_server.port, port);

	ret = nfs_parse_source(fc, max_namelen, max_pathlen);
	if (ret < 0)
		return ret;

	/* Load the NFS protocol module if we haven't done so yet */
	if (!ctx->nfs_mod) {
		nfs_mod = find_nfs_version(ctx->version);
		if (IS_ERR(nfs_mod)) {
			ret = PTR_ERR(nfs_mod);
			goto out_version_unavailable;
		}
		ctx->nfs_mod = nfs_mod;
	}

	/* Ensure the filesystem context has the correct fs_type */
	if (fc->fs_type != ctx->nfs_mod->nfs_fs) {
		module_put(fc->fs_type->owner);
		__module_get(ctx->nfs_mod->nfs_fs->owner);
		fc->fs_type = ctx->nfs_mod->nfs_fs;
	}
	return 0;

out_no_device_name:
	return nfs_invalf(fc, "NFS: Device name not specified");
out_v4_not_compiled:
	nfs_errorf(fc, "NFS: NFSv4 is not compiled into kernel");
	return -EPROTONOSUPPORT;
out_no_address:
	return nfs_invalf(fc, "NFS: mount program didn't pass remote address");
out_mountproto_mismatch:
	return nfs_invalf(fc, "NFS: Mount server address does not match mountproto= option");
out_proto_mismatch:
	return nfs_invalf(fc, "NFS: Server address does not match proto= option");
out_minorversion_mismatch:
	return nfs_invalf(fc, "NFS: Mount option vers=%u does not support minorversion=%u",
			  ctx->version, ctx->minorversion);
out_migration_misuse:
	return nfs_invalf(fc, "NFS: 'Migration' not supported for this NFS version");
out_version_unavailable:
	nfs_errorf(fc, "NFS: Version unavailable");
	return ret;
}

/*
 * Create an NFS superblock by the appropriate method.
 */
static int nfs_get_tree(struct fs_context *fc)
{
	struct nfs_fs_context *ctx = nfs_fc2context(fc);
	int err = nfs_fs_context_validate(fc);

	if (err)
		return err;
	if (!ctx->internal)
		return ctx->nfs_mod->rpc_ops->try_get_tree(fc);
	else
		return nfs_get_tree_common(fc);
}

/*
 * Handle duplication of a configuration. The caller copied *src into *sc, but
 * it can't deal with resource pointers in the filesystem context, so we have
 * to do that. We need to clear pointers, copy data or get extra refs as
 * appropriate.
 */
static int nfs_fs_context_dup(struct fs_context *fc, struct fs_context *src_fc)
{
	struct nfs_fs_context *src = nfs_fc2context(src_fc), *ctx;

	ctx = kmemdup(src, sizeof(struct nfs_fs_context), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->mntfh = nfs_alloc_fhandle();
	if (!ctx->mntfh) {
		kfree(ctx);
		return -ENOMEM;
	}
	nfs_copy_fh(ctx->mntfh, src->mntfh);

	get_nfs_version(ctx->nfs_mod);
	ctx->client_address		= NULL;
	ctx->mount_server.hostname	= NULL;
	ctx->nfs_server.export_path	= NULL;
	ctx->nfs_server.hostname	= NULL;
	ctx->fscache_uniq		= NULL;
	ctx->clone_data.fattr		= NULL;
	fc->fs_private = ctx;
	return 0;
}

static void nfs_fs_context_free(struct fs_context *fc)
{
	struct nfs_fs_context *ctx = nfs_fc2context(fc);

	if (ctx) {
		if (ctx->server)
			nfs_free_server(ctx->server);
		if (ctx->nfs_mod)
			put_nfs_version(ctx->nfs_mod);
		kfree(ctx->client_address);
		kfree(ctx->mount_server.hostname);
		kfree(ctx->nfs_server.export_path);
		kfree(ctx->nfs_server.hostname);
		kfree(ctx->fscache_uniq);
		nfs_free_fhandle(ctx->mntfh);
		nfs_free_fattr(ctx->clone_data.fattr);
		kfree(ctx);
	}
}

static const struct fs_context_operations nfs_fs_context_ops = {
	.free			= nfs_fs_context_free,
	.dup			= nfs_fs_context_dup,
	.parse_param		= nfs_fs_context_parse_param,
	.parse_monolithic	= nfs_fs_context_parse_monolithic,
	.get_tree		= nfs_get_tree,
	.reconfigure		= nfs_reconfigure,
};

/*
 * Prepare superblock configuration.  We use the namespaces attached to the
 * context.  This may be the current process's namespaces, or it may be a
 * container's namespaces.
 */
static int nfs_init_fs_context(struct fs_context *fc)
{
	struct nfs_fs_context *ctx;

	ctx = kzalloc(sizeof(struct nfs_fs_context), GFP_KERNEL);
	if (unlikely(!ctx))
		return -ENOMEM;

	ctx->mntfh = nfs_alloc_fhandle();
	if (unlikely(!ctx->mntfh)) {
		kfree(ctx);
		return -ENOMEM;
	}

	ctx->protofamily	= AF_UNSPEC;
	ctx->mountfamily	= AF_UNSPEC;
	ctx->mount_server.port	= NFS_UNSPEC_PORT;

	if (fc->root) {
		/* reconfigure, start with the current config */
		struct nfs_server *nfss = fc->root->d_sb->s_fs_info;
		struct net *net = nfss->nfs_client->cl_net;

		ctx->flags	= nfss->flags;
		ctx->rsize	= nfss->rsize;
		ctx->wsize	= nfss->wsize;
		ctx->retrans	= nfss->client->cl_timeout->to_retries;
		ctx->selected_flavor = nfss->client->cl_auth->au_flavor;
		ctx->acregmin	= nfss->acregmin / HZ;
		ctx->acregmax	= nfss->acregmax / HZ;
		ctx->acdirmin	= nfss->acdirmin / HZ;
		ctx->acdirmax	= nfss->acdirmax / HZ;
		ctx->timeo	= 10U * nfss->client->cl_timeout->to_initval / HZ;
		ctx->nfs_server.port	= nfss->port;
		ctx->nfs_server.addrlen	= nfss->nfs_client->cl_addrlen;
		ctx->version	= nfss->nfs_client->rpc_ops->version;
		ctx->minorversion = nfss->nfs_client->cl_minorversion;

		memcpy(&ctx->nfs_server._address, &nfss->nfs_client->cl_addr,
		       ctx->nfs_server.addrlen);

		if (fc->net_ns != net) {
			put_net(fc->net_ns);
			fc->net_ns = get_net(net);
		}

		ctx->nfs_mod = nfss->nfs_client->cl_nfs_mod;
		get_nfs_version(ctx->nfs_mod);
	} else {
		/* defaults */
		ctx->timeo		= NFS_UNSPEC_TIMEO;
		ctx->retrans		= NFS_UNSPEC_RETRANS;
		ctx->acregmin		= NFS_DEF_ACREGMIN;
		ctx->acregmax		= NFS_DEF_ACREGMAX;
		ctx->acdirmin		= NFS_DEF_ACDIRMIN;
		ctx->acdirmax		= NFS_DEF_ACDIRMAX;
		ctx->nfs_server.port	= NFS_UNSPEC_PORT;
		ctx->nfs_server.protocol = XPRT_TRANSPORT_TCP;
		ctx->selected_flavor	= RPC_AUTH_MAXFLAVOR;
		ctx->minorversion	= 0;
		ctx->need_mount		= true;
		ctx->xprtsec.policy	= RPC_XPRTSEC_NONE;
		ctx->xprtsec.cert_serial	= TLS_NO_CERT;
		ctx->xprtsec.privkey_serial	= TLS_NO_PRIVKEY;

		if (fc->net_ns != &init_net)
			ctx->flags |= NFS_MOUNT_NETUNREACH_FATAL;

		fc->s_iflags |= SB_I_STABLE_WRITES;
	}
	fc->fs_private = ctx;
	fc->ops = &nfs_fs_context_ops;
	return 0;
}
struct file_system_type nfs_fs_type = {
	.owner			= THIS_MODULE,
	.name			= "nfs",
	.init_fs_context	= nfs_init_fs_context,
	.parameters		= nfs_fs_parameters,
	.kill_sb		= nfs_kill_super,
	.fs_flags		= FS_RENAME_DOES_D_MOVE|FS_BINARY_MOUNTDATA,
};
MODULE_ALIAS_FS("nfs");
EXPORT_SYMBOL_GPL(nfs_fs_type);

#if IS_ENABLED(CONFIG_NFS_V4)
struct file_system_type nfs4_fs_type = {
	.owner			= THIS_MODULE,
	.name			= "nfs4",
	.init_fs_context	= nfs_init_fs_context,
	.parameters		= nfs_fs_parameters,
	.kill_sb		= nfs_kill_super,
	.fs_flags		= FS_RENAME_DOES_D_MOVE|FS_BINARY_MOUNTDATA,
};
MODULE_ALIAS_FS("nfs4");
MODULE_ALIAS("nfs4");
EXPORT_SYMBOL_GPL(nfs4_fs_type);
#endif /* CONFIG_NFS_V4 */
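/*
 * Illustrative userspace sketch (not part of the kernel sources above),
 * assuming <sys/syscall.h> defines SYS_fsopen and friends and that
 * "server:/export" and "/mnt" are placeholders: each fsconfig(2) string
 * below is routed by the VFS to nfs_fs_context_parse_param(), and
 * FSCONFIG_CMD_CREATE triggers nfs_fs_context_validate() via
 * nfs_get_tree().
 */
#include <fcntl.h>
#include <linux/mount.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	int fsfd, mfd;

	fsfd = syscall(SYS_fsopen, "nfs4", 0);	/* nfs_init_fs_context() runs here */
	if (fsfd < 0) {
		perror("fsopen");
		return 1;
	}
	/* each of these lands in nfs_fs_context_parse_param() */
	syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_STRING, "source",
		"server:/export", 0);
	syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_STRING, "vers", "4.2", 0);
	/* validate the context and build the superblock: nfs_get_tree() */
	if (syscall(SYS_fsconfig, fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0) < 0) {
		perror("fsconfig");
		return 1;
	}
	mfd = syscall(SYS_fsmount, fsfd, 0, 0);
	if (mfd < 0 || syscall(SYS_move_mount, mfd, "", AT_FDCWD, "/mnt",
			       MOVE_MOUNT_F_EMPTY_PATH) < 0) {
		perror("fsmount/move_mount");
		return 1;
	}
	return 0;
}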
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 */

#include "hard-interface.h"
#include "main.h"

#include <linux/atomic.h>
#include <linux/byteorder/generic.h>
#include <linux/compiler.h>
#include <linux/container_of.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/if.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/kref.h>
#include <linux/limits.h>
#include <linux/list.h>
#include <linux/minmax.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
#include <linux/rculist.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/net_namespace.h>
#include <net/rtnetlink.h>
#include <uapi/linux/batadv_packet.h>

#include "bat_v.h"
#include "bridge_loop_avoidance.h"
#include "distributed-arp-table.h"
#include "gateway_client.h"
#include "log.h"
#include "mesh-interface.h"
#include "originator.h"
#include "send.h"
#include "translation-table.h"

/**
 * batadv_hardif_release() - release hard interface from lists and queue for
 *  free after rcu grace period
 * @ref: kref pointer of the hard interface
 */
void batadv_hardif_release(struct kref *ref)
{
	struct batadv_hard_iface *hard_iface;

	hard_iface = container_of(ref, struct batadv_hard_iface, refcount);

	netdev_put(hard_iface->net_dev, &hard_iface->dev_tracker);
	kfree_rcu(hard_iface, rcu);
}

/**
 * batadv_hardif_get_by_netdev() - Get hard interface object of a net_device
 * @net_dev: net_device to search for
 *
 * Return: batadv_hard_iface of net_dev (with increased refcnt), NULL on errors
 */
struct batadv_hard_iface *
batadv_hardif_get_by_netdev(const struct net_device *net_dev)
{
	struct batadv_hard_iface *hard_iface;

	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
		if (hard_iface->net_dev == net_dev &&
		    kref_get_unless_zero(&hard_iface->refcount))
			goto out;
	}

	hard_iface = NULL;

out:
	rcu_read_unlock();
	return hard_iface;
}

/**
 * batadv_getlink_net() - return link net namespace (or use fallback)
 * @netdev: net_device to check
 * @fallback_net: return in case get_link_net is not available for @netdev
 *
 * Return: result of rtnl_link_ops->get_link_net or @fallback_net
 */
static struct net *batadv_getlink_net(const struct net_device *netdev,
				      struct net *fallback_net)
{
	if (!netdev->rtnl_link_ops)
		return fallback_net;

	if (!netdev->rtnl_link_ops->get_link_net)
		return fallback_net;

	return netdev->rtnl_link_ops->get_link_net(netdev);
}

/**
 * batadv_mutual_parents() - check if two devices are each others parent
 * @dev1: 1st net dev
 * @net1: 1st devices netns
 * @dev2: 2nd net dev
 * @net2: 2nd devices netns
 *
 * veth devices come in pairs and each is the parent of the other!
 *
 * Return: true if the devices are each others parent, otherwise false
 */
static bool batadv_mutual_parents(const struct net_device *dev1,
				  struct net *net1,
				  const struct net_device *dev2,
				  struct net *net2)
{
	int dev1_parent_iflink = dev_get_iflink(dev1);
	int dev2_parent_iflink = dev_get_iflink(dev2);
	const struct net *dev1_parent_net;
	const struct net *dev2_parent_net;

	dev1_parent_net = batadv_getlink_net(dev1, net1);
	dev2_parent_net = batadv_getlink_net(dev2, net2);

	if (!dev1_parent_iflink || !dev2_parent_iflink)
		return false;

	return (dev1_parent_iflink == dev2->ifindex) &&
	       (dev2_parent_iflink == dev1->ifindex) &&
	       net_eq(dev1_parent_net, net2) &&
	       net_eq(dev2_parent_net, net1);
}
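/*
 * Illustrative example (not in the original source): for a veth pair
 * veth0 <-> veth1 in the same netns, dev_get_iflink(veth0) returns
 * veth1->ifindex and vice versa, so batadv_mutual_parents() returns true
 * and the parent recursion in batadv_is_on_batman_iface() below can stop
 * instead of looping between the two peers.
 */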
*/ static struct net_device *batadv_get_real_netdevice(struct net_device *netdev) { struct batadv_hard_iface *hard_iface = NULL; struct net_device *real_netdev = NULL; struct net *real_net; struct net *net; int iflink; ASSERT_RTNL(); if (!netdev) return NULL; iflink = dev_get_iflink(netdev); if (iflink == 0) { dev_hold(netdev); return netdev; } hard_iface = batadv_hardif_get_by_netdev(netdev); if (!hard_iface || !hard_iface->mesh_iface) goto out; net = dev_net(hard_iface->mesh_iface); real_net = batadv_getlink_net(netdev, net); /* iflink to itself, most likely physical device */ if (net == real_net && netdev->ifindex == iflink) { real_netdev = netdev; dev_hold(real_netdev); goto out; } real_netdev = dev_get_by_index(real_net, iflink); out: batadv_hardif_put(hard_iface); return real_netdev; } /** * batadv_get_real_netdev() - check if the given net_device struct is a virtual * interface on top of another 'real' interface * @net_device: the device to check * * Return: the 'real' net device or the original net device and NULL in case * of an error. */ struct net_device *batadv_get_real_netdev(struct net_device *net_device) { struct net_device *real_netdev; rtnl_lock(); real_netdev = batadv_get_real_netdevice(net_device); rtnl_unlock(); return real_netdev; } /** * batadv_is_wext_netdev() - check if the given net_device struct is a * wext wifi interface * @net_device: the device to check * * Return: true if the net device is a wext wireless device, false * otherwise. */ static bool batadv_is_wext_netdev(struct net_device *net_device) { if (!net_device) return false; #ifdef CONFIG_WIRELESS_EXT /* pre-cfg80211 drivers have to implement WEXT, so it is possible to * check for wireless_handlers != NULL */ if (net_device->wireless_handlers) return true; #endif return false; } /** * batadv_is_cfg80211_netdev() - check if the given net_device struct is a * cfg80211 wifi interface * @net_device: the device to check * * Return: true if the net device is a cfg80211 wireless device, false * otherwise. */ static bool batadv_is_cfg80211_netdev(struct net_device *net_device) { if (!net_device) return false; #if IS_ENABLED(CONFIG_CFG80211) /* cfg80211 drivers have to set ieee80211_ptr */ if (net_device->ieee80211_ptr) return true; #endif return false; } /** * batadv_wifi_flags_evaluate() - calculate wifi flags for net_device * @net_device: the device to check * * Return: batadv_hard_iface_wifi_flags flags of the device */ static u32 batadv_wifi_flags_evaluate(struct net_device *net_device) { u32 wifi_flags = 0; struct net_device *real_netdev; if (batadv_is_wext_netdev(net_device)) wifi_flags |= BATADV_HARDIF_WIFI_WEXT_DIRECT; if (batadv_is_cfg80211_netdev(net_device)) wifi_flags |= BATADV_HARDIF_WIFI_CFG80211_DIRECT; real_netdev = batadv_get_real_netdevice(net_device); if (!real_netdev) return wifi_flags; if (real_netdev == net_device) goto out; if (batadv_is_wext_netdev(real_netdev)) wifi_flags |= BATADV_HARDIF_WIFI_WEXT_INDIRECT; if (batadv_is_cfg80211_netdev(real_netdev)) wifi_flags |= BATADV_HARDIF_WIFI_CFG80211_INDIRECT; out: dev_put(real_netdev); return wifi_flags; } /** * batadv_is_cfg80211_hardif() - check if the given hardif is a cfg80211 wifi * interface * @hard_iface: the device to check * * Return: true if the net device is a cfg80211 wireless device, false * otherwise. 
*/ bool batadv_is_cfg80211_hardif(struct batadv_hard_iface *hard_iface) { u32 allowed_flags = 0; allowed_flags |= BATADV_HARDIF_WIFI_CFG80211_DIRECT; allowed_flags |= BATADV_HARDIF_WIFI_CFG80211_INDIRECT; return !!(hard_iface->wifi_flags & allowed_flags); } /** * batadv_is_wifi_hardif() - check if the given hardif is a wifi interface * @hard_iface: the device to check * * Return: true if the net device is a 802.11 wireless device, false otherwise. */ bool batadv_is_wifi_hardif(struct batadv_hard_iface *hard_iface) { if (!hard_iface) return false; return hard_iface->wifi_flags != 0; } /** * batadv_hardif_no_broadcast() - check whether (re)broadcast is necessary * @if_outgoing: the outgoing interface checked and considered for (re)broadcast * @orig_addr: the originator of this packet * @orig_neigh: originator address of the forwarder we just got the packet from * (NULL if we originated) * * Checks whether a packet needs to be (re)broadcasted on the given interface. * * Return: * BATADV_HARDIF_BCAST_NORECIPIENT: No neighbor on interface * BATADV_HARDIF_BCAST_DUPFWD: Just one neighbor, but it is the forwarder * BATADV_HARDIF_BCAST_DUPORIG: Just one neighbor, but it is the originator * BATADV_HARDIF_BCAST_OK: Several neighbors, must broadcast */ int batadv_hardif_no_broadcast(struct batadv_hard_iface *if_outgoing, u8 *orig_addr, u8 *orig_neigh) { struct batadv_hardif_neigh_node *hardif_neigh; struct hlist_node *first; int ret = BATADV_HARDIF_BCAST_OK; rcu_read_lock(); /* 0 neighbors -> no (re)broadcast */ first = rcu_dereference(hlist_first_rcu(&if_outgoing->neigh_list)); if (!first) { ret = BATADV_HARDIF_BCAST_NORECIPIENT; goto out; } /* >1 neighbors -> (re)broadcast */ if (rcu_dereference(hlist_next_rcu(first))) goto out; hardif_neigh = hlist_entry(first, struct batadv_hardif_neigh_node, list); /* 1 neighbor, is the originator -> no rebroadcast */ if (orig_addr && batadv_compare_eth(hardif_neigh->orig, orig_addr)) { ret = BATADV_HARDIF_BCAST_DUPORIG; /* 1 neighbor, is the one we received from -> no rebroadcast */ } else if (orig_neigh && batadv_compare_eth(hardif_neigh->orig, orig_neigh)) { ret = BATADV_HARDIF_BCAST_DUPFWD; } out: rcu_read_unlock(); return ret; } static struct batadv_hard_iface * batadv_hardif_get_active(struct net_device *mesh_iface) { struct batadv_hard_iface *hard_iface; struct list_head *iter; rcu_read_lock(); netdev_for_each_lower_private_rcu(mesh_iface, hard_iface, iter) { if (hard_iface->if_status == BATADV_IF_ACTIVE && kref_get_unless_zero(&hard_iface->refcount)) goto out; } hard_iface = NULL; out: rcu_read_unlock(); return hard_iface; } static void batadv_primary_if_update_addr(struct batadv_priv *bat_priv, struct batadv_hard_iface *oldif) { struct batadv_hard_iface *primary_if; primary_if = batadv_primary_if_get_selected(bat_priv); if (!primary_if) goto out; batadv_dat_init_own_addr(bat_priv, primary_if); batadv_bla_update_orig_address(bat_priv, primary_if, oldif); out: batadv_hardif_put(primary_if); } static void batadv_primary_if_select(struct batadv_priv *bat_priv, struct batadv_hard_iface *new_hard_iface) { struct batadv_hard_iface *curr_hard_iface; ASSERT_RTNL(); if (new_hard_iface) kref_get(&new_hard_iface->refcount); curr_hard_iface = rcu_replace_pointer(bat_priv->primary_if, new_hard_iface, 1); if (!new_hard_iface) goto out; bat_priv->algo_ops->iface.primary_set(new_hard_iface); batadv_primary_if_update_addr(bat_priv, curr_hard_iface); out: batadv_hardif_put(curr_hard_iface); } static bool batadv_hardif_is_iface_up(const struct batadv_hard_iface *hard_iface) 
{ if (hard_iface->net_dev->flags & IFF_UP) return true; return false; } static void batadv_check_known_mac_addr(const struct batadv_hard_iface *hard_iface) { struct net_device *mesh_iface = hard_iface->mesh_iface; const struct batadv_hard_iface *tmp_hard_iface; struct list_head *iter; if (!mesh_iface) return; netdev_for_each_lower_private(mesh_iface, tmp_hard_iface, iter) { if (tmp_hard_iface == hard_iface) continue; if (tmp_hard_iface->if_status == BATADV_IF_NOT_IN_USE) continue; if (!batadv_compare_eth(tmp_hard_iface->net_dev->dev_addr, hard_iface->net_dev->dev_addr)) continue; pr_warn("The newly added mac address (%pM) already exists on: %s\n", hard_iface->net_dev->dev_addr, tmp_hard_iface->net_dev->name); pr_warn("It is strongly recommended to keep mac addresses unique to avoid problems!\n"); } } /** * batadv_hardif_recalc_extra_skbroom() - Recalculate skbuff extra head/tailroom * @mesh_iface: netdev struct of the mesh interface */ static void batadv_hardif_recalc_extra_skbroom(struct net_device *mesh_iface) { const struct batadv_hard_iface *hard_iface; unsigned short lower_header_len = ETH_HLEN; unsigned short lower_headroom = 0; unsigned short lower_tailroom = 0; unsigned short needed_headroom; struct list_head *iter; rcu_read_lock(); netdev_for_each_lower_private_rcu(mesh_iface, hard_iface, iter) { if (hard_iface->if_status == BATADV_IF_NOT_IN_USE) continue; lower_header_len = max_t(unsigned short, lower_header_len, hard_iface->net_dev->hard_header_len); lower_headroom = max_t(unsigned short, lower_headroom, hard_iface->net_dev->needed_headroom); lower_tailroom = max_t(unsigned short, lower_tailroom, hard_iface->net_dev->needed_tailroom); } rcu_read_unlock(); needed_headroom = lower_headroom + (lower_header_len - ETH_HLEN); needed_headroom += batadv_max_header_len(); /* fragmentation headers don't strip the unicast/... header */ needed_headroom += sizeof(struct batadv_frag_packet); mesh_iface->needed_headroom = needed_headroom; mesh_iface->needed_tailroom = lower_tailroom; } /** * batadv_hardif_min_mtu() - Calculate maximum MTU for mesh interface * @mesh_iface: netdev struct of the mesh interface * * Return: MTU for the mesh-interface (limited by the minimal MTU of all active * slave interfaces) */ int batadv_hardif_min_mtu(struct net_device *mesh_iface) { struct batadv_priv *bat_priv = netdev_priv(mesh_iface); const struct batadv_hard_iface *hard_iface; struct list_head *iter; int min_mtu = INT_MAX; rcu_read_lock(); netdev_for_each_lower_private_rcu(mesh_iface, hard_iface, iter) { if (hard_iface->if_status != BATADV_IF_ACTIVE && hard_iface->if_status != BATADV_IF_TO_BE_ACTIVATED) continue; min_mtu = min_t(int, hard_iface->net_dev->mtu, min_mtu); } rcu_read_unlock(); if (atomic_read(&bat_priv->fragmentation) == 0) goto out; /* with fragmentation enabled the maximum size of internally generated * packets such as translation table exchanges or tvlv containers, etc * has to be calculated */ min_mtu = min_t(int, min_mtu, BATADV_FRAG_MAX_FRAG_SIZE); min_mtu -= sizeof(struct batadv_frag_packet); min_mtu *= BATADV_FRAG_MAX_FRAGMENTS; out: /* report to the other components the maximum amount of bytes that * batman-adv can send over the wire (without considering the payload * overhead). For example, this value is used by TT to compute the * maximum local table size */ atomic_set(&bat_priv->packet_size_max, min_mtu); /* the real mesh-interface MTU is computed by removing the payload * overhead from the maximum amount of bytes that was just computed. 
*/ return min_t(int, min_mtu - batadv_max_header_len(), BATADV_MAX_MTU); } /** * batadv_update_min_mtu() - Adjusts the MTU if a new interface with a smaller * MTU appeared * @mesh_iface: netdev struct of the mesh interface */ void batadv_update_min_mtu(struct net_device *mesh_iface) { struct batadv_priv *bat_priv = netdev_priv(mesh_iface); int limit_mtu; int mtu; mtu = batadv_hardif_min_mtu(mesh_iface); if (bat_priv->mtu_set_by_user) limit_mtu = bat_priv->mtu_set_by_user; else limit_mtu = ETH_DATA_LEN; mtu = min(mtu, limit_mtu); dev_set_mtu(mesh_iface, mtu); /* Check if the local translate table should be cleaned up to match a * new (and smaller) MTU. */ batadv_tt_local_resize_to_mtu(mesh_iface); } static void batadv_hardif_activate_interface(struct batadv_hard_iface *hard_iface) { struct batadv_priv *bat_priv; struct batadv_hard_iface *primary_if = NULL; if (hard_iface->if_status != BATADV_IF_INACTIVE) goto out; bat_priv = netdev_priv(hard_iface->mesh_iface); bat_priv->algo_ops->iface.update_mac(hard_iface); hard_iface->if_status = BATADV_IF_TO_BE_ACTIVATED; /* the first active interface becomes our primary interface or * the next active interface after the old primary interface was removed */ primary_if = batadv_primary_if_get_selected(bat_priv); if (!primary_if) batadv_primary_if_select(bat_priv, hard_iface); batadv_info(hard_iface->mesh_iface, "Interface activated: %s\n", hard_iface->net_dev->name); batadv_update_min_mtu(hard_iface->mesh_iface); if (bat_priv->algo_ops->iface.activate) bat_priv->algo_ops->iface.activate(hard_iface); out: batadv_hardif_put(primary_if); } static void batadv_hardif_deactivate_interface(struct batadv_hard_iface *hard_iface) { if (hard_iface->if_status != BATADV_IF_ACTIVE && hard_iface->if_status != BATADV_IF_TO_BE_ACTIVATED) return; hard_iface->if_status = BATADV_IF_INACTIVE; batadv_info(hard_iface->mesh_iface, "Interface deactivated: %s\n", hard_iface->net_dev->name); batadv_update_min_mtu(hard_iface->mesh_iface); } /** * batadv_hardif_enable_interface() - Enslave hard interface to mesh interface * @hard_iface: hard interface to add to mesh interface * @mesh_iface: netdev struct of the mesh interface * * Return: 0 on success or negative error number in case of failure */ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface, struct net_device *mesh_iface) { struct batadv_priv *bat_priv; __be16 ethertype = htons(ETH_P_BATMAN); int max_header_len = batadv_max_header_len(); unsigned int required_mtu; unsigned int hardif_mtu; int ret; hardif_mtu = READ_ONCE(hard_iface->net_dev->mtu); required_mtu = READ_ONCE(mesh_iface->mtu) + max_header_len; if (hardif_mtu < ETH_MIN_MTU + max_header_len) return -EINVAL; if (hard_iface->if_status != BATADV_IF_NOT_IN_USE) goto out; kref_get(&hard_iface->refcount); netdev_hold(mesh_iface, &hard_iface->meshif_dev_tracker, GFP_ATOMIC); hard_iface->mesh_iface = mesh_iface; bat_priv = netdev_priv(hard_iface->mesh_iface); ret = netdev_master_upper_dev_link(hard_iface->net_dev, mesh_iface, hard_iface, NULL, NULL); if (ret) goto err_dev; ret = bat_priv->algo_ops->iface.enable(hard_iface); if (ret < 0) goto err_upper; hard_iface->if_status = BATADV_IF_INACTIVE; kref_get(&hard_iface->refcount); hard_iface->batman_adv_ptype.type = ethertype; hard_iface->batman_adv_ptype.func = batadv_batman_skb_recv; hard_iface->batman_adv_ptype.dev = hard_iface->net_dev; dev_add_pack(&hard_iface->batman_adv_ptype); batadv_info(hard_iface->mesh_iface, "Adding interface: %s\n", hard_iface->net_dev->name); if 
(atomic_read(&bat_priv->fragmentation) && hardif_mtu < required_mtu) batadv_info(hard_iface->mesh_iface, "The MTU of interface %s is too small (%i) to handle the transport of batman-adv packets. Packets going over this interface will be fragmented on layer2 which could impact the performance. Setting the MTU to %i would solve the problem.\n", hard_iface->net_dev->name, hardif_mtu, required_mtu); if (!atomic_read(&bat_priv->fragmentation) && hardif_mtu < required_mtu) batadv_info(hard_iface->mesh_iface, "The MTU of interface %s is too small (%i) to handle the transport of batman-adv packets. If you experience problems getting traffic through try increasing the MTU to %i.\n", hard_iface->net_dev->name, hardif_mtu, required_mtu); batadv_check_known_mac_addr(hard_iface); if (batadv_hardif_is_iface_up(hard_iface)) batadv_hardif_activate_interface(hard_iface); else batadv_err(hard_iface->mesh_iface, "Not using interface %s (retrying later): interface not active\n", hard_iface->net_dev->name); batadv_hardif_recalc_extra_skbroom(mesh_iface); if (bat_priv->algo_ops->iface.enabled) bat_priv->algo_ops->iface.enabled(hard_iface); out: return 0; err_upper: netdev_upper_dev_unlink(hard_iface->net_dev, mesh_iface); err_dev: hard_iface->mesh_iface = NULL; netdev_put(mesh_iface, &hard_iface->meshif_dev_tracker); batadv_hardif_put(hard_iface); return ret; } /** * batadv_hardif_cnt() - get number of interfaces enslaved to mesh interface * @mesh_iface: mesh interface to check * * This function is only using RCU for locking - the result can therefore be * off when another function is modifying the list at the same time. The * caller can use the rtnl_lock to make sure that the count is accurate. * * Return: number of connected/enslaved hard interfaces */ static size_t batadv_hardif_cnt(struct net_device *mesh_iface) { struct batadv_hard_iface *hard_iface; struct list_head *iter; size_t count = 0; rcu_read_lock(); netdev_for_each_lower_private_rcu(mesh_iface, hard_iface, iter) count++; rcu_read_unlock(); return count; } /** * batadv_hardif_disable_interface() - Remove hard interface from mesh interface * @hard_iface: hard interface to be removed */ void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface) { struct batadv_priv *bat_priv = netdev_priv(hard_iface->mesh_iface); struct batadv_hard_iface *primary_if = NULL; batadv_hardif_deactivate_interface(hard_iface); if (hard_iface->if_status != BATADV_IF_INACTIVE) goto out; batadv_info(hard_iface->mesh_iface, "Removing interface: %s\n", hard_iface->net_dev->name); dev_remove_pack(&hard_iface->batman_adv_ptype); batadv_hardif_put(hard_iface); primary_if = batadv_primary_if_get_selected(bat_priv); if (hard_iface == primary_if) { struct batadv_hard_iface *new_if; new_if = batadv_hardif_get_active(hard_iface->mesh_iface); batadv_primary_if_select(bat_priv, new_if); batadv_hardif_put(new_if); } bat_priv->algo_ops->iface.disable(hard_iface); hard_iface->if_status = BATADV_IF_NOT_IN_USE; /* delete all references to this hard_iface */ batadv_purge_orig_ref(bat_priv); batadv_purge_outstanding_packets(bat_priv, hard_iface); netdev_put(hard_iface->mesh_iface, &hard_iface->meshif_dev_tracker); netdev_upper_dev_unlink(hard_iface->net_dev, hard_iface->mesh_iface); batadv_hardif_recalc_extra_skbroom(hard_iface->mesh_iface); /* nobody uses this interface anymore */ if (batadv_hardif_cnt(hard_iface->mesh_iface) <= 1) batadv_gw_check_client_stop(bat_priv); hard_iface->mesh_iface = NULL; batadv_hardif_put(hard_iface); out: batadv_hardif_put(primary_if); } static 
struct batadv_hard_iface * batadv_hardif_add_interface(struct net_device *net_dev) { struct batadv_hard_iface *hard_iface; ASSERT_RTNL(); if (!batadv_is_valid_iface(net_dev)) return NULL; hard_iface = kzalloc(sizeof(*hard_iface), GFP_ATOMIC); if (!hard_iface) return NULL; netdev_hold(net_dev, &hard_iface->dev_tracker, GFP_ATOMIC); hard_iface->net_dev = net_dev; hard_iface->mesh_iface = NULL; hard_iface->if_status = BATADV_IF_NOT_IN_USE; INIT_LIST_HEAD(&hard_iface->list); INIT_HLIST_HEAD(&hard_iface->neigh_list); mutex_init(&hard_iface->bat_iv.ogm_buff_mutex); spin_lock_init(&hard_iface->neigh_list_lock); kref_init(&hard_iface->refcount); hard_iface->num_bcasts = BATADV_NUM_BCASTS_DEFAULT; hard_iface->wifi_flags = batadv_wifi_flags_evaluate(net_dev); if (batadv_is_wifi_hardif(hard_iface)) hard_iface->num_bcasts = BATADV_NUM_BCASTS_WIRELESS; atomic_set(&hard_iface->hop_penalty, 0); batadv_v_hardif_init(hard_iface); kref_get(&hard_iface->refcount); list_add_tail_rcu(&hard_iface->list, &batadv_hardif_list); batadv_hardif_generation++; return hard_iface; } static void batadv_hardif_remove_interface(struct batadv_hard_iface *hard_iface) { ASSERT_RTNL(); /* first deactivate interface */ if (hard_iface->if_status != BATADV_IF_NOT_IN_USE) batadv_hardif_disable_interface(hard_iface); if (hard_iface->if_status != BATADV_IF_NOT_IN_USE) return; hard_iface->if_status = BATADV_IF_TO_BE_REMOVED; batadv_hardif_put(hard_iface); } /** * batadv_hard_if_event_meshif() - Handle events for mesh interfaces * @event: NETDEV_* event to handle * @net_dev: net_device which generated an event * * Return: NOTIFY_* result */ static int batadv_hard_if_event_meshif(unsigned long event, struct net_device *net_dev) { struct batadv_priv *bat_priv; switch (event) { case NETDEV_REGISTER: bat_priv = netdev_priv(net_dev); batadv_meshif_create_vlan(bat_priv, BATADV_NO_FLAGS); break; } return NOTIFY_DONE; } static int batadv_hard_if_event(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *net_dev = netdev_notifier_info_to_dev(ptr); struct batadv_hard_iface *hard_iface; struct batadv_hard_iface *primary_if = NULL; struct batadv_priv *bat_priv; if (batadv_meshif_is_valid(net_dev)) return batadv_hard_if_event_meshif(event, net_dev); hard_iface = batadv_hardif_get_by_netdev(net_dev); if (!hard_iface && (event == NETDEV_REGISTER || event == NETDEV_POST_TYPE_CHANGE)) hard_iface = batadv_hardif_add_interface(net_dev); if (!hard_iface) goto out; switch (event) { case NETDEV_UP: batadv_hardif_activate_interface(hard_iface); break; case NETDEV_GOING_DOWN: case NETDEV_DOWN: batadv_hardif_deactivate_interface(hard_iface); break; case NETDEV_UNREGISTER: case NETDEV_PRE_TYPE_CHANGE: list_del_rcu(&hard_iface->list); batadv_hardif_generation++; batadv_hardif_remove_interface(hard_iface); break; case NETDEV_CHANGEMTU: if (hard_iface->mesh_iface) batadv_update_min_mtu(hard_iface->mesh_iface); break; case NETDEV_CHANGEADDR: if (hard_iface->if_status == BATADV_IF_NOT_IN_USE) goto hardif_put; batadv_check_known_mac_addr(hard_iface); bat_priv = netdev_priv(hard_iface->mesh_iface); bat_priv->algo_ops->iface.update_mac(hard_iface); primary_if = batadv_primary_if_get_selected(bat_priv); if (!primary_if) goto hardif_put; if (hard_iface == primary_if) batadv_primary_if_update_addr(bat_priv, NULL); break; case NETDEV_CHANGEUPPER: hard_iface->wifi_flags = batadv_wifi_flags_evaluate(net_dev); if (batadv_is_wifi_hardif(hard_iface)) hard_iface->num_bcasts = BATADV_NUM_BCASTS_WIRELESS; break; default: break; } hardif_put: 
batadv_hardif_put(hard_iface); out: batadv_hardif_put(primary_if); return NOTIFY_DONE; } struct notifier_block batadv_hard_if_notifier = { .notifier_call = batadv_hard_if_event, };
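/*
 * Editor's note: a minimal sketch, not part of batman-adv, of the same
 * netdevice-notifier pattern that batadv_hard_if_notifier uses above.
 * All demo_* names are hypothetical; the notifier API calls are the
 * standard ones from <linux/netdevice.h>. Registered this way, the
 * callback sees the same NETDEV_* events batadv_hard_if_event()
 * dispatches on.
 */
#include <linux/module.h>
#include <linux/netdevice.h>

static int demo_netdev_event(struct notifier_block *nb,
			     unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_UP:
		pr_info("demo: %s is up\n", dev->name);
		break;
	case NETDEV_GOING_DOWN:
	case NETDEV_DOWN:
		pr_info("demo: %s going down\n", dev->name);
		break;
	default:
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block demo_netdev_notifier = {
	.notifier_call = demo_netdev_event,
};

static int __init demo_init(void)
{
	return register_netdevice_notifier(&demo_netdev_notifier);
}

static void __exit demo_exit(void)
{
	unregister_netdevice_notifier(&demo_netdev_notifier);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");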
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/*
 * Helper functions for BLAKE2b implementations.
 * Keep this in sync with the corresponding BLAKE2s header.
 */

#ifndef _CRYPTO_INTERNAL_BLAKE2B_H
#define _CRYPTO_INTERNAL_BLAKE2B_H

#include <asm/byteorder.h>
#include <crypto/blake2b.h>
#include <crypto/internal/hash.h>
#include <linux/array_size.h>
#include <linux/compiler.h>
#include <linux/build_bug.h>
#include <linux/errno.h>
#include <linux/math.h>
#include <linux/string.h>
#include <linux/types.h>

static inline void blake2b_set_lastblock(struct blake2b_state *state)
{
	state->f[0] = -1;
	state->f[1] = 0;
}

static inline void blake2b_set_nonlast(struct blake2b_state *state)
{
	state->f[0] = 0;
	state->f[1] = 0;
}

typedef void (*blake2b_compress_t)(struct blake2b_state *state,
				   const u8 *block, size_t nblocks, u32 inc);

/* Helper functions for shash implementations of BLAKE2b */

struct blake2b_tfm_ctx {
	u8 key[BLAKE2B_BLOCK_SIZE];
	unsigned int keylen;
};

static inline int crypto_blake2b_setkey(struct crypto_shash *tfm,
					const u8 *key, unsigned int keylen)
{
	struct blake2b_tfm_ctx *tctx = crypto_shash_ctx(tfm);

	if (keylen > BLAKE2B_KEY_SIZE)
		return -EINVAL;

	BUILD_BUG_ON(BLAKE2B_KEY_SIZE > BLAKE2B_BLOCK_SIZE);

	memcpy(tctx->key, key, keylen);
	memset(tctx->key + keylen, 0, BLAKE2B_BLOCK_SIZE - keylen);
	tctx->keylen = keylen;

	return 0;
}

static inline int crypto_blake2b_init(struct shash_desc *desc)
{
	const struct blake2b_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
	struct blake2b_state *state = shash_desc_ctx(desc);
	unsigned int outlen = crypto_shash_digestsize(desc->tfm);

	__blake2b_init(state, outlen, tctx->keylen);
	return tctx->keylen ?
	       crypto_shash_update(desc, tctx->key, BLAKE2B_BLOCK_SIZE) : 0;
}

static inline int crypto_blake2b_update_bo(struct shash_desc *desc,
					   const u8 *in, unsigned int inlen,
					   blake2b_compress_t compress)
{
	struct blake2b_state *state = shash_desc_ctx(desc);

	blake2b_set_nonlast(state);
	compress(state, in, inlen / BLAKE2B_BLOCK_SIZE, BLAKE2B_BLOCK_SIZE);
	return inlen - round_down(inlen, BLAKE2B_BLOCK_SIZE);
}

static inline int crypto_blake2b_finup(struct shash_desc *desc, const u8 *in,
				       unsigned int inlen, u8 *out,
				       blake2b_compress_t compress)
{
	struct blake2b_state *state = shash_desc_ctx(desc);
	u8 buf[BLAKE2B_BLOCK_SIZE];
	int i;

	memcpy(buf, in, inlen);
	memset(buf + inlen, 0, BLAKE2B_BLOCK_SIZE - inlen);
	blake2b_set_lastblock(state);
	compress(state, buf, 1, inlen);
	for (i = 0; i < ARRAY_SIZE(state->h); i++)
		__cpu_to_le64s(&state->h[i]);
	memcpy(out, state->h, crypto_shash_digestsize(desc->tfm));
	memzero_explicit(buf, sizeof(buf));
	return 0;
}

#endif /* _CRYPTO_INTERNAL_BLAKE2B_H */
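/*
 * Editor's note: an illustrative sketch (not part of this header) of
 * computing a keyed BLAKE2b-256 digest through the shash API that the
 * helpers above back. It assumes the generic blake2b-256 algorithm is
 * available (CONFIG_CRYPTO_BLAKE2B); the demo_* name is hypothetical
 * and error handling is abbreviated.
 */
#include <crypto/hash.h>
#include <linux/err.h>

static int demo_blake2b_digest(const u8 *key, unsigned int keylen,
			       const u8 *data, unsigned int len,
			       u8 out[32])
{
	struct crypto_shash *tfm;
	int err;

	tfm = crypto_alloc_shash("blake2b-256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_shash_setkey(tfm, key, keylen);
	if (!err) {
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		err = crypto_shash_digest(desc, data, len, out);
		shash_desc_zero(desc);
	}

	crypto_free_shash(tfm);
	return err;
}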
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Network event notifiers
 *
 *	Authors:
 *      Tom Tucker             <tom@opengridcomputing.com>
 *      Steve Wise             <swise@opengridcomputing.com>
 *
 *	Fixes:
 */

#include <linux/rtnetlink.h>
#include <linux/notifier.h>
#include <linux/export.h>
#include <net/netevent.h>

static ATOMIC_NOTIFIER_HEAD(netevent_notif_chain);

/**
 * register_netevent_notifier - register a netevent notifier block
 * @nb: notifier
 *
 * Register a notifier to be called when a netevent occurs.
 * The notifier passed is linked into the kernel structures and must
 * not be reused until it has been unregistered. A negative errno code
 * is returned on a failure.
 */
int register_netevent_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&netevent_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(register_netevent_notifier);

/**
 * unregister_netevent_notifier - unregister a netevent notifier block
 * @nb: notifier
 *
 * Unregister a notifier previously registered by
 * register_netevent_notifier(). The notifier is unlinked from the
 * kernel structures and may then be reused. A negative errno code
 * is returned on a failure.
 */
int unregister_netevent_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&netevent_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_netevent_notifier);

/**
 * call_netevent_notifiers - call all netevent notifier blocks
 * @val: value passed unmodified to notifier function
 * @v: pointer passed unmodified to notifier function
 *
 * Call all netevent notifier blocks. Parameters and return value
 * are as for notifier_call_chain().
 */
int call_netevent_notifiers(unsigned long val, void *v)
{
	return atomic_notifier_call_chain(&netevent_notif_chain, val, v);
}
EXPORT_SYMBOL_GPL(call_netevent_notifiers);
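/*
 * Editor's note: an illustrative sketch (not part of this file) of a
 * consumer of the netevent chain, in the style of the RDMA and
 * switchdev drivers that subscribe to it. The demo_* names are
 * hypothetical; NETEVENT_NEIGH_UPDATE delivers a struct neighbour
 * pointer as the notifier payload.
 */
#include <linux/module.h>
#include <net/netevent.h>
#include <net/neighbour.h>

static int demo_netevent(struct notifier_block *nb,
			 unsigned long event, void *ctx)
{
	struct neighbour *neigh;

	switch (event) {
	case NETEVENT_NEIGH_UPDATE:
		neigh = ctx;
		pr_info("demo: neighbour update on %s\n",
			neigh->dev ? neigh->dev->name : "?");
		break;
	default:
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block demo_nb = {
	.notifier_call = demo_netevent,
};

static int __init demo_netevent_init(void)
{
	return register_netevent_notifier(&demo_nb);
}

static void __exit demo_netevent_exit(void)
{
	unregister_netevent_notifier(&demo_nb);
}

module_init(demo_netevent_init);
module_exit(demo_netevent_exit);
MODULE_LICENSE("GPL");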
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic INET6 transport hashtables
 *
 * Authors:	Lotsa people, from code originally in tcp, generalised here
 *		by Arnaldo Carvalho de Melo <acme@mandriva.com>
 */

#include <linux/module.h>
#include <linux/random.h>

#include <net/addrconf.h>
#include <net/hotdata.h>
#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/inet6_hashtables.h>
#include <net/secure_seq.h>
#include <net/ip.h>
#include <net/sock_reuseport.h>
#include <net/tcp.h>

u32 inet6_ehashfn(const struct net *net,
		  const struct in6_addr *laddr, const u16 lport,
		  const struct in6_addr *faddr, const __be16 fport)
{
	u32 lhash, fhash;

	net_get_random_once(&inet6_ehash_secret, sizeof(inet6_ehash_secret));
	net_get_random_once(&tcp_ipv6_hash_secret, sizeof(tcp_ipv6_hash_secret));

	lhash = (__force u32)laddr->s6_addr32[3];
	fhash = __ipv6_addr_jhash(faddr, tcp_ipv6_hash_secret);

	return lport + __inet6_ehashfn(lhash, 0, fhash, fport,
				       inet6_ehash_secret + net_hash_mix(net));
}
EXPORT_SYMBOL_GPL(inet6_ehashfn);

/*
 * Sockets in TCP_CLOSE state are _always_ taken out of the hash, so
 * we need not check it for TCP lookups anymore, thanks Alexey. -DaveM
 *
 * The sockhash lock must be held as a reader here.
 */
struct sock *__inet6_lookup_established(const struct net *net,
					struct inet_hashinfo *hashinfo,
					const struct in6_addr *saddr,
					const __be16 sport,
					const struct in6_addr *daddr,
					const u16 hnum,
					const int dif, const int sdif)
{
	struct sock *sk;
	const struct hlist_nulls_node *node;
	const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
	/* Optimize here for direct hit, only listening connections can
	 * have wildcards anyways.
*/ unsigned int hash = inet6_ehashfn(net, daddr, hnum, saddr, sport); unsigned int slot = hash & hashinfo->ehash_mask; struct inet_ehash_bucket *head = &hashinfo->ehash[slot]; begin: sk_nulls_for_each_rcu(sk, node, &head->chain) { if (sk->sk_hash != hash) continue; if (!inet6_match(net, sk, saddr, daddr, ports, dif, sdif)) continue; if (unlikely(!refcount_inc_not_zero(&sk->sk_refcnt))) goto out; if (unlikely(!inet6_match(net, sk, saddr, daddr, ports, dif, sdif))) { sock_gen_put(sk); goto begin; } goto found; } if (get_nulls_value(node) != slot) goto begin; out: sk = NULL; found: return sk; } EXPORT_SYMBOL(__inet6_lookup_established); static inline int compute_score(struct sock *sk, const struct net *net, const unsigned short hnum, const struct in6_addr *daddr, const int dif, const int sdif) { int score = -1; if (net_eq(sock_net(sk), net) && inet_sk(sk)->inet_num == hnum && sk->sk_family == PF_INET6) { if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr)) return -1; if (!inet_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif)) return -1; score = sk->sk_bound_dev_if ? 2 : 1; if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id()) score++; } return score; } /** * inet6_lookup_reuseport() - execute reuseport logic on AF_INET6 socket if necessary. * @net: network namespace. * @sk: AF_INET6 socket, must be in TCP_LISTEN state for TCP or TCP_CLOSE for UDP. * @skb: context for a potential SK_REUSEPORT program. * @doff: header offset. * @saddr: source address. * @sport: source port. * @daddr: destination address. * @hnum: destination port in host byte order. * @ehashfn: hash function used to generate the fallback hash. * * Return: NULL if sk doesn't have SO_REUSEPORT set, otherwise a pointer to * the selected sock or an error. */ struct sock *inet6_lookup_reuseport(const struct net *net, struct sock *sk, struct sk_buff *skb, int doff, const struct in6_addr *saddr, __be16 sport, const struct in6_addr *daddr, unsigned short hnum, inet6_ehashfn_t *ehashfn) { struct sock *reuse_sk = NULL; u32 phash; if (sk->sk_reuseport) { phash = INDIRECT_CALL_INET(ehashfn, udp6_ehashfn, inet6_ehashfn, net, daddr, hnum, saddr, sport); reuse_sk = reuseport_select_sock(sk, phash, skb, doff); } return reuse_sk; } EXPORT_SYMBOL_GPL(inet6_lookup_reuseport); /* called with rcu_read_lock() */ static struct sock *inet6_lhash2_lookup(const struct net *net, struct inet_listen_hashbucket *ilb2, struct sk_buff *skb, int doff, const struct in6_addr *saddr, const __be16 sport, const struct in6_addr *daddr, const unsigned short hnum, const int dif, const int sdif) { struct sock *sk, *result = NULL; struct hlist_nulls_node *node; int score, hiscore = 0; sk_nulls_for_each_rcu(sk, node, &ilb2->nulls_head) { score = compute_score(sk, net, hnum, daddr, dif, sdif); if (score > hiscore) { result = inet6_lookup_reuseport(net, sk, skb, doff, saddr, sport, daddr, hnum, inet6_ehashfn); if (result) return result; result = sk; hiscore = score; } } return result; } struct sock *inet6_lookup_run_sk_lookup(const struct net *net, int protocol, struct sk_buff *skb, int doff, const struct in6_addr *saddr, const __be16 sport, const struct in6_addr *daddr, const u16 hnum, const int dif, inet6_ehashfn_t *ehashfn) { struct sock *sk, *reuse_sk; bool no_reuseport; no_reuseport = bpf_sk_lookup_run_v6(net, protocol, saddr, sport, daddr, hnum, dif, &sk); if (no_reuseport || IS_ERR_OR_NULL(sk)) return sk; reuse_sk = inet6_lookup_reuseport(net, sk, skb, doff, saddr, sport, daddr, hnum, ehashfn); if (reuse_sk) sk = reuse_sk; return sk; } 
EXPORT_SYMBOL_GPL(inet6_lookup_run_sk_lookup); struct sock *inet6_lookup_listener(const struct net *net, struct inet_hashinfo *hashinfo, struct sk_buff *skb, int doff, const struct in6_addr *saddr, const __be16 sport, const struct in6_addr *daddr, const unsigned short hnum, const int dif, const int sdif) { struct inet_listen_hashbucket *ilb2; struct sock *result = NULL; unsigned int hash2; /* Lookup redirect from BPF */ if (static_branch_unlikely(&bpf_sk_lookup_enabled) && hashinfo == net->ipv4.tcp_death_row.hashinfo) { result = inet6_lookup_run_sk_lookup(net, IPPROTO_TCP, skb, doff, saddr, sport, daddr, hnum, dif, inet6_ehashfn); if (result) goto done; } hash2 = ipv6_portaddr_hash(net, daddr, hnum); ilb2 = inet_lhash2_bucket(hashinfo, hash2); result = inet6_lhash2_lookup(net, ilb2, skb, doff, saddr, sport, daddr, hnum, dif, sdif); if (result) goto done; /* Lookup lhash2 with in6addr_any */ hash2 = ipv6_portaddr_hash(net, &in6addr_any, hnum); ilb2 = inet_lhash2_bucket(hashinfo, hash2); result = inet6_lhash2_lookup(net, ilb2, skb, doff, saddr, sport, &in6addr_any, hnum, dif, sdif); done: if (IS_ERR(result)) return NULL; return result; } EXPORT_SYMBOL_GPL(inet6_lookup_listener); struct sock *inet6_lookup(const struct net *net, struct inet_hashinfo *hashinfo, struct sk_buff *skb, int doff, const struct in6_addr *saddr, const __be16 sport, const struct in6_addr *daddr, const __be16 dport, const int dif) { struct sock *sk; bool refcounted; sk = __inet6_lookup(net, hashinfo, skb, doff, saddr, sport, daddr, ntohs(dport), dif, 0, &refcounted); if (sk && !refcounted && !refcount_inc_not_zero(&sk->sk_refcnt)) sk = NULL; return sk; } EXPORT_SYMBOL_GPL(inet6_lookup); static int __inet6_check_established(struct inet_timewait_death_row *death_row, struct sock *sk, const __u16 lport, struct inet_timewait_sock **twp, bool rcu_lookup, u32 hash) { struct inet_hashinfo *hinfo = death_row->hashinfo; struct inet_sock *inet = inet_sk(sk); const struct in6_addr *daddr = &sk->sk_v6_rcv_saddr; const struct in6_addr *saddr = &sk->sk_v6_daddr; const int dif = sk->sk_bound_dev_if; struct net *net = sock_net(sk); const int sdif = l3mdev_master_ifindex_by_index(net, dif); const __portpair ports = INET_COMBINED_PORTS(inet->inet_dport, lport); struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash); struct inet_timewait_sock *tw = NULL; const struct hlist_nulls_node *node; struct sock *sk2; spinlock_t *lock; if (rcu_lookup) { sk_nulls_for_each(sk2, node, &head->chain) { if (sk2->sk_hash != hash || !inet6_match(net, sk2, saddr, daddr, ports, dif, sdif)) continue; if (sk2->sk_state == TCP_TIME_WAIT) break; return -EADDRNOTAVAIL; } return 0; } lock = inet_ehash_lockp(hinfo, hash); spin_lock(lock); sk_nulls_for_each(sk2, node, &head->chain) { if (sk2->sk_hash != hash) continue; if (likely(inet6_match(net, sk2, saddr, daddr, ports, dif, sdif))) { if (sk2->sk_state == TCP_TIME_WAIT) { tw = inet_twsk(sk2); if (sk->sk_protocol == IPPROTO_TCP && tcp_twsk_unique(sk, sk2, twp)) break; } goto not_unique; } } /* Must record num and sport now. Otherwise we will see * in hash table socket with a funny identity. */ inet->inet_num = lport; inet->inet_sport = htons(lport); sk->sk_hash = hash; WARN_ON(!sk_unhashed(sk)); __sk_nulls_add_node_rcu(sk, &head->chain); if (tw) { sk_nulls_del_node_init_rcu((struct sock *)tw); __NET_INC_STATS(net, LINUX_MIB_TIMEWAITRECYCLED); } spin_unlock(lock); sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); if (twp) { *twp = tw; } else if (tw) { /* Silly. Should hash-dance instead... 
*/ inet_twsk_deschedule_put(tw); } return 0; not_unique: spin_unlock(lock); return -EADDRNOTAVAIL; } static u64 inet6_sk_port_offset(const struct sock *sk) { const struct inet_sock *inet = inet_sk(sk); return secure_ipv6_port_ephemeral(sk->sk_v6_rcv_saddr.s6_addr32, sk->sk_v6_daddr.s6_addr32, inet->inet_dport); } int inet6_hash_connect(struct inet_timewait_death_row *death_row, struct sock *sk) { const struct in6_addr *daddr = &sk->sk_v6_rcv_saddr; const struct in6_addr *saddr = &sk->sk_v6_daddr; const struct inet_sock *inet = inet_sk(sk); const struct net *net = sock_net(sk); u64 port_offset = 0; u32 hash_port0; if (!inet_sk(sk)->inet_num) port_offset = inet6_sk_port_offset(sk); hash_port0 = inet6_ehashfn(net, daddr, 0, saddr, inet->inet_dport); return __inet_hash_connect(death_row, sk, port_offset, hash_port0, __inet6_check_established); } EXPORT_SYMBOL_GPL(inet6_hash_connect); int inet6_hash(struct sock *sk) { int err = 0; if (sk->sk_state != TCP_CLOSE) err = __inet_hash(sk, NULL); return err; } EXPORT_SYMBOL_GPL(inet6_hash);
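/*
 * Editor's note: an illustrative sketch (not part of this file) of how
 * a caller could resolve a TCP socket for an incoming IPv6 segment via
 * the exported inet6_lookup() above, roughly mirroring what
 * tcp_v6_rcv() does through __inet6_lookup_skb(). The demo_* name is
 * hypothetical, and passing the TCP header length as the doff argument
 * is an assumption based on the TCP receive path. The returned sock
 * carries a reference that the caller must drop with sock_put().
 */
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <net/inet6_hashtables.h>
#include <net/tcp.h>

static void demo_find_tcp6_sock(struct net *net, struct sk_buff *skb,
				const struct ipv6hdr *ip6h,
				const struct tcphdr *th, int dif)
{
	struct sock *sk;

	sk = inet6_lookup(net, net->ipv4.tcp_death_row.hashinfo, skb,
			  __tcp_hdrlen(th),
			  &ip6h->saddr, th->source,
			  &ip6h->daddr, th->dest, dif);
	if (!sk)
		return;

	pr_info("demo: matched sock in state %d\n", sk->sk_state);
	sock_put(sk);	/* inet6_lookup() took a reference for us */
}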
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SQ905 subdriver
 *
 * Copyright (C) 2008, 2009 Adam Baker and Theodore Kilgore
 */

/*
 * History and Acknowledgments
 *
 * The original Linux driver for SQ905 based cameras was written by
 * Marcell Lengyel and further developed by many other contributors
 * and is available from http://sourceforge.net/projects/sqcam/
 *
 * This driver takes advantage of the reverse engineering work done for
 * that driver and for libgphoto2 but shares no code with them.
 *
 * This driver has used as a base the finepix driver and other gspca
 * based drivers and may still contain code fragments taken from those
 * drivers.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define MODULE_NAME "sq905"

#include <linux/workqueue.h>
#include <linux/slab.h>
#include "gspca.h"

MODULE_AUTHOR("Adam Baker <linux@baker-net.org.uk>, Theodore Kilgore <kilgota@auburn.edu>");
MODULE_DESCRIPTION("GSPCA/SQ905 USB Camera Driver");
MODULE_LICENSE("GPL");

/* Default timeouts, in ms */
#define SQ905_CMD_TIMEOUT 500
#define SQ905_DATA_TIMEOUT 1000

/* Maximum transfer size to use. */
#define SQ905_MAX_TRANSFER 0x8000
#define FRAME_HEADER_LEN 64

/* The known modes, or registers. These go in the "value" slot. */
/* 00 is "none" obviously */
#define SQ905_BULK_READ	0x03	/* precedes any bulk read */
#define SQ905_COMMAND	0x06	/* precedes the command codes below */
#define SQ905_PING	0x07	/* when reading an "idling" command */
#define SQ905_READ_DONE 0xc0	/* ack bulk read completed */

/* Any non-zero value in the bottom 2 bits of the 2nd byte of
 * the ID appears to indicate the camera can do 640*480. If the
 * LSB of that byte is set the image is just upside down, otherwise
 * it is rotated 180 degrees. */
#define SQ905_HIRES_MASK	0x00000300
#define SQ905_ORIENTATION_MASK	0x00000100

/* Some command codes. These go in the "index" slot.
*/ #define SQ905_ID 0xf0 /* asks for model string */ #define SQ905_CONFIG 0x20 /* gets photo alloc. table, not used here */ #define SQ905_DATA 0x30 /* accesses photo data, not used here */ #define SQ905_CLEAR 0xa0 /* clear everything */ #define SQ905_CAPTURE_LOW 0x60 /* Starts capture at 160x120 */ #define SQ905_CAPTURE_MED 0x61 /* Starts capture at 320x240 */ #define SQ905_CAPTURE_HIGH 0x62 /* Starts capture at 640x480 (some cams only) */ /* note that the capture command also controls the output dimensions */ /* Structure to hold all of our device specific stuff */ struct sd { struct gspca_dev gspca_dev; /* !! must be the first item */ /* * Driver stuff */ struct work_struct work_struct; struct workqueue_struct *work_thread; }; static struct v4l2_pix_format sq905_mode[] = { { 160, 120, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, .bytesperline = 160, .sizeimage = 160 * 120, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 0}, { 320, 240, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, .bytesperline = 320, .sizeimage = 320 * 240, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 0}, { 640, 480, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE, .bytesperline = 640, .sizeimage = 640 * 480, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 0} }; /* * Send a command to the camera. */ static int sq905_command(struct gspca_dev *gspca_dev, u16 index) { int ret; gspca_dev->usb_buf[0] = '\0'; ret = usb_control_msg(gspca_dev->dev, usb_sndctrlpipe(gspca_dev->dev, 0), USB_REQ_SYNCH_FRAME, /* request */ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, SQ905_COMMAND, index, gspca_dev->usb_buf, 1, SQ905_CMD_TIMEOUT); if (ret < 0) { pr_err("%s: usb_control_msg failed (%d)\n", __func__, ret); return ret; } ret = usb_control_msg(gspca_dev->dev, usb_rcvctrlpipe(gspca_dev->dev, 0), USB_REQ_SYNCH_FRAME, /* request */ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, SQ905_PING, 0, gspca_dev->usb_buf, 1, SQ905_CMD_TIMEOUT); if (ret < 0) { pr_err("%s: usb_control_msg failed 2 (%d)\n", __func__, ret); return ret; } return 0; } /* * Acknowledge the end of a frame - see warning on sq905_command. */ static int sq905_ack_frame(struct gspca_dev *gspca_dev) { int ret; gspca_dev->usb_buf[0] = '\0'; ret = usb_control_msg(gspca_dev->dev, usb_sndctrlpipe(gspca_dev->dev, 0), USB_REQ_SYNCH_FRAME, /* request */ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, SQ905_READ_DONE, 0, gspca_dev->usb_buf, 1, SQ905_CMD_TIMEOUT); if (ret < 0) { pr_err("%s: usb_control_msg failed (%d)\n", __func__, ret); return ret; } return 0; } /* * request and read a block of data - see warning on sq905_command. */ static int sq905_read_data(struct gspca_dev *gspca_dev, u8 *data, int size, int need_lock) { int ret; int act_len = 0; gspca_dev->usb_buf[0] = '\0'; if (need_lock) mutex_lock(&gspca_dev->usb_lock); ret = usb_control_msg(gspca_dev->dev, usb_sndctrlpipe(gspca_dev->dev, 0), USB_REQ_SYNCH_FRAME, /* request */ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, SQ905_BULK_READ, size, gspca_dev->usb_buf, 1, SQ905_CMD_TIMEOUT); if (need_lock) mutex_unlock(&gspca_dev->usb_lock); if (ret < 0) { pr_err("%s: usb_control_msg failed (%d)\n", __func__, ret); return ret; } ret = usb_bulk_msg(gspca_dev->dev, usb_rcvbulkpipe(gspca_dev->dev, 0x81), data, size, &act_len, SQ905_DATA_TIMEOUT); /* successful, it returns 0, otherwise negative */ if (ret < 0 || act_len != size) { pr_err("bulk read fail (%d) len %d/%d\n", ret, act_len, size); return -EIO; } return 0; } /* * This function is called as a workqueue function and runs whenever the camera * is streaming data. 
Because it is a workqueue function it is allowed to sleep * so we can use synchronous USB calls. To avoid possible collisions with other * threads attempting to use gspca_dev->usb_buf we take the usb_lock when * performing USB operations using it. In practice we don't really need this * as the camera doesn't provide any controls. */ static void sq905_dostream(struct work_struct *work) { struct sd *dev = container_of(work, struct sd, work_struct); struct gspca_dev *gspca_dev = &dev->gspca_dev; int bytes_left; /* bytes remaining in current frame. */ int data_len; /* size to use for the next read. */ int header_read; /* true if we have already read the frame header. */ int packet_type; int frame_sz; int ret; u8 *data; u8 *buffer; buffer = kmalloc(SQ905_MAX_TRANSFER, GFP_KERNEL); if (!buffer) { pr_err("Couldn't allocate USB buffer\n"); goto quit_stream; } frame_sz = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].sizeimage + FRAME_HEADER_LEN; while (gspca_dev->present && gspca_dev->streaming) { #ifdef CONFIG_PM if (gspca_dev->frozen) break; #endif /* request some data and then read it until we have * a complete frame. */ bytes_left = frame_sz; header_read = 0; /* Note we do not check for gspca_dev->streaming here, as we must finish reading an entire frame, otherwise the next time we stream we start reading in the middle of a frame. */ while (bytes_left > 0 && gspca_dev->present) { data_len = bytes_left > SQ905_MAX_TRANSFER ? SQ905_MAX_TRANSFER : bytes_left; ret = sq905_read_data(gspca_dev, buffer, data_len, 1); if (ret < 0) goto quit_stream; gspca_dbg(gspca_dev, D_PACK, "Got %d bytes out of %d for frame\n", data_len, bytes_left); bytes_left -= data_len; data = buffer; if (!header_read) { packet_type = FIRST_PACKET; /* The first 64 bytes of each frame are * a header full of FF 00 bytes */ data += FRAME_HEADER_LEN; data_len -= FRAME_HEADER_LEN; header_read = 1; } else if (bytes_left == 0) { packet_type = LAST_PACKET; } else { packet_type = INTER_PACKET; } gspca_frame_add(gspca_dev, packet_type, data, data_len); /* If entire frame fits in one packet we still need to add a LAST_PACKET */ if (packet_type == FIRST_PACKET && bytes_left == 0) gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0); } if (gspca_dev->present) { /* acknowledge the frame */ mutex_lock(&gspca_dev->usb_lock); ret = sq905_ack_frame(gspca_dev); mutex_unlock(&gspca_dev->usb_lock); if (ret < 0) goto quit_stream; } } quit_stream: if (gspca_dev->present) { mutex_lock(&gspca_dev->usb_lock); sq905_command(gspca_dev, SQ905_CLEAR); mutex_unlock(&gspca_dev->usb_lock); } kfree(buffer); } /* This function is called at probe time just before sd_init */ static int sd_config(struct gspca_dev *gspca_dev, const struct usb_device_id *id) { struct cam *cam = &gspca_dev->cam; struct sd *dev = (struct sd *) gspca_dev; /* We don't use the buffer gspca allocates so make it small. 
*/ cam->bulk = 1; cam->bulk_size = 64; INIT_WORK(&dev->work_struct, sq905_dostream); return 0; } /* called on streamoff with alt==0 and on disconnect */ /* the usb_lock is held at entry - restore on exit */ static void sd_stop0(struct gspca_dev *gspca_dev) { struct sd *dev = (struct sd *) gspca_dev; /* wait for the work queue to terminate */ mutex_unlock(&gspca_dev->usb_lock); /* This waits for sq905_dostream to finish */ destroy_workqueue(dev->work_thread); dev->work_thread = NULL; mutex_lock(&gspca_dev->usb_lock); } /* this function is called at probe and resume time */ static int sd_init(struct gspca_dev *gspca_dev) { u32 ident; int ret; /* connect to the camera and read * the model ID and process that and put it away. */ ret = sq905_command(gspca_dev, SQ905_CLEAR); if (ret < 0) return ret; ret = sq905_command(gspca_dev, SQ905_ID); if (ret < 0) return ret; ret = sq905_read_data(gspca_dev, gspca_dev->usb_buf, 4, 0); if (ret < 0) return ret; /* usb_buf is allocated with kmalloc so is aligned. * Camera model number is the right way round if we assume this * reverse engineered ID is supposed to be big endian. */ ident = be32_to_cpup((__be32 *)gspca_dev->usb_buf); ret = sq905_command(gspca_dev, SQ905_CLEAR); if (ret < 0) return ret; gspca_dbg(gspca_dev, D_CONF, "SQ905 camera ID %08x detected\n", ident); gspca_dev->cam.cam_mode = sq905_mode; gspca_dev->cam.nmodes = ARRAY_SIZE(sq905_mode); if (!(ident & SQ905_HIRES_MASK)) gspca_dev->cam.nmodes--; if (ident & SQ905_ORIENTATION_MASK) gspca_dev->cam.input_flags = V4L2_IN_ST_VFLIP; else gspca_dev->cam.input_flags = V4L2_IN_ST_VFLIP | V4L2_IN_ST_HFLIP; return 0; } /* Set up for getting frames. */ static int sd_start(struct gspca_dev *gspca_dev) { struct sd *dev = (struct sd *) gspca_dev; int ret; /* "Open the shutter" and set size, to start capture */ switch (gspca_dev->curr_mode) { default: /* case 2: */ gspca_dbg(gspca_dev, D_STREAM, "Start streaming at high resolution\n"); ret = sq905_command(&dev->gspca_dev, SQ905_CAPTURE_HIGH); break; case 1: gspca_dbg(gspca_dev, D_STREAM, "Start streaming at medium resolution\n"); ret = sq905_command(&dev->gspca_dev, SQ905_CAPTURE_MED); break; case 0: gspca_dbg(gspca_dev, D_STREAM, "Start streaming at low resolution\n"); ret = sq905_command(&dev->gspca_dev, SQ905_CAPTURE_LOW); } if (ret < 0) { gspca_err(gspca_dev, "Start streaming command failed\n"); return ret; } /* Start the workqueue function to do the streaming */ dev->work_thread = create_singlethread_workqueue(MODULE_NAME); if (!dev->work_thread) return -ENOMEM; queue_work(dev->work_thread, &dev->work_struct); return 0; } /* Table of supported USB devices */ static const struct usb_device_id device_table[] = { {USB_DEVICE(0x2770, 0x9120)}, {} }; MODULE_DEVICE_TABLE(usb, device_table); /* sub-driver description */ static const struct sd_desc sd_desc = { .name = MODULE_NAME, .config = sd_config, .init = sd_init, .start = sd_start, .stop0 = sd_stop0, }; /* -- device connect -- */ static int sd_probe(struct usb_interface *intf, const struct usb_device_id *id) { return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd), THIS_MODULE); } static struct usb_driver sd_driver = { .name = MODULE_NAME, .id_table = device_table, .probe = sd_probe, .disconnect = gspca_disconnect, #ifdef CONFIG_PM .suspend = gspca_suspend, .resume = gspca_resume, .reset_resume = gspca_resume, #endif }; module_usb_driver(sd_driver);
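/*
 * Editor's note: an illustrative sketch (not part of the driver) of the
 * framing arithmetic sq905_dostream() relies on: each frame is the raw
 * Bayer payload plus a 64-byte header, fetched in bulk reads of at most
 * SQ905_MAX_TRANSFER bytes. For the 320x240 mode above that is
 * 320 * 240 + 64 = 76864 bytes, i.e. two full 0x8000-byte reads plus a
 * short final read of 11328 bytes. The demo_* helpers are hypothetical
 * and assume the driver's #defines above are in scope.
 */
#include <linux/videodev2.h>

static int demo_sq905_frame_bytes(const struct v4l2_pix_format *fmt)
{
	/* raw sensor payload plus the FF 00 ... frame header */
	return fmt->sizeimage + FRAME_HEADER_LEN;
}

static int demo_sq905_next_read_len(int bytes_left)
{
	/* never request more than one bulk buffer at a time */
	return bytes_left > SQ905_MAX_TRANSFER ? SQ905_MAX_TRANSFER
					       : bytes_left;
}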
// SPDX-License-Identifier: GPL-2.0
/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>

#include <asm/ucontext.h>
#include <asm/fpu/signal.h>
#include <asm/sighandling.h>
#include <asm/syscall.h>
#include <asm/sigframe.h>
#include <asm/signal.h>

/*
 * If regs->ss will cause an IRET fault, change it.  Otherwise leave it
 * alone.  Using this generally makes no sense unless
 * user_64bit_mode(regs) would return true.
 */
static void force_valid_ss(struct pt_regs *regs)
{
	u32 ar;

	asm volatile ("lar %[old_ss], %[ar]\n\t"
		      "jz 1f\n\t"		/* If invalid: */
		      "xorl %[ar], %[ar]\n\t"	/* set ar = 0 */
		      "1:"
		      : [ar] "=r" (ar)
		      : [old_ss] "rm" ((u16)regs->ss));

	/*
	 * For a valid 64-bit user context, we need DPL 3, type
	 * read-write data or read-write exp-down data, and S and P
	 * set.  We can't use VERW because VERW doesn't check the
	 * P bit.
*/ ar &= AR_DPL_MASK | AR_S | AR_P | AR_TYPE_MASK; if (ar != (AR_DPL3 | AR_S | AR_P | AR_TYPE_RWDATA) && ar != (AR_DPL3 | AR_S | AR_P | AR_TYPE_RWDATA_EXPDOWN)) regs->ss = __USER_DS; } static bool restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *usc, unsigned long uc_flags) { struct sigcontext sc; /* Always make any pending restarted system calls return -EINTR */ current->restart_block.fn = do_no_restart_syscall; if (copy_from_user(&sc, usc, offsetof(struct sigcontext, reserved1))) return false; regs->bx = sc.bx; regs->cx = sc.cx; regs->dx = sc.dx; regs->si = sc.si; regs->di = sc.di; regs->bp = sc.bp; regs->ax = sc.ax; regs->sp = sc.sp; regs->ip = sc.ip; regs->r8 = sc.r8; regs->r9 = sc.r9; regs->r10 = sc.r10; regs->r11 = sc.r11; regs->r12 = sc.r12; regs->r13 = sc.r13; regs->r14 = sc.r14; regs->r15 = sc.r15; /* Get CS/SS and force CPL3 */ regs->cs = sc.cs | 0x03; regs->ss = sc.ss | 0x03; regs->flags = (regs->flags & ~FIX_EFLAGS) | (sc.flags & FIX_EFLAGS); /* disable syscall checks */ regs->orig_ax = -1; /* * Fix up SS if needed for the benefit of old DOSEMU and * CRIU. */ if (unlikely(!(uc_flags & UC_STRICT_RESTORE_SS) && user_64bit_mode(regs))) force_valid_ss(regs); return fpu__restore_sig((void __user *)sc.fpstate, 0); } static __always_inline int __unsafe_setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate, struct pt_regs *regs, unsigned long mask) { unsafe_put_user(regs->di, &sc->di, Efault); unsafe_put_user(regs->si, &sc->si, Efault); unsafe_put_user(regs->bp, &sc->bp, Efault); unsafe_put_user(regs->sp, &sc->sp, Efault); unsafe_put_user(regs->bx, &sc->bx, Efault); unsafe_put_user(regs->dx, &sc->dx, Efault); unsafe_put_user(regs->cx, &sc->cx, Efault); unsafe_put_user(regs->ax, &sc->ax, Efault); unsafe_put_user(regs->r8, &sc->r8, Efault); unsafe_put_user(regs->r9, &sc->r9, Efault); unsafe_put_user(regs->r10, &sc->r10, Efault); unsafe_put_user(regs->r11, &sc->r11, Efault); unsafe_put_user(regs->r12, &sc->r12, Efault); unsafe_put_user(regs->r13, &sc->r13, Efault); unsafe_put_user(regs->r14, &sc->r14, Efault); unsafe_put_user(regs->r15, &sc->r15, Efault); unsafe_put_user(current->thread.trap_nr, &sc->trapno, Efault); unsafe_put_user(current->thread.error_code, &sc->err, Efault); unsafe_put_user(regs->ip, &sc->ip, Efault); unsafe_put_user(regs->flags, &sc->flags, Efault); unsafe_put_user(regs->cs, &sc->cs, Efault); unsafe_put_user(0, &sc->gs, Efault); unsafe_put_user(0, &sc->fs, Efault); unsafe_put_user(regs->ss, &sc->ss, Efault); unsafe_put_user(fpstate, (unsigned long __user *)&sc->fpstate, Efault); /* non-iBCS2 extensions.. */ unsafe_put_user(mask, &sc->oldmask, Efault); unsafe_put_user(current->thread.cr2, &sc->cr2, Efault); return 0; Efault: return -EFAULT; } #define unsafe_put_sigcontext(sc, fp, regs, set, label) \ do { \ if (__unsafe_setup_sigcontext(sc, fp, regs, set->sig[0])) \ goto label; \ } while(0); #define unsafe_put_sigmask(set, frame, label) \ unsafe_put_user(*(__u64 *)(set), \ (__u64 __user *)&(frame)->uc.uc_sigmask, \ label) static unsigned long frame_uc_flags(struct pt_regs *regs) { unsigned long flags; if (boot_cpu_has(X86_FEATURE_XSAVE)) flags = UC_FP_XSTATE | UC_SIGCONTEXT_SS; else flags = UC_SIGCONTEXT_SS; if (likely(user_64bit_mode(regs))) flags |= UC_STRICT_RESTORE_SS; return flags; } int x64_setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs) { sigset_t *set = sigmask_to_save(); struct rt_sigframe __user *frame; void __user *fp = NULL; unsigned long uc_flags; /* x86-64 should always use SA_RESTORER. 
*/ if (!(ksig->ka.sa.sa_flags & SA_RESTORER)) return -EFAULT; frame = get_sigframe(ksig, regs, sizeof(struct rt_sigframe), &fp); uc_flags = frame_uc_flags(regs); if (!user_access_begin(frame, sizeof(*frame))) return -EFAULT; /* Create the ucontext. */ unsafe_put_user(uc_flags, &frame->uc.uc_flags, Efault); unsafe_put_user(0, &frame->uc.uc_link, Efault); unsafe_save_altstack(&frame->uc.uc_stack, regs->sp, Efault); /* Set up to return from userspace. If provided, use a stub already in userspace. */ unsafe_put_user(ksig->ka.sa.sa_restorer, &frame->pretcode, Efault); unsafe_put_sigcontext(&frame->uc.uc_mcontext, fp, regs, set, Efault); unsafe_put_sigmask(set, frame, Efault); user_access_end(); if (ksig->ka.sa.sa_flags & SA_SIGINFO) { if (copy_siginfo_to_user(&frame->info, &ksig->info)) return -EFAULT; } if (setup_signal_shadow_stack(ksig)) return -EFAULT; /* Set up registers for signal handler */ regs->di = ksig->sig; /* In case the signal handler was declared without prototypes */ regs->ax = 0; /* This also works for non SA_SIGINFO handlers because they expect the next argument after the signal number on the stack. */ regs->si = (unsigned long)&frame->info; regs->dx = (unsigned long)&frame->uc; regs->ip = (unsigned long) ksig->ka.sa.sa_handler; regs->sp = (unsigned long)frame; /* * Set up the CS and SS registers to run signal handlers in * 64-bit mode, even if the handler happens to be interrupting * 32-bit or 16-bit code. * * SS is subtle. In 64-bit mode, we don't need any particular * SS descriptor, but we do need SS to be valid. It's possible * that the old SS is entirely bogus -- this can happen if the * signal we're trying to deliver is #GP or #SS caused by a bad * SS value. We also have a compatibility issue here: DOSEMU * relies on the contents of the SS register indicating the * SS value at the time of the signal, even though that code in * DOSEMU predates sigreturn's ability to restore SS. (DOSEMU * avoids relying on sigreturn to restore SS; instead it uses * a trampoline.) So we do our best: if the old SS was valid, * we keep it. Otherwise we replace it. */ regs->cs = __USER_CS; if (unlikely(regs->ss != __USER_DS)) force_valid_ss(regs); return 0; Efault: user_access_end(); return -EFAULT; } /* * Do a signal return; undo the signal stack. 
*/ SYSCALL_DEFINE0(rt_sigreturn) { struct pt_regs *regs = current_pt_regs(); struct rt_sigframe __user *frame; sigset_t set; unsigned long uc_flags; prevent_single_step_upon_eretu(regs); frame = (struct rt_sigframe __user *)(regs->sp - sizeof(long)); if (!access_ok(frame, sizeof(*frame))) goto badframe; if (__get_user(*(__u64 *)&set, (__u64 __user *)&frame->uc.uc_sigmask)) goto badframe; if (__get_user(uc_flags, &frame->uc.uc_flags)) goto badframe; set_current_blocked(&set); if (restore_altstack(&frame->uc.uc_stack)) goto badframe; if (!restore_sigcontext(regs, &frame->uc.uc_mcontext, uc_flags)) goto badframe; if (restore_signal_shadow_stack()) goto badframe; return regs->ax; badframe: signal_fault(regs, frame, "rt_sigreturn"); return 0; } #ifdef CONFIG_X86_X32_ABI static int x32_copy_siginfo_to_user(struct compat_siginfo __user *to, const struct kernel_siginfo *from) { struct compat_siginfo new; copy_siginfo_to_external32(&new, from); if (from->si_signo == SIGCHLD) { new._sifields._sigchld_x32._utime = from->si_utime; new._sifields._sigchld_x32._stime = from->si_stime; } if (copy_to_user(to, &new, sizeof(struct compat_siginfo))) return -EFAULT; return 0; } int copy_siginfo_to_user32(struct compat_siginfo __user *to, const struct kernel_siginfo *from) { if (in_x32_syscall()) return x32_copy_siginfo_to_user(to, from); return __copy_siginfo_to_user32(to, from); } int x32_setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs) { compat_sigset_t *set = (compat_sigset_t *) sigmask_to_save(); struct rt_sigframe_x32 __user *frame; unsigned long uc_flags; void __user *restorer; void __user *fp = NULL; if (!(ksig->ka.sa.sa_flags & SA_RESTORER)) return -EFAULT; frame = get_sigframe(ksig, regs, sizeof(*frame), &fp); uc_flags = frame_uc_flags(regs); if (setup_signal_shadow_stack(ksig)) return -EFAULT; if (!user_access_begin(frame, sizeof(*frame))) return -EFAULT; /* Create the ucontext. */ unsafe_put_user(uc_flags, &frame->uc.uc_flags, Efault); unsafe_put_user(0, &frame->uc.uc_link, Efault); unsafe_compat_save_altstack(&frame->uc.uc_stack, regs->sp, Efault); unsafe_put_user(0, &frame->uc.uc__pad0, Efault); restorer = ksig->ka.sa.sa_restorer; unsafe_put_user(restorer, (unsigned long __user *)&frame->pretcode, Efault); unsafe_put_sigcontext(&frame->uc.uc_mcontext, fp, regs, set, Efault); unsafe_put_sigmask(set, frame, Efault); user_access_end(); if (ksig->ka.sa.sa_flags & SA_SIGINFO) { if (x32_copy_siginfo_to_user(&frame->info, &ksig->info)) return -EFAULT; } /* Set up registers for signal handler */ regs->sp = (unsigned long) frame; regs->ip = (unsigned long) ksig->ka.sa.sa_handler; /* We use the x32 calling convention here... 
*/ regs->di = ksig->sig; regs->si = (unsigned long) &frame->info; regs->dx = (unsigned long) &frame->uc; loadsegment(ds, __USER_DS); loadsegment(es, __USER_DS); regs->cs = __USER_CS; regs->ss = __USER_DS; return 0; Efault: user_access_end(); return -EFAULT; } COMPAT_SYSCALL_DEFINE0(x32_rt_sigreturn) { struct pt_regs *regs = current_pt_regs(); struct rt_sigframe_x32 __user *frame; sigset_t set; unsigned long uc_flags; prevent_single_step_upon_eretu(regs); frame = (struct rt_sigframe_x32 __user *)(regs->sp - 8); if (!access_ok(frame, sizeof(*frame))) goto badframe; if (__get_user(set.sig[0], (__u64 __user *)&frame->uc.uc_sigmask)) goto badframe; if (__get_user(uc_flags, &frame->uc.uc_flags)) goto badframe; set_current_blocked(&set); if (!restore_sigcontext(regs, &frame->uc.uc_mcontext, uc_flags)) goto badframe; if (restore_signal_shadow_stack()) goto badframe; if (compat_restore_altstack(&frame->uc.uc_stack)) goto badframe; return regs->ax; badframe: signal_fault(regs, frame, "x32 rt_sigreturn"); return 0; } #endif /* CONFIG_X86_X32_ABI */ #ifdef CONFIG_COMPAT void sigaction_compat_abi(struct k_sigaction *act, struct k_sigaction *oact) { if (!act) return; if (in_ia32_syscall()) act->sa.sa_flags |= SA_IA32_ABI; if (in_x32_syscall()) act->sa.sa_flags |= SA_X32_ABI; } #endif /* CONFIG_COMPAT */ /* * If adding a new si_code, there is probably new data in * the siginfo. Make sure folks bumping the si_code * limits also have to look at this code. Make sure any * new fields are handled in copy_siginfo_to_user32()! */ static_assert(NSIGILL == 11); static_assert(NSIGFPE == 15); static_assert(NSIGSEGV == 10); static_assert(NSIGBUS == 5); static_assert(NSIGTRAP == 6); static_assert(NSIGCHLD == 6); static_assert(NSIGSYS == 2); /* This is part of the ABI and can never change in size: */ static_assert(sizeof(siginfo_t) == 128); /* This is a part of the ABI and can never change in alignment */ static_assert(__alignof__(siginfo_t) == 8); /* * The offsets of all the (unioned) si_fields are fixed * in the ABI, of course. Make sure none of them ever * move and are always at the beginning: */ static_assert(offsetof(siginfo_t, si_signo) == 0); static_assert(offsetof(siginfo_t, si_errno) == 4); static_assert(offsetof(siginfo_t, si_code) == 8); /* * Ensure that the size of each si_field never changes. * If it does, it is a sign that the * copy_siginfo_to_user32() code below needs to updated * along with the size in the CHECK_SI_SIZE(). * * We repeat this check for both the generic and compat * siginfos. * * Note: it is OK for these to grow as long as the whole * structure stays within the padding size (checked * above). 
*/ #define CHECK_SI_OFFSET(name) \ static_assert(offsetof(siginfo_t, _sifields) == \ offsetof(siginfo_t, _sifields.name)) #define CHECK_SI_SIZE(name, size) \ static_assert(sizeof_field(siginfo_t, _sifields.name) == size) CHECK_SI_OFFSET(_kill); CHECK_SI_SIZE (_kill, 2*sizeof(int)); static_assert(offsetof(siginfo_t, si_pid) == 0x10); static_assert(offsetof(siginfo_t, si_uid) == 0x14); CHECK_SI_OFFSET(_timer); CHECK_SI_SIZE (_timer, 6*sizeof(int)); static_assert(offsetof(siginfo_t, si_tid) == 0x10); static_assert(offsetof(siginfo_t, si_overrun) == 0x14); static_assert(offsetof(siginfo_t, si_value) == 0x18); CHECK_SI_OFFSET(_rt); CHECK_SI_SIZE (_rt, 4*sizeof(int)); static_assert(offsetof(siginfo_t, si_pid) == 0x10); static_assert(offsetof(siginfo_t, si_uid) == 0x14); static_assert(offsetof(siginfo_t, si_value) == 0x18); CHECK_SI_OFFSET(_sigchld); CHECK_SI_SIZE (_sigchld, 8*sizeof(int)); static_assert(offsetof(siginfo_t, si_pid) == 0x10); static_assert(offsetof(siginfo_t, si_uid) == 0x14); static_assert(offsetof(siginfo_t, si_status) == 0x18); static_assert(offsetof(siginfo_t, si_utime) == 0x20); static_assert(offsetof(siginfo_t, si_stime) == 0x28); #ifdef CONFIG_X86_X32_ABI /* no _sigchld_x32 in the generic siginfo_t */ static_assert(sizeof_field(compat_siginfo_t, _sifields._sigchld_x32) == 7*sizeof(int)); static_assert(offsetof(compat_siginfo_t, _sifields) == offsetof(compat_siginfo_t, _sifields._sigchld_x32)); static_assert(offsetof(compat_siginfo_t, _sifields._sigchld_x32._utime) == 0x18); static_assert(offsetof(compat_siginfo_t, _sifields._sigchld_x32._stime) == 0x20); #endif CHECK_SI_OFFSET(_sigfault); CHECK_SI_SIZE (_sigfault, 8*sizeof(int)); static_assert(offsetof(siginfo_t, si_addr) == 0x10); static_assert(offsetof(siginfo_t, si_trapno) == 0x18); static_assert(offsetof(siginfo_t, si_addr_lsb) == 0x18); static_assert(offsetof(siginfo_t, si_lower) == 0x20); static_assert(offsetof(siginfo_t, si_upper) == 0x28); static_assert(offsetof(siginfo_t, si_pkey) == 0x20); static_assert(offsetof(siginfo_t, si_perf_data) == 0x18); static_assert(offsetof(siginfo_t, si_perf_type) == 0x20); static_assert(offsetof(siginfo_t, si_perf_flags) == 0x24); CHECK_SI_OFFSET(_sigpoll); CHECK_SI_SIZE (_sigpoll, 4*sizeof(int)); static_assert(offsetof(siginfo_t, si_band) == 0x10); static_assert(offsetof(siginfo_t, si_fd) == 0x18); CHECK_SI_OFFSET(_sigsys); CHECK_SI_SIZE (_sigsys, 4*sizeof(int)); static_assert(offsetof(siginfo_t, si_call_addr) == 0x10); static_assert(offsetof(siginfo_t, si_syscall) == 0x18); static_assert(offsetof(siginfo_t, si_arch) == 0x1C); /* any new si_fields should be added here */
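/*
 * (Editor's illustration, not part of the kernel listing above.) The
 * frame built by x64_setup_rt_frame() is what makes the familiar
 * three-argument SA_SIGINFO handler work: the kernel loads the signal
 * number into %rdi (regs->di), &frame->info into %rsi (regs->si) and
 * &frame->uc into %rdx (regs->dx), and the SA_RESTORER trampoline that
 * libc installs calls rt_sigreturn when the handler returns. A minimal
 * userspace sketch, assuming glibc; the function and variable names
 * below are arbitrary examples, not kernel symbols.
 */
#define _POSIX_C_SOURCE 200809L
#include <signal.h>
#include <unistd.h>

static void demo_handler(int sig, siginfo_t *info, void *uc)
{
	/* sig/info/uc arrive via %rdi/%rsi/%rdx as set up above. */
	(void)sig; (void)info; (void)uc;
	/* Only async-signal-safe calls belong here, hence write(). */
	write(STDOUT_FILENO, "caught\n", 7);
}

int main(void)
{
	struct sigaction sa = { 0 };

	sa.sa_sigaction = demo_handler;
	sa.sa_flags = SA_SIGINFO;	/* request the 3-argument form */
	sigemptyset(&sa.sa_mask);
	if (sigaction(SIGUSR1, &sa, NULL))
		return 1;
	raise(SIGUSR1);	/* handler return unwinds via rt_sigreturn */
	return 0;
}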
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB /* * Copyright (c) 2019 Mellanox Technologies. All rights reserved.
*/ #include <rdma/ib_verbs.h> #include <rdma/rdma_counter.h> #include "core_priv.h" #include "restrack.h" #define ALL_AUTO_MODE_MASKS (RDMA_COUNTER_MASK_QP_TYPE | RDMA_COUNTER_MASK_PID) static int __counter_set_mode(struct rdma_port_counter *port_counter, enum rdma_nl_counter_mode new_mode, enum rdma_nl_counter_mask new_mask, bool bind_opcnt) { if (new_mode == RDMA_COUNTER_MODE_AUTO) { if (new_mask & (~ALL_AUTO_MODE_MASKS)) return -EINVAL; if (port_counter->num_counters) return -EBUSY; } port_counter->mode.mode = new_mode; port_counter->mode.mask = new_mask; port_counter->mode.bind_opcnt = bind_opcnt; return 0; } /* * rdma_counter_set_auto_mode() - Turn on/off per-port auto mode * * @dev: Device to operate * @port: Port to use * @mask: Mask to configure * @extack: Message to the user * * Return 0 on success. If counter mode wasn't changed then it is considered * as success as well. * Return -EBUSY when changing to auto mode while there are bounded counters. * */ int rdma_counter_set_auto_mode(struct ib_device *dev, u32 port, enum rdma_nl_counter_mask mask, bool bind_opcnt, struct netlink_ext_ack *extack) { struct rdma_port_counter *port_counter; enum rdma_nl_counter_mode mode; int ret; port_counter = &dev->port_data[port].port_counter; if (!port_counter->hstats) return -EOPNOTSUPP; mutex_lock(&port_counter->lock); if (mask) mode = RDMA_COUNTER_MODE_AUTO; else mode = (port_counter->num_counters) ? RDMA_COUNTER_MODE_MANUAL : RDMA_COUNTER_MODE_NONE; if (port_counter->mode.mode == mode && port_counter->mode.mask == mask && port_counter->mode.bind_opcnt == bind_opcnt) { ret = 0; goto out; } ret = __counter_set_mode(port_counter, mode, mask, bind_opcnt); out: mutex_unlock(&port_counter->lock); if (ret == -EBUSY) NL_SET_ERR_MSG( extack, "Modifying auto mode is not allowed when there is a bound QP"); return ret; } static void auto_mode_init_counter(struct rdma_counter *counter, const struct ib_qp *qp, enum rdma_nl_counter_mask new_mask) { struct auto_mode_param *param = &counter->mode.param; counter->mode.mode = RDMA_COUNTER_MODE_AUTO; counter->mode.mask = new_mask; if (new_mask & RDMA_COUNTER_MASK_QP_TYPE) param->qp_type = qp->qp_type; } static int __rdma_counter_bind_qp(struct rdma_counter *counter, struct ib_qp *qp, u32 port) { int ret; if (qp->counter) return -EINVAL; if (!qp->device->ops.counter_bind_qp) return -EOPNOTSUPP; mutex_lock(&counter->lock); ret = qp->device->ops.counter_bind_qp(counter, qp, port); mutex_unlock(&counter->lock); return ret; } int rdma_counter_modify(struct ib_device *dev, u32 port, unsigned int index, bool enable) { struct rdma_hw_stats *stats; int ret = 0; if (!dev->ops.modify_hw_stat) return -EOPNOTSUPP; stats = ib_get_hw_stats_port(dev, port); if (!stats || index >= stats->num_counters || !(stats->descs[index].flags & IB_STAT_FLAG_OPTIONAL)) return -EINVAL; mutex_lock(&stats->lock); if (enable != test_bit(index, stats->is_disabled)) goto out; ret = dev->ops.modify_hw_stat(dev, port, index, enable); if (ret) goto out; if (enable) clear_bit(index, stats->is_disabled); else set_bit(index, stats->is_disabled); out: mutex_unlock(&stats->lock); return ret; } static struct rdma_counter *alloc_and_bind(struct ib_device *dev, u32 port, struct ib_qp *qp, enum rdma_nl_counter_mode mode, bool bind_opcnt) { struct rdma_port_counter *port_counter; struct rdma_counter *counter; int ret; if (!dev->ops.counter_dealloc || !dev->ops.counter_alloc_stats) return NULL; counter = rdma_zalloc_drv_obj(dev, rdma_counter); if (!counter) return NULL; counter->device = dev; counter->port = 
port; dev->ops.counter_init(counter); rdma_restrack_new(&counter->res, RDMA_RESTRACK_COUNTER); counter->stats = dev->ops.counter_alloc_stats(counter); if (!counter->stats) goto err_stats; port_counter = &dev->port_data[port].port_counter; mutex_lock(&port_counter->lock); switch (mode) { case RDMA_COUNTER_MODE_MANUAL: ret = __counter_set_mode(port_counter, RDMA_COUNTER_MODE_MANUAL, 0, bind_opcnt); if (ret) { mutex_unlock(&port_counter->lock); goto err_mode; } break; case RDMA_COUNTER_MODE_AUTO: auto_mode_init_counter(counter, qp, port_counter->mode.mask); break; default: ret = -EOPNOTSUPP; mutex_unlock(&port_counter->lock); goto err_mode; } port_counter->num_counters++; mutex_unlock(&port_counter->lock); counter->mode.mode = mode; counter->mode.bind_opcnt = bind_opcnt; kref_init(&counter->kref); mutex_init(&counter->lock); ret = __rdma_counter_bind_qp(counter, qp, port); if (ret) goto err_mode; rdma_restrack_parent_name(&counter->res, &qp->res); rdma_restrack_add(&counter->res); return counter; err_mode: rdma_free_hw_stats_struct(counter->stats); err_stats: rdma_restrack_put(&counter->res); kfree(counter); return NULL; } static void rdma_counter_free(struct rdma_counter *counter) { struct rdma_port_counter *port_counter; port_counter = &counter->device->port_data[counter->port].port_counter; mutex_lock(&port_counter->lock); port_counter->num_counters--; if (!port_counter->num_counters && (port_counter->mode.mode == RDMA_COUNTER_MODE_MANUAL)) __counter_set_mode(port_counter, RDMA_COUNTER_MODE_NONE, 0, false); mutex_unlock(&port_counter->lock); rdma_restrack_del(&counter->res); rdma_free_hw_stats_struct(counter->stats); kfree(counter); } static bool auto_mode_match(struct ib_qp *qp, struct rdma_counter *counter, enum rdma_nl_counter_mask auto_mask) { struct auto_mode_param *param = &counter->mode.param; bool match = true; if (auto_mask & RDMA_COUNTER_MASK_QP_TYPE) match &= (param->qp_type == qp->qp_type); if (auto_mask & RDMA_COUNTER_MASK_PID) match &= (task_pid_nr(counter->res.task) == task_pid_nr(qp->res.task)); return match; } static int __rdma_counter_unbind_qp(struct ib_qp *qp, u32 port) { struct rdma_counter *counter = qp->counter; int ret; if (!qp->device->ops.counter_unbind_qp) return -EOPNOTSUPP; mutex_lock(&counter->lock); ret = qp->device->ops.counter_unbind_qp(qp, port); mutex_unlock(&counter->lock); return ret; } static void counter_history_stat_update(struct rdma_counter *counter) { struct ib_device *dev = counter->device; struct rdma_port_counter *port_counter; int i; port_counter = &dev->port_data[counter->port].port_counter; if (!port_counter->hstats) return; rdma_counter_query_stats(counter); for (i = 0; i < counter->stats->num_counters; i++) port_counter->hstats->value[i] += counter->stats->value[i]; } /* * rdma_get_counter_auto_mode - Find the counter that @qp should be bound * with in auto mode * * Return: The counter (with ref-count increased) if found */ static struct rdma_counter *rdma_get_counter_auto_mode(struct ib_qp *qp, u32 port) { struct rdma_port_counter *port_counter; struct rdma_counter *counter = NULL; struct ib_device *dev = qp->device; struct rdma_restrack_entry *res; struct rdma_restrack_root *rt; unsigned long id = 0; port_counter = &dev->port_data[port].port_counter; rt = &dev->res[RDMA_RESTRACK_COUNTER]; xa_lock(&rt->xa); xa_for_each(&rt->xa, id, res) { counter = container_of(res, struct rdma_counter, res); if ((counter->device != qp->device) || (counter->port != port)) goto next; if (auto_mode_match(qp, counter, port_counter->mode.mask)) break; next: 
counter = NULL; } if (counter && !kref_get_unless_zero(&counter->kref)) counter = NULL; xa_unlock(&rt->xa); return counter; } static void counter_release(struct kref *kref) { struct rdma_counter *counter; counter = container_of(kref, struct rdma_counter, kref); counter_history_stat_update(counter); counter->device->ops.counter_dealloc(counter); rdma_counter_free(counter); } /* * rdma_counter_bind_qp_auto - Check and bind the QP to a counter base on * the auto-mode rule */ int rdma_counter_bind_qp_auto(struct ib_qp *qp, u32 port) { struct rdma_port_counter *port_counter; struct ib_device *dev = qp->device; struct rdma_counter *counter; int ret; if (!rdma_restrack_is_tracked(&qp->res) || rdma_is_kernel_res(&qp->res)) return 0; if (!rdma_is_port_valid(dev, port)) return -EINVAL; port_counter = &dev->port_data[port].port_counter; if (port_counter->mode.mode != RDMA_COUNTER_MODE_AUTO) return 0; counter = rdma_get_counter_auto_mode(qp, port); if (counter) { ret = __rdma_counter_bind_qp(counter, qp, port); if (ret) { kref_put(&counter->kref, counter_release); return ret; } } else { counter = alloc_and_bind(dev, port, qp, RDMA_COUNTER_MODE_AUTO, port_counter->mode.bind_opcnt); if (!counter) return -ENOMEM; } return 0; } /* * rdma_counter_unbind_qp - Unbind a qp from a counter * @force: * true - Decrease the counter ref-count anyway (e.g., qp destroy) */ int rdma_counter_unbind_qp(struct ib_qp *qp, u32 port, bool force) { struct rdma_counter *counter = qp->counter; int ret; if (!counter) return -EINVAL; ret = __rdma_counter_unbind_qp(qp, port); if (ret && !force) return ret; kref_put(&counter->kref, counter_release); return 0; } int rdma_counter_query_stats(struct rdma_counter *counter) { struct ib_device *dev = counter->device; int ret; if (!dev->ops.counter_update_stats) return -EINVAL; mutex_lock(&counter->lock); ret = dev->ops.counter_update_stats(counter); mutex_unlock(&counter->lock); return ret; } static u64 get_running_counters_hwstat_sum(struct ib_device *dev, u32 port, u32 index) { struct rdma_restrack_entry *res; struct rdma_restrack_root *rt; struct rdma_counter *counter; unsigned long id = 0; u64 sum = 0; rt = &dev->res[RDMA_RESTRACK_COUNTER]; xa_lock(&rt->xa); xa_for_each(&rt->xa, id, res) { if (!rdma_restrack_get(res)) continue; xa_unlock(&rt->xa); counter = container_of(res, struct rdma_counter, res); if ((counter->device != dev) || (counter->port != port) || rdma_counter_query_stats(counter)) goto next; sum += counter->stats->value[index]; next: xa_lock(&rt->xa); rdma_restrack_put(res); } xa_unlock(&rt->xa); return sum; } /* * rdma_counter_get_hwstat_value() - Get the sum value of all counters on a * specific port, including the running ones and history data */ u64 rdma_counter_get_hwstat_value(struct ib_device *dev, u32 port, u32 index) { struct rdma_port_counter *port_counter; u64 sum; port_counter = &dev->port_data[port].port_counter; if (!port_counter->hstats) return 0; sum = get_running_counters_hwstat_sum(dev, port, index); sum += port_counter->hstats->value[index]; return sum; } static struct ib_qp *rdma_counter_get_qp(struct ib_device *dev, u32 qp_num) { struct rdma_restrack_entry *res = NULL; struct ib_qp *qp = NULL; res = rdma_restrack_get_byid(dev, RDMA_RESTRACK_QP, qp_num); if (IS_ERR(res)) return NULL; qp = container_of(res, struct ib_qp, res); if (qp->qp_type == IB_QPT_RAW_PACKET && !rdma_dev_has_raw_cap(dev)) goto err; return qp; err: rdma_restrack_put(res); return NULL; } static struct rdma_counter *rdma_get_counter_by_id(struct ib_device *dev, u32 counter_id) { 
struct rdma_restrack_entry *res; struct rdma_counter *counter; res = rdma_restrack_get_byid(dev, RDMA_RESTRACK_COUNTER, counter_id); if (IS_ERR(res)) return NULL; counter = container_of(res, struct rdma_counter, res); kref_get(&counter->kref); rdma_restrack_put(res); return counter; } /* * rdma_counter_bind_qpn() - Bind QP @qp_num to counter @counter_id */ int rdma_counter_bind_qpn(struct ib_device *dev, u32 port, u32 qp_num, u32 counter_id) { struct rdma_port_counter *port_counter; struct rdma_counter *counter; struct ib_qp *qp; int ret; port_counter = &dev->port_data[port].port_counter; if (port_counter->mode.mode == RDMA_COUNTER_MODE_AUTO) return -EINVAL; qp = rdma_counter_get_qp(dev, qp_num); if (!qp) return -ENOENT; counter = rdma_get_counter_by_id(dev, counter_id); if (!counter) { ret = -ENOENT; goto err; } if (rdma_is_kernel_res(&counter->res) != rdma_is_kernel_res(&qp->res)) { ret = -EINVAL; goto err_task; } if ((counter->device != qp->device) || (counter->port != qp->port)) { ret = -EINVAL; goto err_task; } ret = __rdma_counter_bind_qp(counter, qp, port); if (ret) goto err_task; rdma_restrack_put(&qp->res); return 0; err_task: kref_put(&counter->kref, counter_release); err: rdma_restrack_put(&qp->res); return ret; } /* * rdma_counter_bind_qpn_alloc() - Alloc a counter and bind QP @qp_num to it * The id of new counter is returned in @counter_id */ int rdma_counter_bind_qpn_alloc(struct ib_device *dev, u32 port, u32 qp_num, u32 *counter_id) { struct rdma_port_counter *port_counter; struct rdma_counter *counter; struct ib_qp *qp; int ret; if (!rdma_is_port_valid(dev, port)) return -EINVAL; port_counter = &dev->port_data[port].port_counter; if (!port_counter->hstats) return -EOPNOTSUPP; if (port_counter->mode.mode == RDMA_COUNTER_MODE_AUTO) return -EINVAL; qp = rdma_counter_get_qp(dev, qp_num); if (!qp) return -ENOENT; if (rdma_is_port_valid(dev, qp->port) && (qp->port != port)) { ret = -EINVAL; goto err; } counter = alloc_and_bind(dev, port, qp, RDMA_COUNTER_MODE_MANUAL, true); if (!counter) { ret = -ENOMEM; goto err; } if (counter_id) *counter_id = counter->id; rdma_restrack_put(&qp->res); return 0; err: rdma_restrack_put(&qp->res); return ret; } /* * rdma_counter_unbind_qpn() - Unbind QP @qp_num from a counter */ int rdma_counter_unbind_qpn(struct ib_device *dev, u32 port, u32 qp_num, u32 counter_id) { struct rdma_port_counter *port_counter; struct ib_qp *qp; int ret; if (!rdma_is_port_valid(dev, port)) return -EINVAL; qp = rdma_counter_get_qp(dev, qp_num); if (!qp) return -ENOENT; if (rdma_is_port_valid(dev, qp->port) && (qp->port != port)) { ret = -EINVAL; goto out; } port_counter = &dev->port_data[port].port_counter; if (!qp->counter || qp->counter->id != counter_id || port_counter->mode.mode != RDMA_COUNTER_MODE_MANUAL) { ret = -EINVAL; goto out; } ret = rdma_counter_unbind_qp(qp, port, false); out: rdma_restrack_put(&qp->res); return ret; } int rdma_counter_get_mode(struct ib_device *dev, u32 port, enum rdma_nl_counter_mode *mode, enum rdma_nl_counter_mask *mask, bool *opcnt) { struct rdma_port_counter *port_counter; port_counter = &dev->port_data[port].port_counter; *mode = port_counter->mode.mode; *mask = port_counter->mode.mask; *opcnt = port_counter->mode.bind_opcnt; return 0; } void rdma_counter_init(struct ib_device *dev) { struct rdma_port_counter *port_counter; u32 port, i; if (!dev->port_data) return; rdma_for_each_port(dev, port) { port_counter = &dev->port_data[port].port_counter; port_counter->mode.mode = RDMA_COUNTER_MODE_NONE; mutex_init(&port_counter->lock); if 
(!dev->ops.alloc_hw_port_stats) continue; port_counter->hstats = dev->ops.alloc_hw_port_stats(dev, port); if (!port_counter->hstats) goto fail; } return; fail: for (i = port; i >= rdma_start_port(dev); i--) { port_counter = &dev->port_data[port].port_counter; rdma_free_hw_stats_struct(port_counter->hstats); port_counter->hstats = NULL; mutex_destroy(&port_counter->lock); } } void rdma_counter_release(struct ib_device *dev) { struct rdma_port_counter *port_counter; u32 port; rdma_for_each_port(dev, port) { port_counter = &dev->port_data[port].port_counter; rdma_free_hw_stats_struct(port_counter->hstats); mutex_destroy(&port_counter->lock); } }
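/*
 * (Editor's illustration, not kernel code.) The heart of auto mode is
 * auto_mode_match() above: a QP is funnelled into an existing counter
 * only when every attribute selected by the configured mask matches.
 * A self-contained sketch of that rule; the demo_* names and DEMO_*
 * mask bits are stand-ins for the kernel's types, assumed here purely
 * for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

#define DEMO_MASK_QP_TYPE (1u << 0)	/* mirrors RDMA_COUNTER_MASK_QP_TYPE */
#define DEMO_MASK_PID	  (1u << 1)	/* mirrors RDMA_COUNTER_MASK_PID */

struct demo_qp      { int qp_type; int pid; };
struct demo_counter { int qp_type; int pid; };

static bool demo_auto_mode_match(const struct demo_qp *qp,
				 const struct demo_counter *c,
				 unsigned int mask)
{
	bool match = true;

	/* Each mask bit adds one equality constraint, as in the kernel. */
	if (mask & DEMO_MASK_QP_TYPE)
		match &= (c->qp_type == qp->qp_type);
	if (mask & DEMO_MASK_PID)
		match &= (c->pid == qp->pid);
	return match;
}

int main(void)
{
	unsigned int mask = DEMO_MASK_QP_TYPE | DEMO_MASK_PID;
	struct demo_counter c = { .qp_type = 2, .pid = 100 };
	struct demo_qp same  = { .qp_type = 2, .pid = 100 };
	struct demo_qp other = { .qp_type = 2, .pid = 200 };

	/* With both bits set, only the same-PID QP shares the counter. */
	printf("same=%d other=%d\n",
	       demo_auto_mode_match(&same, &c, mask),
	       demo_auto_mode_match(&other, &c, mask));
	return 0;
}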
// SPDX-License-Identifier: GPL-2.0-only /* * TCP Vegas congestion control * * This is based on the congestion detection/avoidance scheme described in * Lawrence S. Brakmo and Larry L. Peterson. * "TCP Vegas: End to end congestion avoidance on a global internet." * IEEE Journal on Selected Areas in Communication, 13(8):1465--1480, * October 1995. Available from: * ftp://ftp.cs.arizona.edu/xkernel/Papers/jsac.ps * * See http://www.cs.arizona.edu/xkernel/ for their implementation. * The main aspects that distinguish this implementation from the * Arizona Vegas implementation are: * o We do not change the loss detection or recovery mechanisms of * Linux in any way. Linux already recovers from losses quite well, * using fine-grained timers, NewReno, and FACK. * o To avoid the performance penalty imposed by increasing cwnd * only every-other RTT during slow start, we increase during * every RTT during slow start, just like Reno. * o Largely to allow continuous cwnd growth during slow start, * we use the rate at which ACKs come back as the "actual" * rate, rather than the rate at which data is sent. * o To speed convergence to the right rate, we set the cwnd * to achieve the right ("actual") rate when we exit slow start. * o To filter out the noise caused by delayed ACKs, we use the * minimum RTT sample observed during the last RTT to calculate * the actual rate. * o When the sender re-starts from idle, it waits until it has * received ACKs for an entire flight of new data before making * a cwnd adjustment decision. The original Vegas implementation * assumed senders never went idle.
*/ #include <linux/mm.h> #include <linux/module.h> #include <linux/skbuff.h> #include <linux/inet_diag.h> #include <net/tcp.h> #include "tcp_vegas.h" static int alpha = 2; static int beta = 4; static int gamma = 1; module_param(alpha, int, 0644); MODULE_PARM_DESC(alpha, "lower bound of packets in network"); module_param(beta, int, 0644); MODULE_PARM_DESC(beta, "upper bound of packets in network"); module_param(gamma, int, 0644); MODULE_PARM_DESC(gamma, "limit on increase (scale by 2)"); /* There are several situations when we must "re-start" Vegas: * * o when a connection is established * o after an RTO * o after fast recovery * o when we send a packet and there is no outstanding * unacknowledged data (restarting an idle connection) * * In these circumstances we cannot do a Vegas calculation at the * end of the first RTT, because any calculation we do is using * stale info -- both the saved cwnd and congestion feedback are * stale. * * Instead we must wait until the completion of an RTT during * which we actually receive ACKs. */ static void vegas_enable(struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); struct vegas *vegas = inet_csk_ca(sk); /* Begin taking Vegas samples next time we send something. */ vegas->doing_vegas_now = 1; /* Set the beginning of the next send window. */ vegas->beg_snd_nxt = tp->snd_nxt; vegas->cntRTT = 0; vegas->minRTT = 0x7fffffff; } /* Stop taking Vegas samples for now. */ static inline void vegas_disable(struct sock *sk) { struct vegas *vegas = inet_csk_ca(sk); vegas->doing_vegas_now = 0; } void tcp_vegas_init(struct sock *sk) { struct vegas *vegas = inet_csk_ca(sk); vegas->baseRTT = 0x7fffffff; vegas_enable(sk); } EXPORT_SYMBOL_GPL(tcp_vegas_init); /* Do RTT sampling needed for Vegas. * Basically we: * o min-filter RTT samples from within an RTT to get the current * propagation delay + queuing delay (we are min-filtering to try to * avoid the effects of delayed ACKs) * o min-filter RTT samples from a much longer window (forever for now) * to find the propagation delay (baseRTT) */ void tcp_vegas_pkts_acked(struct sock *sk, const struct ack_sample *sample) { struct vegas *vegas = inet_csk_ca(sk); u32 vrtt; if (sample->rtt_us < 0) return; /* Never allow zero rtt or baseRTT */ vrtt = sample->rtt_us + 1; /* Filter to find propagation delay: */ if (vrtt < vegas->baseRTT) vegas->baseRTT = vrtt; /* Find the min RTT during the last RTT to find * the current prop. delay + queuing delay: */ vegas->minRTT = min(vegas->minRTT, vrtt); vegas->cntRTT++; } EXPORT_SYMBOL_GPL(tcp_vegas_pkts_acked); void tcp_vegas_state(struct sock *sk, u8 ca_state) { if (ca_state == TCP_CA_Open) vegas_enable(sk); else vegas_disable(sk); } EXPORT_SYMBOL_GPL(tcp_vegas_state); /* * If the connection is idle and we are restarting, * then we don't want to do any Vegas calculations * until we get fresh RTT samples. So when we * restart, we reset our Vegas state to a clean * slate. After we get acks for this flight of * packets, _then_ we can make Vegas calculations * again. 
*/ void tcp_vegas_cwnd_event(struct sock *sk, enum tcp_ca_event event) { if (event == CA_EVENT_CWND_RESTART || event == CA_EVENT_TX_START) tcp_vegas_init(sk); } EXPORT_SYMBOL_GPL(tcp_vegas_cwnd_event); static inline u32 tcp_vegas_ssthresh(struct tcp_sock *tp) { return min(tp->snd_ssthresh, tcp_snd_cwnd(tp)); } static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked) { struct tcp_sock *tp = tcp_sk(sk); struct vegas *vegas = inet_csk_ca(sk); if (!vegas->doing_vegas_now) { tcp_reno_cong_avoid(sk, ack, acked); return; } if (after(ack, vegas->beg_snd_nxt)) { /* Do the Vegas once-per-RTT cwnd adjustment. */ /* Save the extent of the current window so we can use this * at the end of the next RTT. */ vegas->beg_snd_nxt = tp->snd_nxt; /* We do the Vegas calculations only if we got enough RTT * samples that we can be reasonably sure that we got * at least one RTT sample that wasn't from a delayed ACK. * If we only had 2 samples total, * then that means we're getting only 1 ACK per RTT, which * means they're almost certainly delayed ACKs. * If we have 3 samples, we should be OK. */ if (vegas->cntRTT <= 2) { /* We don't have enough RTT samples to do the Vegas * calculation, so we'll behave like Reno. */ tcp_reno_cong_avoid(sk, ack, acked); } else { u32 rtt, diff; u64 target_cwnd; /* We have enough RTT samples, so, using the Vegas * algorithm, we determine if we should increase or * decrease cwnd, and by how much. */ /* Pluck out the RTT we are using for the Vegas * calculations. This is the min RTT seen during the * last RTT. Taking the min filters out the effects * of delayed ACKs, at the cost of noticing congestion * a bit later. */ rtt = vegas->minRTT; /* Calculate the cwnd we should have, if we weren't * going too fast. * * This is: * (actual rate in segments) * baseRTT */ target_cwnd = (u64)tcp_snd_cwnd(tp) * vegas->baseRTT; do_div(target_cwnd, rtt); /* Calculate the difference between the window we had, * and the window we would like to have. This quantity * is the "Diff" from the Arizona Vegas papers. */ diff = tcp_snd_cwnd(tp) * (rtt-vegas->baseRTT) / vegas->baseRTT; if (diff > gamma && tcp_in_slow_start(tp)) { /* Going too fast. Time to slow down * and switch to congestion avoidance. */ /* Set cwnd to match the actual rate * exactly: * cwnd = (actual rate) * baseRTT * Then we add 1 because the integer * truncation robs us of full link * utilization. */ tcp_snd_cwnd_set(tp, min(tcp_snd_cwnd(tp), (u32)target_cwnd + 1)); tp->snd_ssthresh = tcp_vegas_ssthresh(tp); } else if (tcp_in_slow_start(tp)) { /* Slow start. */ tcp_slow_start(tp, acked); } else { /* Congestion avoidance. */ /* Figure out where we would like cwnd * to be. */ if (diff > beta) { /* The old window was too fast, so * we slow down. */ tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) - 1); tp->snd_ssthresh = tcp_vegas_ssthresh(tp); } else if (diff < alpha) { /* We don't have enough extra packets * in the network, so speed up. */ tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + 1); } else { /* Sending just as fast as we * should be. */ } } if (tcp_snd_cwnd(tp) < 2) tcp_snd_cwnd_set(tp, 2); else if (tcp_snd_cwnd(tp) > tp->snd_cwnd_clamp) tcp_snd_cwnd_set(tp, tp->snd_cwnd_clamp); tp->snd_ssthresh = tcp_current_ssthresh(sk); } /* Wipe the slate clean for the next RTT. */ vegas->cntRTT = 0; vegas->minRTT = 0x7fffffff; } /* Use normal slow start */ else if (tcp_in_slow_start(tp)) tcp_slow_start(tp, acked); } /* Extract info for Tcp socket info provided via netlink. 
*/ size_t tcp_vegas_get_info(struct sock *sk, u32 ext, int *attr, union tcp_cc_info *info) { const struct vegas *ca = inet_csk_ca(sk); if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) { info->vegas.tcpv_enabled = ca->doing_vegas_now; info->vegas.tcpv_rttcnt = ca->cntRTT; info->vegas.tcpv_rtt = ca->baseRTT; info->vegas.tcpv_minrtt = ca->minRTT; *attr = INET_DIAG_VEGASINFO; return sizeof(struct tcpvegas_info); } return 0; } EXPORT_SYMBOL_GPL(tcp_vegas_get_info); static struct tcp_congestion_ops tcp_vegas __read_mostly = { .init = tcp_vegas_init, .ssthresh = tcp_reno_ssthresh, .undo_cwnd = tcp_reno_undo_cwnd, .cong_avoid = tcp_vegas_cong_avoid, .pkts_acked = tcp_vegas_pkts_acked, .set_state = tcp_vegas_state, .cwnd_event = tcp_vegas_cwnd_event, .get_info = tcp_vegas_get_info, .owner = THIS_MODULE, .name = "vegas", }; static int __init tcp_vegas_register(void) { BUILD_BUG_ON(sizeof(struct vegas) > ICSK_CA_PRIV_SIZE); tcp_register_congestion_control(&tcp_vegas); return 0; } static void __exit tcp_vegas_unregister(void) { tcp_unregister_congestion_control(&tcp_vegas); } module_init(tcp_vegas_register); module_exit(tcp_vegas_unregister); MODULE_AUTHOR("Stephen Hemminger"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("TCP Vegas");
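/*
 * (Editor's illustration, not kernel code.) tcp_vegas_cong_avoid()
 * boils down to one estimate per RTT: diff = cwnd * (rtt - baseRTT) /
 * baseRTT, i.e. how many extra segments are sitting in queues along
 * the path. With the module defaults alpha=2 and beta=4, cwnd shrinks
 * when diff > beta, grows when diff < alpha, and holds otherwise. A
 * standalone arithmetic sketch; all names are illustrative.
 */
#include <stdio.h>

static unsigned int vegas_step(unsigned int cwnd, unsigned int rtt_us,
			       unsigned int base_rtt_us)
{
	const unsigned int alpha = 2, beta = 4;
	/* Extra packets queued in the network at the current rate. */
	unsigned int diff = cwnd * (rtt_us - base_rtt_us) / base_rtt_us;

	if (diff > beta)
		cwnd -= 1;	/* too much queueing: back off */
	else if (diff < alpha)
		cwnd += 1;	/* spare capacity: probe upward */
	/* alpha <= diff <= beta: the rate is about right, hold. */
	return cwnd < 2 ? 2 : cwnd;	/* same floor the kernel enforces */
}

int main(void)
{
	/* baseRTT 10 ms, 12 ms sample, cwnd 20: diff = 4, so hold at 20. */
	printf("cwnd=%u\n", vegas_step(20, 12000, 10000));
	/* 15 ms sample: diff = 10 > beta, so back off to 19. */
	printf("cwnd=%u\n", vegas_step(20, 15000, 10000));
	return 0;
}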
// SPDX-License-Identifier: GPL-2.0 /* * net/tipc/crypto.c: TIPC crypto for key handling & packet en/decryption * * Copyright (c) 2019, Ericsson AB * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the names of the copyright holders nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE.
*/ #include <crypto/aead.h> #include <crypto/aes.h> #include <crypto/rng.h> #include "crypto.h" #include "msg.h" #include "bcast.h" #define TIPC_TX_GRACE_PERIOD msecs_to_jiffies(5000) /* 5s */ #define TIPC_TX_LASTING_TIME msecs_to_jiffies(10000) /* 10s */ #define TIPC_RX_ACTIVE_LIM msecs_to_jiffies(3000) /* 3s */ #define TIPC_RX_PASSIVE_LIM msecs_to_jiffies(15000) /* 15s */ #define TIPC_MAX_TFMS_DEF 10 #define TIPC_MAX_TFMS_LIM 1000 #define TIPC_REKEYING_INTV_DEF (60 * 24) /* default: 1 day */ /* * TIPC Key ids */ enum { KEY_MASTER = 0, KEY_MIN = KEY_MASTER, KEY_1 = 1, KEY_2, KEY_3, KEY_MAX = KEY_3, }; /* * TIPC Crypto statistics */ enum { STAT_OK, STAT_NOK, STAT_ASYNC, STAT_ASYNC_OK, STAT_ASYNC_NOK, STAT_BADKEYS, /* tx only */ STAT_BADMSGS = STAT_BADKEYS, /* rx only */ STAT_NOKEYS, STAT_SWITCHES, MAX_STATS, }; /* TIPC crypto statistics' header */ static const char *hstats[MAX_STATS] = {"ok", "nok", "async", "async_ok", "async_nok", "badmsgs", "nokeys", "switches"}; /* Max TFMs number per key */ int sysctl_tipc_max_tfms __read_mostly = TIPC_MAX_TFMS_DEF; /* Key exchange switch, default: on */ int sysctl_tipc_key_exchange_enabled __read_mostly = 1; /* * struct tipc_key - TIPC keys' status indicator * * 7 6 5 4 3 2 1 0 * +-----+-----+-----+-----+-----+-----+-----+-----+ * key: | (reserved)|passive idx| active idx|pending idx| * +-----+-----+-----+-----+-----+-----+-----+-----+ */ struct tipc_key { #define KEY_BITS (2) #define KEY_MASK ((1 << KEY_BITS) - 1) union { struct { #if defined(__LITTLE_ENDIAN_BITFIELD) u8 pending:2, active:2, passive:2, /* rx only */ reserved:2; #elif defined(__BIG_ENDIAN_BITFIELD) u8 reserved:2, passive:2, /* rx only */ active:2, pending:2; #else #error "Please fix <asm/byteorder.h>" #endif } __packed; u8 keys; }; }; /** * struct tipc_tfm - TIPC TFM structure to form a list of TFMs * @tfm: cipher handle/key * @list: linked list of TFMs */ struct tipc_tfm { struct crypto_aead *tfm; struct list_head list; }; /** * struct tipc_aead - TIPC AEAD key structure * @tfm_entry: per-cpu pointer to one entry in TFM list * @crypto: TIPC crypto owns this key * @cloned: reference to the source key in case cloning * @users: the number of the key users (TX/RX) * @salt: the key's SALT value * @authsize: authentication tag size (max = 16) * @mode: crypto mode is applied to the key * @hint: a hint for user key * @rcu: struct rcu_head * @key: the aead key * @gen: the key's generation * @seqno: the key seqno (cluster scope) * @refcnt: the key reference counter */ struct tipc_aead { #define TIPC_AEAD_HINT_LEN (5) struct tipc_tfm * __percpu *tfm_entry; struct tipc_crypto *crypto; struct tipc_aead *cloned; atomic_t users; u32 salt; u8 authsize; u8 mode; char hint[2 * TIPC_AEAD_HINT_LEN + 1]; struct rcu_head rcu; struct tipc_aead_key *key; u16 gen; atomic64_t seqno ____cacheline_aligned; refcount_t refcnt ____cacheline_aligned; } ____cacheline_aligned; /** * struct tipc_crypto_stats - TIPC Crypto statistics * @stat: array of crypto statistics */ struct tipc_crypto_stats { unsigned int stat[MAX_STATS]; }; /** * struct tipc_crypto - TIPC TX/RX crypto structure * @net: struct net * @node: TIPC node (RX) * @aead: array of pointers to AEAD keys for encryption/decryption * @peer_rx_active: replicated peer RX active key index * @key_gen: TX/RX key generation * @key: the key states * @skey_mode: session key's mode * @skey: received session key * @wq: common workqueue on TX crypto * @work: delayed work sched for TX/RX * @key_distr: key distributing state * @rekeying_intv: rekeying interval (in 
minutes) * @stats: the crypto statistics * @name: the crypto name * @sndnxt: the per-peer sndnxt (TX) * @timer1: general timer 1 (jiffies) * @timer2: general timer 2 (jiffies) * @working: the crypto is working or not * @key_master: flag indicates if master key exists * @legacy_user: flag indicates if a peer joins w/o master key (for bwd comp.) * @nokey: no key indication * @flags: combined flags field * @lock: tipc_key lock */ struct tipc_crypto { struct net *net; struct tipc_node *node; struct tipc_aead __rcu *aead[KEY_MAX + 1]; atomic_t peer_rx_active; u16 key_gen; struct tipc_key key; u8 skey_mode; struct tipc_aead_key *skey; struct workqueue_struct *wq; struct delayed_work work; #define KEY_DISTR_SCHED 1 #define KEY_DISTR_COMPL 2 atomic_t key_distr; u32 rekeying_intv; struct tipc_crypto_stats __percpu *stats; char name[48]; atomic64_t sndnxt ____cacheline_aligned; unsigned long timer1; unsigned long timer2; union { struct { u8 working:1; u8 key_master:1; u8 legacy_user:1; u8 nokey: 1; }; u8 flags; }; spinlock_t lock; /* crypto lock */ } ____cacheline_aligned; /* struct tipc_crypto_tx_ctx - TX context for callbacks */ struct tipc_crypto_tx_ctx { struct tipc_aead *aead; struct tipc_bearer *bearer; struct tipc_media_addr dst; }; /* struct tipc_crypto_rx_ctx - RX context for callbacks */ struct tipc_crypto_rx_ctx { struct tipc_aead *aead; struct tipc_bearer *bearer; }; static struct tipc_aead *tipc_aead_get(struct tipc_aead __rcu *aead); static inline void tipc_aead_put(struct tipc_aead *aead); static void tipc_aead_free(struct rcu_head *rp); static int tipc_aead_users(struct tipc_aead __rcu *aead); static void tipc_aead_users_inc(struct tipc_aead __rcu *aead, int lim); static void tipc_aead_users_dec(struct tipc_aead __rcu *aead, int lim); static void tipc_aead_users_set(struct tipc_aead __rcu *aead, int val); static struct crypto_aead *tipc_aead_tfm_next(struct tipc_aead *aead); static int tipc_aead_init(struct tipc_aead **aead, struct tipc_aead_key *ukey, u8 mode); static int tipc_aead_clone(struct tipc_aead **dst, struct tipc_aead *src); static void *tipc_aead_mem_alloc(struct crypto_aead *tfm, unsigned int crypto_ctx_size, u8 **iv, struct aead_request **req, struct scatterlist **sg, int nsg); static int tipc_aead_encrypt(struct tipc_aead *aead, struct sk_buff *skb, struct tipc_bearer *b, struct tipc_media_addr *dst, struct tipc_node *__dnode); static void tipc_aead_encrypt_done(void *data, int err); static int tipc_aead_decrypt(struct net *net, struct tipc_aead *aead, struct sk_buff *skb, struct tipc_bearer *b); static void tipc_aead_decrypt_done(void *data, int err); static inline int tipc_ehdr_size(struct tipc_ehdr *ehdr); static int tipc_ehdr_build(struct net *net, struct tipc_aead *aead, u8 tx_key, struct sk_buff *skb, struct tipc_crypto *__rx); static inline void tipc_crypto_key_set_state(struct tipc_crypto *c, u8 new_passive, u8 new_active, u8 new_pending); static int tipc_crypto_key_attach(struct tipc_crypto *c, struct tipc_aead *aead, u8 pos, bool master_key); static bool tipc_crypto_key_try_align(struct tipc_crypto *rx, u8 new_pending); static struct tipc_aead *tipc_crypto_key_pick_tx(struct tipc_crypto *tx, struct tipc_crypto *rx, struct sk_buff *skb, u8 tx_key); static void tipc_crypto_key_synch(struct tipc_crypto *rx, struct sk_buff *skb); static int tipc_crypto_key_revoke(struct net *net, u8 tx_key); static inline void tipc_crypto_clone_msg(struct net *net, struct sk_buff *_skb, struct tipc_bearer *b, struct tipc_media_addr *dst, struct tipc_node *__dnode, u8 type); 
static void tipc_crypto_rcv_complete(struct net *net, struct tipc_aead *aead,
				     struct tipc_bearer *b,
				     struct sk_buff **skb, int err);
static void tipc_crypto_do_cmd(struct net *net, int cmd);
static char *tipc_crypto_key_dump(struct tipc_crypto *c, char *buf);
static char *tipc_key_change_dump(struct tipc_key old, struct tipc_key new,
				  char *buf);
static int tipc_crypto_key_xmit(struct net *net, struct tipc_aead_key *skey,
				u16 gen, u8 mode, u32 dnode);
static bool tipc_crypto_key_rcv(struct tipc_crypto *rx, struct tipc_msg *hdr);
static void tipc_crypto_work_tx(struct work_struct *work);
static void tipc_crypto_work_rx(struct work_struct *work);
static int tipc_aead_key_generate(struct tipc_aead_key *skey);

#define is_tx(crypto) (!(crypto)->node)
#define is_rx(crypto) (!is_tx(crypto))

#define key_next(cur) ((cur) % KEY_MAX + 1)

#define tipc_aead_rcu_ptr(rcu_ptr, lock)				\
	rcu_dereference_protected((rcu_ptr), lockdep_is_held(lock))

#define tipc_aead_rcu_replace(rcu_ptr, ptr, lock)			\
do {									\
	struct tipc_aead *__tmp = rcu_dereference_protected((rcu_ptr),	\
						lockdep_is_held(lock));	\
	rcu_assign_pointer((rcu_ptr), (ptr));				\
	tipc_aead_put(__tmp);						\
} while (0)

#define tipc_crypto_key_detach(rcu_ptr, lock)				\
	tipc_aead_rcu_replace((rcu_ptr), NULL, lock)

/**
 * tipc_aead_key_validate - Validate an AEAD user key
 * @ukey: pointer to user key data
 * @info: netlink info pointer
 */
int tipc_aead_key_validate(struct tipc_aead_key *ukey, struct genl_info *info)
{
	int keylen;

	/* Check if algorithm exists */
	if (unlikely(!crypto_has_alg(ukey->alg_name, 0, 0))) {
		GENL_SET_ERR_MSG(info, "unable to load the algorithm (is the module loaded?)");
		return -ENODEV;
	}

	/* Currently, we only support the "gcm(aes)" cipher algorithm */
	if (strcmp(ukey->alg_name, "gcm(aes)")) {
		GENL_SET_ERR_MSG(info, "the algorithm is not supported yet");
		return -ENOTSUPP;
	}

	/* Check if key size is correct */
	keylen = ukey->keylen - TIPC_AES_GCM_SALT_SIZE;
	if (unlikely(keylen != TIPC_AES_GCM_KEY_SIZE_128 &&
		     keylen != TIPC_AES_GCM_KEY_SIZE_192 &&
		     keylen != TIPC_AES_GCM_KEY_SIZE_256)) {
		GENL_SET_ERR_MSG(info, "incorrect key length (20, 28 or 36 octets?)");
		return -EKEYREJECTED;
	}

	return 0;
}

/**
 * tipc_aead_key_generate - Generate new session key
 * @skey: input/output key with new content
 *
 * Return: 0 in case of success, otherwise < 0
 */
static int tipc_aead_key_generate(struct tipc_aead_key *skey)
{
	int rc = 0;

	/* Fill the key's content with a random value via RNG cipher */
	rc = crypto_get_default_rng();
	if (likely(!rc)) {
		rc = crypto_rng_get_bytes(crypto_default_rng, skey->key,
					  skey->keylen);
		crypto_put_default_rng();
	}

	return rc;
}

static struct tipc_aead *tipc_aead_get(struct tipc_aead __rcu *aead)
{
	struct tipc_aead *tmp;

	rcu_read_lock();
	tmp = rcu_dereference(aead);
	if (unlikely(!tmp || !refcount_inc_not_zero(&tmp->refcnt)))
		tmp = NULL;
	rcu_read_unlock();

	return tmp;
}

static inline void tipc_aead_put(struct tipc_aead *aead)
{
	if (aead && refcount_dec_and_test(&aead->refcnt))
		call_rcu(&aead->rcu, tipc_aead_free);
}

/**
 * tipc_aead_free - Release AEAD key incl. all the TFMs in the list
 * @rp: rcu head pointer
 */
static void tipc_aead_free(struct rcu_head *rp)
{
	struct tipc_aead *aead = container_of(rp, struct tipc_aead, rcu);
	struct tipc_tfm *tfm_entry, *head, *tmp;

	if (aead->cloned) {
		tipc_aead_put(aead->cloned);
	} else {
		head = *get_cpu_ptr(aead->tfm_entry);
		put_cpu_ptr(aead->tfm_entry);
		list_for_each_entry_safe(tfm_entry, tmp, &head->list, list) {
			crypto_free_aead(tfm_entry->tfm);
			list_del(&tfm_entry->list);
			kfree(tfm_entry);
		}
		/* Free the head */
		crypto_free_aead(head->tfm);
		list_del(&head->list);
		kfree(head);
	}
	free_percpu(aead->tfm_entry);
	kfree_sensitive(aead->key);
	kfree_sensitive(aead);
}

static int tipc_aead_users(struct tipc_aead __rcu *aead)
{
	struct tipc_aead *tmp;
	int users = 0;

	rcu_read_lock();
	tmp = rcu_dereference(aead);
	if (tmp)
		users = atomic_read(&tmp->users);
	rcu_read_unlock();

	return users;
}

static void tipc_aead_users_inc(struct tipc_aead __rcu *aead, int lim)
{
	struct tipc_aead *tmp;

	rcu_read_lock();
	tmp = rcu_dereference(aead);
	if (tmp)
		atomic_add_unless(&tmp->users, 1, lim);
	rcu_read_unlock();
}

static void tipc_aead_users_dec(struct tipc_aead __rcu *aead, int lim)
{
	struct tipc_aead *tmp;

	rcu_read_lock();
	tmp = rcu_dereference(aead);
	if (tmp)
		/* reuse the pointer already checked above instead of a
		 * second rcu_dereference() on the same variable
		 */
		atomic_add_unless(&tmp->users, -1, lim);
	rcu_read_unlock();
}

static void tipc_aead_users_set(struct tipc_aead __rcu *aead, int val)
{
	struct tipc_aead *tmp;
	int cur;

	rcu_read_lock();
	tmp = rcu_dereference(aead);
	if (tmp) {
		do {
			cur = atomic_read(&tmp->users);
			if (cur == val)
				break;
		} while (atomic_cmpxchg(&tmp->users, cur, val) != cur);
	}
	rcu_read_unlock();
}

/**
 * tipc_aead_tfm_next - Move TFM entry to the next one in list and return it
 * @aead: the AEAD key pointer
 */
static struct crypto_aead *tipc_aead_tfm_next(struct tipc_aead *aead)
{
	struct tipc_tfm **tfm_entry;
	struct crypto_aead *tfm;

	tfm_entry = get_cpu_ptr(aead->tfm_entry);
	*tfm_entry = list_next_entry(*tfm_entry, list);
	tfm = (*tfm_entry)->tfm;
	put_cpu_ptr(tfm_entry);

	return tfm;
}

/**
 * tipc_aead_init - Initiate TIPC AEAD
 * @aead: returned new TIPC AEAD key handle pointer
 * @ukey: pointer to user key data
 * @mode: the key mode
 *
 * Allocate a (list of) new cipher transformation (TFM) with the specific user
 * key data if valid. The number of the allocated TFMs can be set via the
 * sysctl "net/tipc/max_tfms" first. All the other AEAD data are also
 * initialized.
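 *
 * Illustrative usage sketch (not part of the original source; the exact
 * user space commands are an assumption based on sysctl_tipc_max_tfms and
 * the iproute2 "tipc" tool):
 *
 *	# sysctl net.tipc.max_tfms=16
 *	# tipc node set key <key> algname "gcm(aes)"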
 *
 * Return: 0 if the initiation is successful, otherwise: < 0
 */
static int tipc_aead_init(struct tipc_aead **aead, struct tipc_aead_key *ukey,
			  u8 mode)
{
	struct tipc_tfm *tfm_entry, *head;
	struct crypto_aead *tfm;
	struct tipc_aead *tmp;
	int keylen, err, cpu;
	int tfm_cnt = 0;

	if (unlikely(*aead))
		return -EEXIST;

	/* Allocate a new AEAD */
	tmp = kzalloc(sizeof(*tmp), GFP_ATOMIC);
	if (unlikely(!tmp))
		return -ENOMEM;

	/* The key consists of two parts: [AES-KEY][SALT] */
	keylen = ukey->keylen - TIPC_AES_GCM_SALT_SIZE;

	/* Allocate per-cpu TFM entry pointer */
	tmp->tfm_entry = alloc_percpu(struct tipc_tfm *);
	if (!tmp->tfm_entry) {
		kfree_sensitive(tmp);
		return -ENOMEM;
	}

	/* Make a list of TFMs with the user key data */
	do {
		tfm = crypto_alloc_aead(ukey->alg_name, 0, 0);
		if (IS_ERR(tfm)) {
			err = PTR_ERR(tfm);
			break;
		}

		if (unlikely(!tfm_cnt &&
			     crypto_aead_ivsize(tfm) != TIPC_AES_GCM_IV_SIZE)) {
			crypto_free_aead(tfm);
			err = -ENOTSUPP;
			break;
		}

		err = crypto_aead_setauthsize(tfm, TIPC_AES_GCM_TAG_SIZE);
		err |= crypto_aead_setkey(tfm, ukey->key, keylen);
		if (unlikely(err)) {
			crypto_free_aead(tfm);
			break;
		}

		tfm_entry = kmalloc(sizeof(*tfm_entry), GFP_KERNEL);
		if (unlikely(!tfm_entry)) {
			crypto_free_aead(tfm);
			err = -ENOMEM;
			break;
		}
		INIT_LIST_HEAD(&tfm_entry->list);
		tfm_entry->tfm = tfm;

		/* First entry? */
		if (!tfm_cnt) {
			head = tfm_entry;
			for_each_possible_cpu(cpu) {
				*per_cpu_ptr(tmp->tfm_entry, cpu) = head;
			}
		} else {
			list_add_tail(&tfm_entry->list, &head->list);
		}

	} while (++tfm_cnt < sysctl_tipc_max_tfms);

	/* Was no TFM allocated at all? */
	if (!tfm_cnt) {
		free_percpu(tmp->tfm_entry);
		kfree_sensitive(tmp);
		return err;
	}

	/* Form a hex string of some last bytes as the key's hint */
	bin2hex(tmp->hint, ukey->key + keylen - TIPC_AEAD_HINT_LEN,
		TIPC_AEAD_HINT_LEN);

	/* Initialize the other data */
	tmp->mode = mode;
	tmp->cloned = NULL;
	tmp->authsize = TIPC_AES_GCM_TAG_SIZE;
	tmp->key = kmemdup(ukey, tipc_aead_key_size(ukey), GFP_KERNEL);
	if (!tmp->key) {
		tipc_aead_free(&tmp->rcu);
		return -ENOMEM;
	}
	memcpy(&tmp->salt, ukey->key + keylen, TIPC_AES_GCM_SALT_SIZE);
	atomic_set(&tmp->users, 0);
	atomic64_set(&tmp->seqno, 0);
	refcount_set(&tmp->refcnt, 1);

	*aead = tmp;
	return 0;
}

/**
 * tipc_aead_clone - Clone a TIPC AEAD key
 * @dst: dest key for the cloning
 * @src: source key to clone from
 *
 * Make a "copy" of the source AEAD key data to the dest; the TFM list is
 * shared between the two keys.
 * A reference to the source is held in the "cloned" pointer for later
 * freeing purposes.
 *
 * Note: this must be done in cluster-key mode only!
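 *
 * Minimal usage sketch (illustrative only; "src" stands for a caller-held
 * CLUSTER_KEY mode key):
 *
 *	struct tipc_aead *dst = NULL;
 *
 *	if (!tipc_aead_clone(&dst, src))
 *		pr_debug("dst shares src's TFM list, src stays pinned\n");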
* Return: 0 in case of success, otherwise < 0 */ static int tipc_aead_clone(struct tipc_aead **dst, struct tipc_aead *src) { struct tipc_aead *aead; int cpu; if (!src) return -ENOKEY; if (src->mode != CLUSTER_KEY) return -EINVAL; if (unlikely(*dst)) return -EEXIST; aead = kzalloc(sizeof(*aead), GFP_ATOMIC); if (unlikely(!aead)) return -ENOMEM; aead->tfm_entry = alloc_percpu_gfp(struct tipc_tfm *, GFP_ATOMIC); if (unlikely(!aead->tfm_entry)) { kfree_sensitive(aead); return -ENOMEM; } for_each_possible_cpu(cpu) { *per_cpu_ptr(aead->tfm_entry, cpu) = *per_cpu_ptr(src->tfm_entry, cpu); } memcpy(aead->hint, src->hint, sizeof(src->hint)); aead->mode = src->mode; aead->salt = src->salt; aead->authsize = src->authsize; atomic_set(&aead->users, 0); atomic64_set(&aead->seqno, 0); refcount_set(&aead->refcnt, 1); WARN_ON(!refcount_inc_not_zero(&src->refcnt)); aead->cloned = src; *dst = aead; return 0; } /** * tipc_aead_mem_alloc - Allocate memory for AEAD request operations * @tfm: cipher handle to be registered with the request * @crypto_ctx_size: size of crypto context for callback * @iv: returned pointer to IV data * @req: returned pointer to AEAD request data * @sg: returned pointer to SG lists * @nsg: number of SG lists to be allocated * * Allocate memory to store the crypto context data, AEAD request, IV and SG * lists, the memory layout is as follows: * crypto_ctx || iv || aead_req || sg[] * * Return: the pointer to the memory areas in case of success, otherwise NULL */ static void *tipc_aead_mem_alloc(struct crypto_aead *tfm, unsigned int crypto_ctx_size, u8 **iv, struct aead_request **req, struct scatterlist **sg, int nsg) { unsigned int iv_size, req_size; unsigned int len; u8 *mem; iv_size = crypto_aead_ivsize(tfm); req_size = sizeof(**req) + crypto_aead_reqsize(tfm); len = crypto_ctx_size; len += iv_size; len += crypto_aead_alignmask(tfm) & ~(crypto_tfm_ctx_alignment() - 1); len = ALIGN(len, crypto_tfm_ctx_alignment()); len += req_size; len = ALIGN(len, __alignof__(struct scatterlist)); len += nsg * sizeof(**sg); mem = kmalloc(len, GFP_ATOMIC); if (!mem) return NULL; *iv = (u8 *)PTR_ALIGN(mem + crypto_ctx_size, crypto_aead_alignmask(tfm) + 1); *req = (struct aead_request *)PTR_ALIGN(*iv + iv_size, crypto_tfm_ctx_alignment()); *sg = (struct scatterlist *)PTR_ALIGN((u8 *)*req + req_size, __alignof__(struct scatterlist)); return (void *)mem; } /** * tipc_aead_encrypt - Encrypt a message * @aead: TIPC AEAD key for the message encryption * @skb: the input/output skb * @b: TIPC bearer where the message will be delivered after the encryption * @dst: the destination media address * @__dnode: TIPC dest node if "known" * * Return: * * 0 : if the encryption has completed * * -EINPROGRESS/-EBUSY : if a callback will be performed * * < 0 : the encryption has failed */ static int tipc_aead_encrypt(struct tipc_aead *aead, struct sk_buff *skb, struct tipc_bearer *b, struct tipc_media_addr *dst, struct tipc_node *__dnode) { struct crypto_aead *tfm = tipc_aead_tfm_next(aead); struct tipc_crypto_tx_ctx *tx_ctx; struct aead_request *req; struct sk_buff *trailer; struct scatterlist *sg; struct tipc_ehdr *ehdr; int ehsz, len, tailen, nsg, rc; void *ctx; u32 salt; u8 *iv; /* Make sure message len at least 4-byte aligned */ len = ALIGN(skb->len, 4); tailen = len - skb->len + aead->authsize; /* Expand skb tail for authentication tag: * As for simplicity, we'd have made sure skb having enough tailroom * for authentication tag @skb allocation. 
Even when skb is nonlinear * but there is no frag_list, it should be still fine! * Otherwise, we must cow it to be a writable buffer with the tailroom. */ SKB_LINEAR_ASSERT(skb); if (tailen > skb_tailroom(skb)) { pr_debug("TX(): skb tailroom is not enough: %d, requires: %d\n", skb_tailroom(skb), tailen); } nsg = skb_cow_data(skb, tailen, &trailer); if (unlikely(nsg < 0)) { pr_err("TX: skb_cow_data() returned %d\n", nsg); return nsg; } pskb_put(skb, trailer, tailen); /* Allocate memory for the AEAD operation */ ctx = tipc_aead_mem_alloc(tfm, sizeof(*tx_ctx), &iv, &req, &sg, nsg); if (unlikely(!ctx)) return -ENOMEM; TIPC_SKB_CB(skb)->crypto_ctx = ctx; /* Map skb to the sg lists */ sg_init_table(sg, nsg); rc = skb_to_sgvec(skb, sg, 0, skb->len); if (unlikely(rc < 0)) { pr_err("TX: skb_to_sgvec() returned %d, nsg %d!\n", rc, nsg); goto exit; } /* Prepare IV: [SALT (4 octets)][SEQNO (8 octets)] * In case we're in cluster-key mode, SALT is varied by xor-ing with * the source address (or w0 of id), otherwise with the dest address * if dest is known. */ ehdr = (struct tipc_ehdr *)skb->data; salt = aead->salt; if (aead->mode == CLUSTER_KEY) salt ^= __be32_to_cpu(ehdr->addr); else if (__dnode) salt ^= tipc_node_get_addr(__dnode); memcpy(iv, &salt, 4); memcpy(iv + 4, (u8 *)&ehdr->seqno, 8); /* Prepare request */ ehsz = tipc_ehdr_size(ehdr); aead_request_set_tfm(req, tfm); aead_request_set_ad(req, ehsz); aead_request_set_crypt(req, sg, sg, len - ehsz, iv); /* Set callback function & data */ aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, tipc_aead_encrypt_done, skb); tx_ctx = (struct tipc_crypto_tx_ctx *)ctx; tx_ctx->aead = aead; tx_ctx->bearer = b; memcpy(&tx_ctx->dst, dst, sizeof(*dst)); /* Hold bearer */ if (unlikely(!tipc_bearer_hold(b))) { rc = -ENODEV; goto exit; } /* Get net to avoid freed tipc_crypto when delete namespace */ if (!maybe_get_net(aead->crypto->net)) { tipc_bearer_put(b); rc = -ENODEV; goto exit; } /* Now, do encrypt */ rc = crypto_aead_encrypt(req); if (rc == -EINPROGRESS || rc == -EBUSY) return rc; tipc_bearer_put(b); put_net(aead->crypto->net); exit: kfree(ctx); TIPC_SKB_CB(skb)->crypto_ctx = NULL; return rc; } static void tipc_aead_encrypt_done(void *data, int err) { struct sk_buff *skb = data; struct tipc_crypto_tx_ctx *tx_ctx = TIPC_SKB_CB(skb)->crypto_ctx; struct tipc_bearer *b = tx_ctx->bearer; struct tipc_aead *aead = tx_ctx->aead; struct tipc_crypto *tx = aead->crypto; struct net *net = tx->net; switch (err) { case 0: this_cpu_inc(tx->stats->stat[STAT_ASYNC_OK]); rcu_read_lock(); if (likely(test_bit(0, &b->up))) b->media->send_msg(net, skb, b, &tx_ctx->dst); else kfree_skb(skb); rcu_read_unlock(); break; case -EINPROGRESS: return; default: this_cpu_inc(tx->stats->stat[STAT_ASYNC_NOK]); kfree_skb(skb); break; } kfree(tx_ctx); tipc_bearer_put(b); tipc_aead_put(aead); put_net(net); } /** * tipc_aead_decrypt - Decrypt an encrypted message * @net: struct net * @aead: TIPC AEAD for the message decryption * @skb: the input/output skb * @b: TIPC bearer where the message has been received * * Return: * * 0 : if the decryption has completed * * -EINPROGRESS/-EBUSY : if a callback will be performed * * < 0 : the decryption has failed */ static int tipc_aead_decrypt(struct net *net, struct tipc_aead *aead, struct sk_buff *skb, struct tipc_bearer *b) { struct tipc_crypto_rx_ctx *rx_ctx; struct aead_request *req; struct crypto_aead *tfm; struct sk_buff *unused; struct scatterlist *sg; struct tipc_ehdr *ehdr; int ehsz, nsg, rc; void *ctx; u32 salt; u8 *iv; if 
(unlikely(!aead)) return -ENOKEY; nsg = skb_cow_data(skb, 0, &unused); if (unlikely(nsg < 0)) { pr_err("RX: skb_cow_data() returned %d\n", nsg); return nsg; } /* Allocate memory for the AEAD operation */ tfm = tipc_aead_tfm_next(aead); ctx = tipc_aead_mem_alloc(tfm, sizeof(*rx_ctx), &iv, &req, &sg, nsg); if (unlikely(!ctx)) return -ENOMEM; TIPC_SKB_CB(skb)->crypto_ctx = ctx; /* Map skb to the sg lists */ sg_init_table(sg, nsg); rc = skb_to_sgvec(skb, sg, 0, skb->len); if (unlikely(rc < 0)) { pr_err("RX: skb_to_sgvec() returned %d, nsg %d\n", rc, nsg); goto exit; } /* Reconstruct IV: */ ehdr = (struct tipc_ehdr *)skb->data; salt = aead->salt; if (aead->mode == CLUSTER_KEY) salt ^= __be32_to_cpu(ehdr->addr); else if (ehdr->destined) salt ^= tipc_own_addr(net); memcpy(iv, &salt, 4); memcpy(iv + 4, (u8 *)&ehdr->seqno, 8); /* Prepare request */ ehsz = tipc_ehdr_size(ehdr); aead_request_set_tfm(req, tfm); aead_request_set_ad(req, ehsz); aead_request_set_crypt(req, sg, sg, skb->len - ehsz, iv); /* Set callback function & data */ aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, tipc_aead_decrypt_done, skb); rx_ctx = (struct tipc_crypto_rx_ctx *)ctx; rx_ctx->aead = aead; rx_ctx->bearer = b; /* Hold bearer */ if (unlikely(!tipc_bearer_hold(b))) { rc = -ENODEV; goto exit; } /* Now, do decrypt */ rc = crypto_aead_decrypt(req); if (rc == -EINPROGRESS || rc == -EBUSY) return rc; tipc_bearer_put(b); exit: kfree(ctx); TIPC_SKB_CB(skb)->crypto_ctx = NULL; return rc; } static void tipc_aead_decrypt_done(void *data, int err) { struct sk_buff *skb = data; struct tipc_crypto_rx_ctx *rx_ctx = TIPC_SKB_CB(skb)->crypto_ctx; struct tipc_bearer *b = rx_ctx->bearer; struct tipc_aead *aead = rx_ctx->aead; struct tipc_crypto_stats __percpu *stats = aead->crypto->stats; struct net *net = aead->crypto->net; switch (err) { case 0: this_cpu_inc(stats->stat[STAT_ASYNC_OK]); break; case -EINPROGRESS: return; default: this_cpu_inc(stats->stat[STAT_ASYNC_NOK]); break; } kfree(rx_ctx); tipc_crypto_rcv_complete(net, aead, b, &skb, err); if (likely(skb)) { if (likely(test_bit(0, &b->up))) tipc_rcv(net, skb, b); else kfree_skb(skb); } tipc_bearer_put(b); } static inline int tipc_ehdr_size(struct tipc_ehdr *ehdr) { return (ehdr->user != LINK_CONFIG) ? EHDR_SIZE : EHDR_CFG_SIZE; } /** * tipc_ehdr_validate - Validate an encryption message * @skb: the message buffer * * Return: "true" if this is a valid encryption message, otherwise "false" */ bool tipc_ehdr_validate(struct sk_buff *skb) { struct tipc_ehdr *ehdr; int ehsz; if (unlikely(!pskb_may_pull(skb, EHDR_MIN_SIZE))) return false; ehdr = (struct tipc_ehdr *)skb->data; if (unlikely(ehdr->version != TIPC_EVERSION)) return false; ehsz = tipc_ehdr_size(ehdr); if (unlikely(!pskb_may_pull(skb, ehsz))) return false; if (unlikely(skb->len <= ehsz + TIPC_AES_GCM_TAG_SIZE)) return false; return true; } /** * tipc_ehdr_build - Build TIPC encryption message header * @net: struct net * @aead: TX AEAD key to be used for the message encryption * @tx_key: key id used for the message encryption * @skb: input/output message skb * @__rx: RX crypto handle if dest is "known" * * Return: the header size if the building is successful, otherwise < 0 */ static int tipc_ehdr_build(struct net *net, struct tipc_aead *aead, u8 tx_key, struct sk_buff *skb, struct tipc_crypto *__rx) { struct tipc_msg *hdr = buf_msg(skb); struct tipc_ehdr *ehdr; u32 user = msg_user(hdr); u64 seqno; int ehsz; /* Make room for encryption header */ ehsz = (user != LINK_CONFIG) ? 
EHDR_SIZE : EHDR_CFG_SIZE; WARN_ON(skb_headroom(skb) < ehsz); ehdr = (struct tipc_ehdr *)skb_push(skb, ehsz); /* Obtain a seqno first: * Use the key seqno (= cluster wise) if dest is unknown or we're in * cluster key mode, otherwise it's better for a per-peer seqno! */ if (!__rx || aead->mode == CLUSTER_KEY) seqno = atomic64_inc_return(&aead->seqno); else seqno = atomic64_inc_return(&__rx->sndnxt); /* Revoke the key if seqno is wrapped around */ if (unlikely(!seqno)) return tipc_crypto_key_revoke(net, tx_key); /* Word 1-2 */ ehdr->seqno = cpu_to_be64(seqno); /* Words 0, 3- */ ehdr->version = TIPC_EVERSION; ehdr->user = 0; ehdr->keepalive = 0; ehdr->tx_key = tx_key; ehdr->destined = (__rx) ? 1 : 0; ehdr->rx_key_active = (__rx) ? __rx->key.active : 0; ehdr->rx_nokey = (__rx) ? __rx->nokey : 0; ehdr->master_key = aead->crypto->key_master; ehdr->reserved_1 = 0; ehdr->reserved_2 = 0; switch (user) { case LINK_CONFIG: ehdr->user = LINK_CONFIG; memcpy(ehdr->id, tipc_own_id(net), NODE_ID_LEN); break; default: if (user == LINK_PROTOCOL && msg_type(hdr) == STATE_MSG) { ehdr->user = LINK_PROTOCOL; ehdr->keepalive = msg_is_keepalive(hdr); } ehdr->addr = hdr->hdr[3]; break; } return ehsz; } static inline void tipc_crypto_key_set_state(struct tipc_crypto *c, u8 new_passive, u8 new_active, u8 new_pending) { struct tipc_key old = c->key; char buf[32]; c->key.keys = ((new_passive & KEY_MASK) << (KEY_BITS * 2)) | ((new_active & KEY_MASK) << (KEY_BITS)) | ((new_pending & KEY_MASK)); pr_debug("%s: key changing %s ::%pS\n", c->name, tipc_key_change_dump(old, c->key, buf), __builtin_return_address(0)); } /** * tipc_crypto_key_init - Initiate a new user / AEAD key * @c: TIPC crypto to which new key is attached * @ukey: the user key * @mode: the key mode (CLUSTER_KEY or PER_NODE_KEY) * @master_key: specify this is a cluster master key * * A new TIPC AEAD key will be allocated and initiated with the specified user * key, then attached to the TIPC crypto. * * Return: new key id in case of success, otherwise: < 0 */ int tipc_crypto_key_init(struct tipc_crypto *c, struct tipc_aead_key *ukey, u8 mode, bool master_key) { struct tipc_aead *aead = NULL; int rc = 0; /* Initiate with the new user key */ rc = tipc_aead_init(&aead, ukey, mode); /* Attach it to the crypto */ if (likely(!rc)) { rc = tipc_crypto_key_attach(c, aead, 0, master_key); if (rc < 0) tipc_aead_free(&aead->rcu); } return rc; } /** * tipc_crypto_key_attach - Attach a new AEAD key to TIPC crypto * @c: TIPC crypto to which the new AEAD key is attached * @aead: the new AEAD key pointer * @pos: desired slot in the crypto key array, = 0 if any! * @master_key: specify this is a cluster master key * * Return: new key id in case of success, otherwise: -EBUSY */ static int tipc_crypto_key_attach(struct tipc_crypto *c, struct tipc_aead *aead, u8 pos, bool master_key) { struct tipc_key key; int rc = -EBUSY; u8 new_key; spin_lock_bh(&c->lock); key = c->key; if (master_key) { new_key = KEY_MASTER; goto attach; } if (key.active && key.passive) goto exit; if (key.pending) { if (tipc_aead_users(c->aead[key.pending]) > 0) goto exit; /* if (pos): ok with replacing, will be aligned when needed */ /* Replace it */ new_key = key.pending; } else { if (pos) { if (key.active && pos != key_next(key.active)) { key.passive = pos; new_key = pos; goto attach; } else if (!key.active && !key.passive) { key.pending = pos; new_key = pos; goto attach; } } key.pending = key_next(key.active ?: key.passive); new_key = key.pending; } attach: aead->crypto = c; aead->gen = (is_tx(c)) ? 
++c->key_gen : c->key_gen;
	tipc_aead_rcu_replace(c->aead[new_key], aead, &c->lock);
	if (likely(c->key.keys != key.keys))
		tipc_crypto_key_set_state(c, key.passive, key.active,
					  key.pending);
	c->working = 1;
	c->nokey = 0;
	c->key_master |= master_key;
	rc = new_key;

exit:
	spin_unlock_bh(&c->lock);
	return rc;
}

void tipc_crypto_key_flush(struct tipc_crypto *c)
{
	struct tipc_crypto *tx, *rx;
	int k;

	spin_lock_bh(&c->lock);
	if (is_rx(c)) {
		/* Try to cancel pending work */
		rx = c;
		tx = tipc_net(rx->net)->crypto_tx;
		if (cancel_delayed_work(&rx->work)) {
			kfree(rx->skey);
			rx->skey = NULL;
			atomic_xchg(&rx->key_distr, 0);
			tipc_node_put(rx->node);
		}
		/* RX stopping => decrease TX key users if any */
		k = atomic_xchg(&rx->peer_rx_active, 0);
		if (k) {
			tipc_aead_users_dec(tx->aead[k], 0);
			/* Mark the point TX key users changed */
			tx->timer1 = jiffies;
		}
	}

	c->flags = 0;
	tipc_crypto_key_set_state(c, 0, 0, 0);
	for (k = KEY_MIN; k <= KEY_MAX; k++)
		tipc_crypto_key_detach(c->aead[k], &c->lock);
	atomic64_set(&c->sndnxt, 0);
	spin_unlock_bh(&c->lock);
}

/**
 * tipc_crypto_key_try_align - Align RX keys if possible
 * @rx: RX crypto handle
 * @new_pending: new pending slot if aligned (= TX key from peer)
 *
 * Peer has used an unknown key slot; this only happens when peer has left
 * and rejoined, or we are a newcomer.
 * That means there must be no active key but a pending key at an unaligned
 * slot. If so, we try to move the pending key to the new slot.
 * Note: a potential passive key can exist; it will be shifted
 * correspondingly!
 *
 * Return: "true" if key is successfully aligned, otherwise "false"
 */
static bool tipc_crypto_key_try_align(struct tipc_crypto *rx, u8 new_pending)
{
	struct tipc_aead *tmp1, *tmp2 = NULL;
	struct tipc_key key;
	bool aligned = false;
	u8 new_passive = 0;
	int x;

	spin_lock(&rx->lock);
	key = rx->key;
	if (key.pending == new_pending) {
		aligned = true;
		goto exit;
	}
	if (key.active)
		goto exit;
	if (!key.pending)
		goto exit;
	if (tipc_aead_users(rx->aead[key.pending]) > 0)
		goto exit;

	/* Try to "isolate" this pending key first */
	tmp1 = tipc_aead_rcu_ptr(rx->aead[key.pending], &rx->lock);
	if (!refcount_dec_if_one(&tmp1->refcnt))
		goto exit;
	rcu_assign_pointer(rx->aead[key.pending], NULL);

	/* Move passive key if any */
	if (key.passive) {
		tmp2 = rcu_replace_pointer(rx->aead[key.passive], tmp2,
					   lockdep_is_held(&rx->lock));
		x = (key.passive - key.pending + new_pending) % KEY_MAX;
		new_passive = (x <= 0) ? x + KEY_MAX : x;
	}

	/* Re-allocate the key(s) */
	tipc_crypto_key_set_state(rx, new_passive, 0, new_pending);
	rcu_assign_pointer(rx->aead[new_pending], tmp1);
	if (new_passive)
		rcu_assign_pointer(rx->aead[new_passive], tmp2);
	refcount_set(&tmp1->refcnt, 1);
	aligned = true;
	pr_info_ratelimited("%s: key[%d] -> key[%d]\n", rx->name, key.pending,
			    new_pending);

exit:
	spin_unlock(&rx->lock);
	return aligned;
}

/**
 * tipc_crypto_key_pick_tx - Pick one TX key for message decryption
 * @tx: TX crypto handle
 * @rx: RX crypto handle (can be NULL)
 * @skb: the message skb which will be decrypted later
 * @tx_key: peer TX key id
 *
 * This function looks up the existing TX keys and picks one that is suitable
 * for the message decryption: it must be a cluster key and not used before
 * on the same message (i.e. recursive).
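 *
 * For illustration (not from the original source): with tx->key =
 * {pending = 2, active = 1, passive = 3}, the candidates are probed in the
 * order key[2], key[1], key[3]; a candidate is skipped unless it is a
 * CLUSTER_KEY and has not been tried on this very skb before.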
* * Return: the TX AEAD key handle in case of success, otherwise NULL */ static struct tipc_aead *tipc_crypto_key_pick_tx(struct tipc_crypto *tx, struct tipc_crypto *rx, struct sk_buff *skb, u8 tx_key) { struct tipc_skb_cb *skb_cb = TIPC_SKB_CB(skb); struct tipc_aead *aead = NULL; struct tipc_key key = tx->key; u8 k, i = 0; /* Initialize data if not yet */ if (!skb_cb->tx_clone_deferred) { skb_cb->tx_clone_deferred = 1; memset(&skb_cb->tx_clone_ctx, 0, sizeof(skb_cb->tx_clone_ctx)); } skb_cb->tx_clone_ctx.rx = rx; if (++skb_cb->tx_clone_ctx.recurs > 2) return NULL; /* Pick one TX key */ spin_lock(&tx->lock); if (tx_key == KEY_MASTER) { aead = tipc_aead_rcu_ptr(tx->aead[KEY_MASTER], &tx->lock); goto done; } do { k = (i == 0) ? key.pending : ((i == 1) ? key.active : key.passive); if (!k) continue; aead = tipc_aead_rcu_ptr(tx->aead[k], &tx->lock); if (!aead) continue; if (aead->mode != CLUSTER_KEY || aead == skb_cb->tx_clone_ctx.last) { aead = NULL; continue; } /* Ok, found one cluster key */ skb_cb->tx_clone_ctx.last = aead; WARN_ON(skb->next); skb->next = skb_clone(skb, GFP_ATOMIC); if (unlikely(!skb->next)) pr_warn("Failed to clone skb for next round if any\n"); break; } while (++i < 3); done: if (likely(aead)) WARN_ON(!refcount_inc_not_zero(&aead->refcnt)); spin_unlock(&tx->lock); return aead; } /** * tipc_crypto_key_synch: Synch own key data according to peer key status * @rx: RX crypto handle * @skb: TIPCv2 message buffer (incl. the ehdr from peer) * * This function updates the peer node related data as the peer RX active key * has changed, so the number of TX keys' users on this node are increased and * decreased correspondingly. * * It also considers if peer has no key, then we need to make own master key * (if any) taking over i.e. starting grace period and also trigger key * distributing process. * * The "per-peer" sndnxt is also reset when the peer key has switched. */ static void tipc_crypto_key_synch(struct tipc_crypto *rx, struct sk_buff *skb) { struct tipc_ehdr *ehdr = (struct tipc_ehdr *)skb_network_header(skb); struct tipc_crypto *tx = tipc_net(rx->net)->crypto_tx; struct tipc_msg *hdr = buf_msg(skb); u32 self = tipc_own_addr(rx->net); u8 cur, new; unsigned long delay; /* Update RX 'key_master' flag according to peer, also mark "legacy" if * a peer has no master key. 
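 *
 * (Illustration, not from the original source: in "Case 1" below, the key
 * distributing work is queued with a randomized delay of 500 * (1..5) ms,
 * i.e. 0.5s up to 2.5s, so peers which all report "no key" at once do not
 * request the key at the same instant.)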
*/ rx->key_master = ehdr->master_key; if (!rx->key_master) tx->legacy_user = 1; /* For later cases, apply only if message is destined to this node */ if (!ehdr->destined || msg_short(hdr) || msg_destnode(hdr) != self) return; /* Case 1: Peer has no keys, let's make master key take over */ if (ehdr->rx_nokey) { /* Set or extend grace period */ tx->timer2 = jiffies; /* Schedule key distributing for the peer if not yet */ if (tx->key.keys && !atomic_cmpxchg(&rx->key_distr, 0, KEY_DISTR_SCHED)) { get_random_bytes(&delay, 2); delay %= 5; delay = msecs_to_jiffies(500 * ++delay); if (queue_delayed_work(tx->wq, &rx->work, delay)) tipc_node_get(rx->node); } } else { /* Cancel a pending key distributing if any */ atomic_xchg(&rx->key_distr, 0); } /* Case 2: Peer RX active key has changed, let's update own TX users */ cur = atomic_read(&rx->peer_rx_active); new = ehdr->rx_key_active; if (tx->key.keys && cur != new && atomic_cmpxchg(&rx->peer_rx_active, cur, new) == cur) { if (new) tipc_aead_users_inc(tx->aead[new], INT_MAX); if (cur) tipc_aead_users_dec(tx->aead[cur], 0); atomic64_set(&rx->sndnxt, 0); /* Mark the point TX key users changed */ tx->timer1 = jiffies; pr_debug("%s: key users changed %d-- %d++, peer %s\n", tx->name, cur, new, rx->name); } } static int tipc_crypto_key_revoke(struct net *net, u8 tx_key) { struct tipc_crypto *tx = tipc_net(net)->crypto_tx; struct tipc_key key; spin_lock_bh(&tx->lock); key = tx->key; WARN_ON(!key.active || tx_key != key.active); /* Free the active key */ tipc_crypto_key_set_state(tx, key.passive, 0, key.pending); tipc_crypto_key_detach(tx->aead[key.active], &tx->lock); spin_unlock_bh(&tx->lock); pr_warn("%s: key is revoked\n", tx->name); return -EKEYREVOKED; } int tipc_crypto_start(struct tipc_crypto **crypto, struct net *net, struct tipc_node *node) { struct tipc_crypto *c; if (*crypto) return -EEXIST; /* Allocate crypto */ c = kzalloc(sizeof(*c), GFP_ATOMIC); if (!c) return -ENOMEM; /* Allocate workqueue on TX */ if (!node) { c->wq = alloc_ordered_workqueue("tipc_crypto", 0); if (!c->wq) { kfree(c); return -ENOMEM; } } /* Allocate statistic structure */ c->stats = alloc_percpu_gfp(struct tipc_crypto_stats, GFP_ATOMIC); if (!c->stats) { if (c->wq) destroy_workqueue(c->wq); kfree_sensitive(c); return -ENOMEM; } c->flags = 0; c->net = net; c->node = node; get_random_bytes(&c->key_gen, 2); tipc_crypto_key_set_state(c, 0, 0, 0); atomic_set(&c->key_distr, 0); atomic_set(&c->peer_rx_active, 0); atomic64_set(&c->sndnxt, 0); c->timer1 = jiffies; c->timer2 = jiffies; c->rekeying_intv = TIPC_REKEYING_INTV_DEF; spin_lock_init(&c->lock); scnprintf(c->name, 48, "%s(%s)", (is_rx(c)) ? "RX" : "TX", (is_rx(c)) ? 
tipc_node_get_id_str(c->node) : tipc_own_id_string(c->net)); if (is_rx(c)) INIT_DELAYED_WORK(&c->work, tipc_crypto_work_rx); else INIT_DELAYED_WORK(&c->work, tipc_crypto_work_tx); *crypto = c; return 0; } void tipc_crypto_stop(struct tipc_crypto **crypto) { struct tipc_crypto *c = *crypto; u8 k; if (!c) return; /* Flush any queued works & destroy wq */ if (is_tx(c)) { c->rekeying_intv = 0; cancel_delayed_work_sync(&c->work); destroy_workqueue(c->wq); } /* Release AEAD keys */ rcu_read_lock(); for (k = KEY_MIN; k <= KEY_MAX; k++) tipc_aead_put(rcu_dereference(c->aead[k])); rcu_read_unlock(); pr_debug("%s: has been stopped\n", c->name); /* Free this crypto statistics */ free_percpu(c->stats); *crypto = NULL; kfree_sensitive(c); } void tipc_crypto_timeout(struct tipc_crypto *rx) { struct tipc_net *tn = tipc_net(rx->net); struct tipc_crypto *tx = tn->crypto_tx; struct tipc_key key; int cmd; /* TX pending: taking all users & stable -> active */ spin_lock(&tx->lock); key = tx->key; if (key.active && tipc_aead_users(tx->aead[key.active]) > 0) goto s1; if (!key.pending || tipc_aead_users(tx->aead[key.pending]) <= 0) goto s1; if (time_before(jiffies, tx->timer1 + TIPC_TX_LASTING_TIME)) goto s1; tipc_crypto_key_set_state(tx, key.passive, key.pending, 0); if (key.active) tipc_crypto_key_detach(tx->aead[key.active], &tx->lock); this_cpu_inc(tx->stats->stat[STAT_SWITCHES]); pr_info("%s: key[%d] is activated\n", tx->name, key.pending); s1: spin_unlock(&tx->lock); /* RX pending: having user -> active */ spin_lock(&rx->lock); key = rx->key; if (!key.pending || tipc_aead_users(rx->aead[key.pending]) <= 0) goto s2; if (key.active) key.passive = key.active; key.active = key.pending; rx->timer2 = jiffies; tipc_crypto_key_set_state(rx, key.passive, key.active, 0); this_cpu_inc(rx->stats->stat[STAT_SWITCHES]); pr_info("%s: key[%d] is activated\n", rx->name, key.pending); goto s5; s2: /* RX pending: not working -> remove */ if (!key.pending || tipc_aead_users(rx->aead[key.pending]) > -10) goto s3; tipc_crypto_key_set_state(rx, key.passive, key.active, 0); tipc_crypto_key_detach(rx->aead[key.pending], &rx->lock); pr_debug("%s: key[%d] is removed\n", rx->name, key.pending); goto s5; s3: /* RX active: timed out or no user -> pending */ if (!key.active) goto s4; if (time_before(jiffies, rx->timer1 + TIPC_RX_ACTIVE_LIM) && tipc_aead_users(rx->aead[key.active]) > 0) goto s4; if (key.pending) key.passive = key.active; else key.pending = key.active; rx->timer2 = jiffies; tipc_crypto_key_set_state(rx, key.passive, 0, key.pending); tipc_aead_users_set(rx->aead[key.pending], 0); pr_debug("%s: key[%d] is deactivated\n", rx->name, key.active); goto s5; s4: /* RX passive: outdated or not working -> free */ if (!key.passive) goto s5; if (time_before(jiffies, rx->timer2 + TIPC_RX_PASSIVE_LIM) && tipc_aead_users(rx->aead[key.passive]) > -10) goto s5; tipc_crypto_key_set_state(rx, 0, key.active, key.pending); tipc_crypto_key_detach(rx->aead[key.passive], &rx->lock); pr_debug("%s: key[%d] is freed\n", rx->name, key.passive); s5: spin_unlock(&rx->lock); /* Relax it here, the flag will be set again if it really is, but only * when we are not in grace period for safety! 
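 *
 * (Illustration, not from the original source: with the 5s
 * TIPC_TX_GRACE_PERIOD, "legacy_user" is only cleared once tx->timer2 has
 * not been refreshed for at least 5 seconds.)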
 */
	if (time_after(jiffies, tx->timer2 + TIPC_TX_GRACE_PERIOD))
		tx->legacy_user = 0;

	/* Limit max_tfms & do debug commands if needed */
	if (likely(sysctl_tipc_max_tfms <= TIPC_MAX_TFMS_LIM))
		return;

	cmd = sysctl_tipc_max_tfms;
	sysctl_tipc_max_tfms = TIPC_MAX_TFMS_DEF;
	tipc_crypto_do_cmd(rx->net, cmd);
}

static inline void tipc_crypto_clone_msg(struct net *net, struct sk_buff *_skb,
					 struct tipc_bearer *b,
					 struct tipc_media_addr *dst,
					 struct tipc_node *__dnode, u8 type)
{
	struct sk_buff *skb;

	skb = skb_clone(_skb, GFP_ATOMIC);
	if (skb) {
		TIPC_SKB_CB(skb)->xmit_type = type;
		tipc_crypto_xmit(net, &skb, b, dst, __dnode);
		if (skb)
			b->media->send_msg(net, skb, b, dst);
	}
}

/**
 * tipc_crypto_xmit - Build & encrypt TIPC message for xmit
 * @net: struct net
 * @skb: input/output message skb pointer
 * @b: bearer used for xmit later
 * @dst: destination media address
 * @__dnode: destination node for reference if any
 *
 * First, build an encryption message header on the top of the message, then
 * encrypt the original TIPC message by using the pending, master or active
 * key with this preference order.
 * If the encryption is successful, the encrypted skb is returned directly or
 * via the callback.
 * Otherwise, the skb is freed!
 *
 * Return:
 * * 0                   : the encryption has succeeded (or no encryption)
 * * -EINPROGRESS/-EBUSY : the encryption is ongoing, a callback will be made
 * * -ENOKEY             : the encryption has failed due to no key
 * * -EKEYREVOKED        : the encryption has failed due to key revoked
 * * -ENOMEM             : the encryption has failed due to no memory
 * * < 0                 : the encryption has failed due to other reasons
 */
int tipc_crypto_xmit(struct net *net, struct sk_buff **skb,
		     struct tipc_bearer *b, struct tipc_media_addr *dst,
		     struct tipc_node *__dnode)
{
	struct tipc_crypto *__rx = tipc_node_crypto_rx(__dnode);
	struct tipc_crypto *tx = tipc_net(net)->crypto_tx;
	struct tipc_crypto_stats __percpu *stats = tx->stats;
	struct tipc_msg *hdr = buf_msg(*skb);
	struct tipc_key key = tx->key;
	struct tipc_aead *aead = NULL;
	u32 user = msg_user(hdr);
	u32 type = msg_type(hdr);
	int rc = -ENOKEY;
	u8 tx_key = 0;

	/* No encryption?
*/ if (!tx->working) return 0; /* Pending key if peer has active on it or probing time */ if (unlikely(key.pending)) { tx_key = key.pending; if (!tx->key_master && !key.active) goto encrypt; if (__rx && atomic_read(&__rx->peer_rx_active) == tx_key) goto encrypt; if (TIPC_SKB_CB(*skb)->xmit_type == SKB_PROBING) { pr_debug("%s: probing for key[%d]\n", tx->name, key.pending); goto encrypt; } if (user == LINK_CONFIG || user == LINK_PROTOCOL) tipc_crypto_clone_msg(net, *skb, b, dst, __dnode, SKB_PROBING); } /* Master key if this is a *vital* message or in grace period */ if (tx->key_master) { tx_key = KEY_MASTER; if (!key.active) goto encrypt; if (TIPC_SKB_CB(*skb)->xmit_type == SKB_GRACING) { pr_debug("%s: gracing for msg (%d %d)\n", tx->name, user, type); goto encrypt; } if (user == LINK_CONFIG || (user == LINK_PROTOCOL && type == RESET_MSG) || (user == MSG_CRYPTO && type == KEY_DISTR_MSG) || time_before(jiffies, tx->timer2 + TIPC_TX_GRACE_PERIOD)) { if (__rx && __rx->key_master && !atomic_read(&__rx->peer_rx_active)) goto encrypt; if (!__rx) { if (likely(!tx->legacy_user)) goto encrypt; tipc_crypto_clone_msg(net, *skb, b, dst, __dnode, SKB_GRACING); } } } /* Else, use the active key if any */ if (likely(key.active)) { tx_key = key.active; goto encrypt; } goto exit; encrypt: aead = tipc_aead_get(tx->aead[tx_key]); if (unlikely(!aead)) goto exit; rc = tipc_ehdr_build(net, aead, tx_key, *skb, __rx); if (likely(rc > 0)) rc = tipc_aead_encrypt(aead, *skb, b, dst, __dnode); exit: switch (rc) { case 0: this_cpu_inc(stats->stat[STAT_OK]); break; case -EINPROGRESS: case -EBUSY: this_cpu_inc(stats->stat[STAT_ASYNC]); *skb = NULL; return rc; default: this_cpu_inc(stats->stat[STAT_NOK]); if (rc == -ENOKEY) this_cpu_inc(stats->stat[STAT_NOKEYS]); else if (rc == -EKEYREVOKED) this_cpu_inc(stats->stat[STAT_BADKEYS]); kfree_skb(*skb); *skb = NULL; break; } tipc_aead_put(aead); return rc; } /** * tipc_crypto_rcv - Decrypt an encrypted TIPC message from peer * @net: struct net * @rx: RX crypto handle * @skb: input/output message skb pointer * @b: bearer where the message has been received * * If the decryption is successful, the decrypted skb is returned directly or * as the callback, the encryption header and auth tag will be trimed out * before forwarding to tipc_rcv() via the tipc_crypto_rcv_complete(). * Otherwise, the skb will be freed! * Note: RX key(s) can be re-aligned, or in case of no key suitable, TX * cluster key(s) can be taken for decryption (- recursive). * * Return: * * 0 : the decryption has successfully completed * * -EINPROGRESS/-EBUSY : the decryption is ongoing, a callback will be made * * -ENOKEY : the decryption has failed due to no key * * -EBADMSG : the decryption has failed due to bad message * * -ENOMEM : the decryption has failed due to no memory * * < 0 : the decryption has failed due to other reasons */ int tipc_crypto_rcv(struct net *net, struct tipc_crypto *rx, struct sk_buff **skb, struct tipc_bearer *b) { struct tipc_crypto *tx = tipc_net(net)->crypto_tx; struct tipc_crypto_stats __percpu *stats; struct tipc_aead *aead = NULL; struct tipc_key key; int rc = -ENOKEY; u8 tx_key, n; tx_key = ((struct tipc_ehdr *)(*skb)->data)->tx_key; /* New peer? * Let's try with TX key (i.e. cluster mode) & verify the skb first! 
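 *
 * (Illustration, not from the original source: an arriving ehdr with
 * tx_key == 3 is first matched against rx->key.active/pending/passive;
 * failing that, the RX keys are re-aligned towards slot 3, and only as a
 * last resort is a cluster TX key picked for the decryption.)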
*/ if (unlikely(!rx || tx_key == KEY_MASTER)) goto pick_tx; /* Pick RX key according to TX key if any */ key = rx->key; if (tx_key == key.active || tx_key == key.pending || tx_key == key.passive) goto decrypt; /* Unknown key, let's try to align RX key(s) */ if (tipc_crypto_key_try_align(rx, tx_key)) goto decrypt; pick_tx: /* No key suitable? Try to pick one from TX... */ aead = tipc_crypto_key_pick_tx(tx, rx, *skb, tx_key); if (aead) goto decrypt; goto exit; decrypt: rcu_read_lock(); if (!aead) aead = tipc_aead_get(rx->aead[tx_key]); rc = tipc_aead_decrypt(net, aead, *skb, b); rcu_read_unlock(); exit: stats = ((rx) ?: tx)->stats; switch (rc) { case 0: this_cpu_inc(stats->stat[STAT_OK]); break; case -EINPROGRESS: case -EBUSY: this_cpu_inc(stats->stat[STAT_ASYNC]); *skb = NULL; return rc; default: this_cpu_inc(stats->stat[STAT_NOK]); if (rc == -ENOKEY) { kfree_skb(*skb); *skb = NULL; if (rx) { /* Mark rx->nokey only if we dont have a * pending received session key, nor a newer * one i.e. in the next slot. */ n = key_next(tx_key); rx->nokey = !(rx->skey || rcu_access_pointer(rx->aead[n])); pr_debug_ratelimited("%s: nokey %d, key %d/%x\n", rx->name, rx->nokey, tx_key, rx->key.keys); tipc_node_put(rx->node); } this_cpu_inc(stats->stat[STAT_NOKEYS]); return rc; } else if (rc == -EBADMSG) { this_cpu_inc(stats->stat[STAT_BADMSGS]); } break; } tipc_crypto_rcv_complete(net, aead, b, skb, rc); return rc; } static void tipc_crypto_rcv_complete(struct net *net, struct tipc_aead *aead, struct tipc_bearer *b, struct sk_buff **skb, int err) { struct tipc_skb_cb *skb_cb = TIPC_SKB_CB(*skb); struct tipc_crypto *rx = aead->crypto; struct tipc_aead *tmp = NULL; struct tipc_ehdr *ehdr; struct tipc_node *n; /* Is this completed by TX? */ if (unlikely(is_tx(aead->crypto))) { rx = skb_cb->tx_clone_ctx.rx; pr_debug("TX->RX(%s): err %d, aead %p, skb->next %p, flags %x\n", (rx) ? tipc_node_get_id_str(rx->node) : "-", err, aead, (*skb)->next, skb_cb->flags); pr_debug("skb_cb [recurs %d, last %p], tx->aead [%p %p %p]\n", skb_cb->tx_clone_ctx.recurs, skb_cb->tx_clone_ctx.last, aead->crypto->aead[1], aead->crypto->aead[2], aead->crypto->aead[3]); if (unlikely(err)) { if (err == -EBADMSG && (*skb)->next) tipc_rcv(net, (*skb)->next, b); goto free_skb; } if (likely((*skb)->next)) { kfree_skb((*skb)->next); (*skb)->next = NULL; } ehdr = (struct tipc_ehdr *)(*skb)->data; if (!rx) { WARN_ON(ehdr->user != LINK_CONFIG); n = tipc_node_create(net, 0, ehdr->id, 0xffffu, 0, true); rx = tipc_node_crypto_rx(n); if (unlikely(!rx)) goto free_skb; } /* Ignore cloning if it was TX master key */ if (ehdr->tx_key == KEY_MASTER) goto rcv; if (tipc_aead_clone(&tmp, aead) < 0) goto rcv; WARN_ON(!refcount_inc_not_zero(&tmp->refcnt)); if (tipc_crypto_key_attach(rx, tmp, ehdr->tx_key, false) < 0) { tipc_aead_free(&tmp->rcu); goto rcv; } tipc_aead_put(aead); aead = tmp; } if (unlikely(err)) { tipc_aead_users_dec((struct tipc_aead __force __rcu *)aead, INT_MIN); goto free_skb; } /* Set the RX key's user */ tipc_aead_users_set((struct tipc_aead __force __rcu *)aead, 1); /* Mark this point, RX works */ rx->timer1 = jiffies; rcv: /* Remove ehdr & auth. 
tag prior to tipc_rcv() */ ehdr = (struct tipc_ehdr *)(*skb)->data; /* Mark this point, RX passive still works */ if (rx->key.passive && ehdr->tx_key == rx->key.passive) rx->timer2 = jiffies; skb_reset_network_header(*skb); skb_pull(*skb, tipc_ehdr_size(ehdr)); if (pskb_trim(*skb, (*skb)->len - aead->authsize)) goto free_skb; /* Validate TIPCv2 message */ if (unlikely(!tipc_msg_validate(skb))) { pr_err_ratelimited("Packet dropped after decryption!\n"); goto free_skb; } /* Ok, everything's fine, try to synch own keys according to peers' */ tipc_crypto_key_synch(rx, *skb); /* Re-fetch skb cb as skb might be changed in tipc_msg_validate */ skb_cb = TIPC_SKB_CB(*skb); /* Mark skb decrypted */ skb_cb->decrypted = 1; /* Clear clone cxt if any */ if (likely(!skb_cb->tx_clone_deferred)) goto exit; skb_cb->tx_clone_deferred = 0; memset(&skb_cb->tx_clone_ctx, 0, sizeof(skb_cb->tx_clone_ctx)); goto exit; free_skb: kfree_skb(*skb); *skb = NULL; exit: tipc_aead_put(aead); if (rx) tipc_node_put(rx->node); } static void tipc_crypto_do_cmd(struct net *net, int cmd) { struct tipc_net *tn = tipc_net(net); struct tipc_crypto *tx = tn->crypto_tx, *rx; struct list_head *p; unsigned int stat; int i, j, cpu; char buf[200]; /* Currently only one command is supported */ switch (cmd) { case 0xfff1: goto print_stats; default: return; } print_stats: /* Print a header */ pr_info("\n=============== TIPC Crypto Statistics ===============\n\n"); /* Print key status */ pr_info("Key status:\n"); pr_info("TX(%7.7s)\n%s", tipc_own_id_string(net), tipc_crypto_key_dump(tx, buf)); rcu_read_lock(); for (p = tn->node_list.next; p != &tn->node_list; p = p->next) { rx = tipc_node_crypto_rx_by_list(p); pr_info("RX(%7.7s)\n%s", tipc_node_get_id_str(rx->node), tipc_crypto_key_dump(rx, buf)); } rcu_read_unlock(); /* Print crypto statistics */ for (i = 0, j = 0; i < MAX_STATS; i++) j += scnprintf(buf + j, 200 - j, "|%11s ", hstats[i]); pr_info("Counter %s", buf); memset(buf, '-', 115); buf[115] = '\0'; pr_info("%s\n", buf); j = scnprintf(buf, 200, "TX(%7.7s) ", tipc_own_id_string(net)); for_each_possible_cpu(cpu) { for (i = 0; i < MAX_STATS; i++) { stat = per_cpu_ptr(tx->stats, cpu)->stat[i]; j += scnprintf(buf + j, 200 - j, "|%11d ", stat); } pr_info("%s", buf); j = scnprintf(buf, 200, "%12s", " "); } rcu_read_lock(); for (p = tn->node_list.next; p != &tn->node_list; p = p->next) { rx = tipc_node_crypto_rx_by_list(p); j = scnprintf(buf, 200, "RX(%7.7s) ", tipc_node_get_id_str(rx->node)); for_each_possible_cpu(cpu) { for (i = 0; i < MAX_STATS; i++) { stat = per_cpu_ptr(rx->stats, cpu)->stat[i]; j += scnprintf(buf + j, 200 - j, "|%11d ", stat); } pr_info("%s", buf); j = scnprintf(buf, 200, "%12s", " "); } } rcu_read_unlock(); pr_info("\n======================== Done ========================\n"); } static char *tipc_crypto_key_dump(struct tipc_crypto *c, char *buf) { struct tipc_key key = c->key; struct tipc_aead *aead; int k, i = 0; char *s; for (k = KEY_MIN; k <= KEY_MAX; k++) { if (k == KEY_MASTER) { if (is_rx(c)) continue; if (time_before(jiffies, c->timer2 + TIPC_TX_GRACE_PERIOD)) s = "ACT"; else s = "PAS"; } else { if (k == key.passive) s = "PAS"; else if (k == key.active) s = "ACT"; else if (k == key.pending) s = "PEN"; else s = "-"; } i += scnprintf(buf + i, 200 - i, "\tKey%d: %s", k, s); rcu_read_lock(); aead = rcu_dereference(c->aead[k]); if (aead) i += scnprintf(buf + i, 200 - i, "{\"0x...%s\", \"%s\"}/%d:%d", aead->hint, (aead->mode == CLUSTER_KEY) ? 
"c" : "p", atomic_read(&aead->users), refcount_read(&aead->refcnt)); rcu_read_unlock(); i += scnprintf(buf + i, 200 - i, "\n"); } if (is_rx(c)) i += scnprintf(buf + i, 200 - i, "\tPeer RX active: %d\n", atomic_read(&c->peer_rx_active)); return buf; } static char *tipc_key_change_dump(struct tipc_key old, struct tipc_key new, char *buf) { struct tipc_key *key = &old; int k, i = 0; char *s; /* Output format: "[%s %s %s] -> [%s %s %s]", max len = 32 */ again: i += scnprintf(buf + i, 32 - i, "["); for (k = KEY_1; k <= KEY_3; k++) { if (k == key->passive) s = "pas"; else if (k == key->active) s = "act"; else if (k == key->pending) s = "pen"; else s = "-"; i += scnprintf(buf + i, 32 - i, (k != KEY_3) ? "%s " : "%s", s); } if (key != &new) { i += scnprintf(buf + i, 32 - i, "] -> "); key = &new; goto again; } i += scnprintf(buf + i, 32 - i, "]"); return buf; } /** * tipc_crypto_msg_rcv - Common 'MSG_CRYPTO' processing point * @net: the struct net * @skb: the receiving message buffer */ void tipc_crypto_msg_rcv(struct net *net, struct sk_buff *skb) { struct tipc_crypto *rx; struct tipc_msg *hdr; if (unlikely(skb_linearize(skb))) goto exit; hdr = buf_msg(skb); rx = tipc_node_crypto_rx_by_addr(net, msg_prevnode(hdr)); if (unlikely(!rx)) goto exit; switch (msg_type(hdr)) { case KEY_DISTR_MSG: if (tipc_crypto_key_rcv(rx, hdr)) goto exit; break; default: break; } tipc_node_put(rx->node); exit: kfree_skb(skb); } /** * tipc_crypto_key_distr - Distribute a TX key * @tx: the TX crypto * @key: the key's index * @dest: the destination tipc node, = NULL if distributing to all nodes * * Return: 0 in case of success, otherwise < 0 */ int tipc_crypto_key_distr(struct tipc_crypto *tx, u8 key, struct tipc_node *dest) { struct tipc_aead *aead; u32 dnode = tipc_node_get_addr(dest); int rc = -ENOKEY; if (!sysctl_tipc_key_exchange_enabled) return 0; if (key) { rcu_read_lock(); aead = tipc_aead_get(tx->aead[key]); if (likely(aead)) { rc = tipc_crypto_key_xmit(tx->net, aead->key, aead->gen, aead->mode, dnode); tipc_aead_put(aead); } rcu_read_unlock(); } return rc; } /** * tipc_crypto_key_xmit - Send a session key * @net: the struct net * @skey: the session key to be sent * @gen: the key's generation * @mode: the key's mode * @dnode: the destination node address, = 0 if broadcasting to all nodes * * The session key 'skey' is packed in a TIPC v2 'MSG_CRYPTO/KEY_DISTR_MSG' * as its data section, then xmit-ed through the uc/bc link. * * Return: 0 in case of success, otherwise < 0 */ static int tipc_crypto_key_xmit(struct net *net, struct tipc_aead_key *skey, u16 gen, u8 mode, u32 dnode) { struct sk_buff_head pkts; struct tipc_msg *hdr; struct sk_buff *skb; u16 size, cong_link_cnt; u8 *data; int rc; size = tipc_aead_key_size(skey); skb = tipc_buf_acquire(INT_H_SIZE + size, GFP_ATOMIC); if (!skb) return -ENOMEM; hdr = buf_msg(skb); tipc_msg_init(tipc_own_addr(net), hdr, MSG_CRYPTO, KEY_DISTR_MSG, INT_H_SIZE, dnode); msg_set_size(hdr, INT_H_SIZE + size); msg_set_key_gen(hdr, gen); msg_set_key_mode(hdr, mode); data = msg_data(hdr); *((__be32 *)(data + TIPC_AEAD_ALG_NAME)) = htonl(skey->keylen); memcpy(data, skey->alg_name, TIPC_AEAD_ALG_NAME); memcpy(data + TIPC_AEAD_ALG_NAME + sizeof(__be32), skey->key, skey->keylen); __skb_queue_head_init(&pkts); __skb_queue_tail(&pkts, skb); if (dnode) rc = tipc_node_xmit(net, &pkts, dnode, 0); else rc = tipc_bcast_xmit(net, &pkts, &cong_link_cnt); return rc; } /** * tipc_crypto_key_rcv - Receive a session key * @rx: the RX crypto * @hdr: the TIPC v2 message incl. 
the receiving session key in its data * * This function retrieves the session key in the message from peer, then * schedules a RX work to attach the key to the corresponding RX crypto. * * Return: "true" if the key has been scheduled for attaching, otherwise * "false". */ static bool tipc_crypto_key_rcv(struct tipc_crypto *rx, struct tipc_msg *hdr) { struct tipc_crypto *tx = tipc_net(rx->net)->crypto_tx; struct tipc_aead_key *skey = NULL; u16 key_gen = msg_key_gen(hdr); u32 size = msg_data_sz(hdr); u8 *data = msg_data(hdr); unsigned int keylen; /* Verify whether the size can exist in the packet */ if (unlikely(size < sizeof(struct tipc_aead_key) + TIPC_AEAD_KEYLEN_MIN)) { pr_debug("%s: message data size is too small\n", rx->name); goto exit; } keylen = ntohl(*((__be32 *)(data + TIPC_AEAD_ALG_NAME))); /* Verify the supplied size values */ if (unlikely(keylen > TIPC_AEAD_KEY_SIZE_MAX || size != keylen + sizeof(struct tipc_aead_key))) { pr_debug("%s: invalid MSG_CRYPTO key size\n", rx->name); goto exit; } spin_lock(&rx->lock); if (unlikely(rx->skey || (key_gen == rx->key_gen && rx->key.keys))) { pr_err("%s: key existed <%p>, gen %d vs %d\n", rx->name, rx->skey, key_gen, rx->key_gen); goto exit_unlock; } /* Allocate memory for the key */ skey = kmalloc(size, GFP_ATOMIC); if (unlikely(!skey)) { pr_err("%s: unable to allocate memory for skey\n", rx->name); goto exit_unlock; } /* Copy key from msg data */ skey->keylen = keylen; memcpy(skey->alg_name, data, TIPC_AEAD_ALG_NAME); memcpy(skey->key, data + TIPC_AEAD_ALG_NAME + sizeof(__be32), skey->keylen); rx->key_gen = key_gen; rx->skey_mode = msg_key_mode(hdr); rx->skey = skey; rx->nokey = 0; mb(); /* for nokey flag */ exit_unlock: spin_unlock(&rx->lock); exit: /* Schedule the key attaching on this crypto */ if (likely(skey && queue_delayed_work(tx->wq, &rx->work, 0))) return true; return false; } /** * tipc_crypto_work_rx - Scheduled RX works handler * @work: the struct RX work * * The function processes the previous scheduled works i.e. distributing TX key * or attaching a received session key on RX crypto. 
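 *
 * (Illustration, not from the original source: if attaching the received
 * session key fails with -EBUSY or -ENOMEM, the work below is requeued
 * with a fixed 5s delay instead of dropping the key.)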
*/ static void tipc_crypto_work_rx(struct work_struct *work) { struct delayed_work *dwork = to_delayed_work(work); struct tipc_crypto *rx = container_of(dwork, struct tipc_crypto, work); struct tipc_crypto *tx = tipc_net(rx->net)->crypto_tx; unsigned long delay = msecs_to_jiffies(5000); bool resched = false; u8 key; int rc; /* Case 1: Distribute TX key to peer if scheduled */ if (atomic_cmpxchg(&rx->key_distr, KEY_DISTR_SCHED, KEY_DISTR_COMPL) == KEY_DISTR_SCHED) { /* Always pick the newest one for distributing */ key = tx->key.pending ?: tx->key.active; rc = tipc_crypto_key_distr(tx, key, rx->node); if (unlikely(rc)) pr_warn("%s: unable to distr key[%d] to %s, err %d\n", tx->name, key, tipc_node_get_id_str(rx->node), rc); /* Sched for key_distr releasing */ resched = true; } else { atomic_cmpxchg(&rx->key_distr, KEY_DISTR_COMPL, 0); } /* Case 2: Attach a pending received session key from peer if any */ if (rx->skey) { rc = tipc_crypto_key_init(rx, rx->skey, rx->skey_mode, false); if (unlikely(rc < 0)) pr_warn("%s: unable to attach received skey, err %d\n", rx->name, rc); switch (rc) { case -EBUSY: case -ENOMEM: /* Resched the key attaching */ resched = true; break; default: synchronize_rcu(); kfree(rx->skey); rx->skey = NULL; break; } } if (resched && queue_delayed_work(tx->wq, &rx->work, delay)) return; tipc_node_put(rx->node); } /** * tipc_crypto_rekeying_sched - (Re)schedule rekeying w/o new interval * @tx: TX crypto * @changed: if the rekeying needs to be rescheduled with new interval * @new_intv: new rekeying interval (when "changed" = true) */ void tipc_crypto_rekeying_sched(struct tipc_crypto *tx, bool changed, u32 new_intv) { unsigned long delay; bool now = false; if (changed) { if (new_intv == TIPC_REKEYING_NOW) now = true; else tx->rekeying_intv = new_intv; cancel_delayed_work_sync(&tx->work); } if (tx->rekeying_intv || now) { delay = (now) ? 0 : tx->rekeying_intv * 60 * 1000; queue_delayed_work(tx->wq, &tx->work, msecs_to_jiffies(delay)); } } /** * tipc_crypto_work_tx - Scheduled TX works handler * @work: the struct TX work * * The function processes the previous scheduled work, i.e. key rekeying, by * generating a new session key based on current one, then attaching it to the * TX crypto and finally distributing it to peers. It also re-schedules the * rekeying if needed. */ static void tipc_crypto_work_tx(struct work_struct *work) { struct delayed_work *dwork = to_delayed_work(work); struct tipc_crypto *tx = container_of(dwork, struct tipc_crypto, work); struct tipc_aead_key *skey = NULL; struct tipc_key key = tx->key; struct tipc_aead *aead; int rc = -ENOMEM; if (unlikely(key.pending)) goto resched; /* Take current key as a template */ rcu_read_lock(); aead = rcu_dereference(tx->aead[key.active ?: KEY_MASTER]); if (unlikely(!aead)) { rcu_read_unlock(); /* At least one key should exist for securing */ return; } /* Lets duplicate it first */ skey = kmemdup(aead->key, tipc_aead_key_size(aead->key), GFP_ATOMIC); rcu_read_unlock(); /* Now, generate new key, initiate & distribute it */ if (likely(skey)) { rc = tipc_aead_key_generate(skey) ?: tipc_crypto_key_init(tx, skey, PER_NODE_KEY, false); if (likely(rc > 0)) rc = tipc_crypto_key_distr(tx, rc, NULL); kfree_sensitive(skey); } if (unlikely(rc)) pr_warn_ratelimited("%s: rekeying returns %d\n", tx->name, rc); resched: /* Re-schedule rekeying if any */ tipc_crypto_rekeying_sched(tx, false, 0); }
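/*
 * Illustrative sketch only (not part of the original file): the data section
 * of a MSG_CRYPTO/KEY_DISTR_MSG as built by tipc_crypto_key_xmit() above,
 * and how the key length can be read back, mirroring tipc_crypto_key_rcv().
 * The helper name is hypothetical.
 *
 *	[alg_name : TIPC_AEAD_ALG_NAME octets]
 *	[keylen   : __be32, network byte order]
 *	[key+salt : keylen octets]
 */
static inline unsigned int tipc_key_distr_example_keylen(const u8 *data)
{
	/* The key length sits right after the fixed-size algorithm name */
	return ntohl(*(const __be32 *)(data + TIPC_AEAD_ALG_NAME));
}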
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * ip_vs_proto_tcp.c:	TCP load balancing support for IPVS
 *
 * Authors:	Wensong Zhang <wensong@linuxvirtualserver.org>
 *		Julian Anastasov <ja@ssi.bg>
 *
 * Changes:	Hans Schillstrom <hans.schillstrom@ericsson.com>
 *
 *		Network name space (netns) aware.
* Global data moved to netns i.e struct netns_ipvs * tcp_timeouts table has copy per netns in a hash table per * protocol ip_vs_proto_data and is handled by netns */ #define KMSG_COMPONENT "IPVS" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/kernel.h> #include <linux/ip.h> #include <linux/tcp.h> /* for tcphdr */ #include <net/ip.h> #include <net/tcp.h> /* for csum_tcpudp_magic */ #include <net/ip6_checksum.h> #include <linux/netfilter.h> #include <linux/netfilter_ipv4.h> #include <linux/indirect_call_wrapper.h> #include <net/ip_vs.h> static int tcp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp); static int tcp_conn_schedule(struct netns_ipvs *ipvs, int af, struct sk_buff *skb, struct ip_vs_proto_data *pd, int *verdict, struct ip_vs_conn **cpp, struct ip_vs_iphdr *iph) { struct ip_vs_service *svc; struct tcphdr _tcph, *th; __be16 _ports[2], *ports = NULL; /* In the event of icmp, we're only guaranteed to have the first 8 * bytes of the transport header, so we only check the rest of the * TCP packet for non-ICMP packets */ if (likely(!ip_vs_iph_icmp(iph))) { th = skb_header_pointer(skb, iph->len, sizeof(_tcph), &_tcph); if (th) { if (th->rst || !(sysctl_sloppy_tcp(ipvs) || th->syn)) return 1; ports = &th->source; } } else { ports = skb_header_pointer( skb, iph->len, sizeof(_ports), &_ports); } if (!ports) { *verdict = NF_DROP; return 0; } /* No !th->ack check to allow scheduling on SYN+ACK for Active FTP */ if (likely(!ip_vs_iph_inverse(iph))) svc = ip_vs_service_find(ipvs, af, skb->mark, iph->protocol, &iph->daddr, ports[1]); else svc = ip_vs_service_find(ipvs, af, skb->mark, iph->protocol, &iph->saddr, ports[0]); if (svc) { int ignored; if (ip_vs_todrop(ipvs)) { /* * It seems that we are very loaded. * We have to drop this packet :( */ *verdict = NF_DROP; return 0; } /* * Let the virtual server select a real server for the * incoming connection, and create a connection entry. 
*/ *cpp = ip_vs_schedule(svc, skb, pd, &ignored, iph); if (!*cpp && ignored <= 0) { if (!ignored) *verdict = ip_vs_leave(svc, skb, pd, iph); else *verdict = NF_DROP; return 0; } } /* NF_ACCEPT */ return 1; } static inline void tcp_fast_csum_update(int af, struct tcphdr *tcph, const union nf_inet_addr *oldip, const union nf_inet_addr *newip, __be16 oldport, __be16 newport) { #ifdef CONFIG_IP_VS_IPV6 if (af == AF_INET6) tcph->check = csum_fold(ip_vs_check_diff16(oldip->ip6, newip->ip6, ip_vs_check_diff2(oldport, newport, ~csum_unfold(tcph->check)))); else #endif tcph->check = csum_fold(ip_vs_check_diff4(oldip->ip, newip->ip, ip_vs_check_diff2(oldport, newport, ~csum_unfold(tcph->check)))); } static inline void tcp_partial_csum_update(int af, struct tcphdr *tcph, const union nf_inet_addr *oldip, const union nf_inet_addr *newip, __be16 oldlen, __be16 newlen) { #ifdef CONFIG_IP_VS_IPV6 if (af == AF_INET6) tcph->check = ~csum_fold(ip_vs_check_diff16(oldip->ip6, newip->ip6, ip_vs_check_diff2(oldlen, newlen, csum_unfold(tcph->check)))); else #endif tcph->check = ~csum_fold(ip_vs_check_diff4(oldip->ip, newip->ip, ip_vs_check_diff2(oldlen, newlen, csum_unfold(tcph->check)))); } INDIRECT_CALLABLE_SCOPE int tcp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp, struct ip_vs_conn *cp, struct ip_vs_iphdr *iph) { struct tcphdr *tcph; unsigned int tcphoff = iph->len; bool payload_csum = false; int oldlen; #ifdef CONFIG_IP_VS_IPV6 if (cp->af == AF_INET6 && iph->fragoffs) return 1; #endif oldlen = skb->len - tcphoff; /* csum_check requires unshared skb */ if (skb_ensure_writable(skb, tcphoff + sizeof(*tcph))) return 0; if (unlikely(cp->app != NULL)) { int ret; /* Some checks before mangling */ if (!tcp_csum_check(cp->af, skb, pp)) return 0; /* Call application helper if needed */ if (!(ret = ip_vs_app_pkt_out(cp, skb, iph))) return 0; /* ret=2: csum update is needed after payload mangling */ if (ret == 1) oldlen = skb->len - tcphoff; else payload_csum = true; } tcph = (void *)skb_network_header(skb) + tcphoff; tcph->source = cp->vport; /* Adjust TCP checksums */ if (skb->ip_summed == CHECKSUM_PARTIAL) { tcp_partial_csum_update(cp->af, tcph, &cp->daddr, &cp->vaddr, htons(oldlen), htons(skb->len - tcphoff)); } else if (!payload_csum) { /* Only port and addr are changed, do fast csum update */ tcp_fast_csum_update(cp->af, tcph, &cp->daddr, &cp->vaddr, cp->dport, cp->vport); if (skb->ip_summed == CHECKSUM_COMPLETE) skb->ip_summed = cp->app ? 
CHECKSUM_UNNECESSARY : CHECKSUM_NONE; } else { /* full checksum calculation */ tcph->check = 0; skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0); #ifdef CONFIG_IP_VS_IPV6 if (cp->af == AF_INET6) tcph->check = csum_ipv6_magic(&cp->vaddr.in6, &cp->caddr.in6, skb->len - tcphoff, cp->protocol, skb->csum); else #endif tcph->check = csum_tcpudp_magic(cp->vaddr.ip, cp->caddr.ip, skb->len - tcphoff, cp->protocol, skb->csum); skb->ip_summed = CHECKSUM_UNNECESSARY; IP_VS_DBG(11, "O-pkt: %s O-csum=%d (+%zd)\n", pp->name, tcph->check, (char*)&(tcph->check) - (char*)tcph); } return 1; } static int tcp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp, struct ip_vs_conn *cp, struct ip_vs_iphdr *iph) { struct tcphdr *tcph; unsigned int tcphoff = iph->len; bool payload_csum = false; int oldlen; #ifdef CONFIG_IP_VS_IPV6 if (cp->af == AF_INET6 && iph->fragoffs) return 1; #endif oldlen = skb->len - tcphoff; /* csum_check requires unshared skb */ if (skb_ensure_writable(skb, tcphoff + sizeof(*tcph))) return 0; if (unlikely(cp->app != NULL)) { int ret; /* Some checks before mangling */ if (!tcp_csum_check(cp->af, skb, pp)) return 0; /* * Attempt ip_vs_app call. * It will fix ip_vs_conn and iph ack_seq stuff */ if (!(ret = ip_vs_app_pkt_in(cp, skb, iph))) return 0; /* ret=2: csum update is needed after payload mangling */ if (ret == 1) oldlen = skb->len - tcphoff; else payload_csum = true; } tcph = (void *)skb_network_header(skb) + tcphoff; tcph->dest = cp->dport; /* * Adjust TCP checksums */ if (skb->ip_summed == CHECKSUM_PARTIAL) { tcp_partial_csum_update(cp->af, tcph, &cp->vaddr, &cp->daddr, htons(oldlen), htons(skb->len - tcphoff)); } else if (!payload_csum) { /* Only port and addr are changed, do fast csum update */ tcp_fast_csum_update(cp->af, tcph, &cp->vaddr, &cp->daddr, cp->vport, cp->dport); if (skb->ip_summed == CHECKSUM_COMPLETE) skb->ip_summed = cp->app ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE; } else { /* full checksum calculation */ tcph->check = 0; skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0); #ifdef CONFIG_IP_VS_IPV6 if (cp->af == AF_INET6) tcph->check = csum_ipv6_magic(&cp->caddr.in6, &cp->daddr.in6, skb->len - tcphoff, cp->protocol, skb->csum); else #endif tcph->check = csum_tcpudp_magic(cp->caddr.ip, cp->daddr.ip, skb->len - tcphoff, cp->protocol, skb->csum); skb->ip_summed = CHECKSUM_UNNECESSARY; } return 1; } static int tcp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp) { unsigned int tcphoff; #ifdef CONFIG_IP_VS_IPV6 if (af == AF_INET6) tcphoff = sizeof(struct ipv6hdr); else #endif tcphoff = ip_hdrlen(skb); switch (skb->ip_summed) { case CHECKSUM_NONE: skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0); fallthrough; case CHECKSUM_COMPLETE: #ifdef CONFIG_IP_VS_IPV6 if (af == AF_INET6) { if (csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr, skb->len - tcphoff, ipv6_hdr(skb)->nexthdr, skb->csum)) { IP_VS_DBG_RL_PKT(0, af, pp, skb, 0, "Failed checksum for"); return 0; } } else #endif if (csum_tcpudp_magic(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr, skb->len - tcphoff, ip_hdr(skb)->protocol, skb->csum)) { IP_VS_DBG_RL_PKT(0, af, pp, skb, 0, "Failed checksum for"); return 0; } break; default: /* No need to checksum. 
*/ break; } return 1; } #define TCP_DIR_INPUT 0 #define TCP_DIR_OUTPUT 4 #define TCP_DIR_INPUT_ONLY 8 static const int tcp_state_off[IP_VS_DIR_LAST] = { [IP_VS_DIR_INPUT] = TCP_DIR_INPUT, [IP_VS_DIR_OUTPUT] = TCP_DIR_OUTPUT, [IP_VS_DIR_INPUT_ONLY] = TCP_DIR_INPUT_ONLY, }; /* * Timeout table[state] */ static const int tcp_timeouts[IP_VS_TCP_S_LAST+1] = { [IP_VS_TCP_S_NONE] = 2*HZ, [IP_VS_TCP_S_ESTABLISHED] = 15*60*HZ, [IP_VS_TCP_S_SYN_SENT] = 2*60*HZ, [IP_VS_TCP_S_SYN_RECV] = 1*60*HZ, [IP_VS_TCP_S_FIN_WAIT] = 2*60*HZ, [IP_VS_TCP_S_TIME_WAIT] = 2*60*HZ, [IP_VS_TCP_S_CLOSE] = 10*HZ, [IP_VS_TCP_S_CLOSE_WAIT] = 60*HZ, [IP_VS_TCP_S_LAST_ACK] = 30*HZ, [IP_VS_TCP_S_LISTEN] = 2*60*HZ, [IP_VS_TCP_S_SYNACK] = 120*HZ, [IP_VS_TCP_S_LAST] = 2*HZ, }; static const char *const tcp_state_name_table[IP_VS_TCP_S_LAST+1] = { [IP_VS_TCP_S_NONE] = "NONE", [IP_VS_TCP_S_ESTABLISHED] = "ESTABLISHED", [IP_VS_TCP_S_SYN_SENT] = "SYN_SENT", [IP_VS_TCP_S_SYN_RECV] = "SYN_RECV", [IP_VS_TCP_S_FIN_WAIT] = "FIN_WAIT", [IP_VS_TCP_S_TIME_WAIT] = "TIME_WAIT", [IP_VS_TCP_S_CLOSE] = "CLOSE", [IP_VS_TCP_S_CLOSE_WAIT] = "CLOSE_WAIT", [IP_VS_TCP_S_LAST_ACK] = "LAST_ACK", [IP_VS_TCP_S_LISTEN] = "LISTEN", [IP_VS_TCP_S_SYNACK] = "SYNACK", [IP_VS_TCP_S_LAST] = "BUG!", }; static const bool tcp_state_active_table[IP_VS_TCP_S_LAST] = { [IP_VS_TCP_S_NONE] = false, [IP_VS_TCP_S_ESTABLISHED] = true, [IP_VS_TCP_S_SYN_SENT] = true, [IP_VS_TCP_S_SYN_RECV] = true, [IP_VS_TCP_S_FIN_WAIT] = false, [IP_VS_TCP_S_TIME_WAIT] = false, [IP_VS_TCP_S_CLOSE] = false, [IP_VS_TCP_S_CLOSE_WAIT] = false, [IP_VS_TCP_S_LAST_ACK] = false, [IP_VS_TCP_S_LISTEN] = false, [IP_VS_TCP_S_SYNACK] = true, }; #define sNO IP_VS_TCP_S_NONE #define sES IP_VS_TCP_S_ESTABLISHED #define sSS IP_VS_TCP_S_SYN_SENT #define sSR IP_VS_TCP_S_SYN_RECV #define sFW IP_VS_TCP_S_FIN_WAIT #define sTW IP_VS_TCP_S_TIME_WAIT #define sCL IP_VS_TCP_S_CLOSE #define sCW IP_VS_TCP_S_CLOSE_WAIT #define sLA IP_VS_TCP_S_LAST_ACK #define sLI IP_VS_TCP_S_LISTEN #define sSA IP_VS_TCP_S_SYNACK struct tcp_states_t { int next_state[IP_VS_TCP_S_LAST]; }; static const char * tcp_state_name(int state) { if (state >= IP_VS_TCP_S_LAST) return "ERR!"; return tcp_state_name_table[state] ? 
tcp_state_name_table[state] : "?"; } static bool tcp_state_active(int state) { if (state >= IP_VS_TCP_S_LAST) return false; return tcp_state_active_table[state]; } static struct tcp_states_t tcp_states[] = { /* INPUT */ /* sNO, sES, sSS, sSR, sFW, sTW, sCL, sCW, sLA, sLI, sSA */ /*syn*/ {{sSR, sES, sES, sSR, sSR, sSR, sSR, sSR, sSR, sSR, sSR }}, /*fin*/ {{sCL, sCW, sSS, sTW, sTW, sTW, sCL, sCW, sLA, sLI, sTW }}, /*ack*/ {{sES, sES, sSS, sES, sFW, sTW, sCL, sCW, sCL, sLI, sES }}, /*rst*/ {{sCL, sCL, sCL, sSR, sCL, sCL, sCL, sCL, sLA, sLI, sSR }}, /* OUTPUT */ /* sNO, sES, sSS, sSR, sFW, sTW, sCL, sCW, sLA, sLI, sSA */ /*syn*/ {{sSS, sES, sSS, sSR, sSS, sSS, sSS, sSS, sSS, sLI, sSR }}, /*fin*/ {{sTW, sFW, sSS, sTW, sFW, sTW, sCL, sTW, sLA, sLI, sTW }}, /*ack*/ {{sES, sES, sSS, sES, sFW, sTW, sCL, sCW, sLA, sES, sES }}, /*rst*/ {{sCL, sCL, sSS, sCL, sCL, sTW, sCL, sCL, sCL, sCL, sCL }}, /* INPUT-ONLY */ /* sNO, sES, sSS, sSR, sFW, sTW, sCL, sCW, sLA, sLI, sSA */ /*syn*/ {{sSR, sES, sES, sSR, sSR, sSR, sSR, sSR, sSR, sSR, sSR }}, /*fin*/ {{sCL, sFW, sSS, sTW, sFW, sTW, sCL, sCW, sLA, sLI, sTW }}, /*ack*/ {{sES, sES, sSS, sES, sFW, sTW, sCL, sCW, sCL, sLI, sES }}, /*rst*/ {{sCL, sCL, sCL, sSR, sCL, sCL, sCL, sCL, sLA, sLI, sCL }}, }; static struct tcp_states_t tcp_states_dos[] = { /* INPUT */ /* sNO, sES, sSS, sSR, sFW, sTW, sCL, sCW, sLA, sLI, sSA */ /*syn*/ {{sSR, sES, sES, sSR, sSR, sSR, sSR, sSR, sSR, sSR, sSA }}, /*fin*/ {{sCL, sCW, sSS, sTW, sTW, sTW, sCL, sCW, sLA, sLI, sSA }}, /*ack*/ {{sES, sES, sSS, sSR, sFW, sTW, sCL, sCW, sCL, sLI, sSA }}, /*rst*/ {{sCL, sCL, sCL, sSR, sCL, sCL, sCL, sCL, sLA, sLI, sCL }}, /* OUTPUT */ /* sNO, sES, sSS, sSR, sFW, sTW, sCL, sCW, sLA, sLI, sSA */ /*syn*/ {{sSS, sES, sSS, sSA, sSS, sSS, sSS, sSS, sSS, sLI, sSA }}, /*fin*/ {{sTW, sFW, sSS, sTW, sFW, sTW, sCL, sTW, sLA, sLI, sTW }}, /*ack*/ {{sES, sES, sSS, sES, sFW, sTW, sCL, sCW, sLA, sES, sES }}, /*rst*/ {{sCL, sCL, sSS, sCL, sCL, sTW, sCL, sCL, sCL, sCL, sCL }}, /* INPUT-ONLY */ /* sNO, sES, sSS, sSR, sFW, sTW, sCL, sCW, sLA, sLI, sSA */ /*syn*/ {{sSA, sES, sES, sSR, sSA, sSA, sSA, sSA, sSA, sSA, sSA }}, /*fin*/ {{sCL, sFW, sSS, sTW, sFW, sTW, sCL, sCW, sLA, sLI, sTW }}, /*ack*/ {{sES, sES, sSS, sES, sFW, sTW, sCL, sCW, sCL, sLI, sES }}, /*rst*/ {{sCL, sCL, sCL, sSR, sCL, sCL, sCL, sCL, sLA, sLI, sCL }}, }; static void tcp_timeout_change(struct ip_vs_proto_data *pd, int flags) { int on = (flags & 1); /* secure_tcp */ /* ** FIXME: change secure_tcp to independent sysctl var ** or make it per-service or per-app because it is valid ** for most if not for all of the applications. Something ** like "capabilities" (flags) for each object. */ pd->tcp_state_table = (on ? 
tcp_states_dos : tcp_states); } static inline int tcp_state_idx(struct tcphdr *th) { if (th->rst) return 3; if (th->syn) return 0; if (th->fin) return 1; if (th->ack) return 2; return -1; } static inline void set_tcp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp, int direction, struct tcphdr *th) { int state_idx; int new_state = IP_VS_TCP_S_CLOSE; int state_off = tcp_state_off[direction]; /* * Update state offset to INPUT_ONLY if necessary * or delete NO_OUTPUT flag if output packet detected */ if (cp->flags & IP_VS_CONN_F_NOOUTPUT) { if (state_off == TCP_DIR_OUTPUT) cp->flags &= ~IP_VS_CONN_F_NOOUTPUT; else state_off = TCP_DIR_INPUT_ONLY; } if ((state_idx = tcp_state_idx(th)) < 0) { IP_VS_DBG(8, "tcp_state_idx=%d!!!\n", state_idx); goto tcp_state_out; } new_state = pd->tcp_state_table[state_off+state_idx].next_state[cp->state]; tcp_state_out: if (new_state != cp->state) { struct ip_vs_dest *dest = cp->dest; IP_VS_DBG_BUF(8, "%s %s [%c%c%c%c] c:%s:%d v:%s:%d " "d:%s:%d state: %s->%s conn->refcnt:%d\n", pd->pp->name, ((state_off == TCP_DIR_OUTPUT) ? "output " : "input "), th->syn ? 'S' : '.', th->fin ? 'F' : '.', th->ack ? 'A' : '.', th->rst ? 'R' : '.', IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport), IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport), IP_VS_DBG_ADDR(cp->daf, &cp->daddr), ntohs(cp->dport), tcp_state_name(cp->state), tcp_state_name(new_state), refcount_read(&cp->refcnt)); if (dest) { if (!(cp->flags & IP_VS_CONN_F_INACTIVE) && !tcp_state_active(new_state)) { atomic_dec(&dest->activeconns); atomic_inc(&dest->inactconns); cp->flags |= IP_VS_CONN_F_INACTIVE; } else if ((cp->flags & IP_VS_CONN_F_INACTIVE) && tcp_state_active(new_state)) { atomic_inc(&dest->activeconns); atomic_dec(&dest->inactconns); cp->flags &= ~IP_VS_CONN_F_INACTIVE; } } if (new_state == IP_VS_TCP_S_ESTABLISHED) ip_vs_control_assure_ct(cp); } if (likely(pd)) cp->timeout = pd->timeout_table[cp->state = new_state]; else /* What to do ? */ cp->timeout = tcp_timeouts[cp->state = new_state]; } /* * Handle state transitions */ static void tcp_state_transition(struct ip_vs_conn *cp, int direction, const struct sk_buff *skb, struct ip_vs_proto_data *pd) { struct tcphdr _tcph, *th; #ifdef CONFIG_IP_VS_IPV6 int ihl = cp->af == AF_INET ? 
ip_hdrlen(skb) : sizeof(struct ipv6hdr); #else int ihl = ip_hdrlen(skb); #endif th = skb_header_pointer(skb, ihl, sizeof(_tcph), &_tcph); if (th == NULL) return; spin_lock_bh(&cp->lock); set_tcp_state(pd, cp, direction, th); spin_unlock_bh(&cp->lock); } static inline __u16 tcp_app_hashkey(__be16 port) { return (((__force u16)port >> TCP_APP_TAB_BITS) ^ (__force u16)port) & TCP_APP_TAB_MASK; } static int tcp_register_app(struct netns_ipvs *ipvs, struct ip_vs_app *inc) { struct ip_vs_app *i; __u16 hash; __be16 port = inc->port; int ret = 0; struct ip_vs_proto_data *pd = ip_vs_proto_data_get(ipvs, IPPROTO_TCP); hash = tcp_app_hashkey(port); list_for_each_entry(i, &ipvs->tcp_apps[hash], p_list) { if (i->port == port) { ret = -EEXIST; goto out; } } list_add_rcu(&inc->p_list, &ipvs->tcp_apps[hash]); atomic_inc(&pd->appcnt); out: return ret; } static void tcp_unregister_app(struct netns_ipvs *ipvs, struct ip_vs_app *inc) { struct ip_vs_proto_data *pd = ip_vs_proto_data_get(ipvs, IPPROTO_TCP); atomic_dec(&pd->appcnt); list_del_rcu(&inc->p_list); } static int tcp_app_conn_bind(struct ip_vs_conn *cp) { struct netns_ipvs *ipvs = cp->ipvs; int hash; struct ip_vs_app *inc; int result = 0; /* Default binding: bind app only for NAT */ if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) return 0; /* Lookup application incarnations and bind the right one */ hash = tcp_app_hashkey(cp->vport); list_for_each_entry_rcu(inc, &ipvs->tcp_apps[hash], p_list) { if (inc->port == cp->vport) { if (unlikely(!ip_vs_app_inc_get(inc))) break; IP_VS_DBG_BUF(9, "%s(): Binding conn %s:%u->" "%s:%u to app %s on port %u\n", __func__, IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport), IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport), inc->name, ntohs(inc->port)); cp->app = inc; if (inc->init_conn) result = inc->init_conn(inc, cp); break; } } return result; } /* * Set LISTEN timeout. (ip_vs_conn_put will setup timer) */ void ip_vs_tcp_conn_listen(struct ip_vs_conn *cp) { struct ip_vs_proto_data *pd = ip_vs_proto_data_get(cp->ipvs, IPPROTO_TCP); spin_lock_bh(&cp->lock); cp->state = IP_VS_TCP_S_LISTEN; cp->timeout = (pd ? pd->timeout_table[IP_VS_TCP_S_LISTEN] : tcp_timeouts[IP_VS_TCP_S_LISTEN]); spin_unlock_bh(&cp->lock); } /* --------------------------------------------- * timeouts is netns related now. * --------------------------------------------- */ static int __ip_vs_tcp_init(struct netns_ipvs *ipvs, struct ip_vs_proto_data *pd) { ip_vs_init_hash_table(ipvs->tcp_apps, TCP_APP_TAB_SIZE); pd->timeout_table = ip_vs_create_timeout_table((int *)tcp_timeouts, sizeof(tcp_timeouts)); if (!pd->timeout_table) return -ENOMEM; pd->tcp_state_table = tcp_states; return 0; } static void __ip_vs_tcp_exit(struct netns_ipvs *ipvs, struct ip_vs_proto_data *pd) { kfree(pd->timeout_table); } struct ip_vs_protocol ip_vs_protocol_tcp = { .name = "TCP", .protocol = IPPROTO_TCP, .num_states = IP_VS_TCP_S_LAST, .dont_defrag = 0, .init = NULL, .exit = NULL, .init_netns = __ip_vs_tcp_init, .exit_netns = __ip_vs_tcp_exit, .register_app = tcp_register_app, .unregister_app = tcp_unregister_app, .conn_schedule = tcp_conn_schedule, .conn_in_get = ip_vs_conn_in_get_proto, .conn_out_get = ip_vs_conn_out_get_proto, .snat_handler = tcp_snat_handler, .dnat_handler = tcp_dnat_handler, .state_name = tcp_state_name, .state_transition = tcp_state_transition, .app_conn_bind = tcp_app_conn_bind, .debug_packet = ip_vs_tcpudp_debug_packet, .timeout_change = tcp_timeout_change, };
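The SNAT/DNAT fast paths above never recompute the whole TCP checksum when only an address or port changes: tcp_fast_csum_update() applies the incremental update of RFC 1624, HC' = ~(~HC + ~m + m'), through the kernel's ip_vs_check_diff2()/ip_vs_check_diff4() helpers. The stand-alone sketch below, with simplified user-space stand-ins for those primitives, checks that the incremental result agrees with a full recomputation after one 16-bit field (say, a port) is rewritten.

/*
 * Demonstration of the RFC 1624 incremental checksum update used by
 * tcp_fast_csum_update(): HC' = ~(~HC + ~m + m'). fold() and csum16()
 * are simplified user-space stand-ins for the kernel's csum primitives.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Fold a 32-bit accumulator into 16 bits, end-around-carry style */
static uint16_t fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* Full one's-complement checksum over an array of 16-bit words */
static uint16_t csum16(const uint16_t *w, size_t n)
{
	uint32_t sum = 0;

	while (n--)
		sum += *w++;
	return (uint16_t)~fold(sum);
}

int main(void)
{
	uint16_t pkt[4] = { 0x1234, 0x5678, 0x9abc, 0xdef0 };
	uint16_t old_w = pkt[1];
	uint16_t new_w = 0x0050;		/* e.g. a rewritten port */
	uint16_t hc = csum16(pkt, 4);

	/* Incremental update: no need to touch the other words */
	uint16_t hc2 = (uint16_t)~fold((uint32_t)(uint16_t)~hc +
				       (uint16_t)~old_w + new_w);

	pkt[1] = new_w;				/* mangle, then recompute */
	printf("recomputed=0x%04x incremental=0x%04x\n", csum16(pkt, 4), hc2);
	return 0;				/* both print 0x73ce */
}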
// SPDX-License-Identifier: GPL-2.0-or-later /* Kernel cryptographic api. * cast5.c - Cast5 cipher algorithm (rfc2144). * * Derived from GnuPG implementation of cast5. * * Major Changes. * Complete conformance to rfc2144. * Supports key size from 40 to 128 bits. * * Copyright (C) 1998, 1999, 2000, 2001 Free Software Foundation, Inc. * Copyright (C) 2003 Kartikey Mahendra Bhatt <kartik_me@hotmail.com>.
*/ #include <linux/unaligned.h> #include <crypto/algapi.h> #include <linux/init.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/types.h> #include <crypto/cast5.h> static const u32 s5[256] = { 0x7ec90c04, 0x2c6e74b9, 0x9b0e66df, 0xa6337911, 0xb86a7fff, 0x1dd358f5, 0x44dd9d44, 0x1731167f, 0x08fbf1fa, 0xe7f511cc, 0xd2051b00, 0x735aba00, 0x2ab722d8, 0x386381cb, 0xacf6243a, 0x69befd7a, 0xe6a2e77f, 0xf0c720cd, 0xc4494816, 0xccf5c180, 0x38851640, 0x15b0a848, 0xe68b18cb, 0x4caadeff, 0x5f480a01, 0x0412b2aa, 0x259814fc, 0x41d0efe2, 0x4e40b48d, 0x248eb6fb, 0x8dba1cfe, 0x41a99b02, 0x1a550a04, 0xba8f65cb, 0x7251f4e7, 0x95a51725, 0xc106ecd7, 0x97a5980a, 0xc539b9aa, 0x4d79fe6a, 0xf2f3f763, 0x68af8040, 0xed0c9e56, 0x11b4958b, 0xe1eb5a88, 0x8709e6b0, 0xd7e07156, 0x4e29fea7, 0x6366e52d, 0x02d1c000, 0xc4ac8e05, 0x9377f571, 0x0c05372a, 0x578535f2, 0x2261be02, 0xd642a0c9, 0xdf13a280, 0x74b55bd2, 0x682199c0, 0xd421e5ec, 0x53fb3ce8, 0xc8adedb3, 0x28a87fc9, 0x3d959981, 0x5c1ff900, 0xfe38d399, 0x0c4eff0b, 0x062407ea, 0xaa2f4fb1, 0x4fb96976, 0x90c79505, 0xb0a8a774, 0xef55a1ff, 0xe59ca2c2, 0xa6b62d27, 0xe66a4263, 0xdf65001f, 0x0ec50966, 0xdfdd55bc, 0x29de0655, 0x911e739a, 0x17af8975, 0x32c7911c, 0x89f89468, 0x0d01e980, 0x524755f4, 0x03b63cc9, 0x0cc844b2, 0xbcf3f0aa, 0x87ac36e9, 0xe53a7426, 0x01b3d82b, 0x1a9e7449, 0x64ee2d7e, 0xcddbb1da, 0x01c94910, 0xb868bf80, 0x0d26f3fd, 0x9342ede7, 0x04a5c284, 0x636737b6, 0x50f5b616, 0xf24766e3, 0x8eca36c1, 0x136e05db, 0xfef18391, 0xfb887a37, 0xd6e7f7d4, 0xc7fb7dc9, 0x3063fcdf, 0xb6f589de, 0xec2941da, 0x26e46695, 0xb7566419, 0xf654efc5, 0xd08d58b7, 0x48925401, 0xc1bacb7f, 0xe5ff550f, 0xb6083049, 0x5bb5d0e8, 0x87d72e5a, 0xab6a6ee1, 0x223a66ce, 0xc62bf3cd, 0x9e0885f9, 0x68cb3e47, 0x086c010f, 0xa21de820, 0xd18b69de, 0xf3f65777, 0xfa02c3f6, 0x407edac3, 0xcbb3d550, 0x1793084d, 0xb0d70eba, 0x0ab378d5, 0xd951fb0c, 0xded7da56, 0x4124bbe4, 0x94ca0b56, 0x0f5755d1, 0xe0e1e56e, 0x6184b5be, 0x580a249f, 0x94f74bc0, 0xe327888e, 0x9f7b5561, 0xc3dc0280, 0x05687715, 0x646c6bd7, 0x44904db3, 0x66b4f0a3, 0xc0f1648a, 0x697ed5af, 0x49e92ff6, 0x309e374f, 0x2cb6356a, 0x85808573, 0x4991f840, 0x76f0ae02, 0x083be84d, 0x28421c9a, 0x44489406, 0x736e4cb8, 0xc1092910, 0x8bc95fc6, 0x7d869cf4, 0x134f616f, 0x2e77118d, 0xb31b2be1, 0xaa90b472, 0x3ca5d717, 0x7d161bba, 0x9cad9010, 0xaf462ba2, 0x9fe459d2, 0x45d34559, 0xd9f2da13, 0xdbc65487, 0xf3e4f94e, 0x176d486f, 0x097c13ea, 0x631da5c7, 0x445f7382, 0x175683f4, 0xcdc66a97, 0x70be0288, 0xb3cdcf72, 0x6e5dd2f3, 0x20936079, 0x459b80a5, 0xbe60e2db, 0xa9c23101, 0xeba5315c, 0x224e42f2, 0x1c5c1572, 0xf6721b2c, 0x1ad2fff3, 0x8c25404e, 0x324ed72f, 0x4067b7fd, 0x0523138e, 0x5ca3bc78, 0xdc0fd66e, 0x75922283, 0x784d6b17, 0x58ebb16e, 0x44094f85, 0x3f481d87, 0xfcfeae7b, 0x77b5ff76, 0x8c2302bf, 0xaaf47556, 0x5f46b02a, 0x2b092801, 0x3d38f5f7, 0x0ca81f36, 0x52af4a8a, 0x66d5e7c0, 0xdf3b0874, 0x95055110, 0x1b5ad7a8, 0xf61ed5ad, 0x6cf6e479, 0x20758184, 0xd0cefa65, 0x88f7be58, 0x4a046826, 0x0ff6f8f3, 0xa09c7f70, 0x5346aba0, 0x5ce96c28, 0xe176eda3, 0x6bac307f, 0x376829d2, 0x85360fa9, 0x17e3fe2a, 0x24b79767, 0xf5a96b20, 0xd6cd2595, 0x68ff1ebf, 0x7555442c, 0xf19f06be, 0xf9e0659a, 0xeeb9491d, 0x34010718, 0xbb30cab8, 0xe822fe15, 0x88570983, 0x750e6249, 0xda627e55, 0x5e76ffa8, 0xb1534546, 0x6d47de08, 0xefe9e7d4 }; static const u32 s6[256] = { 0xf6fa8f9d, 0x2cac6ce1, 0x4ca34867, 0xe2337f7c, 0x95db08e7, 0x016843b4, 0xeced5cbc, 0x325553ac, 0xbf9f0960, 0xdfa1e2ed, 0x83f0579d, 0x63ed86b9, 0x1ab6a6b8, 0xde5ebe39, 0xf38ff732, 0x8989b138, 0x33f14961, 
0xc01937bd, 0xf506c6da, 0xe4625e7e, 0xa308ea99, 0x4e23e33c, 0x79cbd7cc, 0x48a14367, 0xa3149619, 0xfec94bd5, 0xa114174a, 0xeaa01866, 0xa084db2d, 0x09a8486f, 0xa888614a, 0x2900af98, 0x01665991, 0xe1992863, 0xc8f30c60, 0x2e78ef3c, 0xd0d51932, 0xcf0fec14, 0xf7ca07d2, 0xd0a82072, 0xfd41197e, 0x9305a6b0, 0xe86be3da, 0x74bed3cd, 0x372da53c, 0x4c7f4448, 0xdab5d440, 0x6dba0ec3, 0x083919a7, 0x9fbaeed9, 0x49dbcfb0, 0x4e670c53, 0x5c3d9c01, 0x64bdb941, 0x2c0e636a, 0xba7dd9cd, 0xea6f7388, 0xe70bc762, 0x35f29adb, 0x5c4cdd8d, 0xf0d48d8c, 0xb88153e2, 0x08a19866, 0x1ae2eac8, 0x284caf89, 0xaa928223, 0x9334be53, 0x3b3a21bf, 0x16434be3, 0x9aea3906, 0xefe8c36e, 0xf890cdd9, 0x80226dae, 0xc340a4a3, 0xdf7e9c09, 0xa694a807, 0x5b7c5ecc, 0x221db3a6, 0x9a69a02f, 0x68818a54, 0xceb2296f, 0x53c0843a, 0xfe893655, 0x25bfe68a, 0xb4628abc, 0xcf222ebf, 0x25ac6f48, 0xa9a99387, 0x53bddb65, 0xe76ffbe7, 0xe967fd78, 0x0ba93563, 0x8e342bc1, 0xe8a11be9, 0x4980740d, 0xc8087dfc, 0x8de4bf99, 0xa11101a0, 0x7fd37975, 0xda5a26c0, 0xe81f994f, 0x9528cd89, 0xfd339fed, 0xb87834bf, 0x5f04456d, 0x22258698, 0xc9c4c83b, 0x2dc156be, 0x4f628daa, 0x57f55ec5, 0xe2220abe, 0xd2916ebf, 0x4ec75b95, 0x24f2c3c0, 0x42d15d99, 0xcd0d7fa0, 0x7b6e27ff, 0xa8dc8af0, 0x7345c106, 0xf41e232f, 0x35162386, 0xe6ea8926, 0x3333b094, 0x157ec6f2, 0x372b74af, 0x692573e4, 0xe9a9d848, 0xf3160289, 0x3a62ef1d, 0xa787e238, 0xf3a5f676, 0x74364853, 0x20951063, 0x4576698d, 0xb6fad407, 0x592af950, 0x36f73523, 0x4cfb6e87, 0x7da4cec0, 0x6c152daa, 0xcb0396a8, 0xc50dfe5d, 0xfcd707ab, 0x0921c42f, 0x89dff0bb, 0x5fe2be78, 0x448f4f33, 0x754613c9, 0x2b05d08d, 0x48b9d585, 0xdc049441, 0xc8098f9b, 0x7dede786, 0xc39a3373, 0x42410005, 0x6a091751, 0x0ef3c8a6, 0x890072d6, 0x28207682, 0xa9a9f7be, 0xbf32679d, 0xd45b5b75, 0xb353fd00, 0xcbb0e358, 0x830f220a, 0x1f8fb214, 0xd372cf08, 0xcc3c4a13, 0x8cf63166, 0x061c87be, 0x88c98f88, 0x6062e397, 0x47cf8e7a, 0xb6c85283, 0x3cc2acfb, 0x3fc06976, 0x4e8f0252, 0x64d8314d, 0xda3870e3, 0x1e665459, 0xc10908f0, 0x513021a5, 0x6c5b68b7, 0x822f8aa0, 0x3007cd3e, 0x74719eef, 0xdc872681, 0x073340d4, 0x7e432fd9, 0x0c5ec241, 0x8809286c, 0xf592d891, 0x08a930f6, 0x957ef305, 0xb7fbffbd, 0xc266e96f, 0x6fe4ac98, 0xb173ecc0, 0xbc60b42a, 0x953498da, 0xfba1ae12, 0x2d4bd736, 0x0f25faab, 0xa4f3fceb, 0xe2969123, 0x257f0c3d, 0x9348af49, 0x361400bc, 0xe8816f4a, 0x3814f200, 0xa3f94043, 0x9c7a54c2, 0xbc704f57, 0xda41e7f9, 0xc25ad33a, 0x54f4a084, 0xb17f5505, 0x59357cbe, 0xedbd15c8, 0x7f97c5ab, 0xba5ac7b5, 0xb6f6deaf, 0x3a479c3a, 0x5302da25, 0x653d7e6a, 0x54268d49, 0x51a477ea, 0x5017d55b, 0xd7d25d88, 0x44136c76, 0x0404a8c8, 0xb8e5a121, 0xb81a928a, 0x60ed5869, 0x97c55b96, 0xeaec991b, 0x29935913, 0x01fdb7f1, 0x088e8dfa, 0x9ab6f6f5, 0x3b4cbf9f, 0x4a5de3ab, 0xe6051d35, 0xa0e1d855, 0xd36b4cf1, 0xf544edeb, 0xb0e93524, 0xbebb8fbd, 0xa2d762cf, 0x49c92f54, 0x38b5f331, 0x7128a454, 0x48392905, 0xa65b1db8, 0x851c97bd, 0xd675cf2f }; static const u32 s7[256] = { 0x85e04019, 0x332bf567, 0x662dbfff, 0xcfc65693, 0x2a8d7f6f, 0xab9bc912, 0xde6008a1, 0x2028da1f, 0x0227bce7, 0x4d642916, 0x18fac300, 0x50f18b82, 0x2cb2cb11, 0xb232e75c, 0x4b3695f2, 0xb28707de, 0xa05fbcf6, 0xcd4181e9, 0xe150210c, 0xe24ef1bd, 0xb168c381, 0xfde4e789, 0x5c79b0d8, 0x1e8bfd43, 0x4d495001, 0x38be4341, 0x913cee1d, 0x92a79c3f, 0x089766be, 0xbaeeadf4, 0x1286becf, 0xb6eacb19, 0x2660c200, 0x7565bde4, 0x64241f7a, 0x8248dca9, 0xc3b3ad66, 0x28136086, 0x0bd8dfa8, 0x356d1cf2, 0x107789be, 0xb3b2e9ce, 0x0502aa8f, 0x0bc0351e, 0x166bf52a, 0xeb12ff82, 0xe3486911, 0xd34d7516, 0x4e7b3aff, 0x5f43671b, 0x9cf6e037, 0x4981ac83, 0x334266ce, 0x8c9341b7, 
0xd0d854c0, 0xcb3a6c88, 0x47bc2829, 0x4725ba37, 0xa66ad22b, 0x7ad61f1e, 0x0c5cbafa, 0x4437f107, 0xb6e79962, 0x42d2d816, 0x0a961288, 0xe1a5c06e, 0x13749e67, 0x72fc081a, 0xb1d139f7, 0xf9583745, 0xcf19df58, 0xbec3f756, 0xc06eba30, 0x07211b24, 0x45c28829, 0xc95e317f, 0xbc8ec511, 0x38bc46e9, 0xc6e6fa14, 0xbae8584a, 0xad4ebc46, 0x468f508b, 0x7829435f, 0xf124183b, 0x821dba9f, 0xaff60ff4, 0xea2c4e6d, 0x16e39264, 0x92544a8b, 0x009b4fc3, 0xaba68ced, 0x9ac96f78, 0x06a5b79a, 0xb2856e6e, 0x1aec3ca9, 0xbe838688, 0x0e0804e9, 0x55f1be56, 0xe7e5363b, 0xb3a1f25d, 0xf7debb85, 0x61fe033c, 0x16746233, 0x3c034c28, 0xda6d0c74, 0x79aac56c, 0x3ce4e1ad, 0x51f0c802, 0x98f8f35a, 0x1626a49f, 0xeed82b29, 0x1d382fe3, 0x0c4fb99a, 0xbb325778, 0x3ec6d97b, 0x6e77a6a9, 0xcb658b5c, 0xd45230c7, 0x2bd1408b, 0x60c03eb7, 0xb9068d78, 0xa33754f4, 0xf430c87d, 0xc8a71302, 0xb96d8c32, 0xebd4e7be, 0xbe8b9d2d, 0x7979fb06, 0xe7225308, 0x8b75cf77, 0x11ef8da4, 0xe083c858, 0x8d6b786f, 0x5a6317a6, 0xfa5cf7a0, 0x5dda0033, 0xf28ebfb0, 0xf5b9c310, 0xa0eac280, 0x08b9767a, 0xa3d9d2b0, 0x79d34217, 0x021a718d, 0x9ac6336a, 0x2711fd60, 0x438050e3, 0x069908a8, 0x3d7fedc4, 0x826d2bef, 0x4eeb8476, 0x488dcf25, 0x36c9d566, 0x28e74e41, 0xc2610aca, 0x3d49a9cf, 0xbae3b9df, 0xb65f8de6, 0x92aeaf64, 0x3ac7d5e6, 0x9ea80509, 0xf22b017d, 0xa4173f70, 0xdd1e16c3, 0x15e0d7f9, 0x50b1b887, 0x2b9f4fd5, 0x625aba82, 0x6a017962, 0x2ec01b9c, 0x15488aa9, 0xd716e740, 0x40055a2c, 0x93d29a22, 0xe32dbf9a, 0x058745b9, 0x3453dc1e, 0xd699296e, 0x496cff6f, 0x1c9f4986, 0xdfe2ed07, 0xb87242d1, 0x19de7eae, 0x053e561a, 0x15ad6f8c, 0x66626c1c, 0x7154c24c, 0xea082b2a, 0x93eb2939, 0x17dcb0f0, 0x58d4f2ae, 0x9ea294fb, 0x52cf564c, 0x9883fe66, 0x2ec40581, 0x763953c3, 0x01d6692e, 0xd3a0c108, 0xa1e7160e, 0xe4f2dfa6, 0x693ed285, 0x74904698, 0x4c2b0edd, 0x4f757656, 0x5d393378, 0xa132234f, 0x3d321c5d, 0xc3f5e194, 0x4b269301, 0xc79f022f, 0x3c997e7e, 0x5e4f9504, 0x3ffafbbd, 0x76f7ad0e, 0x296693f4, 0x3d1fce6f, 0xc61e45be, 0xd3b5ab34, 0xf72bf9b7, 0x1b0434c0, 0x4e72b567, 0x5592a33d, 0xb5229301, 0xcfd2a87f, 0x60aeb767, 0x1814386b, 0x30bcc33d, 0x38a0c07d, 0xfd1606f2, 0xc363519b, 0x589dd390, 0x5479f8e6, 0x1cb8d647, 0x97fd61a9, 0xea7759f4, 0x2d57539d, 0x569a58cf, 0xe84e63ad, 0x462e1b78, 0x6580f87e, 0xf3817914, 0x91da55f4, 0x40a230f3, 0xd1988f35, 0xb6e318d2, 0x3ffa50bc, 0x3d40f021, 0xc3c0bdae, 0x4958c24c, 0x518f36b2, 0x84b1d370, 0x0fedce83, 0x878ddada, 0xf2a279c7, 0x94e01be8, 0x90716f4b, 0x954b8aa3 }; static const u32 sb8[256] = { 0xe216300d, 0xbbddfffc, 0xa7ebdabd, 0x35648095, 0x7789f8b7, 0xe6c1121b, 0x0e241600, 0x052ce8b5, 0x11a9cfb0, 0xe5952f11, 0xece7990a, 0x9386d174, 0x2a42931c, 0x76e38111, 0xb12def3a, 0x37ddddfc, 0xde9adeb1, 0x0a0cc32c, 0xbe197029, 0x84a00940, 0xbb243a0f, 0xb4d137cf, 0xb44e79f0, 0x049eedfd, 0x0b15a15d, 0x480d3168, 0x8bbbde5a, 0x669ded42, 0xc7ece831, 0x3f8f95e7, 0x72df191b, 0x7580330d, 0x94074251, 0x5c7dcdfa, 0xabbe6d63, 0xaa402164, 0xb301d40a, 0x02e7d1ca, 0x53571dae, 0x7a3182a2, 0x12a8ddec, 0xfdaa335d, 0x176f43e8, 0x71fb46d4, 0x38129022, 0xce949ad4, 0xb84769ad, 0x965bd862, 0x82f3d055, 0x66fb9767, 0x15b80b4e, 0x1d5b47a0, 0x4cfde06f, 0xc28ec4b8, 0x57e8726e, 0x647a78fc, 0x99865d44, 0x608bd593, 0x6c200e03, 0x39dc5ff6, 0x5d0b00a3, 0xae63aff2, 0x7e8bd632, 0x70108c0c, 0xbbd35049, 0x2998df04, 0x980cf42a, 0x9b6df491, 0x9e7edd53, 0x06918548, 0x58cb7e07, 0x3b74ef2e, 0x522fffb1, 0xd24708cc, 0x1c7e27cd, 0xa4eb215b, 0x3cf1d2e2, 0x19b47a38, 0x424f7618, 0x35856039, 0x9d17dee7, 0x27eb35e6, 0xc9aff67b, 0x36baf5b8, 0x09c467cd, 0xc18910b1, 0xe11dbf7b, 0x06cd1af8, 0x7170c608, 0x2d5e3354, 0xd4de495a, 
0x64c6d006, 0xbcc0c62c, 0x3dd00db3, 0x708f8f34, 0x77d51b42, 0x264f620f, 0x24b8d2bf, 0x15c1b79e, 0x46a52564, 0xf8d7e54e, 0x3e378160, 0x7895cda5, 0x859c15a5, 0xe6459788, 0xc37bc75f, 0xdb07ba0c, 0x0676a3ab, 0x7f229b1e, 0x31842e7b, 0x24259fd7, 0xf8bef472, 0x835ffcb8, 0x6df4c1f2, 0x96f5b195, 0xfd0af0fc, 0xb0fe134c, 0xe2506d3d, 0x4f9b12ea, 0xf215f225, 0xa223736f, 0x9fb4c428, 0x25d04979, 0x34c713f8, 0xc4618187, 0xea7a6e98, 0x7cd16efc, 0x1436876c, 0xf1544107, 0xbedeee14, 0x56e9af27, 0xa04aa441, 0x3cf7c899, 0x92ecbae6, 0xdd67016d, 0x151682eb, 0xa842eedf, 0xfdba60b4, 0xf1907b75, 0x20e3030f, 0x24d8c29e, 0xe139673b, 0xefa63fb8, 0x71873054, 0xb6f2cf3b, 0x9f326442, 0xcb15a4cc, 0xb01a4504, 0xf1e47d8d, 0x844a1be5, 0xbae7dfdc, 0x42cbda70, 0xcd7dae0a, 0x57e85b7a, 0xd53f5af6, 0x20cf4d8c, 0xcea4d428, 0x79d130a4, 0x3486ebfb, 0x33d3cddc, 0x77853b53, 0x37effcb5, 0xc5068778, 0xe580b3e6, 0x4e68b8f4, 0xc5c8b37e, 0x0d809ea2, 0x398feb7c, 0x132a4f94, 0x43b7950e, 0x2fee7d1c, 0x223613bd, 0xdd06caa2, 0x37df932b, 0xc4248289, 0xacf3ebc3, 0x5715f6b7, 0xef3478dd, 0xf267616f, 0xc148cbe4, 0x9052815e, 0x5e410fab, 0xb48a2465, 0x2eda7fa4, 0xe87b40e4, 0xe98ea084, 0x5889e9e1, 0xefd390fc, 0xdd07d35b, 0xdb485694, 0x38d7e5b2, 0x57720101, 0x730edebc, 0x5b643113, 0x94917e4f, 0x503c2fba, 0x646f1282, 0x7523d24a, 0xe0779695, 0xf9c17a8f, 0x7a5b2121, 0xd187b896, 0x29263a4d, 0xba510cdf, 0x81f47c9f, 0xad1163ed, 0xea7b5965, 0x1a00726e, 0x11403092, 0x00da6d77, 0x4a0cdd61, 0xad1f4603, 0x605bdfb0, 0x9eedc364, 0x22ebe6a8, 0xcee7d28a, 0xa0e736a0, 0x5564a6b9, 0x10853209, 0xc7eb8f37, 0x2de705ca, 0x8951570f, 0xdf09822b, 0xbd691a6c, 0xaa12e4f2, 0x87451c0f, 0xe0f6a27a, 0x3ada4819, 0x4cf1764f, 0x0d771c2b, 0x67cdb156, 0x350d8384, 0x5938fa0f, 0x42399ef3, 0x36997b07, 0x0e84093d, 0x4aa93e61, 0x8360d87b, 0x1fa98b0c, 0x1149382c, 0xe97625a5, 0x0614d1b7, 0x0e25244b, 0x0c768347, 0x589e8d82, 0x0d2059d1, 0xa466bb1e, 0xf8da0a82, 0x04f19130, 0xba6e4ec0, 0x99265164, 0x1ee7230d, 0x50b2ad80, 0xeaee6801, 0x8db2a283, 0xea8bf59e }; #define s1 cast_s1 #define s2 cast_s2 #define s3 cast_s3 #define s4 cast_s4 #define F1(D, m, r) ((I = ((m) + (D))), (I = rol32(I, (r))), \ (((s1[I >> 24] ^ s2[(I>>16)&0xff]) - s3[(I>>8)&0xff]) + s4[I&0xff])) #define F2(D, m, r) ((I = ((m) ^ (D))), (I = rol32(I, (r))), \ (((s1[I >> 24] - s2[(I>>16)&0xff]) + s3[(I>>8)&0xff]) ^ s4[I&0xff])) #define F3(D, m, r) ((I = ((m) - (D))), (I = rol32(I, (r))), \ (((s1[I >> 24] + s2[(I>>16)&0xff]) ^ s3[(I>>8)&0xff]) - s4[I&0xff])) void __cast5_encrypt(struct cast5_ctx *c, u8 *outbuf, const u8 *inbuf) { u32 l, r, t; u32 I; /* used by the Fx macros */ u32 *Km; u8 *Kr; Km = c->Km; Kr = c->Kr; /* (L0,R0) <-- (m1...m64). (Split the plaintext into left and * right 32-bit halves L0 = m1...m32 and R0 = m33...m64.) */ l = get_unaligned_be32(inbuf); r = get_unaligned_be32(inbuf + 4); /* (16 rounds) for i from 1 to 16, compute Li and Ri as follows: * Li = Ri-1; * Ri = Li-1 ^ f(Ri-1,Kmi,Kri), where f is defined in Section 2.2 * Rounds 1, 4, 7, 10, 13, and 16 use f function Type 1. * Rounds 2, 5, 8, 11, and 14 use f function Type 2. * Rounds 3, 6, 9, 12, and 15 use f function Type 3. 
*/ t = l; l = r; r = t ^ F1(r, Km[0], Kr[0]); t = l; l = r; r = t ^ F2(r, Km[1], Kr[1]); t = l; l = r; r = t ^ F3(r, Km[2], Kr[2]); t = l; l = r; r = t ^ F1(r, Km[3], Kr[3]); t = l; l = r; r = t ^ F2(r, Km[4], Kr[4]); t = l; l = r; r = t ^ F3(r, Km[5], Kr[5]); t = l; l = r; r = t ^ F1(r, Km[6], Kr[6]); t = l; l = r; r = t ^ F2(r, Km[7], Kr[7]); t = l; l = r; r = t ^ F3(r, Km[8], Kr[8]); t = l; l = r; r = t ^ F1(r, Km[9], Kr[9]); t = l; l = r; r = t ^ F2(r, Km[10], Kr[10]); t = l; l = r; r = t ^ F3(r, Km[11], Kr[11]); if (!(c->rr)) { t = l; l = r; r = t ^ F1(r, Km[12], Kr[12]); t = l; l = r; r = t ^ F2(r, Km[13], Kr[13]); t = l; l = r; r = t ^ F3(r, Km[14], Kr[14]); t = l; l = r; r = t ^ F1(r, Km[15], Kr[15]); } /* c1...c64 <-- (R16,L16). (Exchange final blocks L16, R16 and * concatenate to form the ciphertext.) */ put_unaligned_be32(r, outbuf); put_unaligned_be32(l, outbuf + 4); } EXPORT_SYMBOL_GPL(__cast5_encrypt); static void cast5_encrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf) { __cast5_encrypt(crypto_tfm_ctx(tfm), outbuf, inbuf); } void __cast5_decrypt(struct cast5_ctx *c, u8 *outbuf, const u8 *inbuf) { u32 l, r, t; u32 I; u32 *Km; u8 *Kr; Km = c->Km; Kr = c->Kr; l = get_unaligned_be32(inbuf); r = get_unaligned_be32(inbuf + 4); if (!(c->rr)) { t = l; l = r; r = t ^ F1(r, Km[15], Kr[15]); t = l; l = r; r = t ^ F3(r, Km[14], Kr[14]); t = l; l = r; r = t ^ F2(r, Km[13], Kr[13]); t = l; l = r; r = t ^ F1(r, Km[12], Kr[12]); } t = l; l = r; r = t ^ F3(r, Km[11], Kr[11]); t = l; l = r; r = t ^ F2(r, Km[10], Kr[10]); t = l; l = r; r = t ^ F1(r, Km[9], Kr[9]); t = l; l = r; r = t ^ F3(r, Km[8], Kr[8]); t = l; l = r; r = t ^ F2(r, Km[7], Kr[7]); t = l; l = r; r = t ^ F1(r, Km[6], Kr[6]); t = l; l = r; r = t ^ F3(r, Km[5], Kr[5]); t = l; l = r; r = t ^ F2(r, Km[4], Kr[4]); t = l; l = r; r = t ^ F1(r, Km[3], Kr[3]); t = l; l = r; r = t ^ F3(r, Km[2], Kr[2]); t = l; l = r; r = t ^ F2(r, Km[1], Kr[1]); t = l; l = r; r = t ^ F1(r, Km[0], Kr[0]); put_unaligned_be32(r, outbuf); put_unaligned_be32(l, outbuf + 4); } EXPORT_SYMBOL_GPL(__cast5_decrypt); static void cast5_decrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf) { __cast5_decrypt(crypto_tfm_ctx(tfm), outbuf, inbuf); } static void key_schedule(u32 *x, u32 *z, u32 *k) { #define xi(i) ((x[(i)/4] >> (8*(3-((i)%4)))) & 0xff) #define zi(i) ((z[(i)/4] >> (8*(3-((i)%4)))) & 0xff) z[0] = x[0] ^ s5[xi(13)] ^ s6[xi(15)] ^ s7[xi(12)] ^ sb8[xi(14)] ^ s7[xi(8)]; z[1] = x[2] ^ s5[zi(0)] ^ s6[zi(2)] ^ s7[zi(1)] ^ sb8[zi(3)] ^ sb8[xi(10)]; z[2] = x[3] ^ s5[zi(7)] ^ s6[zi(6)] ^ s7[zi(5)] ^ sb8[zi(4)] ^ s5[xi(9)]; z[3] = x[1] ^ s5[zi(10)] ^ s6[zi(9)] ^ s7[zi(11)] ^ sb8[zi(8)] ^ s6[xi(11)]; k[0] = s5[zi(8)] ^ s6[zi(9)] ^ s7[zi(7)] ^ sb8[zi(6)] ^ s5[zi(2)]; k[1] = s5[zi(10)] ^ s6[zi(11)] ^ s7[zi(5)] ^ sb8[zi(4)] ^ s6[zi(6)]; k[2] = s5[zi(12)] ^ s6[zi(13)] ^ s7[zi(3)] ^ sb8[zi(2)] ^ s7[zi(9)]; k[3] = s5[zi(14)] ^ s6[zi(15)] ^ s7[zi(1)] ^ sb8[zi(0)] ^ sb8[zi(12)]; x[0] = z[2] ^ s5[zi(5)] ^ s6[zi(7)] ^ s7[zi(4)] ^ sb8[zi(6)] ^ s7[zi(0)]; x[1] = z[0] ^ s5[xi(0)] ^ s6[xi(2)] ^ s7[xi(1)] ^ sb8[xi(3)] ^ sb8[zi(2)]; x[2] = z[1] ^ s5[xi(7)] ^ s6[xi(6)] ^ s7[xi(5)] ^ sb8[xi(4)] ^ s5[zi(1)]; x[3] = z[3] ^ s5[xi(10)] ^ s6[xi(9)] ^ s7[xi(11)] ^ sb8[xi(8)] ^ s6[zi(3)]; k[4] = s5[xi(3)] ^ s6[xi(2)] ^ s7[xi(12)] ^ sb8[xi(13)] ^ s5[xi(8)]; k[5] = s5[xi(1)] ^ s6[xi(0)] ^ s7[xi(14)] ^ sb8[xi(15)] ^ s6[xi(13)]; k[6] = s5[xi(7)] ^ s6[xi(6)] ^ s7[xi(8)] ^ sb8[xi(9)] ^ s7[xi(3)]; k[7] = s5[xi(5)] ^ s6[xi(4)] ^ s7[xi(10)] ^ sb8[xi(11)] ^ sb8[xi(7)]; z[0] = x[0] ^ 
s5[xi(13)] ^ s6[xi(15)] ^ s7[xi(12)] ^ sb8[xi(14)] ^ s7[xi(8)]; z[1] = x[2] ^ s5[zi(0)] ^ s6[zi(2)] ^ s7[zi(1)] ^ sb8[zi(3)] ^ sb8[xi(10)]; z[2] = x[3] ^ s5[zi(7)] ^ s6[zi(6)] ^ s7[zi(5)] ^ sb8[zi(4)] ^ s5[xi(9)]; z[3] = x[1] ^ s5[zi(10)] ^ s6[zi(9)] ^ s7[zi(11)] ^ sb8[zi(8)] ^ s6[xi(11)]; k[8] = s5[zi(3)] ^ s6[zi(2)] ^ s7[zi(12)] ^ sb8[zi(13)] ^ s5[zi(9)]; k[9] = s5[zi(1)] ^ s6[zi(0)] ^ s7[zi(14)] ^ sb8[zi(15)] ^ s6[zi(12)]; k[10] = s5[zi(7)] ^ s6[zi(6)] ^ s7[zi(8)] ^ sb8[zi(9)] ^ s7[zi(2)]; k[11] = s5[zi(5)] ^ s6[zi(4)] ^ s7[zi(10)] ^ sb8[zi(11)] ^ sb8[zi(6)]; x[0] = z[2] ^ s5[zi(5)] ^ s6[zi(7)] ^ s7[zi(4)] ^ sb8[zi(6)] ^ s7[zi(0)]; x[1] = z[0] ^ s5[xi(0)] ^ s6[xi(2)] ^ s7[xi(1)] ^ sb8[xi(3)] ^ sb8[zi(2)]; x[2] = z[1] ^ s5[xi(7)] ^ s6[xi(6)] ^ s7[xi(5)] ^ sb8[xi(4)] ^ s5[zi(1)]; x[3] = z[3] ^ s5[xi(10)] ^ s6[xi(9)] ^ s7[xi(11)] ^ sb8[xi(8)] ^ s6[zi(3)]; k[12] = s5[xi(8)] ^ s6[xi(9)] ^ s7[xi(7)] ^ sb8[xi(6)] ^ s5[xi(3)]; k[13] = s5[xi(10)] ^ s6[xi(11)] ^ s7[xi(5)] ^ sb8[xi(4)] ^ s6[xi(7)]; k[14] = s5[xi(12)] ^ s6[xi(13)] ^ s7[xi(3)] ^ sb8[xi(2)] ^ s7[xi(8)]; k[15] = s5[xi(14)] ^ s6[xi(15)] ^ s7[xi(1)] ^ sb8[xi(0)] ^ sb8[xi(13)]; #undef xi #undef zi } int cast5_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int key_len) { struct cast5_ctx *c = crypto_tfm_ctx(tfm); int i; u32 x[4]; u32 z[4]; u32 k[16]; __be32 p_key[4]; c->rr = key_len <= 10 ? 1 : 0; memset(p_key, 0, 16); memcpy(p_key, key, key_len); x[0] = be32_to_cpu(p_key[0]); x[1] = be32_to_cpu(p_key[1]); x[2] = be32_to_cpu(p_key[2]); x[3] = be32_to_cpu(p_key[3]); key_schedule(x, z, k); for (i = 0; i < 16; i++) c->Km[i] = k[i]; key_schedule(x, z, k); for (i = 0; i < 16; i++) c->Kr[i] = k[i] & 0x1f; return 0; } EXPORT_SYMBOL_GPL(cast5_setkey); static struct crypto_alg alg = { .cra_name = "cast5", .cra_driver_name = "cast5-generic", .cra_priority = 100, .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = CAST5_BLOCK_SIZE, .cra_ctxsize = sizeof(struct cast5_ctx), .cra_module = THIS_MODULE, .cra_u = { .cipher = { .cia_min_keysize = CAST5_MIN_KEY_SIZE, .cia_max_keysize = CAST5_MAX_KEY_SIZE, .cia_setkey = cast5_setkey, .cia_encrypt = cast5_encrypt, .cia_decrypt = cast5_decrypt } } }; static int __init cast5_mod_init(void) { return crypto_register_alg(&alg); } static void __exit cast5_mod_fini(void) { crypto_unregister_alg(&alg); } module_init(cast5_mod_init); module_exit(cast5_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Cast5 Cipher Algorithm"); MODULE_ALIAS_CRYPTO("cast5"); MODULE_ALIAS_CRYPTO("cast5-generic");
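For reference, cast5_setkey() above zero-pads a 40..128-bit key to 16 bytes, loads it big-endian into x[0..3], and flags keys of 80 bits or less (key_len <= 10) for the reduced 12-round schedule that RFC 2144 allows (the `if (!(c->rr))` blocks in the encrypt/decrypt paths skip rounds 13-16). A minimal user-space sketch of just that key-conditioning step follows; the sample key bytes and helper names are illustrative only.

/*
 * Sketch of the key-conditioning step in cast5_setkey(): zero-pad the
 * key to 128 bits, load it big-endian, and pick 12 rounds for keys of
 * 80 bits or less. Sample data and helpers are assumptions of this
 * sketch, not kernel interfaces.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

static uint32_t get_be32(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | p[3];
}

int main(void)
{
	const uint8_t key[10] = { 0x01, 0x23, 0x45, 0x67, 0x12,
				  0x34, 0x56, 0x78, 0x23, 0x45 };
	size_t key_len = sizeof(key);
	uint8_t p_key[16] = { 0 };
	uint32_t x[4];
	int rr = key_len <= 10;	/* reduced to 12 rounds for short keys */
	int i;

	memcpy(p_key, key, key_len);	/* zero-pad to 128 bits */
	for (i = 0; i < 4; i++)
		x[i] = get_be32(p_key + 4 * i);

	printf("rounds=%d x0=%08x x3=%08x\n",
	       rr ? 12 : 16, (unsigned)x[0], (unsigned)x[3]);
	return 0;
}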
// SPDX-License-Identifier: GPL-2.0 /* * Memory Migration functionality - linux/mm/migrate.c * * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter * * Page migration was first developed in the context of the memory hotplug * project.
The main authors of the migration code are: * * IWAMOTO Toshihiro <iwamoto@valinux.co.jp> * Hirokazu Takahashi <taka@valinux.co.jp> * Dave Hansen <haveblue@us.ibm.com> * Christoph Lameter */ #include <linux/migrate.h> #include <linux/export.h> #include <linux/swap.h> #include <linux/swapops.h> #include <linux/pagemap.h> #include <linux/buffer_head.h> #include <linux/mm_inline.h> #include <linux/ksm.h> #include <linux/rmap.h> #include <linux/topology.h> #include <linux/cpu.h> #include <linux/cpuset.h> #include <linux/writeback.h> #include <linux/mempolicy.h> #include <linux/vmalloc.h> #include <linux/security.h> #include <linux/backing-dev.h> #include <linux/compaction.h> #include <linux/syscalls.h> #include <linux/compat.h> #include <linux/hugetlb.h> #include <linux/gfp.h> #include <linux/page_idle.h> #include <linux/page_owner.h> #include <linux/sched/mm.h> #include <linux/ptrace.h> #include <linux/memory.h> #include <linux/sched/sysctl.h> #include <linux/memory-tiers.h> #include <linux/pagewalk.h> #include <linux/balloon_compaction.h> #include <linux/zsmalloc.h> #include <asm/tlbflush.h> #include <trace/events/migrate.h> #include "internal.h" #include "swap.h" static const struct movable_operations *page_movable_ops(struct page *page) { VM_WARN_ON_ONCE_PAGE(!page_has_movable_ops(page), page); /* * If we enable page migration for a page of a certain type by marking * it as movable, the page type must be sticky until the page gets freed * back to the buddy. */ #ifdef CONFIG_BALLOON_COMPACTION if (PageOffline(page)) /* Only balloon compaction sets PageOffline pages movable. */ return &balloon_mops; #endif /* CONFIG_BALLOON_COMPACTION */ #if defined(CONFIG_ZSMALLOC) && defined(CONFIG_COMPACTION) if (PageZsmalloc(page)) return &zsmalloc_mops; #endif /* defined(CONFIG_ZSMALLOC) && defined(CONFIG_COMPACTION) */ return NULL; } /** * isolate_movable_ops_page - isolate a movable_ops page for migration * @page: The page. * @mode: The isolation mode. * * Try to isolate a movable_ops page for migration. Will fail if the page is * not a movable_ops page, if the page is already isolated for migration * or if the page was just released by its owner. * * Once isolated, the page cannot get freed until it is either putback * or migrated. * * Returns true if isolation succeeded, otherwise false. */ bool isolate_movable_ops_page(struct page *page, isolate_mode_t mode) { /* * TODO: these pages will not be folios in the future. All * folio dependencies will have to be removed. */ struct folio *folio = folio_get_nontail_page(page); const struct movable_operations *mops; /* * Avoid burning cycles with pages that are still under __free_pages(), * or just got freed under us. * * In case we 'win' a race for a movable page being freed under us and * raise its refcount preventing __free_pages() from doing its job, * the put_page() at the end of this block will take care of * releasing this page, thus avoiding a nasty leakage. */ if (!folio) goto out; /* * Check for movable_ops pages before taking the page lock because * we use non-atomic bitops on newly allocated page flags so * unconditionally grabbing the lock ruins the page owner's side. * * Note that once a page has movable_ops, it will stay that way * until the page is freed. */ if (unlikely(!page_has_movable_ops(page))) goto out_putfolio; /* * As movable pages are not isolated from LRU lists, concurrent * compaction threads can race against page migration functions * as well as race against the release of a page.
/**
 * isolate_movable_ops_page - isolate a movable_ops page for migration
 * @page: The page.
 * @mode: The isolation mode.
 *
 * Try to isolate a movable_ops page for migration. Will fail if the page is
 * not a movable_ops page, if the page is already isolated for migration
 * or if the page was just released by its owner.
 *
 * Once isolated, the page cannot get freed until it is either putback
 * or migrated.
 *
 * Returns true if isolation succeeded, otherwise false.
 */
bool isolate_movable_ops_page(struct page *page, isolate_mode_t mode)
{
	/*
	 * TODO: these pages will not be folios in the future. All
	 * folio dependencies will have to be removed.
	 */
	struct folio *folio = folio_get_nontail_page(page);
	const struct movable_operations *mops;

	/*
	 * Avoid burning cycles with pages that are yet under __free_pages(),
	 * or just got freed under us.
	 *
	 * In case we 'win' a race for a movable page being freed under us and
	 * raise its refcount preventing __free_pages() from doing its job,
	 * the put_page() at the end of this block will take care of
	 * releasing this page, thus avoiding a nasty leakage.
	 */
	if (!folio)
		goto out;

	/*
	 * Check for movable_ops pages before taking the page lock because
	 * we use non-atomic bitops on newly allocated page flags, so
	 * unconditionally grabbing the lock would ruin the page owner's side.
	 *
	 * Note that once a page has movable_ops, it will stay that way
	 * until the page is freed.
	 */
	if (unlikely(!page_has_movable_ops(page)))
		goto out_putfolio;

	/*
	 * As movable pages are not isolated from LRU lists, concurrent
	 * compaction threads can race against page migration functions
	 * as well as against the release of a page.
	 *
	 * In order to avoid having an already isolated movable page
	 * being (wrongly) re-isolated while it is under migration,
	 * or to avoid attempting to isolate pages being released,
	 * let's be sure we hold the page lock
	 * before proceeding with the movable page isolation steps.
	 */
	if (unlikely(!folio_trylock(folio)))
		goto out_putfolio;

	VM_WARN_ON_ONCE_PAGE(!page_has_movable_ops(page), page);
	if (PageMovableOpsIsolated(page))
		goto out_no_isolated;

	mops = page_movable_ops(page);
	if (WARN_ON_ONCE(!mops))
		goto out_no_isolated;

	if (!mops->isolate_page(page, mode))
		goto out_no_isolated;

	/* Driver shouldn't use the isolated flag */
	VM_WARN_ON_ONCE_PAGE(PageMovableOpsIsolated(page), page);
	SetPageMovableOpsIsolated(page);
	folio_unlock(folio);

	return true;

out_no_isolated:
	folio_unlock(folio);
out_putfolio:
	folio_put(folio);
out:
	return false;
}

/**
 * putback_movable_ops_page - putback an isolated movable_ops page
 * @page: The isolated page.
 *
 * Putback an isolated movable_ops page.
 *
 * After the page was putback, it might get freed instantly.
 */
static void putback_movable_ops_page(struct page *page)
{
	/*
	 * TODO: these pages will not be folios in the future. All
	 * folio dependencies will have to be removed.
	 */
	struct folio *folio = page_folio(page);

	VM_WARN_ON_ONCE_PAGE(!page_has_movable_ops(page), page);
	VM_WARN_ON_ONCE_PAGE(!PageMovableOpsIsolated(page), page);
	folio_lock(folio);
	page_movable_ops(page)->putback_page(page);
	ClearPageMovableOpsIsolated(page);
	folio_unlock(folio);
	folio_put(folio);
}

/**
 * migrate_movable_ops_page - migrate an isolated movable_ops page
 * @dst: The destination page.
 * @src: The source page.
 * @mode: The migration mode.
 *
 * Migrate an isolated movable_ops page.
 *
 * If the src page was already released by its owner, the src page is
 * un-isolated (putback) and migration succeeds; the migration core will be the
 * owner of both pages.
 *
 * If the src page was not released by its owner and the migration was
 * successful, the owner of the src page and the dst page are swapped and
 * the src page is un-isolated.
 *
 * If migration fails, the ownership stays unmodified and the src page
 * remains isolated: migration may be retried later or the page can be putback.
 *
 * TODO: migration core will treat both pages as folios and lock them before
 * this call to unlock them after this call. Further, the folio refcounts on
 * src and dst are also released by migration core. These pages will not be
 * folios in the future, so that must be reworked.
 *
 * Returns MIGRATEPAGE_SUCCESS on success, otherwise a negative error
 * code.
 */
static int migrate_movable_ops_page(struct page *dst, struct page *src,
		enum migrate_mode mode)
{
	int rc = MIGRATEPAGE_SUCCESS;

	VM_WARN_ON_ONCE_PAGE(!page_has_movable_ops(src), src);
	VM_WARN_ON_ONCE_PAGE(!PageMovableOpsIsolated(src), src);
	rc = page_movable_ops(src)->migrate_page(dst, src, mode);
	if (rc == MIGRATEPAGE_SUCCESS)
		ClearPageMovableOpsIsolated(src);
	return rc;
}
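/*
 * Illustrative sketch, not part of migrate.c: the expected lifecycle of the
 * three movable_ops helpers above, seen from a caller. The real migration
 * core additionally locks both folios around migrate_movable_ops_page() and
 * manages the refcounts (see the TODO above); that is elided here, and
 * demo_migrate_or_putback() is a made-up name.
 */
#if 0	/* example only, never compiled */
static int demo_migrate_or_putback(struct page *src, struct page *dst,
				   enum migrate_mode mode)
{
	int rc;

	if (!isolate_movable_ops_page(src, ISOLATE_UNEVICTABLE))
		return -EBUSY;	/* not movable_ops, already isolated, or freed */

	rc = migrate_movable_ops_page(dst, src, mode);
	if (rc != MIGRATEPAGE_SUCCESS)
		putback_movable_ops_page(src);	/* src stayed isolated; undo */
	return rc;
}
#endif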
/*
 * Put previously isolated pages back onto the appropriate lists
 * from where they were once taken off for compaction/migration.
 *
 * This function shall be used whenever the isolated pageset has been
 * built from lru, balloon, or hugetlbfs pages. See
 * isolate_migratepages_range() and folio_isolate_hugetlb().
 */
void putback_movable_pages(struct list_head *l)
{
	struct folio *folio;
	struct folio *folio2;

	list_for_each_entry_safe(folio, folio2, l, lru) {
		if (unlikely(folio_test_hugetlb(folio))) {
			folio_putback_hugetlb(folio);
			continue;
		}
		list_del(&folio->lru);
		if (unlikely(page_has_movable_ops(&folio->page))) {
			putback_movable_ops_page(&folio->page);
		} else {
			node_stat_mod_folio(folio, NR_ISOLATED_ANON +
					folio_is_file_lru(folio),
					-folio_nr_pages(folio));
			folio_putback_lru(folio);
		}
	}
}

/* Must be called with an elevated refcount on the non-hugetlb folio */
bool isolate_folio_to_list(struct folio *folio, struct list_head *list)
{
	if (folio_test_hugetlb(folio))
		return folio_isolate_hugetlb(folio, list);

	if (page_has_movable_ops(&folio->page)) {
		if (!isolate_movable_ops_page(&folio->page,
					      ISOLATE_UNEVICTABLE))
			return false;
	} else {
		if (!folio_isolate_lru(folio))
			return false;
		node_stat_add_folio(folio, NR_ISOLATED_ANON +
				    folio_is_file_lru(folio));
	}
	list_add(&folio->lru, list);
	return true;
}

static bool try_to_map_unused_to_zeropage(struct page_vma_mapped_walk *pvmw,
					  struct folio *folio,
					  unsigned long idx)
{
	struct page *page = folio_page(folio, idx);
	bool contains_data;
	pte_t newpte;
	void *addr;

	if (PageCompound(page))
		return false;
	VM_BUG_ON_PAGE(!PageAnon(page), page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(pte_present(ptep_get(pvmw->pte)), page);

	if (folio_test_mlocked(folio) || (pvmw->vma->vm_flags & VM_LOCKED) ||
	    mm_forbids_zeropage(pvmw->vma->vm_mm))
		return false;

	/*
	 * The pmd entry mapping the old thp was flushed and the pte mapping
	 * this subpage is now non-present. If the subpage is only zero-filled
	 * then map it to the shared zeropage.
	 */
	addr = kmap_local_page(page);
	contains_data = memchr_inv(addr, 0, PAGE_SIZE);
	kunmap_local(addr);

	if (contains_data)
		return false;

	newpte = pte_mkspecial(pfn_pte(my_zero_pfn(pvmw->address),
					pvmw->vma->vm_page_prot));
	set_pte_at(pvmw->vma->vm_mm, pvmw->address, pvmw->pte, newpte);

	dec_mm_counter(pvmw->vma->vm_mm, mm_counter(folio));
	return true;
}
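/*
 * Illustrative sketch, not part of migrate.c: the zero-detection trick used
 * above, in isolation. memchr_inv() returns a pointer to the first byte that
 * differs from the given pattern, or NULL when the whole range matches, so a
 * NULL result over PAGE_SIZE bytes of zero means the subpage carries no data
 * and its pte can point at the shared zeropage instead of a freshly migrated
 * page. demo_page_is_zero() is a made-up name.
 */
#if 0	/* example only, never compiled */
static bool demo_page_is_zero(struct page *page)
{
	void *addr = kmap_local_page(page);
	bool zero = !memchr_inv(addr, 0, PAGE_SIZE);

	kunmap_local(addr);
	return zero;
}
#endif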
struct rmap_walk_arg {
	struct folio *folio;
	bool map_unused_to_zeropage;
};

/*
 * Restore a potential migration pte to a working pte entry
 */
static bool remove_migration_pte(struct folio *folio,
		struct vm_area_struct *vma, unsigned long addr, void *arg)
{
	struct rmap_walk_arg *rmap_walk_arg = arg;
	DEFINE_FOLIO_VMA_WALK(pvmw, rmap_walk_arg->folio, vma, addr,
			      PVMW_SYNC | PVMW_MIGRATION);

	while (page_vma_mapped_walk(&pvmw)) {
		rmap_t rmap_flags = RMAP_NONE;
		pte_t old_pte;
		pte_t pte;
		swp_entry_t entry;
		struct page *new;
		unsigned long idx = 0;

		/* pgoff is invalid for ksm pages, but they are never large */
		if (folio_test_large(folio) && !folio_test_hugetlb(folio))
			idx = linear_page_index(vma, pvmw.address) - pvmw.pgoff;
		new = folio_page(folio, idx);

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
		/* PMD-mapped THP migration entry */
		if (!pvmw.pte) {
			VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) ||
					!folio_test_pmd_mappable(folio), folio);
			remove_migration_pmd(&pvmw, new);
			continue;
		}
#endif
		if (rmap_walk_arg->map_unused_to_zeropage &&
		    try_to_map_unused_to_zeropage(&pvmw, folio, idx))
			continue;

		folio_get(folio);
		pte = mk_pte(new, READ_ONCE(vma->vm_page_prot));
		old_pte = ptep_get(pvmw.pte);

		entry = pte_to_swp_entry(old_pte);
		if (!is_migration_entry_young(entry))
			pte = pte_mkold(pte);
		if (folio_test_dirty(folio) && is_migration_entry_dirty(entry))
			pte = pte_mkdirty(pte);
		if (pte_swp_soft_dirty(old_pte))
			pte = pte_mksoft_dirty(pte);
		else
			pte = pte_clear_soft_dirty(pte);

		if (is_writable_migration_entry(entry))
			pte = pte_mkwrite(pte, vma);
		else if (pte_swp_uffd_wp(old_pte))
			pte = pte_mkuffd_wp(pte);

		if (folio_test_anon(folio) && !is_readable_migration_entry(entry))
			rmap_flags |= RMAP_EXCLUSIVE;

		if (unlikely(is_device_private_page(new))) {
			if (pte_write(pte))
				entry = make_writable_device_private_entry(
							page_to_pfn(new));
			else
				entry = make_readable_device_private_entry(
							page_to_pfn(new));
			pte = swp_entry_to_pte(entry);
			if (pte_swp_soft_dirty(old_pte))
				pte = pte_swp_mksoft_dirty(pte);
			if (pte_swp_uffd_wp(old_pte))
				pte = pte_swp_mkuffd_wp(pte);
		}

#ifdef CONFIG_HUGETLB_PAGE
		if (folio_test_hugetlb(folio)) {
			struct hstate *h = hstate_vma(vma);
			unsigned int shift = huge_page_shift(h);
			unsigned long psize = huge_page_size(h);

			pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
			if (folio_test_anon(folio))
				hugetlb_add_anon_rmap(folio, vma, pvmw.address,
						      rmap_flags);
			else
				hugetlb_add_file_rmap(folio);
			set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte,
					psize);
		} else
#endif
		{
			if (folio_test_anon(folio))
				folio_add_anon_rmap_pte(folio, new, vma,
							pvmw.address, rmap_flags);
			else
				folio_add_file_rmap_pte(folio, new, vma);
			set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
		}
		if (READ_ONCE(vma->vm_flags) & VM_LOCKED)
			mlock_drain_local();

		trace_remove_migration_pte(pvmw.address, pte_val(pte),
					   compound_order(new));

		/* No need to invalidate - it was non-present before */
		update_mmu_cache(vma, pvmw.address, pvmw.pte);
	}

	return true;
}

/*
 * Get rid of all migration entries and replace them by
 * references to the indicated page.
 */
void remove_migration_ptes(struct folio *src, struct folio *dst, int flags)
{
	struct rmap_walk_arg rmap_walk_arg = {
		.folio = src,
		.map_unused_to_zeropage = flags & RMP_USE_SHARED_ZEROPAGE,
	};
	struct rmap_walk_control rwc = {
		.rmap_one = remove_migration_pte,
		.arg = &rmap_walk_arg,
	};

	VM_BUG_ON_FOLIO((flags & RMP_USE_SHARED_ZEROPAGE) && (src != dst), src);

	if (flags & RMP_LOCKED)
		rmap_walk_locked(dst, &rwc);
	else
		rmap_walk(dst, &rwc);
}

/*
 * Something used the pte of a page under migration. We need to
 * get to the page and wait until migration is finished.
 * When we return f