2 2 2 1 1 2 2 2 2 1 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 | // SPDX-License-Identifier: GPL-2.0-or-later /* * Linux driver for TerraTec DMX 6Fire USB * * Main routines and module definitions. * * Author: Torsten Schenk <torsten.schenk@zoho.com> * Created: Jan 01, 2011 * Copyright: (C) Torsten Schenk */ #include "chip.h" #include "firmware.h" #include "pcm.h" #include "control.h" #include "comm.h" #include "midi.h" #include <linux/moduleparam.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/init.h> #include <linux/gfp.h> #include <sound/initval.h> MODULE_AUTHOR("Torsten Schenk <torsten.schenk@zoho.com>"); MODULE_DESCRIPTION("TerraTec DMX 6Fire USB audio driver"); MODULE_LICENSE("GPL v2"); static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-max */ static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* Id for card */ static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; /* Enable card */ static struct sfire_chip *chips[SNDRV_CARDS] = SNDRV_DEFAULT_PTR; static struct usb_device *devices[SNDRV_CARDS] = SNDRV_DEFAULT_PTR; module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for the 6fire sound device"); module_param_array(id, charp, NULL, 0444); MODULE_PARM_DESC(id, "ID string for the 6fire sound device."); module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, "Enable the 6fire sound device."); static DEFINE_MUTEX(register_mutex); static void usb6fire_chip_abort(struct sfire_chip *chip) { if (chip) { if (chip->pcm) usb6fire_pcm_abort(chip); if (chip->midi) usb6fire_midi_abort(chip); if (chip->comm) usb6fire_comm_abort(chip); if (chip->control) usb6fire_control_abort(chip); if (chip->card) { snd_card_disconnect(chip->card); snd_card_free_when_closed(chip->card); chip->card = NULL; } } } static void usb6fire_card_free(struct snd_card *card) { struct sfire_chip *chip = card->private_data; if (chip) { if (chip->pcm) usb6fire_pcm_destroy(chip); if (chip->midi) usb6fire_midi_destroy(chip); if (chip->comm) usb6fire_comm_destroy(chip); if (chip->control) usb6fire_control_destroy(chip); } } static int usb6fire_chip_probe(struct usb_interface *intf, const struct usb_device_id *usb_id) { int ret; int i; struct sfire_chip *chip = NULL; struct usb_device *device = interface_to_usbdev(intf); int regidx = -1; /* index in module parameter array */ struct snd_card *card = NULL; /* look if we already serve this card and return if so */ mutex_lock(®ister_mutex); for (i = 0; i < SNDRV_CARDS; i++) { if (devices[i] == device) { if (chips[i]) chips[i]->intf_count++; usb_set_intfdata(intf, chips[i]); mutex_unlock(®ister_mutex); return 0; } else if (!devices[i] && regidx < 0) regidx = i; } if (regidx < 0) { mutex_unlock(®ister_mutex); dev_err(&intf->dev, "too many cards registered.\n"); return -ENODEV; } devices[regidx] = device; mutex_unlock(®ister_mutex); /* check, 
if firmware is present on device, upload it if not */ ret = usb6fire_fw_init(intf); if (ret < 0) return ret; else if (ret == FW_NOT_READY) /* firmware update performed */ return 0; /* if we are here, card can be registered in alsa. */ if (usb_set_interface(device, 0, 0) != 0) { dev_err(&intf->dev, "can't set first interface.\n"); return -EIO; } ret = snd_card_new(&intf->dev, index[regidx], id[regidx], THIS_MODULE, sizeof(struct sfire_chip), &card); if (ret < 0) { dev_err(&intf->dev, "cannot create alsa card.\n"); return ret; } strcpy(card->driver, "6FireUSB"); strcpy(card->shortname, "TerraTec DMX6FireUSB"); sprintf(card->longname, "%s at %d:%d", card->shortname, device->bus->busnum, device->devnum); chip = card->private_data; chips[regidx] = chip; chip->dev = device; chip->regidx = regidx; chip->intf_count = 1; chip->card = card; card->private_free = usb6fire_card_free; ret = usb6fire_comm_init(chip); if (ret < 0) goto destroy_chip; ret = usb6fire_midi_init(chip); if (ret < 0) goto destroy_chip; ret = usb6fire_pcm_init(chip); if (ret < 0) goto destroy_chip; ret = usb6fire_control_init(chip); if (ret < 0) goto destroy_chip; ret = snd_card_register(card); if (ret < 0) { dev_err(&intf->dev, "cannot register card."); goto destroy_chip; } usb_set_intfdata(intf, chip); return 0; destroy_chip: snd_card_free(card); return ret; } static void usb6fire_chip_disconnect(struct usb_interface *intf) { struct sfire_chip *chip; chip = usb_get_intfdata(intf); if (chip) { /* if !chip, fw upload has been performed */ chip->intf_count--; if (!chip->intf_count) { mutex_lock(®ister_mutex); devices[chip->regidx] = NULL; chips[chip->regidx] = NULL; mutex_unlock(®ister_mutex); chip->shutdown = true; usb6fire_chip_abort(chip); } } } static const struct usb_device_id device_table[] = { { .match_flags = USB_DEVICE_ID_MATCH_DEVICE, .idVendor = 0x0ccd, .idProduct = 0x0080 }, {} }; MODULE_DEVICE_TABLE(usb, device_table); static struct usb_driver usb_driver = { .name = "snd-usb-6fire", .probe = usb6fire_chip_probe, .disconnect = usb6fire_chip_disconnect, .id_table = device_table, }; module_usb_driver(usb_driver); |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 | // SPDX-License-Identifier: GPL-2.0 /* * linux/fs/ext4/truncate.h * * Common inline functions needed for truncate support */ /* * Truncate blocks that were not used by write. We have to truncate the * pagecache as well so that corresponding buffers get properly unmapped. */ static inline void ext4_truncate_failed_write(struct inode *inode) { struct address_space *mapping = inode->i_mapping; /* * We don't need to call ext4_break_layouts() because the blocks we * are truncating were never visible to userspace. */ filemap_invalidate_lock(mapping); truncate_inode_pages(mapping, inode->i_size); ext4_truncate(inode); filemap_invalidate_unlock(mapping); } /* * Work out how many blocks we need to proceed with the next chunk of a * truncate transaction. */ static inline unsigned long ext4_blocks_for_truncate(struct inode *inode) { ext4_lblk_t needed; needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9); /* Give ourselves just enough room to cope with inodes in which * i_blocks is corrupt: we've seen disk corruptions in the past * which resulted in random data in an inode which looked enough * like a regular file for ext4 to try to delete it. Things * will go a bit crazy if that happens, but at least we should * try not to panic the whole kernel. */ if (needed < 2) needed = 2; /* But we need to bound the transaction so we don't overflow the * journal. */ if (needed > EXT4_MAX_TRANS_DATA) needed = EXT4_MAX_TRANS_DATA; return EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + needed; } |
13 13 13 13 12 13 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 | // SPDX-License-Identifier: GPL-2.0-or-later /* Service connection management * * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) */ #include <linux/slab.h> #include "ar-internal.h" /* * Find a service connection under RCU conditions. * * We could use a hash table, but that is subject to bucket stuffing by an * attacker as the client gets to pick the epoch and cid values and would know * the hash function. So, instead, we use a hash table for the peer and from * that an rbtree to find the service connection. Under ordinary circumstances * it might be slower than a large hash table, but it is at least limited in * depth. */ struct rxrpc_connection *rxrpc_find_service_conn_rcu(struct rxrpc_peer *peer, struct sk_buff *skb) { struct rxrpc_connection *conn = NULL; struct rxrpc_conn_proto k; struct rxrpc_skb_priv *sp = rxrpc_skb(skb); struct rb_node *p; unsigned int seq = 1; k.epoch = sp->hdr.epoch; k.cid = sp->hdr.cid & RXRPC_CIDMASK; do { /* Unfortunately, rbtree walking doesn't give reliable results * under just the RCU read lock, so we have to check for * changes. */ seq++; /* 2 on the 1st/lockless path, otherwise odd */ read_seqbegin_or_lock(&peer->service_conn_lock, &seq); p = rcu_dereference_raw(peer->service_conns.rb_node); while (p) { conn = rb_entry(p, struct rxrpc_connection, service_node); if (conn->proto.index_key < k.index_key) p = rcu_dereference_raw(p->rb_left); else if (conn->proto.index_key > k.index_key) p = rcu_dereference_raw(p->rb_right); else break; conn = NULL; } } while (need_seqretry(&peer->service_conn_lock, seq)); done_seqretry(&peer->service_conn_lock, seq); _leave(" = %d", conn ? conn->debug_id : -1); return conn; } /* * Insert a service connection into a peer's tree, thereby making it a target * for incoming packets. */ static void rxrpc_publish_service_conn(struct rxrpc_peer *peer, struct rxrpc_connection *conn) { struct rxrpc_connection *cursor = NULL; struct rxrpc_conn_proto k = conn->proto; struct rb_node **pp, *parent; write_seqlock(&peer->service_conn_lock); pp = &peer->service_conns.rb_node; parent = NULL; while (*pp) { parent = *pp; cursor = rb_entry(parent, struct rxrpc_connection, service_node); if (cursor->proto.index_key < k.index_key) pp = &(*pp)->rb_left; else if (cursor->proto.index_key > k.index_key) pp = &(*pp)->rb_right; else goto found_extant_conn; } rb_link_node_rcu(&conn->service_node, parent, pp); rb_insert_color(&conn->service_node, &peer->service_conns); conn_published: set_bit(RXRPC_CONN_IN_SERVICE_CONNS, &conn->flags); write_sequnlock(&peer->service_conn_lock); _leave(" = %d [new]", conn->debug_id); return; found_extant_conn: if (refcount_read(&cursor->ref) == 0) goto replace_old_connection; write_sequnlock(&peer->service_conn_lock); /* We should not be able to get here. 
rxrpc_incoming_connection() is * called in a non-reentrant context, so there can't be a race to * insert a new connection. */ BUG(); replace_old_connection: /* The old connection is from an outdated epoch. */ _debug("replace conn"); rb_replace_node_rcu(&cursor->service_node, &conn->service_node, &peer->service_conns); clear_bit(RXRPC_CONN_IN_SERVICE_CONNS, &cursor->flags); goto conn_published; } /* * Preallocate a service connection. The connection is placed on the proc and * reap lists so that we don't have to get the lock from BH context. */ struct rxrpc_connection *rxrpc_prealloc_service_connection(struct rxrpc_net *rxnet, gfp_t gfp) { struct rxrpc_connection *conn = rxrpc_alloc_connection(rxnet, gfp); if (conn) { /* We maintain an extra ref on the connection whilst it is on * the rxrpc_connections list. */ conn->state = RXRPC_CONN_SERVICE_PREALLOC; refcount_set(&conn->ref, 2); atomic_inc(&rxnet->nr_conns); write_lock(&rxnet->conn_lock); list_add_tail(&conn->link, &rxnet->service_conns); list_add_tail(&conn->proc_link, &rxnet->conn_proc_list); write_unlock(&rxnet->conn_lock); rxrpc_see_connection(conn, rxrpc_conn_new_service); } return conn; } /* * Set up an incoming connection. This is called in BH context with the RCU * read lock held. */ void rxrpc_new_incoming_connection(struct rxrpc_sock *rx, struct rxrpc_connection *conn, const struct rxrpc_security *sec, struct sk_buff *skb) { struct rxrpc_skb_priv *sp = rxrpc_skb(skb); _enter(""); conn->proto.epoch = sp->hdr.epoch; conn->proto.cid = sp->hdr.cid & RXRPC_CIDMASK; conn->orig_service_id = sp->hdr.serviceId; conn->service_id = sp->hdr.serviceId; conn->security_ix = sp->hdr.securityIndex; conn->out_clientflag = 0; conn->security = sec; if (conn->security_ix) conn->state = RXRPC_CONN_SERVICE_UNSECURED; else conn->state = RXRPC_CONN_SERVICE; /* See if we should upgrade the service. This can only happen on the * first packet on a new connection. Once done, it applies to all * subsequent calls on that connection. */ if (sp->hdr.userStatus == RXRPC_USERSTATUS_SERVICE_UPGRADE && conn->service_id == rx->service_upgrade.from) conn->service_id = rx->service_upgrade.to; atomic_set(&conn->active, 1); /* Make the connection a target for incoming packets. */ rxrpc_publish_service_conn(conn->peer, conn); } /* * Remove the service connection from the peer's tree, thereby removing it as a * target for incoming packets. */ void rxrpc_unpublish_service_conn(struct rxrpc_connection *conn) { struct rxrpc_peer *peer = conn->peer; write_seqlock(&peer->service_conn_lock); if (test_and_clear_bit(RXRPC_CONN_IN_SERVICE_CONNS, &conn->flags)) rb_erase(&conn->service_node, &peer->service_conns); write_sequnlock(&peer->service_conn_lock); } |
47 47 47 47 47 47 47 47 47 47 47 47 47 47 47 47 47 46 19 19 19 1939 1938 1939 1008 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 | // SPDX-License-Identifier: GPL-2.0-only /* * mm/percpu-vm.c - vmalloc area based chunk allocation * * Copyright (C) 2010 SUSE Linux Products GmbH * Copyright (C) 2010 Tejun Heo <tj@kernel.org> * * Chunks are mapped into vmalloc areas and populated page by page. * This is the default chunk allocator. */ #include "internal.h" static struct page *pcpu_chunk_page(struct pcpu_chunk *chunk, unsigned int cpu, int page_idx) { /* must not be used on pre-mapped chunk */ WARN_ON(chunk->immutable); return vmalloc_to_page((void *)pcpu_chunk_addr(chunk, cpu, page_idx)); } /** * pcpu_get_pages - get temp pages array * * Returns pointer to array of pointers to struct page which can be indexed * with pcpu_page_idx(). Note that there is only one array and accesses * should be serialized by pcpu_alloc_mutex. * * RETURNS: * Pointer to temp pages array on success. */ static struct page **pcpu_get_pages(void) { static struct page **pages; size_t pages_size = pcpu_nr_units * pcpu_unit_pages * sizeof(pages[0]); lockdep_assert_held(&pcpu_alloc_mutex); if (!pages) pages = pcpu_mem_zalloc(pages_size, GFP_KERNEL); return pages; } /** * pcpu_free_pages - free pages which were allocated for @chunk * @chunk: chunk pages were allocated for * @pages: array of pages to be freed, indexed by pcpu_page_idx() * @page_start: page index of the first page to be freed * @page_end: page index of the last page to be freed + 1 * * Free pages [@page_start and @page_end) in @pages for all units. * The pages were allocated for @chunk. 
*/ static void pcpu_free_pages(struct pcpu_chunk *chunk, struct page **pages, int page_start, int page_end) { unsigned int cpu; int i; for_each_possible_cpu(cpu) { for (i = page_start; i < page_end; i++) { struct page *page = pages[pcpu_page_idx(cpu, i)]; if (page) __free_page(page); } } } /** * pcpu_alloc_pages - allocates pages for @chunk * @chunk: target chunk * @pages: array to put the allocated pages into, indexed by pcpu_page_idx() * @page_start: page index of the first page to be allocated * @page_end: page index of the last page to be allocated + 1 * @gfp: allocation flags passed to the underlying allocator * * Allocate pages [@page_start,@page_end) into @pages for all units. * The allocation is for @chunk. Percpu core doesn't care about the * content of @pages and will pass it verbatim to pcpu_map_pages(). */ static int pcpu_alloc_pages(struct pcpu_chunk *chunk, struct page **pages, int page_start, int page_end, gfp_t gfp) { unsigned int cpu, tcpu; int i; gfp |= __GFP_HIGHMEM; for_each_possible_cpu(cpu) { for (i = page_start; i < page_end; i++) { struct page **pagep = &pages[pcpu_page_idx(cpu, i)]; *pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0); if (!*pagep) goto err; } } return 0; err: while (--i >= page_start) __free_page(pages[pcpu_page_idx(cpu, i)]); for_each_possible_cpu(tcpu) { if (tcpu == cpu) break; for (i = page_start; i < page_end; i++) __free_page(pages[pcpu_page_idx(tcpu, i)]); } return -ENOMEM; } /** * pcpu_pre_unmap_flush - flush cache prior to unmapping * @chunk: chunk the regions to be flushed belongs to * @page_start: page index of the first page to be flushed * @page_end: page index of the last page to be flushed + 1 * * Pages in [@page_start,@page_end) of @chunk are about to be * unmapped. Flush cache. As each flushing trial can be very * expensive, issue flush on the whole region at once rather than * doing it for each cpu. This could be an overkill but is more * scalable. */ static void pcpu_pre_unmap_flush(struct pcpu_chunk *chunk, int page_start, int page_end) { flush_cache_vunmap( pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start), pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end)); } static void __pcpu_unmap_pages(unsigned long addr, int nr_pages) { vunmap_range_noflush(addr, addr + (nr_pages << PAGE_SHIFT)); } /** * pcpu_unmap_pages - unmap pages out of a pcpu_chunk * @chunk: chunk of interest * @pages: pages array which can be used to pass information to free * @page_start: page index of the first page to unmap * @page_end: page index of the last page to unmap + 1 * * For each cpu, unmap pages [@page_start,@page_end) out of @chunk. * Corresponding elements in @pages were cleared by the caller and can * be used to carry information to pcpu_free_pages() which will be * called after all unmaps are finished. The caller should call * proper pre/post flush functions. 
*/ static void pcpu_unmap_pages(struct pcpu_chunk *chunk, struct page **pages, int page_start, int page_end) { unsigned int cpu; int i; for_each_possible_cpu(cpu) { for (i = page_start; i < page_end; i++) { struct page *page; page = pcpu_chunk_page(chunk, cpu, i); WARN_ON(!page); pages[pcpu_page_idx(cpu, i)] = page; } __pcpu_unmap_pages(pcpu_chunk_addr(chunk, cpu, page_start), page_end - page_start); } } /** * pcpu_post_unmap_tlb_flush - flush TLB after unmapping * @chunk: pcpu_chunk the regions to be flushed belong to * @page_start: page index of the first page to be flushed * @page_end: page index of the last page to be flushed + 1 * * Pages [@page_start,@page_end) of @chunk have been unmapped. Flush * TLB for the regions. This can be skipped if the area is to be * returned to vmalloc as vmalloc will handle TLB flushing lazily. * * As with pcpu_pre_unmap_flush(), TLB flushing also is done at once * for the whole region. */ static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk, int page_start, int page_end) { flush_tlb_kernel_range( pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start), pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end)); } static int __pcpu_map_pages(unsigned long addr, struct page **pages, int nr_pages) { return vmap_pages_range_noflush(addr, addr + (nr_pages << PAGE_SHIFT), PAGE_KERNEL, pages, PAGE_SHIFT); } /** * pcpu_map_pages - map pages into a pcpu_chunk * @chunk: chunk of interest * @pages: pages array containing pages to be mapped * @page_start: page index of the first page to map * @page_end: page index of the last page to map + 1 * * For each cpu, map pages [@page_start,@page_end) into @chunk. The * caller is responsible for calling pcpu_post_map_flush() after all * mappings are complete. * * This function is responsible for setting up whatever is necessary for * reverse lookup (addr -> chunk). */ static int pcpu_map_pages(struct pcpu_chunk *chunk, struct page **pages, int page_start, int page_end) { unsigned int cpu, tcpu; int i, err; for_each_possible_cpu(cpu) { err = __pcpu_map_pages(pcpu_chunk_addr(chunk, cpu, page_start), &pages[pcpu_page_idx(cpu, page_start)], page_end - page_start); if (err < 0) goto err; for (i = page_start; i < page_end; i++) pcpu_set_page_chunk(pages[pcpu_page_idx(cpu, i)], chunk); } return 0; err: for_each_possible_cpu(tcpu) { __pcpu_unmap_pages(pcpu_chunk_addr(chunk, tcpu, page_start), page_end - page_start); if (tcpu == cpu) break; } pcpu_post_unmap_tlb_flush(chunk, page_start, page_end); return err; } /** * pcpu_post_map_flush - flush cache after mapping * @chunk: pcpu_chunk the regions to be flushed belong to * @page_start: page index of the first page to be flushed * @page_end: page index of the last page to be flushed + 1 * * Pages [@page_start,@page_end) of @chunk have been mapped. Flush * cache. * * As with pcpu_pre_unmap_flush(), TLB flushing also is done at once * for the whole region. */ static void pcpu_post_map_flush(struct pcpu_chunk *chunk, int page_start, int page_end) { flush_cache_vmap( pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start), pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end)); } /** * pcpu_populate_chunk - populate and map an area of a pcpu_chunk * @chunk: chunk of interest * @page_start: the start page * @page_end: the end page * @gfp: allocation flags passed to the underlying memory allocator * * For each cpu, populate and map pages [@page_start,@page_end) into * @chunk. * * CONTEXT: * pcpu_alloc_mutex, does GFP_KERNEL allocation. 
*/ static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int page_start, int page_end, gfp_t gfp) { struct page **pages; pages = pcpu_get_pages(); if (!pages) return -ENOMEM; if (pcpu_alloc_pages(chunk, pages, page_start, page_end, gfp)) return -ENOMEM; if (pcpu_map_pages(chunk, pages, page_start, page_end)) { pcpu_free_pages(chunk, pages, page_start, page_end); return -ENOMEM; } pcpu_post_map_flush(chunk, page_start, page_end); return 0; } /** * pcpu_depopulate_chunk - depopulate and unmap an area of a pcpu_chunk * @chunk: chunk to depopulate * @page_start: the start page * @page_end: the end page * * For each cpu, depopulate and unmap pages [@page_start,@page_end) * from @chunk. * * Caller is required to call pcpu_post_unmap_tlb_flush() if not returning the * region back to vmalloc() which will lazily flush the tlb. * * CONTEXT: * pcpu_alloc_mutex. */ static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int page_start, int page_end) { struct page **pages; /* * If control reaches here, there must have been at least one * successful population attempt so the temp pages array must * be available now. */ pages = pcpu_get_pages(); BUG_ON(!pages); /* unmap and free */ pcpu_pre_unmap_flush(chunk, page_start, page_end); pcpu_unmap_pages(chunk, pages, page_start, page_end); pcpu_free_pages(chunk, pages, page_start, page_end); } static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp) { struct pcpu_chunk *chunk; struct vm_struct **vms; chunk = pcpu_alloc_chunk(gfp); if (!chunk) return NULL; vms = pcpu_get_vm_areas(pcpu_group_offsets, pcpu_group_sizes, pcpu_nr_groups, pcpu_atom_size); if (!vms) { pcpu_free_chunk(chunk); return NULL; } chunk->data = vms; chunk->base_addr = vms[0]->addr - pcpu_group_offsets[0]; pcpu_stats_chunk_alloc(); trace_percpu_create_chunk(chunk->base_addr); return chunk; } static void pcpu_destroy_chunk(struct pcpu_chunk *chunk) { if (!chunk) return; pcpu_stats_chunk_dealloc(); trace_percpu_destroy_chunk(chunk->base_addr); if (chunk->data) pcpu_free_vm_areas(chunk->data, pcpu_nr_groups); pcpu_free_chunk(chunk); } static struct page *pcpu_addr_to_page(void *addr) { return vmalloc_to_page(addr); } static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai) { /* no extra restriction */ return 0; } /** * pcpu_should_reclaim_chunk - determine if a chunk should go into reclaim * @chunk: chunk of interest * * This is the entry point for percpu reclaim. If a chunk qualifies, it is then * isolated and managed in separate lists at the back of pcpu_slot: sidelined * and to_depopulate respectively. The to_depopulate list holds chunks slated * for depopulation. They no longer contribute to pcpu_nr_empty_pop_pages once * they are on this list. Once depopulated, they are moved onto the sidelined * list which enables them to be pulled back in for allocation if no other chunk * can suffice the allocation. */ static bool pcpu_should_reclaim_chunk(struct pcpu_chunk *chunk) { /* do not reclaim either the first chunk or reserved chunk */ if (chunk == pcpu_first_chunk || chunk == pcpu_reserved_chunk) return false; /* * If it is isolated, it may be on the sidelined list so move it back to * the to_depopulate list. If we hit at least 1/4 pages empty pages AND * there is no system-wide shortage of empty pages aside from this * chunk, move it to the to_depopulate list. */ return ((chunk->isolated && chunk->nr_empty_pop_pages) || (pcpu_nr_empty_pop_pages > (PCPU_EMPTY_POP_PAGES_HIGH + chunk->nr_empty_pop_pages) && chunk->nr_empty_pop_pages >= chunk->nr_pages / 4)); } |
8 666 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 | /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _TRACE_SYSCALL_H #define _TRACE_SYSCALL_H #include <linux/tracepoint.h> #include <linux/unistd.h> #include <linux/trace_events.h> #include <linux/thread_info.h> #include <asm/ptrace.h> /* * A syscall entry in the ftrace syscalls array. * * @name: name of the syscall * @syscall_nr: number of the syscall * @nb_args: number of parameters it takes * @types: list of types as strings * @args: list of args as strings (args[i] matches types[i]) * @enter_fields: list of fields for syscall_enter trace event * @enter_event: associated syscall_enter trace event * @exit_event: associated syscall_exit trace event */ struct syscall_metadata { const char *name; int syscall_nr; int nb_args; const char **types; const char **args; struct list_head enter_fields; struct trace_event_call *enter_event; struct trace_event_call *exit_event; }; #if defined(CONFIG_TRACEPOINTS) && defined(CONFIG_HAVE_SYSCALL_TRACEPOINTS) static inline void syscall_tracepoint_update(struct task_struct *p) { if (test_syscall_work(SYSCALL_TRACEPOINT)) set_task_syscall_work(p, SYSCALL_TRACEPOINT); else clear_task_syscall_work(p, SYSCALL_TRACEPOINT); } #else static inline void syscall_tracepoint_update(struct task_struct *p) { } #endif #endif /* _TRACE_SYSCALL_H */ |
22 22 22 22 22 22 11 11 11 11 11 14 14 14 14 14 14 14 14 14 11 11 11 11 14 11 14 14 14 14 8 8 8 8 8 8 8 8 8 8 8 8 8 14 11 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 
885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 | // SPDX-License-Identifier: GPL-2.0 /* Copyright (C) B.A.T.M.A.N. contributors: * * Marek Lindner, Simon Wunderlich */ #include "send.h" #include "main.h" #include <linux/atomic.h> #include <linux/bug.h> #include <linux/byteorder/generic.h> #include <linux/container_of.h> #include <linux/errno.h> #include <linux/etherdevice.h> #include <linux/gfp.h> #include <linux/if.h> #include <linux/if_ether.h> #include <linux/jiffies.h> #include <linux/kref.h> #include <linux/list.h> #include <linux/netdevice.h> #include <linux/printk.h> #include <linux/rculist.h> #include <linux/rcupdate.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/stddef.h> #include <linux/workqueue.h> #include "distributed-arp-table.h" #include "fragmentation.h" #include "gateway_client.h" #include "hard-interface.h" #include "log.h" #include "network-coding.h" #include "originator.h" #include "routing.h" #include "soft-interface.h" #include "translation-table.h" static void batadv_send_outstanding_bcast_packet(struct work_struct *work); /** * batadv_send_skb_packet() - send an already prepared packet * @skb: the packet to send * @hard_iface: the interface to use to send the broadcast packet * @dst_addr: the payload destination * * Send out an already prepared packet to the given neighbor or broadcast it * using the specified interface. Either hard_iface or neigh_node must be not * NULL. * If neigh_node is NULL, then the packet is broadcasted using hard_iface, * otherwise it is sent as unicast to the given neighbor. * * Regardless of the return value, the skb is consumed. * * Return: A negative errno code is returned on a failure. A success does not * guarantee the frame will be transmitted as it may be dropped due * to congestion or traffic shaping. */ int batadv_send_skb_packet(struct sk_buff *skb, struct batadv_hard_iface *hard_iface, const u8 *dst_addr) { struct batadv_priv *bat_priv; struct ethhdr *ethhdr; int ret; bat_priv = netdev_priv(hard_iface->soft_iface); if (hard_iface->if_status != BATADV_IF_ACTIVE) goto send_skb_err; if (unlikely(!hard_iface->net_dev)) goto send_skb_err; if (!(hard_iface->net_dev->flags & IFF_UP)) { pr_warn("Interface %s is not up - can't send packet via that interface!\n", hard_iface->net_dev->name); goto send_skb_err; } /* push to the ethernet header. 
*/ if (batadv_skb_head_push(skb, ETH_HLEN) < 0) goto send_skb_err; skb_reset_mac_header(skb); ethhdr = eth_hdr(skb); ether_addr_copy(ethhdr->h_source, hard_iface->net_dev->dev_addr); ether_addr_copy(ethhdr->h_dest, dst_addr); ethhdr->h_proto = htons(ETH_P_BATMAN); skb_set_network_header(skb, ETH_HLEN); skb->protocol = htons(ETH_P_BATMAN); skb->dev = hard_iface->net_dev; /* Save a clone of the skb to use when decoding coded packets */ batadv_nc_skb_store_for_decoding(bat_priv, skb); /* dev_queue_xmit() returns a negative result on error. However on * congestion and traffic shaping, it drops and returns NET_XMIT_DROP * (which is > 0). This will not be treated as an error. */ ret = dev_queue_xmit(skb); return net_xmit_eval(ret); send_skb_err: kfree_skb(skb); return NET_XMIT_DROP; } /** * batadv_send_broadcast_skb() - Send broadcast packet via hard interface * @skb: packet to be transmitted (with batadv header and no outer eth header) * @hard_iface: outgoing interface * * Return: A negative errno code is returned on a failure. A success does not * guarantee the frame will be transmitted as it may be dropped due * to congestion or traffic shaping. */ int batadv_send_broadcast_skb(struct sk_buff *skb, struct batadv_hard_iface *hard_iface) { return batadv_send_skb_packet(skb, hard_iface, batadv_broadcast_addr); } /** * batadv_send_unicast_skb() - Send unicast packet to neighbor * @skb: packet to be transmitted (with batadv header and no outer eth header) * @neigh: neighbor which is used as next hop to destination * * Return: A negative errno code is returned on a failure. A success does not * guarantee the frame will be transmitted as it may be dropped due * to congestion or traffic shaping. */ int batadv_send_unicast_skb(struct sk_buff *skb, struct batadv_neigh_node *neigh) { #ifdef CONFIG_BATMAN_ADV_BATMAN_V struct batadv_hardif_neigh_node *hardif_neigh; #endif int ret; ret = batadv_send_skb_packet(skb, neigh->if_incoming, neigh->addr); #ifdef CONFIG_BATMAN_ADV_BATMAN_V hardif_neigh = batadv_hardif_neigh_get(neigh->if_incoming, neigh->addr); if (hardif_neigh && ret != NET_XMIT_DROP) hardif_neigh->bat_v.last_unicast_tx = jiffies; batadv_hardif_neigh_put(hardif_neigh); #endif return ret; } /** * batadv_send_skb_to_orig() - Lookup next-hop and transmit skb. * @skb: Packet to be transmitted. * @orig_node: Final destination of the packet. * @recv_if: Interface used when receiving the packet (can be NULL). * * Looks up the best next-hop towards the passed originator and passes the * skb on for preparation of MAC header. If the packet originated from this * host, NULL can be passed as recv_if and no interface alternating is * attempted. * * Return: negative errno code on a failure, -EINPROGRESS if the skb is * buffered for later transmit or the NET_XMIT status returned by the * lower routine if the packet has been passed down. */ int batadv_send_skb_to_orig(struct sk_buff *skb, struct batadv_orig_node *orig_node, struct batadv_hard_iface *recv_if) { struct batadv_priv *bat_priv = orig_node->bat_priv; struct batadv_neigh_node *neigh_node; int ret; /* batadv_find_router() increases neigh_nodes refcount if found. */ neigh_node = batadv_find_router(bat_priv, orig_node, recv_if); if (!neigh_node) { ret = -EINVAL; goto free_skb; } /* Check if the skb is too large to send in one piece and fragment * it if needed. */ if (atomic_read(&bat_priv->fragmentation) && skb->len > neigh_node->if_incoming->net_dev->mtu) { /* Fragment and send packet. 
*/ ret = batadv_frag_send_packet(skb, orig_node, neigh_node); /* skb was consumed */ skb = NULL; goto put_neigh_node; } /* try to network code the packet, if it is received on an interface * (i.e. being forwarded). If the packet originates from this node or if * network coding fails, then send the packet as usual. */ if (recv_if && batadv_nc_skb_forward(skb, neigh_node)) ret = -EINPROGRESS; else ret = batadv_send_unicast_skb(skb, neigh_node); /* skb was consumed */ skb = NULL; put_neigh_node: batadv_neigh_node_put(neigh_node); free_skb: kfree_skb(skb); return ret; } /** * batadv_send_skb_push_fill_unicast() - extend the buffer and initialize the * common fields for unicast packets * @skb: the skb carrying the unicast header to initialize * @hdr_size: amount of bytes to push at the beginning of the skb * @orig_node: the destination node * * Return: false if the buffer extension was not possible or true otherwise. */ static bool batadv_send_skb_push_fill_unicast(struct sk_buff *skb, int hdr_size, struct batadv_orig_node *orig_node) { struct batadv_unicast_packet *unicast_packet; u8 ttvn = (u8)atomic_read(&orig_node->last_ttvn); if (batadv_skb_head_push(skb, hdr_size) < 0) return false; unicast_packet = (struct batadv_unicast_packet *)skb->data; unicast_packet->version = BATADV_COMPAT_VERSION; /* batman packet type: unicast */ unicast_packet->packet_type = BATADV_UNICAST; /* set unicast ttl */ unicast_packet->ttl = BATADV_TTL; /* copy the destination for faster routing */ ether_addr_copy(unicast_packet->dest, orig_node->orig); /* set the destination tt version number */ unicast_packet->ttvn = ttvn; return true; } /** * batadv_send_skb_prepare_unicast() - encapsulate an skb with a unicast header * @skb: the skb containing the payload to encapsulate * @orig_node: the destination node * * Return: false if the payload could not be encapsulated or true otherwise. */ static bool batadv_send_skb_prepare_unicast(struct sk_buff *skb, struct batadv_orig_node *orig_node) { size_t uni_size = sizeof(struct batadv_unicast_packet); return batadv_send_skb_push_fill_unicast(skb, uni_size, orig_node); } /** * batadv_send_skb_prepare_unicast_4addr() - encapsulate an skb with a * unicast 4addr header * @bat_priv: the bat priv with all the soft interface information * @skb: the skb containing the payload to encapsulate * @orig: the destination node * @packet_subtype: the unicast 4addr packet subtype to use * * Return: false if the payload could not be encapsulated or true otherwise. */ bool batadv_send_skb_prepare_unicast_4addr(struct batadv_priv *bat_priv, struct sk_buff *skb, struct batadv_orig_node *orig, int packet_subtype) { struct batadv_hard_iface *primary_if; struct batadv_unicast_4addr_packet *uc_4addr_packet; bool ret = false; primary_if = batadv_primary_if_get_selected(bat_priv); if (!primary_if) goto out; /* Pull the header space and fill the unicast_packet substructure. 
* We can do that because the first member of the uc_4addr_packet * is of type struct unicast_packet */ if (!batadv_send_skb_push_fill_unicast(skb, sizeof(*uc_4addr_packet), orig)) goto out; uc_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data; uc_4addr_packet->u.packet_type = BATADV_UNICAST_4ADDR; ether_addr_copy(uc_4addr_packet->src, primary_if->net_dev->dev_addr); uc_4addr_packet->subtype = packet_subtype; uc_4addr_packet->reserved = 0; ret = true; out: batadv_hardif_put(primary_if); return ret; } /** * batadv_send_skb_unicast() - encapsulate and send an skb via unicast * @bat_priv: the bat priv with all the soft interface information * @skb: payload to send * @packet_type: the batman unicast packet type to use * @packet_subtype: the unicast 4addr packet subtype (only relevant for unicast * 4addr packets) * @orig_node: the originator to send the packet to * @vid: the vid to be used to search the translation table * * Wrap the given skb into a batman-adv unicast or unicast-4addr header * depending on whether BATADV_UNICAST or BATADV_UNICAST_4ADDR was supplied * as packet_type. Then send this frame to the given orig_node. * * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise. */ int batadv_send_skb_unicast(struct batadv_priv *bat_priv, struct sk_buff *skb, int packet_type, int packet_subtype, struct batadv_orig_node *orig_node, unsigned short vid) { struct batadv_unicast_packet *unicast_packet; struct ethhdr *ethhdr; int ret = NET_XMIT_DROP; if (!orig_node) goto out; switch (packet_type) { case BATADV_UNICAST: if (!batadv_send_skb_prepare_unicast(skb, orig_node)) goto out; break; case BATADV_UNICAST_4ADDR: if (!batadv_send_skb_prepare_unicast_4addr(bat_priv, skb, orig_node, packet_subtype)) goto out; break; default: /* this function supports UNICAST and UNICAST_4ADDR only. It * should never be invoked with any other packet type */ goto out; } /* skb->data might have been reallocated by * batadv_send_skb_prepare_unicast{,_4addr}() */ ethhdr = eth_hdr(skb); unicast_packet = (struct batadv_unicast_packet *)skb->data; /* inform the destination node that we are still missing a correct route * for this client. The destination will receive this packet and will * try to reroute it because the ttvn contained in the header is less * than the current one */ if (batadv_tt_global_client_is_roaming(bat_priv, ethhdr->h_dest, vid)) unicast_packet->ttvn = unicast_packet->ttvn - 1; ret = batadv_send_skb_to_orig(skb, orig_node, NULL); /* skb was consumed */ skb = NULL; out: kfree_skb(skb); return ret; } /** * batadv_send_skb_via_tt_generic() - send an skb via TT lookup * @bat_priv: the bat priv with all the soft interface information * @skb: payload to send * @packet_type: the batman unicast packet type to use * @packet_subtype: the unicast 4addr packet subtype (only relevant for unicast * 4addr packets) * @dst_hint: can be used to override the destination contained in the skb * @vid: the vid to be used to search the translation table * * Look up the recipient node for the destination address in the ethernet * header via the translation table. Wrap the given skb into a batman-adv * unicast or unicast-4addr header depending on whether BATADV_UNICAST or * BATADV_UNICAST_4ADDR was supplied as packet_type. Then send this frame * to the according destination node. * * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise. 
*/ int batadv_send_skb_via_tt_generic(struct batadv_priv *bat_priv, struct sk_buff *skb, int packet_type, int packet_subtype, u8 *dst_hint, unsigned short vid) { struct ethhdr *ethhdr = (struct ethhdr *)skb->data; struct batadv_orig_node *orig_node; u8 *src, *dst; int ret; src = ethhdr->h_source; dst = ethhdr->h_dest; /* if we got an hint! let's send the packet to this client (if any) */ if (dst_hint) { src = NULL; dst = dst_hint; } orig_node = batadv_transtable_search(bat_priv, src, dst, vid); ret = batadv_send_skb_unicast(bat_priv, skb, packet_type, packet_subtype, orig_node, vid); batadv_orig_node_put(orig_node); return ret; } /** * batadv_send_skb_via_gw() - send an skb via gateway lookup * @bat_priv: the bat priv with all the soft interface information * @skb: payload to send * @vid: the vid to be used to search the translation table * * Look up the currently selected gateway. Wrap the given skb into a batman-adv * unicast header and send this frame to this gateway node. * * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise. */ int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb, unsigned short vid) { struct batadv_orig_node *orig_node; int ret; orig_node = batadv_gw_get_selected_orig(bat_priv); ret = batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST_4ADDR, BATADV_P_DATA, orig_node, vid); batadv_orig_node_put(orig_node); return ret; } /** * batadv_forw_packet_free() - free a forwarding packet * @forw_packet: The packet to free * @dropped: whether the packet is freed because is dropped * * This frees a forwarding packet and releases any resources it might * have claimed. */ void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet, bool dropped) { if (dropped) kfree_skb(forw_packet->skb); else consume_skb(forw_packet->skb); batadv_hardif_put(forw_packet->if_incoming); batadv_hardif_put(forw_packet->if_outgoing); if (forw_packet->queue_left) atomic_inc(forw_packet->queue_left); kfree(forw_packet); } /** * batadv_forw_packet_alloc() - allocate a forwarding packet * @if_incoming: The (optional) if_incoming to be grabbed * @if_outgoing: The (optional) if_outgoing to be grabbed * @queue_left: The (optional) queue counter to decrease * @bat_priv: The bat_priv for the mesh of this forw_packet * @skb: The raw packet this forwarding packet shall contain * * Allocates a forwarding packet and tries to get a reference to the * (optional) if_incoming, if_outgoing and queue_left. If queue_left * is NULL then bat_priv is optional, too. * * Return: An allocated forwarding packet on success, NULL otherwise. 
*/ struct batadv_forw_packet * batadv_forw_packet_alloc(struct batadv_hard_iface *if_incoming, struct batadv_hard_iface *if_outgoing, atomic_t *queue_left, struct batadv_priv *bat_priv, struct sk_buff *skb) { struct batadv_forw_packet *forw_packet; const char *qname; if (queue_left && !batadv_atomic_dec_not_zero(queue_left)) { qname = "unknown"; if (queue_left == &bat_priv->bcast_queue_left) qname = "bcast"; if (queue_left == &bat_priv->batman_queue_left) qname = "batman"; batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "%s queue is full\n", qname); return NULL; } forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC); if (!forw_packet) goto err; if (if_incoming) kref_get(&if_incoming->refcount); if (if_outgoing) kref_get(&if_outgoing->refcount); INIT_HLIST_NODE(&forw_packet->list); INIT_HLIST_NODE(&forw_packet->cleanup_list); forw_packet->skb = skb; forw_packet->queue_left = queue_left; forw_packet->if_incoming = if_incoming; forw_packet->if_outgoing = if_outgoing; forw_packet->num_packets = 0; return forw_packet; err: if (queue_left) atomic_inc(queue_left); return NULL; } /** * batadv_forw_packet_was_stolen() - check whether someone stole this packet * @forw_packet: the forwarding packet to check * * This function checks whether the given forwarding packet was claimed by * someone else for free(). * * Return: True if someone stole it, false otherwise. */ static bool batadv_forw_packet_was_stolen(struct batadv_forw_packet *forw_packet) { return !hlist_unhashed(&forw_packet->cleanup_list); } /** * batadv_forw_packet_steal() - claim a forw_packet for free() * @forw_packet: the forwarding packet to steal * @lock: a key to the store to steal from (e.g. forw_{bat,bcast}_list_lock) * * This function tries to steal a specific forw_packet from global * visibility for the purpose of getting it for free(). That means * the caller is *not* allowed to requeue it afterwards. * * Return: True if stealing was successful. False if someone else stole it * before us. */ bool batadv_forw_packet_steal(struct batadv_forw_packet *forw_packet, spinlock_t *lock) { /* did purging routine steal it earlier? */ spin_lock_bh(lock); if (batadv_forw_packet_was_stolen(forw_packet)) { spin_unlock_bh(lock); return false; } hlist_del_init(&forw_packet->list); /* Just to spot misuse of this function */ hlist_add_fake(&forw_packet->cleanup_list); spin_unlock_bh(lock); return true; } /** * batadv_forw_packet_list_steal() - claim a list of forward packets for free() * @forw_list: the to be stolen forward packets * @cleanup_list: a backup pointer, to be able to dispose the packet later * @hard_iface: the interface to steal forward packets from * * This function claims responsibility to free any forw_packet queued on the * given hard_iface. If hard_iface is NULL forwarding packets on all hard * interfaces will be claimed. * * The packets are being moved from the forw_list to the cleanup_list. This * makes it possible for already running threads to notice the claim. 
*/ static void batadv_forw_packet_list_steal(struct hlist_head *forw_list, struct hlist_head *cleanup_list, const struct batadv_hard_iface *hard_iface) { struct batadv_forw_packet *forw_packet; struct hlist_node *safe_tmp_node; hlist_for_each_entry_safe(forw_packet, safe_tmp_node, forw_list, list) { /* if purge_outstanding_packets() was called with an argument * we delete only packets belonging to the given interface */ if (hard_iface && forw_packet->if_incoming != hard_iface && forw_packet->if_outgoing != hard_iface) continue; hlist_del(&forw_packet->list); hlist_add_head(&forw_packet->cleanup_list, cleanup_list); } } /** * batadv_forw_packet_list_free() - free a list of forward packets * @head: a list of to be freed forw_packets * * This function cancels the scheduling of any packet in the provided list, * waits for any possibly running packet forwarding thread to finish and * finally, safely frees this forward packet. * * This function might sleep. */ static void batadv_forw_packet_list_free(struct hlist_head *head) { struct batadv_forw_packet *forw_packet; struct hlist_node *safe_tmp_node; hlist_for_each_entry_safe(forw_packet, safe_tmp_node, head, cleanup_list) { cancel_delayed_work_sync(&forw_packet->delayed_work); hlist_del(&forw_packet->cleanup_list); batadv_forw_packet_free(forw_packet, true); } } /** * batadv_forw_packet_queue() - try to queue a forwarding packet * @forw_packet: the forwarding packet to queue * @lock: a key to the store (e.g. forw_{bat,bcast}_list_lock) * @head: the shelve to queue it on (e.g. forw_{bat,bcast}_list) * @send_time: timestamp (jiffies) when the packet is to be sent * * This function tries to (re)queue a forwarding packet. Requeuing * is prevented if the according interface is shutting down * (e.g. if batadv_forw_packet_list_steal() was called for this * packet earlier). * * Calling batadv_forw_packet_queue() after a call to * batadv_forw_packet_steal() is forbidden! * * Caller needs to ensure that forw_packet->delayed_work was initialized. */ static void batadv_forw_packet_queue(struct batadv_forw_packet *forw_packet, spinlock_t *lock, struct hlist_head *head, unsigned long send_time) { spin_lock_bh(lock); /* did purging routine steal it from us? */ if (batadv_forw_packet_was_stolen(forw_packet)) { /* If you got it for free() without trouble, then * don't get back into the queue after stealing... */ WARN_ONCE(hlist_fake(&forw_packet->cleanup_list), "Requeuing after batadv_forw_packet_steal() not allowed!\n"); spin_unlock_bh(lock); return; } hlist_del_init(&forw_packet->list); hlist_add_head(&forw_packet->list, head); queue_delayed_work(batadv_event_workqueue, &forw_packet->delayed_work, send_time - jiffies); spin_unlock_bh(lock); } /** * batadv_forw_packet_bcast_queue() - try to queue a broadcast packet * @bat_priv: the bat priv with all the soft interface information * @forw_packet: the forwarding packet to queue * @send_time: timestamp (jiffies) when the packet is to be sent * * This function tries to (re)queue a broadcast packet. * * Caller needs to ensure that forw_packet->delayed_work was initialized. 
*/ static void batadv_forw_packet_bcast_queue(struct batadv_priv *bat_priv, struct batadv_forw_packet *forw_packet, unsigned long send_time) { batadv_forw_packet_queue(forw_packet, &bat_priv->forw_bcast_list_lock, &bat_priv->forw_bcast_list, send_time); } /** * batadv_forw_packet_ogmv1_queue() - try to queue an OGMv1 packet * @bat_priv: the bat priv with all the soft interface information * @forw_packet: the forwarding packet to queue * @send_time: timestamp (jiffies) when the packet is to be sent * * This function tries to (re)queue an OGMv1 packet. * * Caller needs to ensure that forw_packet->delayed_work was initialized. */ void batadv_forw_packet_ogmv1_queue(struct batadv_priv *bat_priv, struct batadv_forw_packet *forw_packet, unsigned long send_time) { batadv_forw_packet_queue(forw_packet, &bat_priv->forw_bat_list_lock, &bat_priv->forw_bat_list, send_time); } /** * batadv_forw_bcast_packet_to_list() - queue broadcast packet for transmissions * @bat_priv: the bat priv with all the soft interface information * @skb: broadcast packet to add * @delay: number of jiffies to wait before sending * @own_packet: true if it is a self-generated broadcast packet * @if_in: the interface where the packet was received on * @if_out: the outgoing interface to queue on * * Adds a broadcast packet to the queue and sets up timers. Broadcast packets * are sent multiple times to increase probability for being received. * * This call clones the given skb, hence the caller needs to take into * account that the data segment of the original skb might not be * modifiable anymore. * * Return: NETDEV_TX_OK on success and NETDEV_TX_BUSY on errors. */ static int batadv_forw_bcast_packet_to_list(struct batadv_priv *bat_priv, struct sk_buff *skb, unsigned long delay, bool own_packet, struct batadv_hard_iface *if_in, struct batadv_hard_iface *if_out) { struct batadv_forw_packet *forw_packet; unsigned long send_time = jiffies; struct sk_buff *newskb; newskb = skb_clone(skb, GFP_ATOMIC); if (!newskb) goto err; forw_packet = batadv_forw_packet_alloc(if_in, if_out, &bat_priv->bcast_queue_left, bat_priv, newskb); if (!forw_packet) goto err_packet_free; forw_packet->own = own_packet; INIT_DELAYED_WORK(&forw_packet->delayed_work, batadv_send_outstanding_bcast_packet); send_time += delay ? delay : msecs_to_jiffies(5); batadv_forw_packet_bcast_queue(bat_priv, forw_packet, send_time); return NETDEV_TX_OK; err_packet_free: kfree_skb(newskb); err: return NETDEV_TX_BUSY; } /** * batadv_forw_bcast_packet_if() - forward and queue a broadcast packet * @bat_priv: the bat priv with all the soft interface information * @skb: broadcast packet to add * @delay: number of jiffies to wait before sending * @own_packet: true if it is a self-generated broadcast packet * @if_in: the interface where the packet was received on * @if_out: the outgoing interface to forward to * * Transmits a broadcast packet on the specified interface either immediately * or if a delay is given after that. Furthermore, queues additional * retransmissions if this interface is a wireless one. * * This call clones the given skb, hence the caller needs to take into * account that the data segment of the original skb might not be * modifiable anymore. * * Return: NETDEV_TX_OK on success and NETDEV_TX_BUSY on errors. 
*/ static int batadv_forw_bcast_packet_if(struct batadv_priv *bat_priv, struct sk_buff *skb, unsigned long delay, bool own_packet, struct batadv_hard_iface *if_in, struct batadv_hard_iface *if_out) { unsigned int num_bcasts = if_out->num_bcasts; struct sk_buff *newskb; int ret = NETDEV_TX_OK; if (!delay) { newskb = skb_clone(skb, GFP_ATOMIC); if (!newskb) return NETDEV_TX_BUSY; batadv_send_broadcast_skb(newskb, if_out); num_bcasts--; } /* delayed broadcast or rebroadcasts? */ if (num_bcasts >= 1) { BATADV_SKB_CB(skb)->num_bcasts = num_bcasts; ret = batadv_forw_bcast_packet_to_list(bat_priv, skb, delay, own_packet, if_in, if_out); } return ret; } /** * batadv_send_no_broadcast() - check whether (re)broadcast is necessary * @bat_priv: the bat priv with all the soft interface information * @skb: broadcast packet to check * @own_packet: true if it is a self-generated broadcast packet * @if_out: the outgoing interface checked and considered for (re)broadcast * * Return: False if a packet needs to be (re)broadcasted on the given interface, * true otherwise. */ static bool batadv_send_no_broadcast(struct batadv_priv *bat_priv, struct sk_buff *skb, bool own_packet, struct batadv_hard_iface *if_out) { struct batadv_hardif_neigh_node *neigh_node = NULL; struct batadv_bcast_packet *bcast_packet; u8 *orig_neigh; u8 *neigh_addr; char *type; int ret; if (!own_packet) { neigh_addr = eth_hdr(skb)->h_source; neigh_node = batadv_hardif_neigh_get(if_out, neigh_addr); } bcast_packet = (struct batadv_bcast_packet *)skb->data; orig_neigh = neigh_node ? neigh_node->orig : NULL; ret = batadv_hardif_no_broadcast(if_out, bcast_packet->orig, orig_neigh); batadv_hardif_neigh_put(neigh_node); /* ok, may broadcast */ if (!ret) return false; /* no broadcast */ switch (ret) { case BATADV_HARDIF_BCAST_NORECIPIENT: type = "no neighbor"; break; case BATADV_HARDIF_BCAST_DUPFWD: type = "single neighbor is source"; break; case BATADV_HARDIF_BCAST_DUPORIG: type = "single neighbor is originator"; break; default: type = "unknown"; } batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "BCAST packet from orig %pM on %s suppressed: %s\n", bcast_packet->orig, if_out->net_dev->name, type); return true; } /** * __batadv_forw_bcast_packet() - forward and queue a broadcast packet * @bat_priv: the bat priv with all the soft interface information * @skb: broadcast packet to add * @delay: number of jiffies to wait before sending * @own_packet: true if it is a self-generated broadcast packet * * Transmits a broadcast packet either immediately or if a delay is given * after that. Furthermore, queues additional retransmissions on wireless * interfaces. * * This call clones the given skb, hence the caller needs to take into * account that the data segment of the given skb might not be * modifiable anymore. * * Return: NETDEV_TX_OK on success and NETDEV_TX_BUSY on errors. 
*/ static int __batadv_forw_bcast_packet(struct batadv_priv *bat_priv, struct sk_buff *skb, unsigned long delay, bool own_packet) { struct batadv_hard_iface *hard_iface; struct batadv_hard_iface *primary_if; int ret = NETDEV_TX_OK; primary_if = batadv_primary_if_get_selected(bat_priv); if (!primary_if) return NETDEV_TX_BUSY; rcu_read_lock(); list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) { if (hard_iface->soft_iface != bat_priv->soft_iface) continue; if (!kref_get_unless_zero(&hard_iface->refcount)) continue; if (batadv_send_no_broadcast(bat_priv, skb, own_packet, hard_iface)) { batadv_hardif_put(hard_iface); continue; } ret = batadv_forw_bcast_packet_if(bat_priv, skb, delay, own_packet, primary_if, hard_iface); batadv_hardif_put(hard_iface); if (ret == NETDEV_TX_BUSY) break; } rcu_read_unlock(); batadv_hardif_put(primary_if); return ret; } /** * batadv_forw_bcast_packet() - forward and queue a broadcast packet * @bat_priv: the bat priv with all the soft interface information * @skb: broadcast packet to add * @delay: number of jiffies to wait before sending * @own_packet: true if it is a self-generated broadcast packet * * Transmits a broadcast packet either immediately or if a delay is given * after that. Furthermore, queues additional retransmissions on wireless * interfaces. * * Return: NETDEV_TX_OK on success and NETDEV_TX_BUSY on errors. */ int batadv_forw_bcast_packet(struct batadv_priv *bat_priv, struct sk_buff *skb, unsigned long delay, bool own_packet) { return __batadv_forw_bcast_packet(bat_priv, skb, delay, own_packet); } /** * batadv_send_bcast_packet() - send and queue a broadcast packet * @bat_priv: the bat priv with all the soft interface information * @skb: broadcast packet to add * @delay: number of jiffies to wait before sending * @own_packet: true if it is a self-generated broadcast packet * * Transmits a broadcast packet either immediately or if a delay is given * after that. Furthermore, queues additional retransmissions on wireless * interfaces. * * Consumes the provided skb. */ void batadv_send_bcast_packet(struct batadv_priv *bat_priv, struct sk_buff *skb, unsigned long delay, bool own_packet) { __batadv_forw_bcast_packet(bat_priv, skb, delay, own_packet); consume_skb(skb); } /** * batadv_forw_packet_bcasts_left() - check if a retransmission is necessary * @forw_packet: the forwarding packet to check * * Checks whether a given packet has any (re)transmissions left on the provided * interface. * * hard_iface may be NULL: In that case the number of transmissions this skb had * so far is compared with the maximum amount of retransmissions independent of * any interface instead. * * Return: True if (re)transmissions are left, false otherwise. */ static bool batadv_forw_packet_bcasts_left(struct batadv_forw_packet *forw_packet) { return BATADV_SKB_CB(forw_packet->skb)->num_bcasts; } /** * batadv_forw_packet_bcasts_dec() - decrement retransmission counter of a * packet * @forw_packet: the packet to decrease the counter for */ static void batadv_forw_packet_bcasts_dec(struct batadv_forw_packet *forw_packet) { BATADV_SKB_CB(forw_packet->skb)->num_bcasts--; } /** * batadv_forw_packet_is_rebroadcast() - check packet for previous transmissions * @forw_packet: the packet to check * * Return: True if this packet was transmitted before, false otherwise. 
*/ bool batadv_forw_packet_is_rebroadcast(struct batadv_forw_packet *forw_packet) { unsigned char num_bcasts = BATADV_SKB_CB(forw_packet->skb)->num_bcasts; return num_bcasts != forw_packet->if_outgoing->num_bcasts; } /** * batadv_send_outstanding_bcast_packet() - transmit a queued broadcast packet * @work: work queue item * * Transmits a queued broadcast packet and if necessary reschedules it. */ static void batadv_send_outstanding_bcast_packet(struct work_struct *work) { unsigned long send_time = jiffies + msecs_to_jiffies(5); struct batadv_forw_packet *forw_packet; struct delayed_work *delayed_work; struct batadv_priv *bat_priv; struct sk_buff *skb1; bool dropped = false; delayed_work = to_delayed_work(work); forw_packet = container_of(delayed_work, struct batadv_forw_packet, delayed_work); bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface); if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING) { dropped = true; goto out; } if (batadv_dat_drop_broadcast_packet(bat_priv, forw_packet)) { dropped = true; goto out; } /* send a copy of the saved skb */ skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC); if (!skb1) goto out; batadv_send_broadcast_skb(skb1, forw_packet->if_outgoing); batadv_forw_packet_bcasts_dec(forw_packet); if (batadv_forw_packet_bcasts_left(forw_packet)) { batadv_forw_packet_bcast_queue(bat_priv, forw_packet, send_time); return; } out: /* do we get something for free()? */ if (batadv_forw_packet_steal(forw_packet, &bat_priv->forw_bcast_list_lock)) batadv_forw_packet_free(forw_packet, dropped); } /** * batadv_purge_outstanding_packets() - stop/purge scheduled bcast/OGMv1 packets * @bat_priv: the bat priv with all the soft interface information * @hard_iface: the hard interface to cancel and purge bcast/ogm packets on * * This method cancels and purges any broadcast and OGMv1 packet on the given * hard_iface. If hard_iface is NULL, broadcast and OGMv1 packets on all hard * interfaces will be canceled and purged. * * This function might sleep. */ void batadv_purge_outstanding_packets(struct batadv_priv *bat_priv, const struct batadv_hard_iface *hard_iface) { struct hlist_head head = HLIST_HEAD_INIT; if (hard_iface) batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "%s(): %s\n", __func__, hard_iface->net_dev->name); else batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "%s()\n", __func__); /* claim bcast list for free() */ spin_lock_bh(&bat_priv->forw_bcast_list_lock); batadv_forw_packet_list_steal(&bat_priv->forw_bcast_list, &head, hard_iface); spin_unlock_bh(&bat_priv->forw_bcast_list_lock); /* claim batman packet list for free() */ spin_lock_bh(&bat_priv->forw_bat_list_lock); batadv_forw_packet_list_steal(&bat_priv->forw_bat_list, &head, hard_iface); spin_unlock_bh(&bat_priv->forw_bat_list_lock); /* then cancel or wait for packet workers to finish and free */ batadv_forw_packet_list_free(&head); } |
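/*
 * Illustrative sketch: how a caller might hand a self-generated broadcast to
 * the two entry points defined above.  example_queue_own_bcast(),
 * example_send_own_bcast() and EXAMPLE_BCAST_DELAY_MS are hypothetical
 * names; only batadv_forw_bcast_packet() and batadv_send_bcast_packet()
 * come from the code above, and the ownership rules follow their
 * kernel-doc (the former clones the skb, the latter consumes it).  Assumes
 * the batman-adv local headers (types.h, send.h) are available.
 */
#include <linux/jiffies.h>
#include <linux/skbuff.h>

#define EXAMPLE_BCAST_DELAY_MS 5	/* hypothetical example delay */

static int example_queue_own_bcast(struct batadv_priv *bat_priv,
				   struct sk_buff *skb)
{
	unsigned long delay = msecs_to_jiffies(EXAMPLE_BCAST_DELAY_MS);

	/* The skb is cloned internally; the caller keeps the original. */
	return batadv_forw_bcast_packet(bat_priv, skb, delay, true);
}

static void example_send_own_bcast(struct batadv_priv *bat_priv,
				   struct sk_buff *skb)
{
	/* Consumes the skb: no further kfree_skb()/consume_skb() needed. */
	batadv_send_bcast_packet(bat_priv, skb, 0, true);
}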
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * (C) 2008 Krzysztof Piotr Oledzki <ole@ans.pl>
 */

#ifndef _NF_CONNTRACK_ACCT_H
#define _NF_CONNTRACK_ACCT_H
#include <net/net_namespace.h>
#include <linux/netfilter/nf_conntrack_common.h>
#include <linux/netfilter/nf_conntrack_tuple_common.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_extend.h>

struct nf_conn_counter {
	atomic64_t packets;
	atomic64_t bytes;
};

struct nf_conn_acct {
	struct nf_conn_counter counter[IP_CT_DIR_MAX];
};

static inline
struct nf_conn_acct *nf_conn_acct_find(const struct nf_conn *ct)
{
	return nf_ct_ext_find(ct, NF_CT_EXT_ACCT);
}

static inline
struct nf_conn_acct *nf_ct_acct_ext_add(struct nf_conn *ct, gfp_t gfp)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	struct net *net = nf_ct_net(ct);
	struct nf_conn_acct *acct;

	if (!net->ct.sysctl_acct)
		return NULL;

	acct = nf_ct_ext_add(ct, NF_CT_EXT_ACCT, gfp);
	if (!acct)
		pr_debug("failed to add accounting extension area");

	return acct;
#else
	return NULL;
#endif
}

/* Check if connection tracking accounting is enabled */
static inline bool nf_ct_acct_enabled(struct net *net)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	return net->ct.sysctl_acct != 0;
#else
	return false;
#endif
}

/* Enable/disable connection tracking accounting */
static inline void nf_ct_set_acct(struct net *net, bool enable)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	net->ct.sysctl_acct = enable;
#endif
}

void nf_ct_acct_add(struct nf_conn *ct, u32 dir, unsigned int packets,
		    unsigned int bytes);

static inline void nf_ct_acct_update(struct nf_conn *ct, u32 dir,
				     unsigned int bytes)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	nf_ct_acct_add(ct, dir, 1, bytes);
#endif
}

void nf_conntrack_acct_pernet_init(struct net *net);

#endif /* _NF_CONNTRACK_ACCT_H */
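/*
 * Illustrative sketch: reading the accumulated per-direction counters
 * through nf_conn_acct_find().  example_dump_acct() is a hypothetical name;
 * the types, the accessor and IP_CT_DIR_* come from the headers above.
 */
#include <net/netfilter/nf_conntrack_acct.h>

static void example_dump_acct(const struct nf_conn *ct)
{
	struct nf_conn_acct *acct = nf_conn_acct_find(ct);
	struct nf_conn_counter *ctr;

	if (!acct)	/* accounting disabled or extension never added */
		return;

	ctr = acct->counter;
	pr_debug("orig %llu/%llu reply %llu/%llu (packets/bytes)\n",
		 (unsigned long long)atomic64_read(&ctr[IP_CT_DIR_ORIGINAL].packets),
		 (unsigned long long)atomic64_read(&ctr[IP_CT_DIR_ORIGINAL].bytes),
		 (unsigned long long)atomic64_read(&ctr[IP_CT_DIR_REPLY].packets),
		 (unsigned long long)atomic64_read(&ctr[IP_CT_DIR_REPLY].bytes));
}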
3 24 27 26 26 23 24 23 8 23 23 24 3 3 286 286 286 34 34 6 5 5 5 5 6 5 4 3 3 3 2 4 5 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 | // SPDX-License-Identifier: GPL-2.0 /* * KVM coalesced MMIO * * Copyright (c) 2008 Bull S.A.S. * Copyright 2009 Red Hat, Inc. and/or its affiliates. * * Author: Laurent Vivier <Laurent.Vivier@bull.net> * */ #include <kvm/iodev.h> #include <linux/kvm_host.h> #include <linux/slab.h> #include <linux/kvm.h> #include "coalesced_mmio.h" static inline struct kvm_coalesced_mmio_dev *to_mmio(struct kvm_io_device *dev) { return container_of(dev, struct kvm_coalesced_mmio_dev, dev); } static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev, gpa_t addr, int len) { /* is it in a batchable area ? * (addr,len) is fully included in * (zone->addr, zone->size) */ if (len < 0) return 0; if (addr + len < addr) return 0; if (addr < dev->zone.addr) return 0; if (addr + len > dev->zone.addr + dev->zone.size) return 0; return 1; } static int coalesced_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this, gpa_t addr, int len, const void *val) { struct kvm_coalesced_mmio_dev *dev = to_mmio(this); struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring; __u32 insert; if (!coalesced_mmio_in_range(dev, addr, len)) return -EOPNOTSUPP; spin_lock(&dev->kvm->ring_lock); /* * last is the index of the entry to fill. Verify userspace hasn't * set last to be out of range, and that there is room in the ring. * Leave one entry free in the ring so that userspace can differentiate * between an empty ring and a full ring. */ insert = READ_ONCE(ring->last); if (insert >= KVM_COALESCED_MMIO_MAX || (insert + 1) % KVM_COALESCED_MMIO_MAX == READ_ONCE(ring->first)) { spin_unlock(&dev->kvm->ring_lock); return -EOPNOTSUPP; } /* copy data in first free entry of the ring */ ring->coalesced_mmio[insert].phys_addr = addr; ring->coalesced_mmio[insert].len = len; memcpy(ring->coalesced_mmio[insert].data, val, len); ring->coalesced_mmio[insert].pio = dev->zone.pio; smp_wmb(); ring->last = (insert + 1) % KVM_COALESCED_MMIO_MAX; spin_unlock(&dev->kvm->ring_lock); return 0; } static void coalesced_mmio_destructor(struct kvm_io_device *this) { struct kvm_coalesced_mmio_dev *dev = to_mmio(this); list_del(&dev->list); kfree(dev); } static const struct kvm_io_device_ops coalesced_mmio_ops = { .write = coalesced_mmio_write, .destructor = coalesced_mmio_destructor, }; int kvm_coalesced_mmio_init(struct kvm *kvm) { struct page *page; page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO); if (!page) return -ENOMEM; kvm->coalesced_mmio_ring = page_address(page); /* * We're using this spinlock to sync access to the coalesced ring. * The list doesn't need its own lock since device registration and * unregistration should only happen when kvm->slots_lock is held. 
*/ spin_lock_init(&kvm->ring_lock); INIT_LIST_HEAD(&kvm->coalesced_zones); return 0; } void kvm_coalesced_mmio_free(struct kvm *kvm) { if (kvm->coalesced_mmio_ring) free_page((unsigned long)kvm->coalesced_mmio_ring); } int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm, struct kvm_coalesced_mmio_zone *zone) { int ret; struct kvm_coalesced_mmio_dev *dev; if (zone->pio != 1 && zone->pio != 0) return -EINVAL; dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL_ACCOUNT); if (!dev) return -ENOMEM; kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops); dev->kvm = kvm; dev->zone = *zone; mutex_lock(&kvm->slots_lock); ret = kvm_io_bus_register_dev(kvm, zone->pio ? KVM_PIO_BUS : KVM_MMIO_BUS, zone->addr, zone->size, &dev->dev); if (ret < 0) goto out_free_dev; list_add_tail(&dev->list, &kvm->coalesced_zones); mutex_unlock(&kvm->slots_lock); return 0; out_free_dev: mutex_unlock(&kvm->slots_lock); kfree(dev); return ret; } int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm, struct kvm_coalesced_mmio_zone *zone) { struct kvm_coalesced_mmio_dev *dev, *tmp; int r; if (zone->pio != 1 && zone->pio != 0) return -EINVAL; mutex_lock(&kvm->slots_lock); list_for_each_entry_safe(dev, tmp, &kvm->coalesced_zones, list) { if (zone->pio == dev->zone.pio && coalesced_mmio_in_range(dev, zone->addr, zone->size)) { r = kvm_io_bus_unregister_dev(kvm, zone->pio ? KVM_PIO_BUS : KVM_MMIO_BUS, &dev->dev); /* * On failure, unregister destroys all devices on the * bus, including the target device. There's no need * to restart the walk as there aren't any zones left. */ if (r) break; } } mutex_unlock(&kvm->slots_lock); /* * Ignore the result of kvm_io_bus_unregister_dev(), from userspace's * perspective, the coalesced MMIO is most definitely unregistered. */ return 0; } |
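/*
 * Illustrative userspace sketch: registering a coalesced MMIO zone and
 * draining the shared ring.  Assumes the UAPI names from <linux/kvm.h>
 * (KVM_REGISTER_COALESCED_MMIO, struct kvm_coalesced_mmio_zone/_ring,
 * KVM_COALESCED_MMIO_MAX, KVM_COALESCED_MMIO_PAGE_OFFSET); the helper names
 * and the vm_fd/run/page_size parameters are placeholders.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int example_register_zone(int vm_fd, __u64 addr, __u32 size)
{
	struct kvm_coalesced_mmio_zone zone = {
		.addr = addr,
		.size = size,
		/* .pio left zero: this is an MMIO zone, not port I/O */
	};

	return ioctl(vm_fd, KVM_REGISTER_COALESCED_MMIO, &zone);
}

/* The ring page is part of the vCPU mmap area, after struct kvm_run. */
static void example_drain_ring(struct kvm_run *run, long page_size,
			       void (*handle)(__u64 addr, const void *data,
					      __u32 len))
{
	struct kvm_coalesced_mmio_ring *ring;

	ring = (void *)((char *)run +
			KVM_COALESCED_MMIO_PAGE_OFFSET * page_size);

	/* Mirrors the kernel-side producer above: only 'first' is advanced
	 * here.  A read barrier before consuming each entry may be needed
	 * on weakly ordered architectures.
	 */
	while (ring->first != ring->last) {
		struct kvm_coalesced_mmio *e =
			&ring->coalesced_mmio[ring->first];

		handle(e->phys_addr, e->data, e->len);
		ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
	}
}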
206 234 195 159 34 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 | /* SPDX-License-Identifier: GPL-2.0 */ #ifndef LINUX_EXPORTFS_H #define LINUX_EXPORTFS_H 1 #include <linux/types.h> #include <linux/path.h> struct dentry; struct iattr; struct inode; struct iomap; struct super_block; struct vfsmount; /* limit the handle size to NFSv4 handle size now */ #define MAX_HANDLE_SZ 128 /* * The fileid_type identifies how the file within the filesystem is encoded. * In theory this is freely set and parsed by the filesystem, but we try to * stick to conventions so we can share some generic code and don't confuse * sniffers like ethereal/wireshark. * * The filesystem must not use the value '0' or '0xff'. */ enum fid_type { /* * The root, or export point, of the filesystem. * (Never actually passed down to the filesystem. */ FILEID_ROOT = 0, /* * 32bit inode number, 32 bit generation number. */ FILEID_INO32_GEN = 1, /* * 32bit inode number, 32 bit generation number, * 32 bit parent directory inode number. */ FILEID_INO32_GEN_PARENT = 2, /* * 64 bit object ID, 64 bit root object ID, * 32 bit generation number. */ FILEID_BTRFS_WITHOUT_PARENT = 0x4d, /* * 64 bit object ID, 64 bit root object ID, * 32 bit generation number, * 64 bit parent object ID, 32 bit parent generation. */ FILEID_BTRFS_WITH_PARENT = 0x4e, /* * 64 bit object ID, 64 bit root object ID, * 32 bit generation number, * 64 bit parent object ID, 32 bit parent generation, * 64 bit parent root object ID. */ FILEID_BTRFS_WITH_PARENT_ROOT = 0x4f, /* * 32 bit block number, 16 bit partition reference, * 16 bit unused, 32 bit generation number. */ FILEID_UDF_WITHOUT_PARENT = 0x51, /* * 32 bit block number, 16 bit partition reference, * 16 bit unused, 32 bit generation number, * 32 bit parent block number, 32 bit parent generation number */ FILEID_UDF_WITH_PARENT = 0x52, /* * 64 bit checkpoint number, 64 bit inode number, * 32 bit generation number. */ FILEID_NILFS_WITHOUT_PARENT = 0x61, /* * 64 bit checkpoint number, 64 bit inode number, * 32 bit generation number, 32 bit parent generation. * 64 bit parent inode number. */ FILEID_NILFS_WITH_PARENT = 0x62, /* * 32 bit generation number, 40 bit i_pos. 
*/ FILEID_FAT_WITHOUT_PARENT = 0x71, /* * 32 bit generation number, 40 bit i_pos, * 32 bit parent generation number, 40 bit parent i_pos */ FILEID_FAT_WITH_PARENT = 0x72, /* * 64 bit inode number, 32 bit generation number. */ FILEID_INO64_GEN = 0x81, /* * 64 bit inode number, 32 bit generation number, * 64 bit parent inode number, 32 bit parent generation. */ FILEID_INO64_GEN_PARENT = 0x82, /* * 128 bit child FID (struct lu_fid) * 128 bit parent FID (struct lu_fid) */ FILEID_LUSTRE = 0x97, /* * 64 bit inode number, 32 bit subvolume, 32 bit generation number: */ FILEID_BCACHEFS_WITHOUT_PARENT = 0xb1, FILEID_BCACHEFS_WITH_PARENT = 0xb2, /* * 64 bit unique kernfs id */ FILEID_KERNFS = 0xfe, /* * Filesystems must not use 0xff file ID. */ FILEID_INVALID = 0xff, }; struct fid { union { struct { u32 ino; u32 gen; u32 parent_ino; u32 parent_gen; } i32; struct { u64 ino; u32 gen; } __packed i64; struct { u32 block; u16 partref; u16 parent_partref; u32 generation; u32 parent_block; u32 parent_generation; } udf; DECLARE_FLEX_ARRAY(__u32, raw); }; }; enum handle_to_path_flags { HANDLE_CHECK_PERMS = (1 << 0), HANDLE_CHECK_SUBTREE = (1 << 1), }; struct handle_to_path_ctx { struct path root; enum handle_to_path_flags flags; unsigned int fh_flags; }; #define EXPORT_FH_CONNECTABLE 0x1 /* Encode file handle with parent */ #define EXPORT_FH_FID 0x2 /* File handle may be non-decodeable */ #define EXPORT_FH_DIR_ONLY 0x4 /* Only decode file handle for a directory */ /* * Filesystems use only lower 8 bits of file_handle type for fid_type. * name_to_handle_at() uses upper 16 bits of type as user flags to be * interpreted by open_by_handle_at(). */ #define FILEID_USER_FLAGS_MASK 0xffff0000 #define FILEID_USER_FLAGS(type) ((type) & FILEID_USER_FLAGS_MASK) /* Flags supported in encoded handle_type that is exported to user */ #define FILEID_IS_CONNECTABLE 0x10000 #define FILEID_IS_DIR 0x20000 #define FILEID_VALID_USER_FLAGS (FILEID_IS_CONNECTABLE | FILEID_IS_DIR) /** * struct export_operations - for nfsd to communicate with file systems * @encode_fh: encode a file handle fragment from a dentry * @fh_to_dentry: find the implied object and get a dentry for it * @fh_to_parent: find the implied object's parent and get a dentry for it * @get_name: find the name for a given inode in a given directory * @get_parent: find the parent of a given directory * @commit_metadata: commit metadata changes to stable storage * * See Documentation/filesystems/nfs/exporting.rst for details on how to use * this interface correctly. * * encode_fh: * @encode_fh should store in the file handle fragment @fh (using at most * @max_len bytes) information that can be used by @decode_fh to recover the * file referred to by the &struct dentry @de. If @flag has CONNECTABLE bit * set, the encode_fh() should store sufficient information so that a good * attempt can be made to find not only the file but also it's place in the * filesystem. This typically means storing a reference to de->d_parent in * the filehandle fragment. encode_fh() should return the fileid_type on * success and on error returns 255 (if the space needed to encode fh is * greater than @max_len*4 bytes). On error @max_len contains the minimum * size(in 4 byte unit) needed to encode the file handle. * * fh_to_dentry: * @fh_to_dentry is given a &struct super_block (@sb) and a file handle * fragment (@fh, @fh_len). It should return a &struct dentry which refers * to the same file that the file handle fragment refers to. 
If it cannot, * it should return a %NULL pointer if the file cannot be found, or an * %ERR_PTR error code of %ENOMEM if a memory allocation failure occurred. * Any other error code is treated like %NULL, and will cause an %ESTALE error * for callers of exportfs_decode_fh(). * Any suitable dentry can be returned including, if necessary, a new dentry * created with d_alloc_root. The caller can then find any other extant * dentries by following the d_alias links. * * fh_to_parent: * Same as @fh_to_dentry, except that it returns a pointer to the parent * dentry if it was encoded into the filehandle fragment by @encode_fh. * * get_name: * @get_name should find a name for the given @child in the given @parent * directory. The name should be stored in the @name (with the * understanding that it is already pointing to a %NAME_MAX+1 sized * buffer. get_name() should return %0 on success, a negative error code * or error. @get_name will be called without @parent->i_mutex held. * * get_parent: * @get_parent should find the parent directory for the given @child which * is also a directory. In the event that it cannot be found, or storage * space cannot be allocated, a %ERR_PTR should be returned. * * permission: * Allow filesystems to specify a custom permission function. * * open: * Allow filesystems to specify a custom open function. * * commit_metadata: * @commit_metadata should commit metadata changes to stable storage. * * Locking rules: * get_parent is called with child->d_inode->i_mutex down * get_name is not (which is possibly inconsistent) */ struct export_operations { int (*encode_fh)(struct inode *inode, __u32 *fh, int *max_len, struct inode *parent); struct dentry * (*fh_to_dentry)(struct super_block *sb, struct fid *fid, int fh_len, int fh_type); struct dentry * (*fh_to_parent)(struct super_block *sb, struct fid *fid, int fh_len, int fh_type); int (*get_name)(struct dentry *parent, char *name, struct dentry *child); struct dentry * (*get_parent)(struct dentry *child); int (*commit_metadata)(struct inode *inode); int (*get_uuid)(struct super_block *sb, u8 *buf, u32 *len, u64 *offset); int (*map_blocks)(struct inode *inode, loff_t offset, u64 len, struct iomap *iomap, bool write, u32 *device_generation); int (*commit_blocks)(struct inode *inode, struct iomap *iomaps, int nr_iomaps, struct iattr *iattr); int (*permission)(struct handle_to_path_ctx *ctx, unsigned int oflags); struct file * (*open)(struct path *path, unsigned int oflags); #define EXPORT_OP_NOWCC (0x1) /* don't collect v3 wcc data */ #define EXPORT_OP_NOSUBTREECHK (0x2) /* no subtree checking */ #define EXPORT_OP_CLOSE_BEFORE_UNLINK (0x4) /* close files before unlink */ #define EXPORT_OP_REMOTE_FS (0x8) /* Filesystem is remote */ #define EXPORT_OP_NOATOMIC_ATTR (0x10) /* Filesystem cannot supply atomic attribute updates */ #define EXPORT_OP_FLUSH_ON_CLOSE (0x20) /* fs flushes file data on close */ #define EXPORT_OP_ASYNC_LOCK (0x40) /* fs can do async lock request */ unsigned long flags; }; extern int exportfs_encode_inode_fh(struct inode *inode, struct fid *fid, int *max_len, struct inode *parent, int flags); extern int exportfs_encode_fh(struct dentry *dentry, struct fid *fid, int *max_len, int flags); static inline bool exportfs_can_encode_fid(const struct export_operations *nop) { return !nop || nop->encode_fh; } static inline bool exportfs_can_decode_fh(const struct export_operations *nop) { return nop && nop->fh_to_dentry; } static inline bool exportfs_can_encode_fh(const struct export_operations *nop, int fh_flags) { /* 
* If a non-decodeable file handle was requested, we only need to make * sure that filesystem did not opt-out of encoding fid. */ if (fh_flags & EXPORT_FH_FID) return exportfs_can_encode_fid(nop); /* * If a decodeable file handle was requested, we need to make sure that * filesystem can also decode file handles. */ return exportfs_can_decode_fh(nop); } static inline int exportfs_encode_fid(struct inode *inode, struct fid *fid, int *max_len) { return exportfs_encode_inode_fh(inode, fid, max_len, NULL, EXPORT_FH_FID); } extern struct dentry *exportfs_decode_fh_raw(struct vfsmount *mnt, struct fid *fid, int fh_len, int fileid_type, unsigned int flags, int (*acceptable)(void *, struct dentry *), void *context); extern struct dentry *exportfs_decode_fh(struct vfsmount *mnt, struct fid *fid, int fh_len, int fileid_type, int (*acceptable)(void *, struct dentry *), void *context); /* * Generic helpers for filesystems. */ int generic_encode_ino32_fh(struct inode *inode, __u32 *fh, int *max_len, struct inode *parent); struct dentry *generic_fh_to_dentry(struct super_block *sb, struct fid *fid, int fh_len, int fh_type, struct inode *(*get_inode) (struct super_block *sb, u64 ino, u32 gen)); struct dentry *generic_fh_to_parent(struct super_block *sb, struct fid *fid, int fh_len, int fh_type, struct inode *(*get_inode) (struct super_block *sb, u64 ino, u32 gen)); #endif /* LINUX_EXPORTFS_H */ |
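/*
 * Illustrative sketch: a minimal export_operations table for a filesystem
 * whose inodes are addressable by (32-bit ino, generation), built on the
 * generic helpers declared above.  example_get_inode() and
 * example_export_ops are hypothetical; a real filesystem would point
 * sb->s_export_op at such a table.
 */
#include <linux/exportfs.h>

static struct inode *example_get_inode(struct super_block *sb, u64 ino,
				       u32 gen)
{
	/* Filesystem-specific lookup; must verify that @gen still matches.
	 * Placeholder only: a real implementation returns the inode or an
	 * ERR_PTR.
	 */
	return NULL;
}

static struct dentry *example_fh_to_dentry(struct super_block *sb,
					   struct fid *fid, int fh_len,
					   int fh_type)
{
	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
				    example_get_inode);
}

static struct dentry *example_fh_to_parent(struct super_block *sb,
					   struct fid *fid, int fh_len,
					   int fh_type)
{
	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
				    example_get_inode);
}

static const struct export_operations example_export_ops = {
	.encode_fh	= generic_encode_ino32_fh,
	.fh_to_dentry	= example_fh_to_dentry,
	.fh_to_parent	= example_fh_to_parent,
};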
6 6 6 6 6 6 6 6 6 6 6 6 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 | // SPDX-License-Identifier: GPL-2.0-or-later /* * Cryptographic API. * * Common Blowfish algorithm parts shared between the c and assembler * implementations. * * Blowfish Cipher Algorithm, by Bruce Schneier. * http://www.counterpane.com/blowfish.html * * Adapted from Kerneli implementation. 
* * Copyright (c) Herbert Valerio Riedel <hvr@hvrlab.org> * Copyright (c) Kyle McMartin <kyle@debian.org> * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> */ #include <crypto/algapi.h> #include <linux/init.h> #include <linux/module.h> #include <linux/mm.h> #include <asm/byteorder.h> #include <linux/types.h> #include <crypto/blowfish.h> static const u32 bf_pbox[16 + 2] = { 0x243f6a88, 0x85a308d3, 0x13198a2e, 0x03707344, 0xa4093822, 0x299f31d0, 0x082efa98, 0xec4e6c89, 0x452821e6, 0x38d01377, 0xbe5466cf, 0x34e90c6c, 0xc0ac29b7, 0xc97c50dd, 0x3f84d5b5, 0xb5470917, 0x9216d5d9, 0x8979fb1b, }; static const u32 bf_sbox[256 * 4] = { 0xd1310ba6, 0x98dfb5ac, 0x2ffd72db, 0xd01adfb7, 0xb8e1afed, 0x6a267e96, 0xba7c9045, 0xf12c7f99, 0x24a19947, 0xb3916cf7, 0x0801f2e2, 0x858efc16, 0x636920d8, 0x71574e69, 0xa458fea3, 0xf4933d7e, 0x0d95748f, 0x728eb658, 0x718bcd58, 0x82154aee, 0x7b54a41d, 0xc25a59b5, 0x9c30d539, 0x2af26013, 0xc5d1b023, 0x286085f0, 0xca417918, 0xb8db38ef, 0x8e79dcb0, 0x603a180e, 0x6c9e0e8b, 0xb01e8a3e, 0xd71577c1, 0xbd314b27, 0x78af2fda, 0x55605c60, 0xe65525f3, 0xaa55ab94, 0x57489862, 0x63e81440, 0x55ca396a, 0x2aab10b6, 0xb4cc5c34, 0x1141e8ce, 0xa15486af, 0x7c72e993, 0xb3ee1411, 0x636fbc2a, 0x2ba9c55d, 0x741831f6, 0xce5c3e16, 0x9b87931e, 0xafd6ba33, 0x6c24cf5c, 0x7a325381, 0x28958677, 0x3b8f4898, 0x6b4bb9af, 0xc4bfe81b, 0x66282193, 0x61d809cc, 0xfb21a991, 0x487cac60, 0x5dec8032, 0xef845d5d, 0xe98575b1, 0xdc262302, 0xeb651b88, 0x23893e81, 0xd396acc5, 0x0f6d6ff3, 0x83f44239, 0x2e0b4482, 0xa4842004, 0x69c8f04a, 0x9e1f9b5e, 0x21c66842, 0xf6e96c9a, 0x670c9c61, 0xabd388f0, 0x6a51a0d2, 0xd8542f68, 0x960fa728, 0xab5133a3, 0x6eef0b6c, 0x137a3be4, 0xba3bf050, 0x7efb2a98, 0xa1f1651d, 0x39af0176, 0x66ca593e, 0x82430e88, 0x8cee8619, 0x456f9fb4, 0x7d84a5c3, 0x3b8b5ebe, 0xe06f75d8, 0x85c12073, 0x401a449f, 0x56c16aa6, 0x4ed3aa62, 0x363f7706, 0x1bfedf72, 0x429b023d, 0x37d0d724, 0xd00a1248, 0xdb0fead3, 0x49f1c09b, 0x075372c9, 0x80991b7b, 0x25d479d8, 0xf6e8def7, 0xe3fe501a, 0xb6794c3b, 0x976ce0bd, 0x04c006ba, 0xc1a94fb6, 0x409f60c4, 0x5e5c9ec2, 0x196a2463, 0x68fb6faf, 0x3e6c53b5, 0x1339b2eb, 0x3b52ec6f, 0x6dfc511f, 0x9b30952c, 0xcc814544, 0xaf5ebd09, 0xbee3d004, 0xde334afd, 0x660f2807, 0x192e4bb3, 0xc0cba857, 0x45c8740f, 0xd20b5f39, 0xb9d3fbdb, 0x5579c0bd, 0x1a60320a, 0xd6a100c6, 0x402c7279, 0x679f25fe, 0xfb1fa3cc, 0x8ea5e9f8, 0xdb3222f8, 0x3c7516df, 0xfd616b15, 0x2f501ec8, 0xad0552ab, 0x323db5fa, 0xfd238760, 0x53317b48, 0x3e00df82, 0x9e5c57bb, 0xca6f8ca0, 0x1a87562e, 0xdf1769db, 0xd542a8f6, 0x287effc3, 0xac6732c6, 0x8c4f5573, 0x695b27b0, 0xbbca58c8, 0xe1ffa35d, 0xb8f011a0, 0x10fa3d98, 0xfd2183b8, 0x4afcb56c, 0x2dd1d35b, 0x9a53e479, 0xb6f84565, 0xd28e49bc, 0x4bfb9790, 0xe1ddf2da, 0xa4cb7e33, 0x62fb1341, 0xcee4c6e8, 0xef20cada, 0x36774c01, 0xd07e9efe, 0x2bf11fb4, 0x95dbda4d, 0xae909198, 0xeaad8e71, 0x6b93d5a0, 0xd08ed1d0, 0xafc725e0, 0x8e3c5b2f, 0x8e7594b7, 0x8ff6e2fb, 0xf2122b64, 0x8888b812, 0x900df01c, 0x4fad5ea0, 0x688fc31c, 0xd1cff191, 0xb3a8c1ad, 0x2f2f2218, 0xbe0e1777, 0xea752dfe, 0x8b021fa1, 0xe5a0cc0f, 0xb56f74e8, 0x18acf3d6, 0xce89e299, 0xb4a84fe0, 0xfd13e0b7, 0x7cc43b81, 0xd2ada8d9, 0x165fa266, 0x80957705, 0x93cc7314, 0x211a1477, 0xe6ad2065, 0x77b5fa86, 0xc75442f5, 0xfb9d35cf, 0xebcdaf0c, 0x7b3e89a0, 0xd6411bd3, 0xae1e7e49, 0x00250e2d, 0x2071b35e, 0x226800bb, 0x57b8e0af, 0x2464369b, 0xf009b91e, 0x5563911d, 0x59dfa6aa, 0x78c14389, 0xd95a537f, 0x207d5ba2, 0x02e5b9c5, 0x83260376, 0x6295cfa9, 0x11c81968, 0x4e734a41, 0xb3472dca, 0x7b14a94a, 0x1b510052, 0x9a532915, 0xd60f573f, 0xbc9bc6e4, 
0x2b60a476, 0x81e67400, 0x08ba6fb5, 0x571be91f, 0xf296ec6b, 0x2a0dd915, 0xb6636521, 0xe7b9f9b6, 0xff34052e, 0xc5855664, 0x53b02d5d, 0xa99f8fa1, 0x08ba4799, 0x6e85076a, 0x4b7a70e9, 0xb5b32944, 0xdb75092e, 0xc4192623, 0xad6ea6b0, 0x49a7df7d, 0x9cee60b8, 0x8fedb266, 0xecaa8c71, 0x699a17ff, 0x5664526c, 0xc2b19ee1, 0x193602a5, 0x75094c29, 0xa0591340, 0xe4183a3e, 0x3f54989a, 0x5b429d65, 0x6b8fe4d6, 0x99f73fd6, 0xa1d29c07, 0xefe830f5, 0x4d2d38e6, 0xf0255dc1, 0x4cdd2086, 0x8470eb26, 0x6382e9c6, 0x021ecc5e, 0x09686b3f, 0x3ebaefc9, 0x3c971814, 0x6b6a70a1, 0x687f3584, 0x52a0e286, 0xb79c5305, 0xaa500737, 0x3e07841c, 0x7fdeae5c, 0x8e7d44ec, 0x5716f2b8, 0xb03ada37, 0xf0500c0d, 0xf01c1f04, 0x0200b3ff, 0xae0cf51a, 0x3cb574b2, 0x25837a58, 0xdc0921bd, 0xd19113f9, 0x7ca92ff6, 0x94324773, 0x22f54701, 0x3ae5e581, 0x37c2dadc, 0xc8b57634, 0x9af3dda7, 0xa9446146, 0x0fd0030e, 0xecc8c73e, 0xa4751e41, 0xe238cd99, 0x3bea0e2f, 0x3280bba1, 0x183eb331, 0x4e548b38, 0x4f6db908, 0x6f420d03, 0xf60a04bf, 0x2cb81290, 0x24977c79, 0x5679b072, 0xbcaf89af, 0xde9a771f, 0xd9930810, 0xb38bae12, 0xdccf3f2e, 0x5512721f, 0x2e6b7124, 0x501adde6, 0x9f84cd87, 0x7a584718, 0x7408da17, 0xbc9f9abc, 0xe94b7d8c, 0xec7aec3a, 0xdb851dfa, 0x63094366, 0xc464c3d2, 0xef1c1847, 0x3215d908, 0xdd433b37, 0x24c2ba16, 0x12a14d43, 0x2a65c451, 0x50940002, 0x133ae4dd, 0x71dff89e, 0x10314e55, 0x81ac77d6, 0x5f11199b, 0x043556f1, 0xd7a3c76b, 0x3c11183b, 0x5924a509, 0xf28fe6ed, 0x97f1fbfa, 0x9ebabf2c, 0x1e153c6e, 0x86e34570, 0xeae96fb1, 0x860e5e0a, 0x5a3e2ab3, 0x771fe71c, 0x4e3d06fa, 0x2965dcb9, 0x99e71d0f, 0x803e89d6, 0x5266c825, 0x2e4cc978, 0x9c10b36a, 0xc6150eba, 0x94e2ea78, 0xa5fc3c53, 0x1e0a2df4, 0xf2f74ea7, 0x361d2b3d, 0x1939260f, 0x19c27960, 0x5223a708, 0xf71312b6, 0xebadfe6e, 0xeac31f66, 0xe3bc4595, 0xa67bc883, 0xb17f37d1, 0x018cff28, 0xc332ddef, 0xbe6c5aa5, 0x65582185, 0x68ab9802, 0xeecea50f, 0xdb2f953b, 0x2aef7dad, 0x5b6e2f84, 0x1521b628, 0x29076170, 0xecdd4775, 0x619f1510, 0x13cca830, 0xeb61bd96, 0x0334fe1e, 0xaa0363cf, 0xb5735c90, 0x4c70a239, 0xd59e9e0b, 0xcbaade14, 0xeecc86bc, 0x60622ca7, 0x9cab5cab, 0xb2f3846e, 0x648b1eaf, 0x19bdf0ca, 0xa02369b9, 0x655abb50, 0x40685a32, 0x3c2ab4b3, 0x319ee9d5, 0xc021b8f7, 0x9b540b19, 0x875fa099, 0x95f7997e, 0x623d7da8, 0xf837889a, 0x97e32d77, 0x11ed935f, 0x16681281, 0x0e358829, 0xc7e61fd6, 0x96dedfa1, 0x7858ba99, 0x57f584a5, 0x1b227263, 0x9b83c3ff, 0x1ac24696, 0xcdb30aeb, 0x532e3054, 0x8fd948e4, 0x6dbc3128, 0x58ebf2ef, 0x34c6ffea, 0xfe28ed61, 0xee7c3c73, 0x5d4a14d9, 0xe864b7e3, 0x42105d14, 0x203e13e0, 0x45eee2b6, 0xa3aaabea, 0xdb6c4f15, 0xfacb4fd0, 0xc742f442, 0xef6abbb5, 0x654f3b1d, 0x41cd2105, 0xd81e799e, 0x86854dc7, 0xe44b476a, 0x3d816250, 0xcf62a1f2, 0x5b8d2646, 0xfc8883a0, 0xc1c7b6a3, 0x7f1524c3, 0x69cb7492, 0x47848a0b, 0x5692b285, 0x095bbf00, 0xad19489d, 0x1462b174, 0x23820e00, 0x58428d2a, 0x0c55f5ea, 0x1dadf43e, 0x233f7061, 0x3372f092, 0x8d937e41, 0xd65fecf1, 0x6c223bdb, 0x7cde3759, 0xcbee7460, 0x4085f2a7, 0xce77326e, 0xa6078084, 0x19f8509e, 0xe8efd855, 0x61d99735, 0xa969a7aa, 0xc50c06c2, 0x5a04abfc, 0x800bcadc, 0x9e447a2e, 0xc3453484, 0xfdd56705, 0x0e1e9ec9, 0xdb73dbd3, 0x105588cd, 0x675fda79, 0xe3674340, 0xc5c43465, 0x713e38d8, 0x3d28f89e, 0xf16dff20, 0x153e21e7, 0x8fb03d4a, 0xe6e39f2b, 0xdb83adf7, 0xe93d5a68, 0x948140f7, 0xf64c261c, 0x94692934, 0x411520f7, 0x7602d4f7, 0xbcf46b2e, 0xd4a20068, 0xd4082471, 0x3320f46a, 0x43b7d4b7, 0x500061af, 0x1e39f62e, 0x97244546, 0x14214f74, 0xbf8b8840, 0x4d95fc1d, 0x96b591af, 0x70f4ddd3, 0x66a02f45, 0xbfbc09ec, 0x03bd9785, 0x7fac6dd0, 0x31cb8504, 0x96eb27b3, 0x55fd3941, 
0xda2547e6, 0xabca0a9a, 0x28507825, 0x530429f4, 0x0a2c86da, 0xe9b66dfb, 0x68dc1462, 0xd7486900, 0x680ec0a4, 0x27a18dee, 0x4f3ffea2, 0xe887ad8c, 0xb58ce006, 0x7af4d6b6, 0xaace1e7c, 0xd3375fec, 0xce78a399, 0x406b2a42, 0x20fe9e35, 0xd9f385b9, 0xee39d7ab, 0x3b124e8b, 0x1dc9faf7, 0x4b6d1856, 0x26a36631, 0xeae397b2, 0x3a6efa74, 0xdd5b4332, 0x6841e7f7, 0xca7820fb, 0xfb0af54e, 0xd8feb397, 0x454056ac, 0xba489527, 0x55533a3a, 0x20838d87, 0xfe6ba9b7, 0xd096954b, 0x55a867bc, 0xa1159a58, 0xcca92963, 0x99e1db33, 0xa62a4a56, 0x3f3125f9, 0x5ef47e1c, 0x9029317c, 0xfdf8e802, 0x04272f70, 0x80bb155c, 0x05282ce3, 0x95c11548, 0xe4c66d22, 0x48c1133f, 0xc70f86dc, 0x07f9c9ee, 0x41041f0f, 0x404779a4, 0x5d886e17, 0x325f51eb, 0xd59bc0d1, 0xf2bcc18f, 0x41113564, 0x257b7834, 0x602a9c60, 0xdff8e8a3, 0x1f636c1b, 0x0e12b4c2, 0x02e1329e, 0xaf664fd1, 0xcad18115, 0x6b2395e0, 0x333e92e1, 0x3b240b62, 0xeebeb922, 0x85b2a20e, 0xe6ba0d99, 0xde720c8c, 0x2da2f728, 0xd0127845, 0x95b794fd, 0x647d0862, 0xe7ccf5f0, 0x5449a36f, 0x877d48fa, 0xc39dfd27, 0xf33e8d1e, 0x0a476341, 0x992eff74, 0x3a6f6eab, 0xf4f8fd37, 0xa812dc60, 0xa1ebddf8, 0x991be14c, 0xdb6e6b0d, 0xc67b5510, 0x6d672c37, 0x2765d43b, 0xdcd0e804, 0xf1290dc7, 0xcc00ffa3, 0xb5390f92, 0x690fed0b, 0x667b9ffb, 0xcedb7d9c, 0xa091cf0b, 0xd9155ea3, 0xbb132f88, 0x515bad24, 0x7b9479bf, 0x763bd6eb, 0x37392eb3, 0xcc115979, 0x8026e297, 0xf42e312d, 0x6842ada7, 0xc66a2b3b, 0x12754ccc, 0x782ef11c, 0x6a124237, 0xb79251e7, 0x06a1bbe6, 0x4bfb6350, 0x1a6b1018, 0x11caedfa, 0x3d25bdd8, 0xe2e1c3c9, 0x44421659, 0x0a121386, 0xd90cec6e, 0xd5abea2a, 0x64af674e, 0xda86a85f, 0xbebfe988, 0x64e4c3fe, 0x9dbc8057, 0xf0f7c086, 0x60787bf8, 0x6003604d, 0xd1fd8346, 0xf6381fb0, 0x7745ae04, 0xd736fccc, 0x83426b33, 0xf01eab71, 0xb0804187, 0x3c005e5f, 0x77a057be, 0xbde8ae24, 0x55464299, 0xbf582e61, 0x4e58f48f, 0xf2ddfda2, 0xf474ef38, 0x8789bdc2, 0x5366f9c3, 0xc8b38e74, 0xb475f255, 0x46fcd9b9, 0x7aeb2661, 0x8b1ddf84, 0x846a0e79, 0x915f95e2, 0x466e598e, 0x20b45770, 0x8cd55591, 0xc902de4c, 0xb90bace1, 0xbb8205d0, 0x11a86248, 0x7574a99e, 0xb77f19b6, 0xe0a9dc09, 0x662d09a1, 0xc4324633, 0xe85a1f02, 0x09f0be8c, 0x4a99a025, 0x1d6efe10, 0x1ab93d1d, 0x0ba5a4df, 0xa186f20f, 0x2868f169, 0xdcb7da83, 0x573906fe, 0xa1e2ce9b, 0x4fcd7f52, 0x50115e01, 0xa70683fa, 0xa002b5c4, 0x0de6d027, 0x9af88c27, 0x773f8641, 0xc3604c06, 0x61a806b5, 0xf0177a28, 0xc0f586e0, 0x006058aa, 0x30dc7d62, 0x11e69ed7, 0x2338ea63, 0x53c2dd94, 0xc2c21634, 0xbbcbee56, 0x90bcb6de, 0xebfc7da1, 0xce591d76, 0x6f05e409, 0x4b7c0188, 0x39720a3d, 0x7c927c24, 0x86e3725f, 0x724d9db9, 0x1ac15bb4, 0xd39eb8fc, 0xed545578, 0x08fca5b5, 0xd83d7cd3, 0x4dad0fc4, 0x1e50ef5e, 0xb161e6f8, 0xa28514d9, 0x6c51133c, 0x6fd5c7e7, 0x56e14ec4, 0x362abfce, 0xddc6c837, 0xd79a3234, 0x92638212, 0x670efa8e, 0x406000e0, 0x3a39ce37, 0xd3faf5cf, 0xabc27737, 0x5ac52d1b, 0x5cb0679e, 0x4fa33742, 0xd3822740, 0x99bc9bbe, 0xd5118e9d, 0xbf0f7315, 0xd62d1c7e, 0xc700c47b, 0xb78c1b6b, 0x21a19045, 0xb26eb1be, 0x6a366eb4, 0x5748ab2f, 0xbc946e79, 0xc6a376d2, 0x6549c2c8, 0x530ff8ee, 0x468dde7d, 0xd5730a1d, 0x4cd04dc6, 0x2939bbdb, 0xa9ba4650, 0xac9526e8, 0xbe5ee304, 0xa1fad5f0, 0x6a2d519a, 0x63ef8ce2, 0x9a86ee22, 0xc089c2b8, 0x43242ef6, 0xa51e03aa, 0x9cf2d0a4, 0x83c061ba, 0x9be96a4d, 0x8fe51550, 0xba645bd6, 0x2826a2f9, 0xa73a3ae1, 0x4ba99586, 0xef5562e9, 0xc72fefd3, 0xf752f7da, 0x3f046f69, 0x77fa0a59, 0x80e4a915, 0x87b08601, 0x9b09e6ad, 0x3b3ee593, 0xe990fd5a, 0x9e34d797, 0x2cf0b7d9, 0x022b8b51, 0x96d5ac3a, 0x017da67d, 0xd1cf3ed6, 0x7c7d2d28, 0x1f9f25cf, 0xadf2b89b, 0x5ad6b472, 0x5a88f54c, 0xe029ac71, 0xe019a5e6, 
0x47b0acfd, 0xed93fa9b, 0xe8d3c48d, 0x283b57cc, 0xf8d56629, 0x79132e28, 0x785f0191, 0xed756055, 0xf7960e44, 0xe3d35e8c, 0x15056dd4, 0x88f46dba, 0x03a16125, 0x0564f0bd, 0xc3eb9e15, 0x3c9057a2, 0x97271aec, 0xa93a072a, 0x1b3f6d9b, 0x1e6321f5, 0xf59c66fb, 0x26dcf319, 0x7533d928, 0xb155fdf5, 0x03563482, 0x8aba3cbb, 0x28517711, 0xc20ad9f8, 0xabcc5167, 0xccad925f, 0x4de81751, 0x3830dc8e, 0x379d5862, 0x9320f991, 0xea7a90c2, 0xfb3e7bce, 0x5121ce64, 0x774fbe32, 0xa8b6e37e, 0xc3293d46, 0x48de5369, 0x6413e680, 0xa2ae0810, 0xdd6db224, 0x69852dfd, 0x09072166, 0xb39a460a, 0x6445c0dd, 0x586cdecf, 0x1c20c8ae, 0x5bbef7dd, 0x1b588d40, 0xccd2017f, 0x6bb4e3bb, 0xdda26a7e, 0x3a59ff45, 0x3e350a44, 0xbcb4cdd5, 0x72eacea8, 0xfa6484bb, 0x8d6612ae, 0xbf3c6f47, 0xd29be463, 0x542f5d9e, 0xaec2771b, 0xf64e6370, 0x740e0d8d, 0xe75b1357, 0xf8721671, 0xaf537d5d, 0x4040cb08, 0x4eb4e2cc, 0x34d2466a, 0x0115af84, 0xe1b00428, 0x95983a1d, 0x06b89fb4, 0xce6ea048, 0x6f3f3b82, 0x3520ab82, 0x011a1d4b, 0x277227f8, 0x611560b1, 0xe7933fdc, 0xbb3a792b, 0x344525bd, 0xa08839e1, 0x51ce794b, 0x2f32c9b7, 0xa01fbac9, 0xe01cc87e, 0xbcc7d1f6, 0xcf0111c3, 0xa1e8aac7, 0x1a908749, 0xd44fbd9a, 0xd0dadecb, 0xd50ada38, 0x0339c32a, 0xc6913667, 0x8df9317c, 0xe0b12b4f, 0xf79e59b7, 0x43f5bb3a, 0xf2d519ff, 0x27d9459c, 0xbf97222c, 0x15e6fc2a, 0x0f91fc71, 0x9b941525, 0xfae59361, 0xceb69ceb, 0xc2a86459, 0x12baa8d1, 0xb6c1075e, 0xe3056a0c, 0x10d25065, 0xcb03a442, 0xe0ec6e0e, 0x1698db3b, 0x4c98a0be, 0x3278e964, 0x9f1f9532, 0xe0d392df, 0xd3a0342b, 0x8971f21e, 0x1b0a7441, 0x4ba3348c, 0xc5be7120, 0xc37632d8, 0xdf359f8d, 0x9b992f2e, 0xe60b6f47, 0x0fe3f11d, 0xe54cda54, 0x1edad891, 0xce6279cf, 0xcd3e7e6f, 0x1618b166, 0xfd2c1d05, 0x848fd2c5, 0xf6fb2299, 0xf523f357, 0xa6327623, 0x93a83531, 0x56cccd02, 0xacf08162, 0x5a75ebb5, 0x6e163697, 0x88d273cc, 0xde966292, 0x81b949d0, 0x4c50901b, 0x71c65614, 0xe6c6c7bd, 0x327a140a, 0x45e1d006, 0xc3f27b9a, 0xc9aa53fd, 0x62a80f00, 0xbb25bfe2, 0x35bdd2f6, 0x71126905, 0xb2040222, 0xb6cbcf7c, 0xcd769c2b, 0x53113ec0, 0x1640e3d3, 0x38abbd60, 0x2547adf0, 0xba38209c, 0xf746ce76, 0x77afa1c5, 0x20756060, 0x85cbfe4e, 0x8ae88dd8, 0x7aaaf9b0, 0x4cf9aa7e, 0x1948c25c, 0x02fb8a8c, 0x01c36ae4, 0xd6ebe1f9, 0x90d4f869, 0xa65cdea0, 0x3f09252d, 0xc208e69f, 0xb74e6132, 0xce77e25b, 0x578fdfe3, 0x3ac372e6, }; /* * Round loop unrolling macros, S is a pointer to a S-Box array * organized in 4 unsigned longs at a row. */ #define GET32_3(x) (((x) & 0xff)) #define GET32_2(x) (((x) >> (8)) & (0xff)) #define GET32_1(x) (((x) >> (16)) & (0xff)) #define GET32_0(x) (((x) >> (24)) & (0xff)) #define bf_F(x) (((S[GET32_0(x)] + S[256 + GET32_1(x)]) ^ \ S[512 + GET32_2(x)]) + S[768 + GET32_3(x)]) #define ROUND(a, b, n) ({ b ^= P[n]; a ^= bf_F(b); }) /* * The blowfish encipher, processes 64-bit blocks. * NOTE: This function MUSTN'T respect endianess */ static void encrypt_block(struct bf_ctx *bctx, u32 *dst, u32 *src) { const u32 *P = bctx->p; const u32 *S = bctx->s; u32 yl = src[0]; u32 yr = src[1]; ROUND(yr, yl, 0); ROUND(yl, yr, 1); ROUND(yr, yl, 2); ROUND(yl, yr, 3); ROUND(yr, yl, 4); ROUND(yl, yr, 5); ROUND(yr, yl, 6); ROUND(yl, yr, 7); ROUND(yr, yl, 8); ROUND(yl, yr, 9); ROUND(yr, yl, 10); ROUND(yl, yr, 11); ROUND(yr, yl, 12); ROUND(yl, yr, 13); ROUND(yr, yl, 14); ROUND(yl, yr, 15); yl ^= P[16]; yr ^= P[17]; dst[0] = yr; dst[1] = yl; } /* * Calculates the blowfish S and P boxes for encryption and decryption. 
*/ int blowfish_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) { struct bf_ctx *ctx = crypto_tfm_ctx(tfm); u32 *P = ctx->p; u32 *S = ctx->s; short i, j, count; u32 data[2], temp; /* Copy the initialization s-boxes */ for (i = 0, count = 0; i < 256; i++) for (j = 0; j < 4; j++, count++) S[count] = bf_sbox[count]; /* Set the p-boxes */ for (i = 0; i < 16 + 2; i++) P[i] = bf_pbox[i]; /* Actual subkey generation */ for (j = 0, i = 0; i < 16 + 2; i++) { temp = (((u32)key[j] << 24) | ((u32)key[(j + 1) % keylen] << 16) | ((u32)key[(j + 2) % keylen] << 8) | ((u32)key[(j + 3) % keylen])); P[i] = P[i] ^ temp; j = (j + 4) % keylen; } data[0] = 0x00000000; data[1] = 0x00000000; for (i = 0; i < 16 + 2; i += 2) { encrypt_block((struct bf_ctx *)ctx, data, data); P[i] = data[0]; P[i + 1] = data[1]; } for (i = 0; i < 4; i++) { for (j = 0, count = i * 256; j < 256; j += 2, count += 2) { encrypt_block((struct bf_ctx *)ctx, data, data); S[count] = data[0]; S[count + 1] = data[1]; } } /* Bruce says not to bother with the weak key check. */ return 0; } EXPORT_SYMBOL_GPL(blowfish_setkey); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Blowfish Cipher common functions"); |
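/*
 * Illustrative sketch: the Feistel F-function and one round that the
 * bf_F()/ROUND() macros above expand to, written out as plain functions.
 * example_bf_f() and example_bf_round() are hypothetical names; S is a
 * single array holding four 256-entry boxes and P the 18 subkeys, exactly
 * as in struct bf_ctx.
 */
#include <linux/types.h>

static u32 example_bf_f(const u32 *S, u32 x)
{
	u8 a = x >> 24;			/* GET32_0 */
	u8 b = (x >> 16) & 0xff;	/* GET32_1 */
	u8 c = (x >> 8) & 0xff;		/* GET32_2 */
	u8 d = x & 0xff;		/* GET32_3 */

	return ((S[a] + S[256 + b]) ^ S[512 + c]) + S[768 + d];
}

static void example_bf_round(const u32 *P, const u32 *S, u32 *a, u32 *b,
			     int n)
{
	/* ROUND(a, b, n): mix subkey n into b, then fold F(b) into a. */
	*b ^= P[n];
	*a ^= example_bf_f(S, *b);
}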
1473 14 13 4 13 1 13 13 2 11 2 9 9 9 9 8 2 2 2 15 14 15 15 15 9 15 15 15 2 16 16 9 9 9 16 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 | // SPDX-License-Identifier: GPL-2.0 /* Copyright 2011-2014 Autronica Fire and Security AS * * Author(s): * 2011-2014 Arvid Brodin, arvid.brodin@alten.se * * Frame handler other utility functions for HSR and PRP. */ #include "hsr_slave.h" #include <linux/etherdevice.h> #include <linux/if_arp.h> #include <linux/if_vlan.h> #include "hsr_main.h" #include "hsr_device.h" #include "hsr_forward.h" #include "hsr_framereg.h" bool hsr_invalid_dan_ingress_frame(__be16 protocol) { return (protocol != htons(ETH_P_PRP) && protocol != htons(ETH_P_HSR)); } static rx_handler_result_t hsr_handle_frame(struct sk_buff **pskb) { struct sk_buff *skb = *pskb; struct hsr_port *port; struct hsr_priv *hsr; __be16 protocol; /* Packets from dev_loopback_xmit() do not have L2 header, bail out */ if (unlikely(skb->pkt_type == PACKET_LOOPBACK)) return RX_HANDLER_PASS; if (!skb_mac_header_was_set(skb)) { WARN_ONCE(1, "%s: skb invalid", __func__); return RX_HANDLER_PASS; } port = hsr_port_get_rcu(skb->dev); if (!port) goto finish_pass; hsr = port->hsr; if (hsr_addr_is_self(port->hsr, eth_hdr(skb)->h_source)) { /* Directly kill frames sent by ourselves */ kfree_skb(skb); goto finish_consume; } /* For HSR, only tagged frames are expected (unless the device offloads * HSR tag removal), but for PRP there could be non tagged frames as * well from Single attached nodes (SANs). */ protocol = eth_hdr(skb)->h_proto; if (!(port->dev->features & NETIF_F_HW_HSR_TAG_RM) && port->type != HSR_PT_INTERLINK && hsr->proto_ops->invalid_dan_ingress_frame && hsr->proto_ops->invalid_dan_ingress_frame(protocol)) goto finish_pass; skb_push(skb, ETH_HLEN); skb_reset_mac_header(skb); if ((!hsr->prot_version && protocol == htons(ETH_P_PRP)) || protocol == htons(ETH_P_HSR)) skb_set_network_header(skb, ETH_HLEN + HSR_HLEN); skb_reset_mac_len(skb); /* Only the frames received over the interlink port will assign a * sequence number and require synchronisation vs other sender. 
*/ if (port->type == HSR_PT_INTERLINK) { spin_lock_bh(&hsr->seqnr_lock); hsr_forward_skb(skb, port); spin_unlock_bh(&hsr->seqnr_lock); } else { hsr_forward_skb(skb, port); } finish_consume: return RX_HANDLER_CONSUMED; finish_pass: return RX_HANDLER_PASS; } bool hsr_port_exists(const struct net_device *dev) { return rcu_access_pointer(dev->rx_handler) == hsr_handle_frame; } static int hsr_check_dev_ok(struct net_device *dev, struct netlink_ext_ack *extack) { /* Don't allow HSR on non-ethernet like devices */ if ((dev->flags & IFF_LOOPBACK) || dev->type != ARPHRD_ETHER || dev->addr_len != ETH_ALEN) { NL_SET_ERR_MSG_MOD(extack, "Cannot use loopback or non-ethernet device as HSR slave."); return -EINVAL; } /* Don't allow enslaving hsr devices */ if (is_hsr_master(dev)) { NL_SET_ERR_MSG_MOD(extack, "Cannot create trees of HSR devices."); return -EINVAL; } if (hsr_port_exists(dev)) { NL_SET_ERR_MSG_MOD(extack, "This device is already a HSR slave."); return -EINVAL; } if (is_vlan_dev(dev)) { NL_SET_ERR_MSG_MOD(extack, "HSR on top of VLAN is not yet supported in this driver."); return -EINVAL; } if (dev->priv_flags & IFF_DONT_BRIDGE) { NL_SET_ERR_MSG_MOD(extack, "This device does not support bridging."); return -EOPNOTSUPP; } /* HSR over bonded devices has not been tested, but I'm not sure it * won't work... */ return 0; } /* Setup device to be added to the HSR bridge. */ static int hsr_portdev_setup(struct hsr_priv *hsr, struct net_device *dev, struct hsr_port *port, struct netlink_ext_ack *extack) { struct net_device *hsr_dev; struct hsr_port *master; int res; /* Don't use promiscuous mode for offload since L2 frame forward * happens at the offloaded hardware. */ if (!port->hsr->fwd_offloaded) { res = dev_set_promiscuity(dev, 1); if (res) return res; } master = hsr_port_get_hsr(hsr, HSR_PT_MASTER); hsr_dev = master->dev; res = netdev_upper_dev_link(dev, hsr_dev, extack); if (res) goto fail_upper_dev_link; res = netdev_rx_handler_register(dev, hsr_handle_frame, port); if (res) goto fail_rx_handler; dev_disable_lro(dev); return 0; fail_rx_handler: netdev_upper_dev_unlink(dev, hsr_dev); fail_upper_dev_link: if (!port->hsr->fwd_offloaded) dev_set_promiscuity(dev, -1); return res; } int hsr_add_port(struct hsr_priv *hsr, struct net_device *dev, enum hsr_port_type type, struct netlink_ext_ack *extack) { struct hsr_port *port, *master; int res; if (type != HSR_PT_MASTER) { res = hsr_check_dev_ok(dev, extack); if (res) return res; } port = hsr_port_get_hsr(hsr, type); if (port) return -EBUSY; /* This port already exists */ port = kzalloc(sizeof(*port), GFP_KERNEL); if (!port) return -ENOMEM; port->hsr = hsr; port->dev = dev; port->type = type; if (type != HSR_PT_MASTER) { res = hsr_portdev_setup(hsr, dev, port, extack); if (res) goto fail_dev_setup; } list_add_tail_rcu(&port->port_list, &hsr->ports); master = hsr_port_get_hsr(hsr, HSR_PT_MASTER); netdev_update_features(master->dev); dev_set_mtu(master->dev, hsr_get_max_mtu(hsr)); return 0; fail_dev_setup: kfree(port); return res; } void hsr_del_port(struct hsr_port *port) { struct hsr_priv *hsr; struct hsr_port *master; hsr = port->hsr; master = hsr_port_get_hsr(hsr, HSR_PT_MASTER); list_del_rcu(&port->port_list); if (port != master) { netdev_update_features(master->dev); dev_set_mtu(master->dev, hsr_get_max_mtu(hsr)); netdev_rx_handler_unregister(port->dev); if (!port->hsr->fwd_offloaded) dev_set_promiscuity(port->dev, -1); netdev_upper_dev_unlink(port->dev, master->dev); } kfree_rcu(port, rcu); } |
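/*
 * Illustrative sketch: how a configuration path might attach the two
 * physical slaves with hsr_add_port() and roll back on failure.
 * example_attach_slaves() is hypothetical; HSR_PT_SLAVE_A/B are assumed to
 * be the slave entries of enum hsr_port_type in hsr_main.h, while
 * hsr_port_get_hsr() and hsr_del_port() are the helpers used/defined above.
 */
static int example_attach_slaves(struct hsr_priv *hsr,
				 struct net_device *slave_a,
				 struct net_device *slave_b,
				 struct netlink_ext_ack *extack)
{
	struct hsr_port *port;
	int res;

	res = hsr_add_port(hsr, slave_a, HSR_PT_SLAVE_A, extack);
	if (res)
		return res;

	res = hsr_add_port(hsr, slave_b, HSR_PT_SLAVE_B, extack);
	if (res) {
		/* Undo the first slave so the instance stays consistent. */
		port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
		if (port)
			hsr_del_port(port);
	}

	return res;
}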
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _NF_CONNTRACK_EXTEND_H
#define _NF_CONNTRACK_EXTEND_H

#include <linux/slab.h>
#include <net/netfilter/nf_conntrack.h>

enum nf_ct_ext_id {
	NF_CT_EXT_HELPER,
#if IS_ENABLED(CONFIG_NF_NAT)
	NF_CT_EXT_NAT,
#endif
	NF_CT_EXT_SEQADJ,
	NF_CT_EXT_ACCT,
#ifdef CONFIG_NF_CONNTRACK_EVENTS
	NF_CT_EXT_ECACHE,
#endif
#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
	NF_CT_EXT_TSTAMP,
#endif
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
	NF_CT_EXT_TIMEOUT,
#endif
#ifdef CONFIG_NF_CONNTRACK_LABELS
	NF_CT_EXT_LABELS,
#endif
#if IS_ENABLED(CONFIG_NETFILTER_SYNPROXY)
	NF_CT_EXT_SYNPROXY,
#endif
#if IS_ENABLED(CONFIG_NET_ACT_CT)
	NF_CT_EXT_ACT_CT,
#endif
	NF_CT_EXT_NUM,
};

/* Extensions: optional stuff which isn't permanently in struct. */
struct nf_ct_ext {
	u8 offset[NF_CT_EXT_NUM];
	u8 len;
	unsigned int gen_id;
	char data[] __aligned(8);
};

static inline bool __nf_ct_ext_exist(const struct nf_ct_ext *ext, u8 id)
{
	return !!ext->offset[id];
}

static inline bool nf_ct_ext_exist(const struct nf_conn *ct, u8 id)
{
	return (ct->ext && __nf_ct_ext_exist(ct->ext, id));
}

void *__nf_ct_ext_find(const struct nf_ct_ext *ext, u8 id);

static inline void *nf_ct_ext_find(const struct nf_conn *ct, u8 id)
{
	struct nf_ct_ext *ext = ct->ext;

	if (!ext || !__nf_ct_ext_exist(ext, id))
		return NULL;

	if (unlikely(ext->gen_id))
		return __nf_ct_ext_find(ext, id);

	return (void *)ct->ext + ct->ext->offset[id];
}

/* Add this type, returns pointer to data or NULL. */
void *nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp);

/* ext genid.  if ext->id != ext_genid, extensions cannot be used
 * anymore unless conntrack has CONFIRMED bit set.
 */
extern atomic_t nf_conntrack_ext_genid;
void nf_ct_ext_bump_genid(void);

#endif /* _NF_CONNTRACK_EXTEND_H */
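/*
 * Illustrative sketch: the usual find-or-add pattern for a conntrack
 * extension, shown here for the accounting extension.  example_get_acct()
 * is a hypothetical name; nf_ct_ext_find(), nf_ct_ext_add() and
 * NF_CT_EXT_ACCT come from the headers, and nf_ct_is_confirmed() from
 * <net/netfilter/nf_conntrack.h>.
 */
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_extend.h>
#include <net/netfilter/nf_conntrack_acct.h>

static struct nf_conn_acct *example_get_acct(struct nf_conn *ct, gfp_t gfp)
{
	struct nf_conn_acct *acct;

	/* Fast path: the extension is already attached to this entry. */
	acct = nf_ct_ext_find(ct, NF_CT_EXT_ACCT);
	if (acct)
		return acct;

	/* Extensions may only be added before the entry is confirmed. */
	if (nf_ct_is_confirmed(ct))
		return NULL;

	return nf_ct_ext_add(ct, NF_CT_EXT_ACCT, gfp);
}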
171 179 178 170 170 170 170 1 170 169 170 74 74 73 170 168 170 170 168 178 80 178 169 169 177 178 177 32 31 32 179 74 171 1 178 179 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 | // SPDX-License-Identifier: GPL-2.0-only /* Copyright(c) 2019 Intel Corporation. */ #include <linux/hash.h> #include <linux/bpf.h> #include <linux/filter.h> #include <linux/static_call.h> /* The BPF dispatcher is a multiway branch code generator. The * dispatcher is a mechanism to avoid the performance penalty of an * indirect call, which is expensive when retpolines are enabled. A * dispatch client registers a BPF program into the dispatcher, and if * there is available room in the dispatcher a direct call to the BPF * program will be generated. All calls to the BPF programs called via * the dispatcher will then be a direct call, instead of an * indirect. The dispatcher hijacks a trampoline function it via the * __fentry__ of the trampoline. The trampoline function has the * following signature: * * unsigned int trampoline(const void *ctx, const struct bpf_insn *insnsi, * unsigned int (*bpf_func)(const void *, * const struct bpf_insn *)); */ static struct bpf_dispatcher_prog *bpf_dispatcher_find_prog( struct bpf_dispatcher *d, struct bpf_prog *prog) { int i; for (i = 0; i < BPF_DISPATCHER_MAX; i++) { if (prog == d->progs[i].prog) return &d->progs[i]; } return NULL; } static struct bpf_dispatcher_prog *bpf_dispatcher_find_free( struct bpf_dispatcher *d) { return bpf_dispatcher_find_prog(d, NULL); } static bool bpf_dispatcher_add_prog(struct bpf_dispatcher *d, struct bpf_prog *prog) { struct bpf_dispatcher_prog *entry; if (!prog) return false; entry = bpf_dispatcher_find_prog(d, prog); if (entry) { refcount_inc(&entry->users); return false; } entry = bpf_dispatcher_find_free(d); if (!entry) return false; bpf_prog_inc(prog); entry->prog = prog; refcount_set(&entry->users, 1); d->num_progs++; return true; } static bool bpf_dispatcher_remove_prog(struct bpf_dispatcher *d, struct bpf_prog *prog) { struct bpf_dispatcher_prog *entry; if (!prog) return false; entry = bpf_dispatcher_find_prog(d, prog); if (!entry) return false; if (refcount_dec_and_test(&entry->users)) { entry->prog = NULL; bpf_prog_put(prog); d->num_progs--; return true; } return false; } int __weak arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_funcs) { return -ENOTSUPP; } static int bpf_dispatcher_prepare(struct bpf_dispatcher *d, void *image, void *buf) { s64 ips[BPF_DISPATCHER_MAX] = {}, *ipsp = &ips[0]; int i; for (i = 0; i < BPF_DISPATCHER_MAX; i++) { if (d->progs[i].prog) *ipsp++ = (s64)(uintptr_t)d->progs[i].prog->bpf_func; } return arch_prepare_bpf_dispatcher(image, buf, &ips[0], d->num_progs); } static void bpf_dispatcher_update(struct bpf_dispatcher *d, int prev_num_progs) { void *new, *tmp; u32 noff = 0; if (prev_num_progs) noff = d->image_off ^ (PAGE_SIZE / 2); new = d->num_progs ? d->image + noff : NULL; tmp = d->num_progs ? 
d->rw_image + noff : NULL; if (new) { /* Prepare the dispatcher in d->rw_image. Then use * bpf_arch_text_copy to update d->image, which is RO+X. */ if (bpf_dispatcher_prepare(d, new, tmp)) return; if (IS_ERR(bpf_arch_text_copy(new, tmp, PAGE_SIZE / 2))) return; } __BPF_DISPATCHER_UPDATE(d, new ?: (void *)&bpf_dispatcher_nop_func); /* Make sure all the callers executing the previous/old half of the * image leave it, so following update call can modify it safely. */ synchronize_rcu(); if (new) d->image_off = noff; } void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from, struct bpf_prog *to) { bool changed = false; int prev_num_progs; if (from == to) return; mutex_lock(&d->mutex); if (!d->image) { d->image = bpf_prog_pack_alloc(PAGE_SIZE, bpf_jit_fill_hole_with_zero); if (!d->image) goto out; d->rw_image = bpf_jit_alloc_exec(PAGE_SIZE); if (!d->rw_image) { bpf_prog_pack_free(d->image, PAGE_SIZE); d->image = NULL; goto out; } bpf_image_ksym_init(d->image, PAGE_SIZE, &d->ksym); bpf_image_ksym_add(&d->ksym); } prev_num_progs = d->num_progs; changed |= bpf_dispatcher_remove_prog(d, from); changed |= bpf_dispatcher_add_prog(d, to); if (!changed) goto out; bpf_dispatcher_update(d, prev_num_progs); out: mutex_unlock(&d->mutex); } |
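/*
 * Illustrative sketch: wiring a dispatcher into a call site.  Assumes the
 * DEFINE_BPF_DISPATCHER(), BPF_DISPATCHER_PTR() and BPF_DISPATCHER_FUNC()
 * helpers from <linux/bpf.h> and the __bpf_prog_run() wrapper from
 * <linux/filter.h>; example_run_prog() and example_set_prog() are
 * hypothetical.
 */
#include <linux/bpf.h>
#include <linux/filter.h>

DEFINE_BPF_DISPATCHER(example)

/* Program invocations go through the dispatcher trampoline, so they become
 * direct calls once a program has been installed below.
 */
static unsigned int example_run_prog(const struct bpf_prog *prog,
				     const void *ctx)
{
	return __bpf_prog_run(prog, ctx, BPF_DISPATCHER_FUNC(example));
}

/* Swapping programs re-emits the trampoline and flips the active image. */
static void example_set_prog(struct bpf_prog *prev, struct bpf_prog *next)
{
	bpf_dispatcher_change_prog(BPF_DISPATCHER_PTR(example), prev, next);
}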
7 7 7 7 7 7 7 7 7 7 7 7 6 7 7 7 7 7 7 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 | // SPDX-License-Identifier: GPL-2.0 /* * attribute_container.c - implementation of a simple container for classes * * Copyright (c) 2005 - James Bottomley <James.Bottomley@steeleye.com> * * The basic idea here is to enable a device to be attached to an * aritrary numer of classes without having to allocate storage for them. * Instead, the contained classes select the devices they need to attach * to via a matching function. */ #include <linux/attribute_container.h> #include <linux/device.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/list.h> #include <linux/module.h> #include <linux/mutex.h> #include "base.h" /* This is a private structure used to tie the classdev and the * container .. it should never be visible outside this file */ struct internal_container { struct klist_node node; struct attribute_container *cont; struct device classdev; }; static void internal_container_klist_get(struct klist_node *n) { struct internal_container *ic = container_of(n, struct internal_container, node); get_device(&ic->classdev); } static void internal_container_klist_put(struct klist_node *n) { struct internal_container *ic = container_of(n, struct internal_container, node); put_device(&ic->classdev); } /** * attribute_container_classdev_to_container - given a classdev, return the container * * @classdev: the class device created by attribute_container_add_device. * * Returns the container associated with this classdev. 
*/ struct attribute_container * attribute_container_classdev_to_container(struct device *classdev) { struct internal_container *ic = container_of(classdev, struct internal_container, classdev); return ic->cont; } EXPORT_SYMBOL_GPL(attribute_container_classdev_to_container); static LIST_HEAD(attribute_container_list); static DEFINE_MUTEX(attribute_container_mutex); /** * attribute_container_register - register an attribute container * * @cont: The container to register. This must be allocated by the * callee and should also be zeroed by it. */ int attribute_container_register(struct attribute_container *cont) { INIT_LIST_HEAD(&cont->node); klist_init(&cont->containers, internal_container_klist_get, internal_container_klist_put); mutex_lock(&attribute_container_mutex); list_add_tail(&cont->node, &attribute_container_list); mutex_unlock(&attribute_container_mutex); return 0; } EXPORT_SYMBOL_GPL(attribute_container_register); /** * attribute_container_unregister - remove a container registration * * @cont: previously registered container to remove */ int attribute_container_unregister(struct attribute_container *cont) { int retval = -EBUSY; mutex_lock(&attribute_container_mutex); spin_lock(&cont->containers.k_lock); if (!list_empty(&cont->containers.k_list)) goto out; retval = 0; list_del(&cont->node); out: spin_unlock(&cont->containers.k_lock); mutex_unlock(&attribute_container_mutex); return retval; } EXPORT_SYMBOL_GPL(attribute_container_unregister); /* private function used as class release */ static void attribute_container_release(struct device *classdev) { struct internal_container *ic = container_of(classdev, struct internal_container, classdev); struct device *dev = classdev->parent; kfree(ic); put_device(dev); } /** * attribute_container_add_device - see if any container is interested in dev * * @dev: device to add attributes to * @fn: function to trigger addition of class device. * * This function allocates storage for the class device(s) to be * attached to dev (one for each matching attribute_container). If no * fn is provided, the code will simply register the class device via * device_add. If a function is provided, it is expected to add * the class device at the appropriate time. One of the things that * might be necessary is to allocate and initialise the classdev and * then add it a later time. To do this, call this routine for * allocation and initialisation and then use * attribute_container_device_trigger() to call device_add() on * it. Note: after this, the class device contains a reference to dev * which is not relinquished until the release of the classdev. 
*/ void attribute_container_add_device(struct device *dev, int (*fn)(struct attribute_container *, struct device *, struct device *)) { struct attribute_container *cont; mutex_lock(&attribute_container_mutex); list_for_each_entry(cont, &attribute_container_list, node) { struct internal_container *ic; if (attribute_container_no_classdevs(cont)) continue; if (!cont->match(cont, dev)) continue; ic = kzalloc(sizeof(*ic), GFP_KERNEL); if (!ic) { dev_err(dev, "failed to allocate class container\n"); continue; } ic->cont = cont; device_initialize(&ic->classdev); ic->classdev.parent = get_device(dev); ic->classdev.class = cont->class; cont->class->dev_release = attribute_container_release; dev_set_name(&ic->classdev, "%s", dev_name(dev)); if (fn) fn(cont, dev, &ic->classdev); else attribute_container_add_class_device(&ic->classdev); klist_add_tail(&ic->node, &cont->containers); } mutex_unlock(&attribute_container_mutex); } /* FIXME: can't break out of this unless klist_iter_exit is also * called before doing the break */ #define klist_for_each_entry(pos, head, member, iter) \ for (klist_iter_init(head, iter); (pos = ({ \ struct klist_node *n = klist_next(iter); \ n ? container_of(n, typeof(*pos), member) : \ ({ klist_iter_exit(iter) ; NULL; }); \ })) != NULL;) /** * attribute_container_remove_device - make device eligible for removal. * * @dev: The generic device * @fn: A function to call to remove the device * * This routine triggers device removal. If fn is NULL, then it is * simply done via device_unregister (note that if something * still has a reference to the classdev, then the memory occupied * will not be freed until the classdev is released). If you want a * two phase release: remove from visibility and then delete the * device, then you should use this routine with a fn that calls * device_del() and then use attribute_container_device_trigger() * to do the final put on the classdev. */ void attribute_container_remove_device(struct device *dev, void (*fn)(struct attribute_container *, struct device *, struct device *)) { struct attribute_container *cont; mutex_lock(&attribute_container_mutex); list_for_each_entry(cont, &attribute_container_list, node) { struct internal_container *ic; struct klist_iter iter; if (attribute_container_no_classdevs(cont)) continue; if (!cont->match(cont, dev)) continue; klist_for_each_entry(ic, &cont->containers, node, &iter) { if (dev != ic->classdev.parent) continue; klist_del(&ic->node); if (fn) fn(cont, dev, &ic->classdev); else { attribute_container_remove_attrs(&ic->classdev); device_unregister(&ic->classdev); } } } mutex_unlock(&attribute_container_mutex); } static int do_attribute_container_device_trigger_safe(struct device *dev, struct attribute_container *cont, int (*fn)(struct attribute_container *, struct device *, struct device *), int (*undo)(struct attribute_container *, struct device *, struct device *)) { int ret; struct internal_container *ic, *failed; struct klist_iter iter; if (attribute_container_no_classdevs(cont)) return fn(cont, dev, NULL); klist_for_each_entry(ic, &cont->containers, node, &iter) { if (dev == ic->classdev.parent) { ret = fn(cont, dev, &ic->classdev); if (ret) { failed = ic; klist_iter_exit(&iter); goto fail; } } } return 0; fail: if (!undo) return ret; /* Attempt to undo the work partially done. 
*/ klist_for_each_entry(ic, &cont->containers, node, &iter) { if (ic == failed) { klist_iter_exit(&iter); break; } if (dev == ic->classdev.parent) undo(cont, dev, &ic->classdev); } return ret; } /** * attribute_container_device_trigger_safe - execute a trigger for each * matching classdev or fail all of them. * * @dev: The generic device to run the trigger for * @fn: the function to execute for each classdev. * @undo: A function to undo the work previously done in case of error * * This function is a safe version of * attribute_container_device_trigger. It stops on the first error and * undo the partial work that has been done, on previous classdev. It * is guaranteed that either they all succeeded, or none of them * succeeded. */ int attribute_container_device_trigger_safe(struct device *dev, int (*fn)(struct attribute_container *, struct device *, struct device *), int (*undo)(struct attribute_container *, struct device *, struct device *)) { struct attribute_container *cont, *failed = NULL; int ret = 0; mutex_lock(&attribute_container_mutex); list_for_each_entry(cont, &attribute_container_list, node) { if (!cont->match(cont, dev)) continue; ret = do_attribute_container_device_trigger_safe(dev, cont, fn, undo); if (ret) { failed = cont; break; } } if (ret && !WARN_ON(!undo)) { list_for_each_entry(cont, &attribute_container_list, node) { if (failed == cont) break; if (!cont->match(cont, dev)) continue; do_attribute_container_device_trigger_safe(dev, cont, undo, NULL); } } mutex_unlock(&attribute_container_mutex); return ret; } /** * attribute_container_device_trigger - execute a trigger for each matching classdev * * @dev: The generic device to run the trigger for * @fn: the function to execute for each classdev. * * This function is for executing a trigger when you need to know both * the container and the classdev. 
*/ void attribute_container_device_trigger(struct device *dev, int (*fn)(struct attribute_container *, struct device *, struct device *)) { struct attribute_container *cont; mutex_lock(&attribute_container_mutex); list_for_each_entry(cont, &attribute_container_list, node) { struct internal_container *ic; struct klist_iter iter; if (!cont->match(cont, dev)) continue; if (attribute_container_no_classdevs(cont)) { fn(cont, dev, NULL); continue; } klist_for_each_entry(ic, &cont->containers, node, &iter) { if (dev == ic->classdev.parent) fn(cont, dev, &ic->classdev); } } mutex_unlock(&attribute_container_mutex); } /** * attribute_container_add_attrs - add attributes * * @classdev: The class device * * This simply creates all the class device sysfs files from the * attributes listed in the container */ int attribute_container_add_attrs(struct device *classdev) { struct attribute_container *cont = attribute_container_classdev_to_container(classdev); struct device_attribute **attrs = cont->attrs; int i, error; BUG_ON(attrs && cont->grp); if (!attrs && !cont->grp) return 0; if (cont->grp) return sysfs_create_group(&classdev->kobj, cont->grp); for (i = 0; attrs[i]; i++) { sysfs_attr_init(&attrs[i]->attr); error = device_create_file(classdev, attrs[i]); if (error) return error; } return 0; } /** * attribute_container_add_class_device - same function as device_add * * @classdev: the class device to add * * This performs essentially the same function as device_add except for * attribute containers, namely add the classdev to the system and then * create the attribute files */ int attribute_container_add_class_device(struct device *classdev) { int error = device_add(classdev); if (error) return error; return attribute_container_add_attrs(classdev); } /** * attribute_container_remove_attrs - remove any attribute files * * @classdev: The class device to remove the files from * */ void attribute_container_remove_attrs(struct device *classdev) { struct attribute_container *cont = attribute_container_classdev_to_container(classdev); struct device_attribute **attrs = cont->attrs; int i; if (!attrs && !cont->grp) return; if (cont->grp) { sysfs_remove_group(&classdev->kobj, cont->grp); return ; } for (i = 0; attrs[i]; i++) device_remove_file(classdev, attrs[i]); } /** * attribute_container_class_device_del - equivalent of class_device_del * * @classdev: the class device * * This function simply removes all the attribute files and then calls * device_del. */ void attribute_container_class_device_del(struct device *classdev) { attribute_container_remove_attrs(classdev); device_del(classdev); } /** * attribute_container_find_class_device - find the corresponding class_device * * @cont: the container * @dev: the generic device * * Looks up the device in the container's list of class devices and returns * the corresponding class_device. */ struct device * attribute_container_find_class_device(struct attribute_container *cont, struct device *dev) { struct device *cdev = NULL; struct internal_container *ic; struct klist_iter iter; klist_for_each_entry(ic, &cont->containers, node, &iter) { if (ic->classdev.parent == dev) { cdev = &ic->classdev; /* FIXME: must exit iterator then break */ klist_iter_exit(&iter); break; } } return cdev; } EXPORT_SYMBOL_GPL(attribute_container_find_class_device); |
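/*
 * Illustrative sketch (not part of attribute_container.c): a minimal,
 * hypothetical user of the API above. The "example_*" names and the match
 * criterion are made up; only the attribute_container structure fields and
 * function calls mirror the real interface. In-tree users normally reach
 * this code through the transport_class wrappers layered on top of it.
 */
#include <linux/attribute_container.h>
#include <linux/device.h>
#include <linux/string.h>

static struct class example_class = {
	.name = "example_transport",
};

/* Decide whether @dev should get a classdev from this container. */
static int example_match(struct attribute_container *cont, struct device *dev)
{
	/* hypothetical criterion: attach to every device on "example_bus" */
	return dev->bus && !strcmp(dev->bus->name, "example_bus");
}

static struct attribute_container example_container = {
	.class = &example_class,
	.match = example_match,
};

static int __init example_container_init(void)
{
	return attribute_container_register(&example_container);
}

/* Hot-plug path: allocate and register a classdev for each matching device. */
static void example_device_added(struct device *dev)
{
	attribute_container_add_device(dev, NULL);
}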
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright 2002-2005, Instant802 Networks, Inc. * Copyright 2005-2006, Devicescape Software, Inc.
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> * Copyright 2007-2008 Johannes Berg <johannes@sipsolutions.net> * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright 2015-2017 Intel Deutschland GmbH * Copyright 2018-2020, 2022-2024 Intel Corporation */ #include <crypto/utils.h> #include <linux/if_ether.h> #include <linux/etherdevice.h> #include <linux/list.h> #include <linux/rcupdate.h> #include <linux/rtnetlink.h> #include <linux/slab.h> #include <linux/export.h> #include <net/mac80211.h> #include <linux/unaligned.h> #include "ieee80211_i.h" #include "driver-ops.h" #include "debugfs_key.h" #include "aes_ccm.h" #include "aes_cmac.h" #include "aes_gmac.h" #include "aes_gcm.h" /** * DOC: Key handling basics * * Key handling in mac80211 is done based on per-interface (sub_if_data) * keys and per-station keys. Since each station belongs to an interface, * each station key also belongs to that interface. * * Hardware acceleration is done on a best-effort basis for algorithms * that are implemented in software, for each key the hardware is asked * to enable that key for offloading but if it cannot do that the key is * simply kept for software encryption (unless it is for an algorithm * that isn't implemented in software). * There is currently no way of knowing whether a key is handled in SW * or HW except by looking into debugfs. * * All key management is internally protected by a mutex. Within all * other parts of mac80211, key references are, just as STA structure * references, protected by RCU. Note, however, that some things are * unprotected, namely the key->sta dereferences within the hardware * acceleration functions. This means that sta_info_destroy() must * remove the key which waits for an RCU grace period. */ static const u8 bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; static void update_vlan_tailroom_need_count(struct ieee80211_sub_if_data *sdata, int delta) { struct ieee80211_sub_if_data *vlan; if (sdata->vif.type != NL80211_IFTYPE_AP) return; /* crypto_tx_tailroom_needed_cnt is protected by this */ lockdep_assert_wiphy(sdata->local->hw.wiphy); rcu_read_lock(); list_for_each_entry_rcu(vlan, &sdata->u.ap.vlans, u.vlan.list) vlan->crypto_tx_tailroom_needed_cnt += delta; rcu_read_unlock(); } static void increment_tailroom_need_count(struct ieee80211_sub_if_data *sdata) { /* * When this count is zero, SKB resizing for allocating tailroom * for IV or MMIC is skipped. But, this check has created two race * cases in xmit path while transiting from zero count to one: * * 1. SKB resize was skipped because no key was added but just before * the xmit key is added and SW encryption kicks off. * * 2. SKB resize was skipped because all the keys were hw planted but * just before xmit one of the key is deleted and SW encryption kicks * off. * * In both the above case SW encryption will find not enough space for * tailroom and exits with WARN_ON. (See WARN_ONs at wpa.c) * * Solution has been explained at * http://mid.gmane.org/1308590980.4322.19.camel@jlt3.sipsolutions.net */ lockdep_assert_wiphy(sdata->local->hw.wiphy); update_vlan_tailroom_need_count(sdata, 1); if (!sdata->crypto_tx_tailroom_needed_cnt++) { /* * Flush all XMIT packets currently using HW encryption or no * encryption at all if the count transition is from 0 -> 1. 
*/ synchronize_net(); } } static void decrease_tailroom_need_count(struct ieee80211_sub_if_data *sdata, int delta) { lockdep_assert_wiphy(sdata->local->hw.wiphy); WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt < delta); update_vlan_tailroom_need_count(sdata, -delta); sdata->crypto_tx_tailroom_needed_cnt -= delta; } static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key) { struct ieee80211_sub_if_data *sdata = key->sdata; struct sta_info *sta; int ret = -EOPNOTSUPP; might_sleep(); lockdep_assert_wiphy(key->local->hw.wiphy); if (key->flags & KEY_FLAG_TAINTED) { /* If we get here, it's during resume and the key is * tainted so shouldn't be used/programmed any more. * However, its flags may still indicate that it was * programmed into the device (since we're in resume) * so clear that flag now to avoid trying to remove * it again later. */ if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE && !(key->conf.flags & (IEEE80211_KEY_FLAG_GENERATE_MMIC | IEEE80211_KEY_FLAG_PUT_MIC_SPACE | IEEE80211_KEY_FLAG_RESERVE_TAILROOM))) increment_tailroom_need_count(sdata); key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE; return -EINVAL; } if (!key->local->ops->set_key) goto out_unsupported; sta = key->sta; /* * If this is a per-STA GTK, check if it * is supported; if not, return. */ if (sta && !(key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE) && !ieee80211_hw_check(&key->local->hw, SUPPORTS_PER_STA_GTK)) goto out_unsupported; if (sta && !sta->uploaded) goto out_unsupported; if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) { /* * The driver doesn't know anything about VLAN interfaces. * Hence, don't send GTKs for VLAN interfaces to the driver. */ if (!(key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE)) { ret = 1; goto out_unsupported; } } if (key->conf.link_id >= 0 && sdata->vif.active_links && !(sdata->vif.active_links & BIT(key->conf.link_id))) return 0; ret = drv_set_key(key->local, SET_KEY, sdata, sta ? &sta->sta : NULL, &key->conf); if (!ret) { key->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE; if (!(key->conf.flags & (IEEE80211_KEY_FLAG_GENERATE_MMIC | IEEE80211_KEY_FLAG_PUT_MIC_SPACE | IEEE80211_KEY_FLAG_RESERVE_TAILROOM))) decrease_tailroom_need_count(sdata, 1); WARN_ON((key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE) && (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)); WARN_ON((key->conf.flags & IEEE80211_KEY_FLAG_PUT_MIC_SPACE) && (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC)); return 0; } if (ret != -ENOSPC && ret != -EOPNOTSUPP && ret != 1) sdata_err(sdata, "failed to set key (%d, %pM) to hardware (%d)\n", key->conf.keyidx, sta ? 
sta->sta.addr : bcast_addr, ret); out_unsupported: switch (key->conf.cipher) { case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: case WLAN_CIPHER_SUITE_TKIP: case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_CCMP_256: case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: case WLAN_CIPHER_SUITE_AES_CMAC: case WLAN_CIPHER_SUITE_BIP_CMAC_256: case WLAN_CIPHER_SUITE_BIP_GMAC_128: case WLAN_CIPHER_SUITE_BIP_GMAC_256: /* all of these we can do in software - if driver can */ if (ret == 1) return 0; if (ieee80211_hw_check(&key->local->hw, SW_CRYPTO_CONTROL)) return -EINVAL; return 0; default: return -EINVAL; } } static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key) { struct ieee80211_sub_if_data *sdata; struct sta_info *sta; int ret; might_sleep(); if (!key || !key->local->ops->set_key) return; if (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) return; sta = key->sta; sdata = key->sdata; lockdep_assert_wiphy(key->local->hw.wiphy); if (key->conf.link_id >= 0 && sdata->vif.active_links && !(sdata->vif.active_links & BIT(key->conf.link_id))) return; if (!(key->conf.flags & (IEEE80211_KEY_FLAG_GENERATE_MMIC | IEEE80211_KEY_FLAG_PUT_MIC_SPACE | IEEE80211_KEY_FLAG_RESERVE_TAILROOM))) increment_tailroom_need_count(sdata); key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE; ret = drv_set_key(key->local, DISABLE_KEY, sdata, sta ? &sta->sta : NULL, &key->conf); if (ret) sdata_err(sdata, "failed to remove key (%d, %pM) from hardware (%d)\n", key->conf.keyidx, sta ? sta->sta.addr : bcast_addr, ret); } static int _ieee80211_set_tx_key(struct ieee80211_key *key, bool force) { struct sta_info *sta = key->sta; struct ieee80211_local *local = key->local; lockdep_assert_wiphy(local->hw.wiphy); set_sta_flag(sta, WLAN_STA_USES_ENCRYPTION); sta->ptk_idx = key->conf.keyidx; if (force || !ieee80211_hw_check(&local->hw, AMPDU_KEYBORDER_SUPPORT)) clear_sta_flag(sta, WLAN_STA_BLOCK_BA); ieee80211_check_fast_xmit(sta); return 0; } int ieee80211_set_tx_key(struct ieee80211_key *key) { return _ieee80211_set_tx_key(key, false); } static void ieee80211_pairwise_rekey(struct ieee80211_key *old, struct ieee80211_key *new) { struct ieee80211_local *local = new->local; struct sta_info *sta = new->sta; int i; lockdep_assert_wiphy(local->hw.wiphy); if (new->conf.flags & IEEE80211_KEY_FLAG_NO_AUTO_TX) { /* Extended Key ID key install, initial one or rekey */ if (sta->ptk_idx != INVALID_PTK_KEYIDX && !ieee80211_hw_check(&local->hw, AMPDU_KEYBORDER_SUPPORT)) { /* Aggregation Sessions with Extended Key ID must not * mix MPDUs with different keyIDs within one A-MPDU. * Tear down running Tx aggregation sessions and block * new Rx/Tx aggregation requests during rekey to * ensure there are no A-MPDUs when the driver is not * supporting A-MPDU key borders. (Blocking Tx only * would be sufficient but WLAN_STA_BLOCK_BA gets the * job done for the few ms we need it.) */ set_sta_flag(sta, WLAN_STA_BLOCK_BA); for (i = 0; i < IEEE80211_NUM_TIDS; i++) __ieee80211_stop_tx_ba_session(sta, i, AGG_STOP_LOCAL_REQUEST); } } else if (old) { /* Rekey without Extended Key ID. * Aggregation sessions are OK when running on SW crypto. * A broken remote STA may cause issues not observed with HW * crypto, though. 
*/ if (!(old->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) return; /* Stop Tx till we are on the new key */ old->flags |= KEY_FLAG_TAINTED; ieee80211_clear_fast_xmit(sta); if (ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION)) { set_sta_flag(sta, WLAN_STA_BLOCK_BA); ieee80211_sta_tear_down_BA_sessions(sta, AGG_STOP_LOCAL_REQUEST); } if (!wiphy_ext_feature_isset(local->hw.wiphy, NL80211_EXT_FEATURE_CAN_REPLACE_PTK0)) { pr_warn_ratelimited("Rekeying PTK for STA %pM but driver can't safely do that.", sta->sta.addr); /* Flushing the driver queues *may* help prevent * the clear text leaks and freezes. */ ieee80211_flush_queues(local, old->sdata, false); } } } static void __ieee80211_set_default_key(struct ieee80211_link_data *link, int idx, bool uni, bool multi) { struct ieee80211_sub_if_data *sdata = link->sdata; struct ieee80211_key *key = NULL; lockdep_assert_wiphy(sdata->local->hw.wiphy); if (idx >= 0 && idx < NUM_DEFAULT_KEYS) { key = wiphy_dereference(sdata->local->hw.wiphy, sdata->keys[idx]); if (!key) key = wiphy_dereference(sdata->local->hw.wiphy, link->gtk[idx]); } if (uni) { rcu_assign_pointer(sdata->default_unicast_key, key); ieee80211_check_fast_xmit_iface(sdata); if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN) drv_set_default_unicast_key(sdata->local, sdata, idx); } if (multi) rcu_assign_pointer(link->default_multicast_key, key); ieee80211_debugfs_key_update_default(sdata); } void ieee80211_set_default_key(struct ieee80211_link_data *link, int idx, bool uni, bool multi) { lockdep_assert_wiphy(link->sdata->local->hw.wiphy); __ieee80211_set_default_key(link, idx, uni, multi); } static void __ieee80211_set_default_mgmt_key(struct ieee80211_link_data *link, int idx) { struct ieee80211_sub_if_data *sdata = link->sdata; struct ieee80211_key *key = NULL; lockdep_assert_wiphy(sdata->local->hw.wiphy); if (idx >= NUM_DEFAULT_KEYS && idx < NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS) key = wiphy_dereference(sdata->local->hw.wiphy, link->gtk[idx]); rcu_assign_pointer(link->default_mgmt_key, key); ieee80211_debugfs_key_update_default(sdata); } void ieee80211_set_default_mgmt_key(struct ieee80211_link_data *link, int idx) { lockdep_assert_wiphy(link->sdata->local->hw.wiphy); __ieee80211_set_default_mgmt_key(link, idx); } static void __ieee80211_set_default_beacon_key(struct ieee80211_link_data *link, int idx) { struct ieee80211_sub_if_data *sdata = link->sdata; struct ieee80211_key *key = NULL; lockdep_assert_wiphy(sdata->local->hw.wiphy); if (idx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS && idx < NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS + NUM_DEFAULT_BEACON_KEYS) key = wiphy_dereference(sdata->local->hw.wiphy, link->gtk[idx]); rcu_assign_pointer(link->default_beacon_key, key); ieee80211_debugfs_key_update_default(sdata); } void ieee80211_set_default_beacon_key(struct ieee80211_link_data *link, int idx) { lockdep_assert_wiphy(link->sdata->local->hw.wiphy); __ieee80211_set_default_beacon_key(link, idx); } static int ieee80211_key_replace(struct ieee80211_sub_if_data *sdata, struct ieee80211_link_data *link, struct sta_info *sta, bool pairwise, struct ieee80211_key *old, struct ieee80211_key *new) { struct link_sta_info *link_sta = sta ? 
&sta->deflink : NULL; int link_id; int idx; int ret = 0; bool defunikey, defmultikey, defmgmtkey, defbeaconkey; bool is_wep; lockdep_assert_wiphy(sdata->local->hw.wiphy); /* caller must provide at least one old/new */ if (WARN_ON(!new && !old)) return 0; if (new) { idx = new->conf.keyidx; is_wep = new->conf.cipher == WLAN_CIPHER_SUITE_WEP40 || new->conf.cipher == WLAN_CIPHER_SUITE_WEP104; link_id = new->conf.link_id; } else { idx = old->conf.keyidx; is_wep = old->conf.cipher == WLAN_CIPHER_SUITE_WEP40 || old->conf.cipher == WLAN_CIPHER_SUITE_WEP104; link_id = old->conf.link_id; } if (WARN(old && old->conf.link_id != link_id, "old link ID %d doesn't match new link ID %d\n", old->conf.link_id, link_id)) return -EINVAL; if (link_id >= 0) { if (!link) { link = sdata_dereference(sdata->link[link_id], sdata); if (!link) return -ENOLINK; } if (sta) { link_sta = rcu_dereference_protected(sta->link[link_id], lockdep_is_held(&sta->local->hw.wiphy->mtx)); if (!link_sta) return -ENOLINK; } } else { link = &sdata->deflink; } if ((is_wep || pairwise) && idx >= NUM_DEFAULT_KEYS) return -EINVAL; WARN_ON(new && old && new->conf.keyidx != old->conf.keyidx); if (new && sta && pairwise) { /* Unicast rekey needs special handling. With Extended Key ID * old is still NULL for the first rekey. */ ieee80211_pairwise_rekey(old, new); } if (old) { if (old->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) { ieee80211_key_disable_hw_accel(old); if (new) ret = ieee80211_key_enable_hw_accel(new); } } else { if (!new->local->wowlan) ret = ieee80211_key_enable_hw_accel(new); else new->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE; } if (ret) return ret; if (new) list_add_tail_rcu(&new->list, &sdata->key_list); if (sta) { if (pairwise) { rcu_assign_pointer(sta->ptk[idx], new); if (new && !(new->conf.flags & IEEE80211_KEY_FLAG_NO_AUTO_TX)) _ieee80211_set_tx_key(new, true); } else { rcu_assign_pointer(link_sta->gtk[idx], new); } /* Only needed for transition from no key -> key. * Still triggers unnecessary when using Extended Key ID * and installing the second key ID the first time. 
*/ if (new && !old) ieee80211_check_fast_rx(sta); } else { defunikey = old && old == wiphy_dereference(sdata->local->hw.wiphy, sdata->default_unicast_key); defmultikey = old && old == wiphy_dereference(sdata->local->hw.wiphy, link->default_multicast_key); defmgmtkey = old && old == wiphy_dereference(sdata->local->hw.wiphy, link->default_mgmt_key); defbeaconkey = old && old == wiphy_dereference(sdata->local->hw.wiphy, link->default_beacon_key); if (defunikey && !new) __ieee80211_set_default_key(link, -1, true, false); if (defmultikey && !new) __ieee80211_set_default_key(link, -1, false, true); if (defmgmtkey && !new) __ieee80211_set_default_mgmt_key(link, -1); if (defbeaconkey && !new) __ieee80211_set_default_beacon_key(link, -1); if (is_wep || pairwise) rcu_assign_pointer(sdata->keys[idx], new); else rcu_assign_pointer(link->gtk[idx], new); if (defunikey && new) __ieee80211_set_default_key(link, new->conf.keyidx, true, false); if (defmultikey && new) __ieee80211_set_default_key(link, new->conf.keyidx, false, true); if (defmgmtkey && new) __ieee80211_set_default_mgmt_key(link, new->conf.keyidx); if (defbeaconkey && new) __ieee80211_set_default_beacon_key(link, new->conf.keyidx); } if (old) list_del_rcu(&old->list); return 0; } struct ieee80211_key * ieee80211_key_alloc(u32 cipher, int idx, size_t key_len, const u8 *key_data, size_t seq_len, const u8 *seq) { struct ieee80211_key *key; int i, j, err; if (WARN_ON(idx < 0 || idx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS + NUM_DEFAULT_BEACON_KEYS)) return ERR_PTR(-EINVAL); key = kzalloc(sizeof(struct ieee80211_key) + key_len, GFP_KERNEL); if (!key) return ERR_PTR(-ENOMEM); /* * Default to software encryption; we'll later upload the * key to the hardware if possible. */ key->conf.flags = 0; key->flags = 0; key->conf.link_id = -1; key->conf.cipher = cipher; key->conf.keyidx = idx; key->conf.keylen = key_len; switch (cipher) { case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: key->conf.iv_len = IEEE80211_WEP_IV_LEN; key->conf.icv_len = IEEE80211_WEP_ICV_LEN; break; case WLAN_CIPHER_SUITE_TKIP: key->conf.iv_len = IEEE80211_TKIP_IV_LEN; key->conf.icv_len = IEEE80211_TKIP_ICV_LEN; if (seq) { for (i = 0; i < IEEE80211_NUM_TIDS; i++) { key->u.tkip.rx[i].iv32 = get_unaligned_le32(&seq[2]); key->u.tkip.rx[i].iv16 = get_unaligned_le16(seq); } } spin_lock_init(&key->u.tkip.txlock); break; case WLAN_CIPHER_SUITE_CCMP: key->conf.iv_len = IEEE80211_CCMP_HDR_LEN; key->conf.icv_len = IEEE80211_CCMP_MIC_LEN; if (seq) { for (i = 0; i < IEEE80211_NUM_TIDS + 1; i++) for (j = 0; j < IEEE80211_CCMP_PN_LEN; j++) key->u.ccmp.rx_pn[i][j] = seq[IEEE80211_CCMP_PN_LEN - j - 1]; } /* * Initialize AES key state here as an optimization so that * it does not need to be initialized for every packet. */ key->u.ccmp.tfm = ieee80211_aes_key_setup_encrypt( key_data, key_len, IEEE80211_CCMP_MIC_LEN); if (IS_ERR(key->u.ccmp.tfm)) { err = PTR_ERR(key->u.ccmp.tfm); kfree(key); return ERR_PTR(err); } break; case WLAN_CIPHER_SUITE_CCMP_256: key->conf.iv_len = IEEE80211_CCMP_256_HDR_LEN; key->conf.icv_len = IEEE80211_CCMP_256_MIC_LEN; for (i = 0; seq && i < IEEE80211_NUM_TIDS + 1; i++) for (j = 0; j < IEEE80211_CCMP_256_PN_LEN; j++) key->u.ccmp.rx_pn[i][j] = seq[IEEE80211_CCMP_256_PN_LEN - j - 1]; /* Initialize AES key state here as an optimization so that * it does not need to be initialized for every packet. 
*/ key->u.ccmp.tfm = ieee80211_aes_key_setup_encrypt( key_data, key_len, IEEE80211_CCMP_256_MIC_LEN); if (IS_ERR(key->u.ccmp.tfm)) { err = PTR_ERR(key->u.ccmp.tfm); kfree(key); return ERR_PTR(err); } break; case WLAN_CIPHER_SUITE_AES_CMAC: case WLAN_CIPHER_SUITE_BIP_CMAC_256: key->conf.iv_len = 0; if (cipher == WLAN_CIPHER_SUITE_AES_CMAC) key->conf.icv_len = sizeof(struct ieee80211_mmie); else key->conf.icv_len = sizeof(struct ieee80211_mmie_16); if (seq) for (j = 0; j < IEEE80211_CMAC_PN_LEN; j++) key->u.aes_cmac.rx_pn[j] = seq[IEEE80211_CMAC_PN_LEN - j - 1]; /* * Initialize AES key state here as an optimization so that * it does not need to be initialized for every packet. */ key->u.aes_cmac.tfm = ieee80211_aes_cmac_key_setup(key_data, key_len); if (IS_ERR(key->u.aes_cmac.tfm)) { err = PTR_ERR(key->u.aes_cmac.tfm); kfree(key); return ERR_PTR(err); } break; case WLAN_CIPHER_SUITE_BIP_GMAC_128: case WLAN_CIPHER_SUITE_BIP_GMAC_256: key->conf.iv_len = 0; key->conf.icv_len = sizeof(struct ieee80211_mmie_16); if (seq) for (j = 0; j < IEEE80211_GMAC_PN_LEN; j++) key->u.aes_gmac.rx_pn[j] = seq[IEEE80211_GMAC_PN_LEN - j - 1]; /* Initialize AES key state here as an optimization so that * it does not need to be initialized for every packet. */ key->u.aes_gmac.tfm = ieee80211_aes_gmac_key_setup(key_data, key_len); if (IS_ERR(key->u.aes_gmac.tfm)) { err = PTR_ERR(key->u.aes_gmac.tfm); kfree(key); return ERR_PTR(err); } break; case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: key->conf.iv_len = IEEE80211_GCMP_HDR_LEN; key->conf.icv_len = IEEE80211_GCMP_MIC_LEN; for (i = 0; seq && i < IEEE80211_NUM_TIDS + 1; i++) for (j = 0; j < IEEE80211_GCMP_PN_LEN; j++) key->u.gcmp.rx_pn[i][j] = seq[IEEE80211_GCMP_PN_LEN - j - 1]; /* Initialize AES key state here as an optimization so that * it does not need to be initialized for every packet. */ key->u.gcmp.tfm = ieee80211_aes_gcm_key_setup_encrypt(key_data, key_len); if (IS_ERR(key->u.gcmp.tfm)) { err = PTR_ERR(key->u.gcmp.tfm); kfree(key); return ERR_PTR(err); } break; } memcpy(key->conf.key, key_data, key_len); INIT_LIST_HEAD(&key->list); return key; } static void ieee80211_key_free_common(struct ieee80211_key *key) { switch (key->conf.cipher) { case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_CCMP_256: ieee80211_aes_key_free(key->u.ccmp.tfm); break; case WLAN_CIPHER_SUITE_AES_CMAC: case WLAN_CIPHER_SUITE_BIP_CMAC_256: ieee80211_aes_cmac_key_free(key->u.aes_cmac.tfm); break; case WLAN_CIPHER_SUITE_BIP_GMAC_128: case WLAN_CIPHER_SUITE_BIP_GMAC_256: ieee80211_aes_gmac_key_free(key->u.aes_gmac.tfm); break; case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: ieee80211_aes_gcm_key_free(key->u.gcmp.tfm); break; } kfree_sensitive(key); } static void __ieee80211_key_destroy(struct ieee80211_key *key, bool delay_tailroom) { if (key->local) { struct ieee80211_sub_if_data *sdata = key->sdata; ieee80211_debugfs_key_remove(key); if (delay_tailroom) { /* see ieee80211_delayed_tailroom_dec */ sdata->crypto_tx_tailroom_pending_dec++; wiphy_delayed_work_queue(sdata->local->hw.wiphy, &sdata->dec_tailroom_needed_wk, HZ / 2); } else { decrease_tailroom_need_count(sdata, 1); } } ieee80211_key_free_common(key); } static void ieee80211_key_destroy(struct ieee80211_key *key, bool delay_tailroom) { if (!key) return; /* * Synchronize so the TX path and rcu key iterators * can no longer be using this key before we free/remove it. 
*/ synchronize_net(); __ieee80211_key_destroy(key, delay_tailroom); } void ieee80211_key_free_unused(struct ieee80211_key *key) { if (!key) return; WARN_ON(key->sdata || key->local); ieee80211_key_free_common(key); } static bool ieee80211_key_identical(struct ieee80211_sub_if_data *sdata, struct ieee80211_key *old, struct ieee80211_key *new) { u8 tkip_old[WLAN_KEY_LEN_TKIP], tkip_new[WLAN_KEY_LEN_TKIP]; u8 *tk_old, *tk_new; if (!old || new->conf.keylen != old->conf.keylen) return false; tk_old = old->conf.key; tk_new = new->conf.key; /* * In station mode, don't compare the TX MIC key, as it's never used * and offloaded rekeying may not care to send it to the host. This * is the case in iwlwifi, for example. */ if (sdata->vif.type == NL80211_IFTYPE_STATION && new->conf.cipher == WLAN_CIPHER_SUITE_TKIP && new->conf.keylen == WLAN_KEY_LEN_TKIP && !(new->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE)) { memcpy(tkip_old, tk_old, WLAN_KEY_LEN_TKIP); memcpy(tkip_new, tk_new, WLAN_KEY_LEN_TKIP); memset(tkip_old + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY, 0, 8); memset(tkip_new + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY, 0, 8); tk_old = tkip_old; tk_new = tkip_new; } return !crypto_memneq(tk_old, tk_new, new->conf.keylen); } int ieee80211_key_link(struct ieee80211_key *key, struct ieee80211_link_data *link, struct sta_info *sta) { struct ieee80211_sub_if_data *sdata = link->sdata; static atomic_t key_color = ATOMIC_INIT(0); struct ieee80211_key *old_key = NULL; int idx = key->conf.keyidx; bool pairwise = key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE; /* * We want to delay tailroom updates only for station - in that * case it helps roaming speed, but in other cases it hurts and * can cause warnings to appear. */ bool delay_tailroom = sdata->vif.type == NL80211_IFTYPE_STATION; int ret; lockdep_assert_wiphy(sdata->local->hw.wiphy); if (sta && pairwise) { struct ieee80211_key *alt_key; old_key = wiphy_dereference(sdata->local->hw.wiphy, sta->ptk[idx]); alt_key = wiphy_dereference(sdata->local->hw.wiphy, sta->ptk[idx ^ 1]); /* The rekey code assumes that the old and new key are using * the same cipher. Enforce the assumption for pairwise keys. */ if ((alt_key && alt_key->conf.cipher != key->conf.cipher) || (old_key && old_key->conf.cipher != key->conf.cipher)) { ret = -EOPNOTSUPP; goto out; } } else if (sta) { struct link_sta_info *link_sta = &sta->deflink; int link_id = key->conf.link_id; if (link_id >= 0) { link_sta = rcu_dereference_protected(sta->link[link_id], lockdep_is_held(&sta->local->hw.wiphy->mtx)); if (!link_sta) { ret = -ENOLINK; goto out; } } old_key = wiphy_dereference(sdata->local->hw.wiphy, link_sta->gtk[idx]); } else { if (idx < NUM_DEFAULT_KEYS) old_key = wiphy_dereference(sdata->local->hw.wiphy, sdata->keys[idx]); if (!old_key) old_key = wiphy_dereference(sdata->local->hw.wiphy, link->gtk[idx]); } /* Non-pairwise keys must also not switch the cipher on rekey */ if (!pairwise) { if (old_key && old_key->conf.cipher != key->conf.cipher) { ret = -EOPNOTSUPP; goto out; } } /* * Silently accept key re-installation without really installing the * new version of the key to avoid nonce reuse or replay issues. */ if (ieee80211_key_identical(sdata, old_key, key)) { ret = -EALREADY; goto out; } key->local = sdata->local; key->sdata = sdata; key->sta = sta; /* * Assign a unique ID to every key so we can easily prevent mixed * key and fragment cache attacks. 
*/ key->color = atomic_inc_return(&key_color); /* keep this flag for easier access later */ if (sta && sta->sta.spp_amsdu) key->conf.flags |= IEEE80211_KEY_FLAG_SPP_AMSDU; increment_tailroom_need_count(sdata); ret = ieee80211_key_replace(sdata, link, sta, pairwise, old_key, key); if (!ret) { ieee80211_debugfs_key_add(key); ieee80211_key_destroy(old_key, delay_tailroom); } else { ieee80211_key_free(key, delay_tailroom); } key = NULL; out: ieee80211_key_free_unused(key); return ret; } void ieee80211_key_free(struct ieee80211_key *key, bool delay_tailroom) { if (!key) return; /* * Replace key with nothingness if it was ever used. */ if (key->sdata) ieee80211_key_replace(key->sdata, NULL, key->sta, key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE, key, NULL); ieee80211_key_destroy(key, delay_tailroom); } void ieee80211_reenable_keys(struct ieee80211_sub_if_data *sdata) { struct ieee80211_key *key; struct ieee80211_sub_if_data *vlan; lockdep_assert_wiphy(sdata->local->hw.wiphy); sdata->crypto_tx_tailroom_needed_cnt = 0; sdata->crypto_tx_tailroom_pending_dec = 0; if (sdata->vif.type == NL80211_IFTYPE_AP) { list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) { vlan->crypto_tx_tailroom_needed_cnt = 0; vlan->crypto_tx_tailroom_pending_dec = 0; } } if (ieee80211_sdata_running(sdata)) { list_for_each_entry(key, &sdata->key_list, list) { increment_tailroom_need_count(sdata); ieee80211_key_enable_hw_accel(key); } } } static void ieee80211_key_iter(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_key *key, void (*iter)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *key, void *data), void *iter_data) { /* skip keys of station in removal process */ if (key->sta && key->sta->removed) return; if (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) return; iter(hw, vif, key->sta ? 
&key->sta->sta : NULL, &key->conf, iter_data); } void ieee80211_iter_keys(struct ieee80211_hw *hw, struct ieee80211_vif *vif, void (*iter)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *key, void *data), void *iter_data) { struct ieee80211_local *local = hw_to_local(hw); struct ieee80211_key *key, *tmp; struct ieee80211_sub_if_data *sdata; lockdep_assert_wiphy(hw->wiphy); if (vif) { sdata = vif_to_sdata(vif); list_for_each_entry_safe(key, tmp, &sdata->key_list, list) ieee80211_key_iter(hw, vif, key, iter, iter_data); } else { list_for_each_entry(sdata, &local->interfaces, list) list_for_each_entry_safe(key, tmp, &sdata->key_list, list) ieee80211_key_iter(hw, &sdata->vif, key, iter, iter_data); } } EXPORT_SYMBOL(ieee80211_iter_keys); static void _ieee80211_iter_keys_rcu(struct ieee80211_hw *hw, struct ieee80211_sub_if_data *sdata, void (*iter)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *key, void *data), void *iter_data) { struct ieee80211_key *key; list_for_each_entry_rcu(key, &sdata->key_list, list) ieee80211_key_iter(hw, &sdata->vif, key, iter, iter_data); } void ieee80211_iter_keys_rcu(struct ieee80211_hw *hw, struct ieee80211_vif *vif, void (*iter)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *key, void *data), void *iter_data) { struct ieee80211_local *local = hw_to_local(hw); struct ieee80211_sub_if_data *sdata; if (vif) { sdata = vif_to_sdata(vif); _ieee80211_iter_keys_rcu(hw, sdata, iter, iter_data); } else { list_for_each_entry_rcu(sdata, &local->interfaces, list) _ieee80211_iter_keys_rcu(hw, sdata, iter, iter_data); } } EXPORT_SYMBOL(ieee80211_iter_keys_rcu); static void ieee80211_free_keys_iface(struct ieee80211_sub_if_data *sdata, struct list_head *keys) { struct ieee80211_key *key, *tmp; decrease_tailroom_need_count(sdata, sdata->crypto_tx_tailroom_pending_dec); sdata->crypto_tx_tailroom_pending_dec = 0; ieee80211_debugfs_key_remove_mgmt_default(sdata); ieee80211_debugfs_key_remove_beacon_default(sdata); list_for_each_entry_safe(key, tmp, &sdata->key_list, list) { ieee80211_key_replace(key->sdata, NULL, key->sta, key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE, key, NULL); list_add_tail(&key->list, keys); } ieee80211_debugfs_key_update_default(sdata); } void ieee80211_remove_link_keys(struct ieee80211_link_data *link, struct list_head *keys) { struct ieee80211_sub_if_data *sdata = link->sdata; struct ieee80211_local *local = sdata->local; struct ieee80211_key *key, *tmp; lockdep_assert_wiphy(local->hw.wiphy); list_for_each_entry_safe(key, tmp, &sdata->key_list, list) { if (key->conf.link_id != link->link_id) continue; ieee80211_key_replace(key->sdata, link, key->sta, key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE, key, NULL); list_add_tail(&key->list, keys); } } void ieee80211_free_key_list(struct ieee80211_local *local, struct list_head *keys) { struct ieee80211_key *key, *tmp; lockdep_assert_wiphy(local->hw.wiphy); list_for_each_entry_safe(key, tmp, keys, list) __ieee80211_key_destroy(key, false); } void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata, bool force_synchronize) { struct ieee80211_local *local = sdata->local; struct ieee80211_sub_if_data *vlan; struct ieee80211_sub_if_data *master; struct ieee80211_key *key, *tmp; LIST_HEAD(keys); wiphy_delayed_work_cancel(local->hw.wiphy, &sdata->dec_tailroom_needed_wk); lockdep_assert_wiphy(local->hw.wiphy); ieee80211_free_keys_iface(sdata, 
&keys); if (sdata->vif.type == NL80211_IFTYPE_AP) { list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) ieee80211_free_keys_iface(vlan, &keys); } if (!list_empty(&keys) || force_synchronize) synchronize_net(); list_for_each_entry_safe(key, tmp, &keys, list) __ieee80211_key_destroy(key, false); if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) { if (sdata->bss) { master = container_of(sdata->bss, struct ieee80211_sub_if_data, u.ap); WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt != master->crypto_tx_tailroom_needed_cnt); } } else { WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt || sdata->crypto_tx_tailroom_pending_dec); } if (sdata->vif.type == NL80211_IFTYPE_AP) { list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) WARN_ON_ONCE(vlan->crypto_tx_tailroom_needed_cnt || vlan->crypto_tx_tailroom_pending_dec); } } void ieee80211_free_sta_keys(struct ieee80211_local *local, struct sta_info *sta) { struct ieee80211_key *key; int i; lockdep_assert_wiphy(local->hw.wiphy); for (i = 0; i < ARRAY_SIZE(sta->deflink.gtk); i++) { key = wiphy_dereference(local->hw.wiphy, sta->deflink.gtk[i]); if (!key) continue; ieee80211_key_replace(key->sdata, NULL, key->sta, key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE, key, NULL); __ieee80211_key_destroy(key, key->sdata->vif.type == NL80211_IFTYPE_STATION); } for (i = 0; i < NUM_DEFAULT_KEYS; i++) { key = wiphy_dereference(local->hw.wiphy, sta->ptk[i]); if (!key) continue; ieee80211_key_replace(key->sdata, NULL, key->sta, key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE, key, NULL); __ieee80211_key_destroy(key, key->sdata->vif.type == NL80211_IFTYPE_STATION); } } void ieee80211_delayed_tailroom_dec(struct wiphy *wiphy, struct wiphy_work *wk) { struct ieee80211_sub_if_data *sdata; sdata = container_of(wk, struct ieee80211_sub_if_data, dec_tailroom_needed_wk.work); /* * The reason for the delayed tailroom needed decrementing is to * make roaming faster: during roaming, all keys are first deleted * and then new keys are installed. The first new key causes the * crypto_tx_tailroom_needed_cnt to go from 0 to 1, which invokes * the cost of synchronize_net() (which can be slow). Avoid this * by deferring the crypto_tx_tailroom_needed_cnt decrementing on * key removal for a while, so if we roam the value is larger than * zero and no 0->1 transition happens. * * The cost is that if the AP switching was from an AP with keys * to one without, we still allocate tailroom while it would no * longer be needed. However, in the typical (fast) roaming case * within an ESS this usually won't happen. 
*/ decrease_tailroom_need_count(sdata, sdata->crypto_tx_tailroom_pending_dec); sdata->crypto_tx_tailroom_pending_dec = 0; } void ieee80211_gtk_rekey_notify(struct ieee80211_vif *vif, const u8 *bssid, const u8 *replay_ctr, gfp_t gfp) { struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); trace_api_gtk_rekey_notify(sdata, bssid, replay_ctr); cfg80211_gtk_rekey_notify(sdata->dev, bssid, replay_ctr, gfp); } EXPORT_SYMBOL_GPL(ieee80211_gtk_rekey_notify); void ieee80211_get_key_rx_seq(struct ieee80211_key_conf *keyconf, int tid, struct ieee80211_key_seq *seq) { struct ieee80211_key *key; const u8 *pn; key = container_of(keyconf, struct ieee80211_key, conf); switch (key->conf.cipher) { case WLAN_CIPHER_SUITE_TKIP: if (WARN_ON(tid < 0 || tid >= IEEE80211_NUM_TIDS)) return; seq->tkip.iv32 = key->u.tkip.rx[tid].iv32; seq->tkip.iv16 = key->u.tkip.rx[tid].iv16; break; case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_CCMP_256: if (WARN_ON(tid < -1 || tid >= IEEE80211_NUM_TIDS)) return; if (tid < 0) pn = key->u.ccmp.rx_pn[IEEE80211_NUM_TIDS]; else pn = key->u.ccmp.rx_pn[tid]; memcpy(seq->ccmp.pn, pn, IEEE80211_CCMP_PN_LEN); break; case WLAN_CIPHER_SUITE_AES_CMAC: case WLAN_CIPHER_SUITE_BIP_CMAC_256: if (WARN_ON(tid != 0)) return; pn = key->u.aes_cmac.rx_pn; memcpy(seq->aes_cmac.pn, pn, IEEE80211_CMAC_PN_LEN); break; case WLAN_CIPHER_SUITE_BIP_GMAC_128: case WLAN_CIPHER_SUITE_BIP_GMAC_256: if (WARN_ON(tid != 0)) return; pn = key->u.aes_gmac.rx_pn; memcpy(seq->aes_gmac.pn, pn, IEEE80211_GMAC_PN_LEN); break; case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: if (WARN_ON(tid < -1 || tid >= IEEE80211_NUM_TIDS)) return; if (tid < 0) pn = key->u.gcmp.rx_pn[IEEE80211_NUM_TIDS]; else pn = key->u.gcmp.rx_pn[tid]; memcpy(seq->gcmp.pn, pn, IEEE80211_GCMP_PN_LEN); break; } } EXPORT_SYMBOL(ieee80211_get_key_rx_seq); void ieee80211_set_key_rx_seq(struct ieee80211_key_conf *keyconf, int tid, struct ieee80211_key_seq *seq) { struct ieee80211_key *key; u8 *pn; key = container_of(keyconf, struct ieee80211_key, conf); switch (key->conf.cipher) { case WLAN_CIPHER_SUITE_TKIP: if (WARN_ON(tid < 0 || tid >= IEEE80211_NUM_TIDS)) return; key->u.tkip.rx[tid].iv32 = seq->tkip.iv32; key->u.tkip.rx[tid].iv16 = seq->tkip.iv16; break; case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_CCMP_256: if (WARN_ON(tid < -1 || tid >= IEEE80211_NUM_TIDS)) return; if (tid < 0) pn = key->u.ccmp.rx_pn[IEEE80211_NUM_TIDS]; else pn = key->u.ccmp.rx_pn[tid]; memcpy(pn, seq->ccmp.pn, IEEE80211_CCMP_PN_LEN); break; case WLAN_CIPHER_SUITE_AES_CMAC: case WLAN_CIPHER_SUITE_BIP_CMAC_256: if (WARN_ON(tid != 0)) return; pn = key->u.aes_cmac.rx_pn; memcpy(pn, seq->aes_cmac.pn, IEEE80211_CMAC_PN_LEN); break; case WLAN_CIPHER_SUITE_BIP_GMAC_128: case WLAN_CIPHER_SUITE_BIP_GMAC_256: if (WARN_ON(tid != 0)) return; pn = key->u.aes_gmac.rx_pn; memcpy(pn, seq->aes_gmac.pn, IEEE80211_GMAC_PN_LEN); break; case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: if (WARN_ON(tid < -1 || tid >= IEEE80211_NUM_TIDS)) return; if (tid < 0) pn = key->u.gcmp.rx_pn[IEEE80211_NUM_TIDS]; else pn = key->u.gcmp.rx_pn[tid]; memcpy(pn, seq->gcmp.pn, IEEE80211_GCMP_PN_LEN); break; default: WARN_ON(1); break; } } EXPORT_SYMBOL_GPL(ieee80211_set_key_rx_seq); void ieee80211_remove_key(struct ieee80211_key_conf *keyconf) { struct ieee80211_key *key; key = container_of(keyconf, struct ieee80211_key, conf); lockdep_assert_wiphy(key->local->hw.wiphy); /* * if key was uploaded, we assume the driver will/has remove(d) * it, so adjust bookkeeping accordingly */ if 
(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) { key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE; if (!(key->conf.flags & (IEEE80211_KEY_FLAG_GENERATE_MMIC | IEEE80211_KEY_FLAG_PUT_MIC_SPACE | IEEE80211_KEY_FLAG_RESERVE_TAILROOM))) increment_tailroom_need_count(key->sdata); } ieee80211_key_free(key, false); } EXPORT_SYMBOL_GPL(ieee80211_remove_key); struct ieee80211_key_conf * ieee80211_gtk_rekey_add(struct ieee80211_vif *vif, struct ieee80211_key_conf *keyconf, int link_id) { struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); struct ieee80211_local *local = sdata->local; struct ieee80211_key *key; int err; struct ieee80211_link_data *link_data = link_id < 0 ? &sdata->deflink : sdata_dereference(sdata->link[link_id], sdata); if (WARN_ON(!link_data)) return ERR_PTR(-EINVAL); if (WARN_ON(!local->wowlan)) return ERR_PTR(-EINVAL); if (WARN_ON(vif->type != NL80211_IFTYPE_STATION)) return ERR_PTR(-EINVAL); key = ieee80211_key_alloc(keyconf->cipher, keyconf->keyidx, keyconf->keylen, keyconf->key, 0, NULL); if (IS_ERR(key)) return ERR_CAST(key); if (sdata->u.mgd.mfp != IEEE80211_MFP_DISABLED) key->conf.flags |= IEEE80211_KEY_FLAG_RX_MGMT; key->conf.link_id = link_data->link_id; err = ieee80211_key_link(key, link_data, NULL); if (err) return ERR_PTR(err); return &key->conf; } EXPORT_SYMBOL_GPL(ieee80211_gtk_rekey_add); void ieee80211_key_mic_failure(struct ieee80211_key_conf *keyconf) { struct ieee80211_key *key; key = container_of(keyconf, struct ieee80211_key, conf); switch (key->conf.cipher) { case WLAN_CIPHER_SUITE_AES_CMAC: case WLAN_CIPHER_SUITE_BIP_CMAC_256: key->u.aes_cmac.icverrors++; break; case WLAN_CIPHER_SUITE_BIP_GMAC_128: case WLAN_CIPHER_SUITE_BIP_GMAC_256: key->u.aes_gmac.icverrors++; break; default: /* ignore the others for now, we don't keep counters now */ break; } } EXPORT_SYMBOL_GPL(ieee80211_key_mic_failure); void ieee80211_key_replay(struct ieee80211_key_conf *keyconf) { struct ieee80211_key *key; key = container_of(keyconf, struct ieee80211_key, conf); switch (key->conf.cipher) { case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_CCMP_256: key->u.ccmp.replays++; break; case WLAN_CIPHER_SUITE_AES_CMAC: case WLAN_CIPHER_SUITE_BIP_CMAC_256: key->u.aes_cmac.replays++; break; case WLAN_CIPHER_SUITE_BIP_GMAC_128: case WLAN_CIPHER_SUITE_BIP_GMAC_256: key->u.aes_gmac.replays++; break; case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: key->u.gcmp.replays++; break; } } EXPORT_SYMBOL_GPL(ieee80211_key_replay); int ieee80211_key_switch_links(struct ieee80211_sub_if_data *sdata, unsigned long del_links_mask, unsigned long add_links_mask) { struct ieee80211_key *key; int ret; list_for_each_entry(key, &sdata->key_list, list) { if (key->conf.link_id < 0 || !(del_links_mask & BIT(key->conf.link_id))) continue; /* shouldn't happen for per-link keys */ WARN_ON(key->sta); ieee80211_key_disable_hw_accel(key); } list_for_each_entry(key, &sdata->key_list, list) { if (key->conf.link_id < 0 || !(add_links_mask & BIT(key->conf.link_id))) continue; /* shouldn't happen for per-link keys */ WARN_ON(key->sta); ret = ieee80211_key_enable_hw_accel(key); if (ret) return ret; } return 0; } |
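/*
 * Illustrative sketch (not part of key.c): a hypothetical driver set_key
 * callback, showing the return-value convention that
 * ieee80211_key_enable_hw_accel() above relies on. Returning 0 marks the key
 * as uploaded to hardware (and may drop the tailroom requirement); returning
 * -EOPNOTSUPP makes mac80211 fall back to software crypto for ciphers it
 * implements. All "exdrv_*" names are made up.
 */
#include <net/mac80211.h>

static int exdrv_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
			 struct ieee80211_vif *vif, struct ieee80211_sta *sta,
			 struct ieee80211_key_conf *key)
{
	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_CCMP:
		if (cmd == SET_KEY) {
			/* program key->key/key->keyidx into the (hypothetical) HW */
			key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
			return 0;
		}
		/* DISABLE_KEY: remove the key from the (hypothetical) HW */
		return 0;
	default:
		/* let mac80211 handle all other ciphers in software */
		return -EOPNOTSUPP;
	}
}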
// SPDX-License-Identifier: GPL-2.0-only /* * VFIO-KVM bridge pseudo device * * Copyright (C) 2013 Red Hat, Inc. All rights reserved. * Author: Alex Williamson <alex.williamson@redhat.com> */ #include <linux/errno.h> #include <linux/file.h> #include <linux/kvm_host.h> #include <linux/list.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/slab.h> #include <linux/uaccess.h> #include <linux/vfio.h> #include "vfio.h" #ifdef CONFIG_SPAPR_TCE_IOMMU #include <asm/kvm_ppc.h> #endif struct kvm_vfio_file { struct list_head node; struct file *file; #ifdef CONFIG_SPAPR_TCE_IOMMU struct iommu_group *iommu_group; #endif }; struct kvm_vfio { struct list_head file_list; struct mutex lock; bool noncoherent; }; static void kvm_vfio_file_set_kvm(struct file *file, struct kvm *kvm) { void (*fn)(struct file *file, struct kvm *kvm); fn = symbol_get(vfio_file_set_kvm); if (!fn) return; fn(file, kvm); symbol_put(vfio_file_set_kvm); } static bool kvm_vfio_file_enforced_coherent(struct file *file) { bool (*fn)(struct file *file); bool ret; fn = symbol_get(vfio_file_enforced_coherent); if (!fn) return false; ret = fn(file); symbol_put(vfio_file_enforced_coherent); return ret; } static bool kvm_vfio_file_is_valid(struct file *file) { bool (*fn)(struct file *file); bool ret; fn = symbol_get(vfio_file_is_valid); if (!fn) return false; ret = fn(file); symbol_put(vfio_file_is_valid); return ret; } #ifdef CONFIG_SPAPR_TCE_IOMMU static struct iommu_group *kvm_vfio_file_iommu_group(struct file *file) { struct iommu_group *(*fn)(struct file *file); struct iommu_group *ret; fn = symbol_get(vfio_file_iommu_group); if (!fn) return NULL; ret = fn(file); symbol_put(vfio_file_iommu_group); return ret; } static void kvm_spapr_tce_release_vfio_group(struct kvm *kvm, struct kvm_vfio_file *kvf) { if (WARN_ON_ONCE(!kvf->iommu_group)) return; kvm_spapr_tce_release_iommu_group(kvm, kvf->iommu_group); iommu_group_put(kvf->iommu_group); kvf->iommu_group = NULL; } #endif /* * Groups/devices can use the same or different IOMMU domains.
If the same * then adding a new group/device may change the coherency of groups/devices * we've previously been told about. We don't want to care about any of * that so we retest each group/device and bail as soon as we find one that's * noncoherent. This means we only ever [un]register_noncoherent_dma once * for the whole device. */ static void kvm_vfio_update_coherency(struct kvm_device *dev) { struct kvm_vfio *kv = dev->private; bool noncoherent = false; struct kvm_vfio_file *kvf; list_for_each_entry(kvf, &kv->file_list, node) { if (!kvm_vfio_file_enforced_coherent(kvf->file)) { noncoherent = true; break; } } if (noncoherent != kv->noncoherent) { kv->noncoherent = noncoherent; if (kv->noncoherent) kvm_arch_register_noncoherent_dma(dev->kvm); else kvm_arch_unregister_noncoherent_dma(dev->kvm); } } static int kvm_vfio_file_add(struct kvm_device *dev, unsigned int fd) { struct kvm_vfio *kv = dev->private; struct kvm_vfio_file *kvf; struct file *filp; int ret = 0; filp = fget(fd); if (!filp) return -EBADF; /* Ensure the FD is a vfio FD. */ if (!kvm_vfio_file_is_valid(filp)) { ret = -EINVAL; goto out_fput; } mutex_lock(&kv->lock); list_for_each_entry(kvf, &kv->file_list, node) { if (kvf->file == filp) { ret = -EEXIST; goto out_unlock; } } kvf = kzalloc(sizeof(*kvf), GFP_KERNEL_ACCOUNT); if (!kvf) { ret = -ENOMEM; goto out_unlock; } kvf->file = get_file(filp); list_add_tail(&kvf->node, &kv->file_list); kvm_arch_start_assignment(dev->kvm); kvm_vfio_file_set_kvm(kvf->file, dev->kvm); kvm_vfio_update_coherency(dev); out_unlock: mutex_unlock(&kv->lock); out_fput: fput(filp); return ret; } static int kvm_vfio_file_del(struct kvm_device *dev, unsigned int fd) { struct kvm_vfio *kv = dev->private; struct kvm_vfio_file *kvf; CLASS(fd, f)(fd); int ret; if (fd_empty(f)) return -EBADF; ret = -ENOENT; mutex_lock(&kv->lock); list_for_each_entry(kvf, &kv->file_list, node) { if (kvf->file != fd_file(f)) continue; list_del(&kvf->node); kvm_arch_end_assignment(dev->kvm); #ifdef CONFIG_SPAPR_TCE_IOMMU kvm_spapr_tce_release_vfio_group(dev->kvm, kvf); #endif kvm_vfio_file_set_kvm(kvf->file, NULL); fput(kvf->file); kfree(kvf); ret = 0; break; } kvm_vfio_update_coherency(dev); mutex_unlock(&kv->lock); return ret; } #ifdef CONFIG_SPAPR_TCE_IOMMU static int kvm_vfio_file_set_spapr_tce(struct kvm_device *dev, void __user *arg) { struct kvm_vfio_spapr_tce param; struct kvm_vfio *kv = dev->private; struct kvm_vfio_file *kvf; int ret; if (copy_from_user(&param, arg, sizeof(struct kvm_vfio_spapr_tce))) return -EFAULT; CLASS(fd, f)(param.groupfd); if (fd_empty(f)) return -EBADF; ret = -ENOENT; mutex_lock(&kv->lock); list_for_each_entry(kvf, &kv->file_list, node) { if (kvf->file != fd_file(f)) continue; if (!kvf->iommu_group) { kvf->iommu_group = kvm_vfio_file_iommu_group(kvf->file); if (WARN_ON_ONCE(!kvf->iommu_group)) { ret = -EIO; goto err_fdput; } } ret = kvm_spapr_tce_attach_iommu_group(dev->kvm, param.tablefd, kvf->iommu_group); break; } err_fdput: mutex_unlock(&kv->lock); return ret; } #endif static int kvm_vfio_set_file(struct kvm_device *dev, long attr, void __user *arg) { int32_t __user *argp = arg; int32_t fd; switch (attr) { case KVM_DEV_VFIO_FILE_ADD: if (get_user(fd, argp)) return -EFAULT; return kvm_vfio_file_add(dev, fd); case KVM_DEV_VFIO_FILE_DEL: if (get_user(fd, argp)) return -EFAULT; return kvm_vfio_file_del(dev, fd); #ifdef CONFIG_SPAPR_TCE_IOMMU case KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE: return kvm_vfio_file_set_spapr_tce(dev, arg); #endif } return -ENXIO; } static int kvm_vfio_set_attr(struct kvm_device
*dev, struct kvm_device_attr *attr) { switch (attr->group) { case KVM_DEV_VFIO_FILE: return kvm_vfio_set_file(dev, attr->attr, u64_to_user_ptr(attr->addr)); } return -ENXIO; } static int kvm_vfio_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr) { switch (attr->group) { case KVM_DEV_VFIO_FILE: switch (attr->attr) { case KVM_DEV_VFIO_FILE_ADD: case KVM_DEV_VFIO_FILE_DEL: #ifdef CONFIG_SPAPR_TCE_IOMMU case KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE: #endif return 0; } break; } return -ENXIO; } static void kvm_vfio_release(struct kvm_device *dev) { struct kvm_vfio *kv = dev->private; struct kvm_vfio_file *kvf, *tmp; list_for_each_entry_safe(kvf, tmp, &kv->file_list, node) { #ifdef CONFIG_SPAPR_TCE_IOMMU kvm_spapr_tce_release_vfio_group(dev->kvm, kvf); #endif kvm_vfio_file_set_kvm(kvf->file, NULL); fput(kvf->file); list_del(&kvf->node); kfree(kvf); kvm_arch_end_assignment(dev->kvm); } kvm_vfio_update_coherency(dev); kfree(kv); kfree(dev); /* alloc by kvm_ioctl_create_device, free by .release */ } static int kvm_vfio_create(struct kvm_device *dev, u32 type); static const struct kvm_device_ops kvm_vfio_ops = { .name = "kvm-vfio", .create = kvm_vfio_create, .release = kvm_vfio_release, .set_attr = kvm_vfio_set_attr, .has_attr = kvm_vfio_has_attr, }; static int kvm_vfio_create(struct kvm_device *dev, u32 type) { struct kvm_device *tmp; struct kvm_vfio *kv; lockdep_assert_held(&dev->kvm->lock); /* Only one VFIO "device" per VM */ list_for_each_entry(tmp, &dev->kvm->devices, vm_node) if (tmp->ops == &kvm_vfio_ops) return -EBUSY; kv = kzalloc(sizeof(*kv), GFP_KERNEL_ACCOUNT); if (!kv) return -ENOMEM; INIT_LIST_HEAD(&kv->file_list); mutex_init(&kv->lock); dev->private = kv; return 0; } int kvm_vfio_ops_init(void) { return kvm_register_device_ops(&kvm_vfio_ops, KVM_DEV_TYPE_VFIO); } void kvm_vfio_ops_exit(void) { kvm_unregister_device_ops(KVM_DEV_TYPE_VFIO); } |
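/*
 * Illustrative userspace sketch (not part of this file): how a VMM would
 * reach kvm_vfio_file_add() above.  It creates the per-VM kvm-vfio pseudo
 * device with KVM_CREATE_DEVICE and then passes an already-open VFIO device
 * or group fd via the KVM_DEV_VFIO_FILE / KVM_DEV_VFIO_FILE_ADD attribute.
 * vm_fd and vfio_fd are assumed to be valid descriptors; error handling is
 * minimal and the function name is made up for the example.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int example_bind_vfio_fd_to_vm(int vm_fd, int32_t vfio_fd)
{
	struct kvm_create_device cd = { .type = KVM_DEV_TYPE_VFIO };
	struct kvm_device_attr attr = {
		.group = KVM_DEV_VFIO_FILE,
		.attr  = KVM_DEV_VFIO_FILE_ADD,
		.addr  = (uint64_t)(uintptr_t)&vfio_fd, /* kernel reads an int32_t */
	};

	/* only one kvm-vfio device exists per VM; its fd comes back in cd.fd */
	if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd) < 0)
		return -1;

	/* dispatched to kvm_vfio_set_attr() -> kvm_vfio_file_add() above */
	return ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &attr);
}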
// SPDX-License-Identifier: GPL-2.0-only /* * LED Class Core * * Copyright 2005-2006 Openedhand Ltd.
* * Author: Richard Purdie <rpurdie@openedhand.com> */ #include <linux/kernel.h> #include <linux/led-class-multicolor.h> #include <linux/leds.h> #include <linux/list.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/of.h> #include <linux/property.h> #include <linux/rwsem.h> #include <linux/slab.h> #include <uapi/linux/uleds.h> #include "leds.h" DECLARE_RWSEM(leds_list_lock); EXPORT_SYMBOL_GPL(leds_list_lock); LIST_HEAD(leds_list); EXPORT_SYMBOL_GPL(leds_list); static const char * const led_colors[LED_COLOR_ID_MAX] = { [LED_COLOR_ID_WHITE] = "white", [LED_COLOR_ID_RED] = "red", [LED_COLOR_ID_GREEN] = "green", [LED_COLOR_ID_BLUE] = "blue", [LED_COLOR_ID_AMBER] = "amber", [LED_COLOR_ID_VIOLET] = "violet", [LED_COLOR_ID_YELLOW] = "yellow", [LED_COLOR_ID_IR] = "ir", [LED_COLOR_ID_MULTI] = "multicolor", [LED_COLOR_ID_RGB] = "rgb", [LED_COLOR_ID_PURPLE] = "purple", [LED_COLOR_ID_ORANGE] = "orange", [LED_COLOR_ID_PINK] = "pink", [LED_COLOR_ID_CYAN] = "cyan", [LED_COLOR_ID_LIME] = "lime", }; static int __led_set_brightness(struct led_classdev *led_cdev, unsigned int value) { if (!led_cdev->brightness_set) return -ENOTSUPP; led_cdev->brightness_set(led_cdev, value); return 0; } static int __led_set_brightness_blocking(struct led_classdev *led_cdev, unsigned int value) { if (!led_cdev->brightness_set_blocking) return -ENOTSUPP; return led_cdev->brightness_set_blocking(led_cdev, value); } static void led_timer_function(struct timer_list *t) { struct led_classdev *led_cdev = from_timer(led_cdev, t, blink_timer); unsigned long brightness; unsigned long delay; if (!led_cdev->blink_delay_on || !led_cdev->blink_delay_off) { led_set_brightness_nosleep(led_cdev, LED_OFF); clear_bit(LED_BLINK_SW, &led_cdev->work_flags); return; } if (test_and_clear_bit(LED_BLINK_ONESHOT_STOP, &led_cdev->work_flags)) { clear_bit(LED_BLINK_SW, &led_cdev->work_flags); return; } brightness = led_get_brightness(led_cdev); if (!brightness) { /* Time to switch the LED on. */ if (test_and_clear_bit(LED_BLINK_BRIGHTNESS_CHANGE, &led_cdev->work_flags)) brightness = led_cdev->new_blink_brightness; else brightness = led_cdev->blink_brightness; delay = led_cdev->blink_delay_on; } else { /* Store the current brightness value to be able * to restore it when the delay_off period is over. */ led_cdev->blink_brightness = brightness; brightness = LED_OFF; delay = led_cdev->blink_delay_off; } led_set_brightness_nosleep(led_cdev, brightness); /* Return in next iteration if led is in one-shot mode and we are in * the final blink state so that the led is toggled each delay_on + * delay_off milliseconds in worst case. 
*/ if (test_bit(LED_BLINK_ONESHOT, &led_cdev->work_flags)) { if (test_bit(LED_BLINK_INVERT, &led_cdev->work_flags)) { if (brightness) set_bit(LED_BLINK_ONESHOT_STOP, &led_cdev->work_flags); } else { if (!brightness) set_bit(LED_BLINK_ONESHOT_STOP, &led_cdev->work_flags); } } mod_timer(&led_cdev->blink_timer, jiffies + msecs_to_jiffies(delay)); } static void set_brightness_delayed_set_brightness(struct led_classdev *led_cdev, unsigned int value) { int ret; ret = __led_set_brightness(led_cdev, value); if (ret == -ENOTSUPP) { ret = __led_set_brightness_blocking(led_cdev, value); if (ret == -ENOTSUPP) /* No back-end support to set a fixed brightness value */ return; } /* LED HW might have been unplugged, therefore don't warn */ if (ret == -ENODEV && led_cdev->flags & LED_UNREGISTERING && led_cdev->flags & LED_HW_PLUGGABLE) return; if (ret < 0) dev_err(led_cdev->dev, "Setting an LED's brightness failed (%d)\n", ret); } static void set_brightness_delayed(struct work_struct *ws) { struct led_classdev *led_cdev = container_of(ws, struct led_classdev, set_brightness_work); if (test_and_clear_bit(LED_BLINK_DISABLE, &led_cdev->work_flags)) { led_stop_software_blink(led_cdev); set_bit(LED_SET_BRIGHTNESS_OFF, &led_cdev->work_flags); } /* * Triggers may call led_set_brightness(LED_OFF), * led_set_brightness(LED_FULL) in quick succession to disable blinking * and turn the LED on. Both actions may have been scheduled to run * before this work item runs once. To make sure this works properly * handle LED_SET_BRIGHTNESS_OFF first. */ if (test_and_clear_bit(LED_SET_BRIGHTNESS_OFF, &led_cdev->work_flags)) set_brightness_delayed_set_brightness(led_cdev, LED_OFF); if (test_and_clear_bit(LED_SET_BRIGHTNESS, &led_cdev->work_flags)) set_brightness_delayed_set_brightness(led_cdev, led_cdev->delayed_set_value); if (test_and_clear_bit(LED_SET_BLINK, &led_cdev->work_flags)) { unsigned long delay_on = led_cdev->delayed_delay_on; unsigned long delay_off = led_cdev->delayed_delay_off; led_blink_set(led_cdev, &delay_on, &delay_off); } } static void led_set_software_blink(struct led_classdev *led_cdev, unsigned long delay_on, unsigned long delay_off) { int current_brightness; current_brightness = led_get_brightness(led_cdev); if (current_brightness) led_cdev->blink_brightness = current_brightness; if (!led_cdev->blink_brightness) led_cdev->blink_brightness = led_cdev->max_brightness; led_cdev->blink_delay_on = delay_on; led_cdev->blink_delay_off = delay_off; /* never on - just set to off */ if (!delay_on) { led_set_brightness_nosleep(led_cdev, LED_OFF); return; } /* never off - just set to brightness */ if (!delay_off) { led_set_brightness_nosleep(led_cdev, led_cdev->blink_brightness); return; } set_bit(LED_BLINK_SW, &led_cdev->work_flags); mod_timer(&led_cdev->blink_timer, jiffies + 1); } static void led_blink_setup(struct led_classdev *led_cdev, unsigned long *delay_on, unsigned long *delay_off) { if (!test_bit(LED_BLINK_ONESHOT, &led_cdev->work_flags) && led_cdev->blink_set && !led_cdev->blink_set(led_cdev, delay_on, delay_off)) return; /* blink with 1 Hz as default if nothing specified */ if (!*delay_on && !*delay_off) *delay_on = *delay_off = 500; led_set_software_blink(led_cdev, *delay_on, *delay_off); } void led_init_core(struct led_classdev *led_cdev) { INIT_WORK(&led_cdev->set_brightness_work, set_brightness_delayed); timer_setup(&led_cdev->blink_timer, led_timer_function, 0); } EXPORT_SYMBOL_GPL(led_init_core); void led_blink_set(struct led_classdev *led_cdev, unsigned long *delay_on, unsigned long *delay_off) { 
del_timer_sync(&led_cdev->blink_timer); clear_bit(LED_BLINK_SW, &led_cdev->work_flags); clear_bit(LED_BLINK_ONESHOT, &led_cdev->work_flags); clear_bit(LED_BLINK_ONESHOT_STOP, &led_cdev->work_flags); led_blink_setup(led_cdev, delay_on, delay_off); } EXPORT_SYMBOL_GPL(led_blink_set); void led_blink_set_oneshot(struct led_classdev *led_cdev, unsigned long *delay_on, unsigned long *delay_off, int invert) { if (test_bit(LED_BLINK_ONESHOT, &led_cdev->work_flags) && timer_pending(&led_cdev->blink_timer)) return; set_bit(LED_BLINK_ONESHOT, &led_cdev->work_flags); clear_bit(LED_BLINK_ONESHOT_STOP, &led_cdev->work_flags); if (invert) set_bit(LED_BLINK_INVERT, &led_cdev->work_flags); else clear_bit(LED_BLINK_INVERT, &led_cdev->work_flags); led_blink_setup(led_cdev, delay_on, delay_off); } EXPORT_SYMBOL_GPL(led_blink_set_oneshot); void led_blink_set_nosleep(struct led_classdev *led_cdev, unsigned long delay_on, unsigned long delay_off) { /* If necessary delegate to a work queue task. */ if (led_cdev->blink_set && led_cdev->brightness_set_blocking) { led_cdev->delayed_delay_on = delay_on; led_cdev->delayed_delay_off = delay_off; set_bit(LED_SET_BLINK, &led_cdev->work_flags); queue_work(led_cdev->wq, &led_cdev->set_brightness_work); return; } led_blink_set(led_cdev, &delay_on, &delay_off); } EXPORT_SYMBOL_GPL(led_blink_set_nosleep); void led_stop_software_blink(struct led_classdev *led_cdev) { del_timer_sync(&led_cdev->blink_timer); led_cdev->blink_delay_on = 0; led_cdev->blink_delay_off = 0; clear_bit(LED_BLINK_SW, &led_cdev->work_flags); } EXPORT_SYMBOL_GPL(led_stop_software_blink); void led_set_brightness(struct led_classdev *led_cdev, unsigned int brightness) { /* * If software blink is active, delay brightness setting * until the next timer tick. */ if (test_bit(LED_BLINK_SW, &led_cdev->work_flags)) { /* * If we need to disable soft blinking delegate this to the * work queue task to avoid problems in case we are called * from hard irq context. */ if (!brightness) { set_bit(LED_BLINK_DISABLE, &led_cdev->work_flags); queue_work(led_cdev->wq, &led_cdev->set_brightness_work); } else { set_bit(LED_BLINK_BRIGHTNESS_CHANGE, &led_cdev->work_flags); led_cdev->new_blink_brightness = brightness; } return; } led_set_brightness_nosleep(led_cdev, brightness); } EXPORT_SYMBOL_GPL(led_set_brightness); void led_set_brightness_nopm(struct led_classdev *led_cdev, unsigned int value) { /* Use brightness_set op if available, it is guaranteed not to sleep */ if (!__led_set_brightness(led_cdev, value)) return; /* * Brightness setting can sleep, delegate it to a work queue task. * value 0 / LED_OFF is special, since it also disables hw-blinking * (sw-blink disable is handled in led_set_brightness()). * To avoid a hw-blink-disable getting lost when a second brightness * change is done immediately afterwards (before the work runs), * it uses a separate work_flag. 
*/ if (value) { led_cdev->delayed_set_value = value; set_bit(LED_SET_BRIGHTNESS, &led_cdev->work_flags); } else { clear_bit(LED_SET_BRIGHTNESS, &led_cdev->work_flags); clear_bit(LED_SET_BLINK, &led_cdev->work_flags); set_bit(LED_SET_BRIGHTNESS_OFF, &led_cdev->work_flags); } queue_work(led_cdev->wq, &led_cdev->set_brightness_work); } EXPORT_SYMBOL_GPL(led_set_brightness_nopm); void led_set_brightness_nosleep(struct led_classdev *led_cdev, unsigned int value) { led_cdev->brightness = min(value, led_cdev->max_brightness); if (led_cdev->flags & LED_SUSPENDED) return; led_set_brightness_nopm(led_cdev, led_cdev->brightness); } EXPORT_SYMBOL_GPL(led_set_brightness_nosleep); int led_set_brightness_sync(struct led_classdev *led_cdev, unsigned int value) { if (led_cdev->blink_delay_on || led_cdev->blink_delay_off) return -EBUSY; led_cdev->brightness = min(value, led_cdev->max_brightness); if (led_cdev->flags & LED_SUSPENDED) return 0; return __led_set_brightness_blocking(led_cdev, led_cdev->brightness); } EXPORT_SYMBOL_GPL(led_set_brightness_sync); /* * This is a led-core function because just like led_set_brightness() * it is used in the kernel by e.g. triggers. */ void led_mc_set_brightness(struct led_classdev *led_cdev, unsigned int *intensity_value, unsigned int num_colors, unsigned int brightness) { struct led_classdev_mc *mcled_cdev; unsigned int i; if (!(led_cdev->flags & LED_MULTI_COLOR)) { dev_err_once(led_cdev->dev, "error not a multi-color LED\n"); return; } mcled_cdev = lcdev_to_mccdev(led_cdev); if (num_colors != mcled_cdev->num_colors) { dev_err_once(led_cdev->dev, "error num_colors mismatch %u != %u\n", num_colors, mcled_cdev->num_colors); return; } for (i = 0; i < mcled_cdev->num_colors; i++) mcled_cdev->subled_info[i].intensity = intensity_value[i]; led_set_brightness(led_cdev, brightness); } EXPORT_SYMBOL_GPL(led_mc_set_brightness); int led_update_brightness(struct led_classdev *led_cdev) { int ret; if (led_cdev->brightness_get) { ret = led_cdev->brightness_get(led_cdev); if (ret < 0) return ret; led_cdev->brightness = ret; } return 0; } EXPORT_SYMBOL_GPL(led_update_brightness); u32 *led_get_default_pattern(struct led_classdev *led_cdev, unsigned int *size) { struct fwnode_handle *fwnode = led_cdev->dev->fwnode; u32 *pattern; int count; count = fwnode_property_count_u32(fwnode, "led-pattern"); if (count < 0) return NULL; pattern = kcalloc(count, sizeof(*pattern), GFP_KERNEL); if (!pattern) return NULL; if (fwnode_property_read_u32_array(fwnode, "led-pattern", pattern, count)) { kfree(pattern); return NULL; } *size = count; return pattern; } EXPORT_SYMBOL_GPL(led_get_default_pattern); /* Caller must ensure led_cdev->led_access held */ void led_sysfs_disable(struct led_classdev *led_cdev) { lockdep_assert_held(&led_cdev->led_access); led_cdev->flags |= LED_SYSFS_DISABLE; } EXPORT_SYMBOL_GPL(led_sysfs_disable); /* Caller must ensure led_cdev->led_access held */ void led_sysfs_enable(struct led_classdev *led_cdev) { lockdep_assert_held(&led_cdev->led_access); led_cdev->flags &= ~LED_SYSFS_DISABLE; } EXPORT_SYMBOL_GPL(led_sysfs_enable); static void led_parse_fwnode_props(struct device *dev, struct fwnode_handle *fwnode, struct led_properties *props) { int ret; if (!fwnode) return; if (fwnode_property_present(fwnode, "label")) { ret = fwnode_property_read_string(fwnode, "label", &props->label); if (ret) dev_err(dev, "Error parsing 'label' property (%d)\n", ret); return; } if (fwnode_property_present(fwnode, "color")) { ret = fwnode_property_read_u32(fwnode, "color", &props->color); if 
(ret) dev_err(dev, "Error parsing 'color' property (%d)\n", ret); else if (props->color >= LED_COLOR_ID_MAX) dev_err(dev, "LED color identifier out of range\n"); else props->color_present = true; } if (!fwnode_property_present(fwnode, "function")) return; ret = fwnode_property_read_string(fwnode, "function", &props->function); if (ret) { dev_err(dev, "Error parsing 'function' property (%d)\n", ret); } if (!fwnode_property_present(fwnode, "function-enumerator")) return; ret = fwnode_property_read_u32(fwnode, "function-enumerator", &props->func_enum); if (ret) { dev_err(dev, "Error parsing 'function-enumerator' property (%d)\n", ret); } else { props->func_enum_present = true; } } int led_compose_name(struct device *dev, struct led_init_data *init_data, char *led_classdev_name) { struct led_properties props = {}; struct fwnode_handle *fwnode = init_data->fwnode; const char *devicename = init_data->devicename; if (!led_classdev_name) return -EINVAL; led_parse_fwnode_props(dev, fwnode, &props); if (props.label) { /* * If init_data.devicename is NULL, then it indicates that * DT label should be used as-is for LED class device name. * Otherwise the label is prepended with devicename to compose * the final LED class device name. */ if (!devicename) { strscpy(led_classdev_name, props.label, LED_MAX_NAME_SIZE); } else { snprintf(led_classdev_name, LED_MAX_NAME_SIZE, "%s:%s", devicename, props.label); } } else if (props.function || props.color_present) { char tmp_buf[LED_MAX_NAME_SIZE]; if (props.func_enum_present) { snprintf(tmp_buf, LED_MAX_NAME_SIZE, "%s:%s-%d", props.color_present ? led_colors[props.color] : "", props.function ?: "", props.func_enum); } else { snprintf(tmp_buf, LED_MAX_NAME_SIZE, "%s:%s", props.color_present ? led_colors[props.color] : "", props.function ?: ""); } if (init_data->devname_mandatory) { snprintf(led_classdev_name, LED_MAX_NAME_SIZE, "%s:%s", devicename, tmp_buf); } else { strscpy(led_classdev_name, tmp_buf, LED_MAX_NAME_SIZE); } } else if (init_data->default_label) { if (!devicename) { dev_err(dev, "Legacy LED naming requires devicename segment"); return -EINVAL; } snprintf(led_classdev_name, LED_MAX_NAME_SIZE, "%s:%s", devicename, init_data->default_label); } else if (is_of_node(fwnode)) { strscpy(led_classdev_name, to_of_node(fwnode)->name, LED_MAX_NAME_SIZE); } else return -EINVAL; return 0; } EXPORT_SYMBOL_GPL(led_compose_name); const char *led_get_color_name(u8 color_id) { if (color_id >= ARRAY_SIZE(led_colors)) return NULL; return led_colors[color_id]; } EXPORT_SYMBOL_GPL(led_get_color_name); enum led_default_state led_init_default_state_get(struct fwnode_handle *fwnode) { const char *state = NULL; if (!fwnode_property_read_string(fwnode, "default-state", &state)) { if (!strcmp(state, "keep")) return LEDS_DEFSTATE_KEEP; if (!strcmp(state, "on")) return LEDS_DEFSTATE_ON; } return LEDS_DEFSTATE_OFF; } EXPORT_SYMBOL_GPL(led_init_default_state_get); |
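/*
 * Illustrative sketch (not part of the LED core): a minimal driver that this
 * core drives.  Supplying a non-sleeping brightness_set() op lets
 * led_set_brightness_nosleep() call the hardware directly and lets the
 * blink_timer above implement software blinking.  The foo_* names and the
 * platform device are hypothetical.
 */
#include <linux/leds.h>
#include <linux/platform_device.h>

static void foo_led_set(struct led_classdev *cdev, enum led_brightness value)
{
	/* write 'value' to the (hypothetical) hardware register; must not sleep */
}

static struct led_classdev foo_led = {
	.name		= "foo:green:status",
	.max_brightness	= LED_FULL,
	.brightness_set	= foo_led_set,
};

static int foo_led_probe(struct platform_device *pdev)
{
	/* after this, sysfs, triggers, led_set_brightness() and blinking work */
	return devm_led_classdev_register(&pdev->dev, &foo_led);
}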
// SPDX-License-Identifier: GPL-2.0-or-later /* * net/sched/act_skbmod.c skb data modifier * * Copyright (c) 2016 Jamal Hadi Salim <jhs@mojatatu.com> */ #include <linux/module.h> #include <linux/if_arp.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/skbuff.h> #include <linux/rtnetlink.h> #include <net/inet_ecn.h> #include <net/netlink.h> #include <net/pkt_sched.h> #include <net/pkt_cls.h> #include <net/tc_wrapper.h> #include <linux/tc_act/tc_skbmod.h> #include <net/tc_act/tc_skbmod.h> static struct tc_action_ops act_skbmod_ops; TC_INDIRECT_SCOPE int tcf_skbmod_act(struct sk_buff *skb, const struct tc_action *a, struct tcf_result *res) { struct tcf_skbmod *d = to_skbmod(a); int action, max_edit_len, err; struct tcf_skbmod_params *p; u64 flags; tcf_lastuse_update(&d->tcf_tm); bstats_update(this_cpu_ptr(d->common.cpu_bstats), skb); action = READ_ONCE(d->tcf_action); if (unlikely(action == TC_ACT_SHOT)) goto drop; max_edit_len = skb_mac_header_len(skb); p = rcu_dereference_bh(d->skbmod_p); flags = p->flags; /* tcf_skbmod_init() guarantees "flags" to be one of the following: * 1. a combination of SKBMOD_F_{DMAC,SMAC,ETYPE} * 2. SKBMOD_F_SWAPMAC * 3. SKBMOD_F_ECN * SKBMOD_F_ECN only works with IP packets; all other flags only work with Ethernet * packets.
*/ if (flags == SKBMOD_F_ECN) { switch (skb_protocol(skb, true)) { case cpu_to_be16(ETH_P_IP): case cpu_to_be16(ETH_P_IPV6): max_edit_len += skb_network_header_len(skb); break; default: goto out; } } else if (!skb->dev || skb->dev->type != ARPHRD_ETHER) { goto out; } err = skb_ensure_writable(skb, max_edit_len); if (unlikely(err)) /* best policy is to drop on the floor */ goto drop; if (flags & SKBMOD_F_DMAC) ether_addr_copy(eth_hdr(skb)->h_dest, p->eth_dst); if (flags & SKBMOD_F_SMAC) ether_addr_copy(eth_hdr(skb)->h_source, p->eth_src); if (flags & SKBMOD_F_ETYPE) eth_hdr(skb)->h_proto = p->eth_type; if (flags & SKBMOD_F_SWAPMAC) { u16 tmpaddr[ETH_ALEN / 2]; /* ether_addr_copy() requirement */ /*XXX: I am sure we can come up with more efficient swapping*/ ether_addr_copy((u8 *)tmpaddr, eth_hdr(skb)->h_dest); ether_addr_copy(eth_hdr(skb)->h_dest, eth_hdr(skb)->h_source); ether_addr_copy(eth_hdr(skb)->h_source, (u8 *)tmpaddr); } if (flags & SKBMOD_F_ECN) INET_ECN_set_ce(skb); out: return action; drop: qstats_overlimit_inc(this_cpu_ptr(d->common.cpu_qstats)); return TC_ACT_SHOT; } static const struct nla_policy skbmod_policy[TCA_SKBMOD_MAX + 1] = { [TCA_SKBMOD_PARMS] = { .len = sizeof(struct tc_skbmod) }, [TCA_SKBMOD_DMAC] = { .len = ETH_ALEN }, [TCA_SKBMOD_SMAC] = { .len = ETH_ALEN }, [TCA_SKBMOD_ETYPE] = { .type = NLA_U16 }, }; static int tcf_skbmod_init(struct net *net, struct nlattr *nla, struct nlattr *est, struct tc_action **a, struct tcf_proto *tp, u32 flags, struct netlink_ext_ack *extack) { struct tc_action_net *tn = net_generic(net, act_skbmod_ops.net_id); bool ovr = flags & TCA_ACT_FLAGS_REPLACE; bool bind = flags & TCA_ACT_FLAGS_BIND; struct nlattr *tb[TCA_SKBMOD_MAX + 1]; struct tcf_skbmod_params *p, *p_old; struct tcf_chain *goto_ch = NULL; struct tc_skbmod *parm; u32 lflags = 0, index; struct tcf_skbmod *d; bool exists = false; u8 *daddr = NULL; u8 *saddr = NULL; u16 eth_type = 0; int ret = 0, err; if (!nla) return -EINVAL; err = nla_parse_nested_deprecated(tb, TCA_SKBMOD_MAX, nla, skbmod_policy, NULL); if (err < 0) return err; if (!tb[TCA_SKBMOD_PARMS]) return -EINVAL; if (tb[TCA_SKBMOD_DMAC]) { daddr = nla_data(tb[TCA_SKBMOD_DMAC]); lflags |= SKBMOD_F_DMAC; } if (tb[TCA_SKBMOD_SMAC]) { saddr = nla_data(tb[TCA_SKBMOD_SMAC]); lflags |= SKBMOD_F_SMAC; } if (tb[TCA_SKBMOD_ETYPE]) { eth_type = nla_get_u16(tb[TCA_SKBMOD_ETYPE]); lflags |= SKBMOD_F_ETYPE; } parm = nla_data(tb[TCA_SKBMOD_PARMS]); index = parm->index; if (parm->flags & SKBMOD_F_SWAPMAC) lflags = SKBMOD_F_SWAPMAC; if (parm->flags & SKBMOD_F_ECN) lflags = SKBMOD_F_ECN; err = tcf_idr_check_alloc(tn, &index, a, bind); if (err < 0) return err; exists = err; if (exists && bind) return ACT_P_BOUND; if (!lflags) { if (exists) tcf_idr_release(*a, bind); else tcf_idr_cleanup(tn, index); return -EINVAL; } if (!exists) { ret = tcf_idr_create(tn, index, est, a, &act_skbmod_ops, bind, true, flags); if (ret) { tcf_idr_cleanup(tn, index); return ret; } ret = ACT_P_CREATED; } else if (!ovr) { tcf_idr_release(*a, bind); return -EEXIST; } err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack); if (err < 0) goto release_idr; d = to_skbmod(*a); p = kzalloc(sizeof(struct tcf_skbmod_params), GFP_KERNEL); if (unlikely(!p)) { err = -ENOMEM; goto put_chain; } p->flags = lflags; if (ovr) spin_lock_bh(&d->tcf_lock); /* Protected by tcf_lock if overwriting existing action. 
*/ goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch); p_old = rcu_dereference_protected(d->skbmod_p, 1); if (lflags & SKBMOD_F_DMAC) ether_addr_copy(p->eth_dst, daddr); if (lflags & SKBMOD_F_SMAC) ether_addr_copy(p->eth_src, saddr); if (lflags & SKBMOD_F_ETYPE) p->eth_type = htons(eth_type); rcu_assign_pointer(d->skbmod_p, p); if (ovr) spin_unlock_bh(&d->tcf_lock); if (p_old) kfree_rcu(p_old, rcu); if (goto_ch) tcf_chain_put_by_act(goto_ch); return ret; put_chain: if (goto_ch) tcf_chain_put_by_act(goto_ch); release_idr: tcf_idr_release(*a, bind); return err; } static void tcf_skbmod_cleanup(struct tc_action *a) { struct tcf_skbmod *d = to_skbmod(a); struct tcf_skbmod_params *p; p = rcu_dereference_protected(d->skbmod_p, 1); if (p) kfree_rcu(p, rcu); } static int tcf_skbmod_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref) { struct tcf_skbmod *d = to_skbmod(a); unsigned char *b = skb_tail_pointer(skb); struct tcf_skbmod_params *p; struct tc_skbmod opt; struct tcf_t t; memset(&opt, 0, sizeof(opt)); opt.index = d->tcf_index; opt.refcnt = refcount_read(&d->tcf_refcnt) - ref; opt.bindcnt = atomic_read(&d->tcf_bindcnt) - bind; spin_lock_bh(&d->tcf_lock); opt.action = d->tcf_action; p = rcu_dereference_protected(d->skbmod_p, lockdep_is_held(&d->tcf_lock)); opt.flags = p->flags; if (nla_put(skb, TCA_SKBMOD_PARMS, sizeof(opt), &opt)) goto nla_put_failure; if ((p->flags & SKBMOD_F_DMAC) && nla_put(skb, TCA_SKBMOD_DMAC, ETH_ALEN, p->eth_dst)) goto nla_put_failure; if ((p->flags & SKBMOD_F_SMAC) && nla_put(skb, TCA_SKBMOD_SMAC, ETH_ALEN, p->eth_src)) goto nla_put_failure; if ((p->flags & SKBMOD_F_ETYPE) && nla_put_u16(skb, TCA_SKBMOD_ETYPE, ntohs(p->eth_type))) goto nla_put_failure; tcf_tm_dump(&t, &d->tcf_tm); if (nla_put_64bit(skb, TCA_SKBMOD_TM, sizeof(t), &t, TCA_SKBMOD_PAD)) goto nla_put_failure; spin_unlock_bh(&d->tcf_lock); return skb->len; nla_put_failure: spin_unlock_bh(&d->tcf_lock); nlmsg_trim(skb, b); return -1; } static struct tc_action_ops act_skbmod_ops = { .kind = "skbmod", .id = TCA_ACT_SKBMOD, .owner = THIS_MODULE, .act = tcf_skbmod_act, .dump = tcf_skbmod_dump, .init = tcf_skbmod_init, .cleanup = tcf_skbmod_cleanup, .size = sizeof(struct tcf_skbmod), }; MODULE_ALIAS_NET_ACT("skbmod"); static __net_init int skbmod_init_net(struct net *net) { struct tc_action_net *tn = net_generic(net, act_skbmod_ops.net_id); return tc_action_net_init(net, tn, &act_skbmod_ops); } static void __net_exit skbmod_exit_net(struct list_head *net_list) { tc_action_net_exit(net_list, act_skbmod_ops.net_id); } static struct pernet_operations skbmod_net_ops = { .init = skbmod_init_net, .exit_batch = skbmod_exit_net, .id = &act_skbmod_ops.net_id, .size = sizeof(struct tc_action_net), }; MODULE_AUTHOR("Jamal Hadi Salim, <jhs@mojatatu.com>"); MODULE_DESCRIPTION("SKB data mod-ing"); MODULE_LICENSE("GPL"); static int __init skbmod_init_module(void) { return tcf_register_action(&act_skbmod_ops, &skbmod_net_ops); } static void __exit skbmod_cleanup_module(void) { tcf_unregister_action(&act_skbmod_ops, &skbmod_net_ops); } module_init(skbmod_init_module); module_exit(skbmod_cleanup_module); |
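/*
 * Illustrative usage (not part of this file): typical iproute2 commands that
 * end up in tcf_skbmod_init()/tcf_skbmod_act() above.  The syntax is quoted
 * from memory of tc-skbmod(8); verify against your iproute2 version.
 *
 *   # rewrite the destination MAC of matched packets
 *   tc filter add dev eth0 parent 1: protocol ip u32 match u32 0 0 \
 *           action skbmod set dmac 02:12:13:14:15:16
 *
 *   # swap source and destination MACs (SKBMOD_F_SWAPMAC)
 *   tc filter add dev eth0 parent 1: protocol ip u32 match u32 0 0 \
 *           action skbmod swap mac
 *
 * Note that in tcf_skbmod_init() above SKBMOD_F_SWAPMAC and SKBMOD_F_ECN are
 * assigned, not OR-ed, into lflags, so they cannot be combined with the
 * set dmac/smac/etype forms in a single action instance.
 */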
// SPDX-License-Identifier: GPL-2.0-or-later /* * ebtables * * Author: * Bart De Schuymer <bdschuym@pandora.be> * * ebtables.c,v 2.0, July, 2002 * * This code is strongly inspired by the iptables code which is * Copyright (C) 1999 Paul `Rusty' Russell & Michael J.
Neuling */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kmod.h> #include <linux/module.h> #include <linux/vmalloc.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter_bridge/ebtables.h> #include <linux/spinlock.h> #include <linux/mutex.h> #include <linux/slab.h> #include <linux/uaccess.h> #include <linux/smp.h> #include <linux/cpumask.h> #include <linux/audit.h> #include <net/sock.h> #include <net/netns/generic.h> /* needed for logical [in,out]-dev filtering */ #include "../br_private.h" /* Each cpu has its own set of counters, so there is no need for write_lock in * the softirq * For reading or updating the counters, the user context needs to * get a write_lock */ /* The size of each set of counters is altered to get cache alignment */ #define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1)) #define COUNTER_OFFSET(n) (SMP_ALIGN(n * sizeof(struct ebt_counter))) #define COUNTER_BASE(c, n, cpu) ((struct ebt_counter *)(((char *)c) + \ COUNTER_OFFSET(n) * cpu)) struct ebt_pernet { struct list_head tables; }; struct ebt_template { struct list_head list; char name[EBT_TABLE_MAXNAMELEN]; struct module *owner; /* called when table is needed in the given netns */ int (*table_init)(struct net *net); }; static unsigned int ebt_pernet_id __read_mostly; static LIST_HEAD(template_tables); static DEFINE_MUTEX(ebt_mutex); #ifdef CONFIG_NETFILTER_XTABLES_COMPAT static void ebt_standard_compat_from_user(void *dst, const void *src) { int v = *(compat_int_t *)src; if (v >= 0) v += xt_compat_calc_jump(NFPROTO_BRIDGE, v); memcpy(dst, &v, sizeof(v)); } static int ebt_standard_compat_to_user(void __user *dst, const void *src) { compat_int_t cv = *(int *)src; if (cv >= 0) cv -= xt_compat_calc_jump(NFPROTO_BRIDGE, cv); return copy_to_user(dst, &cv, sizeof(cv)) ? 
-EFAULT : 0; } #endif static struct xt_target ebt_standard_target = { .name = "standard", .revision = 0, .family = NFPROTO_BRIDGE, .targetsize = sizeof(int), #ifdef CONFIG_NETFILTER_XTABLES_COMPAT .compatsize = sizeof(compat_int_t), .compat_from_user = ebt_standard_compat_from_user, .compat_to_user = ebt_standard_compat_to_user, #endif }; static inline int ebt_do_watcher(const struct ebt_entry_watcher *w, struct sk_buff *skb, struct xt_action_param *par) { par->target = w->u.watcher; par->targinfo = w->data; w->u.watcher->target(skb, par); /* watchers don't give a verdict */ return 0; } static inline int ebt_do_match(struct ebt_entry_match *m, const struct sk_buff *skb, struct xt_action_param *par) { par->match = m->u.match; par->matchinfo = m->data; return !m->u.match->match(skb, par); } static inline int ebt_dev_check(const char *entry, const struct net_device *device) { int i = 0; const char *devname; if (*entry == '\0') return 0; if (!device) return 1; devname = device->name; /* 1 is the wildcard token */ while (entry[i] != '\0' && entry[i] != 1 && entry[i] == devname[i]) i++; return devname[i] != entry[i] && entry[i] != 1; } /* process standard matches */ static inline int ebt_basic_match(const struct ebt_entry *e, const struct sk_buff *skb, const struct net_device *in, const struct net_device *out) { const struct ethhdr *h = eth_hdr(skb); const struct net_bridge_port *p; __be16 ethproto; if (skb_vlan_tag_present(skb)) ethproto = htons(ETH_P_8021Q); else ethproto = h->h_proto; if (e->bitmask & EBT_802_3) { if (NF_INVF(e, EBT_IPROTO, eth_proto_is_802_3(ethproto))) return 1; } else if (!(e->bitmask & EBT_NOPROTO) && NF_INVF(e, EBT_IPROTO, e->ethproto != ethproto)) return 1; if (NF_INVF(e, EBT_IIN, ebt_dev_check(e->in, in))) return 1; if (NF_INVF(e, EBT_IOUT, ebt_dev_check(e->out, out))) return 1; /* rcu_read_lock()ed by nf_hook_thresh */ if (in && (p = br_port_get_rcu(in)) != NULL && NF_INVF(e, EBT_ILOGICALIN, ebt_dev_check(e->logical_in, p->br->dev))) return 1; if (out && (p = br_port_get_rcu(out)) != NULL && NF_INVF(e, EBT_ILOGICALOUT, ebt_dev_check(e->logical_out, p->br->dev))) return 1; if (e->bitmask & EBT_SOURCEMAC) { if (NF_INVF(e, EBT_ISOURCE, !ether_addr_equal_masked(h->h_source, e->sourcemac, e->sourcemsk))) return 1; } if (e->bitmask & EBT_DESTMAC) { if (NF_INVF(e, EBT_IDEST, !ether_addr_equal_masked(h->h_dest, e->destmac, e->destmsk))) return 1; } return 0; } static inline struct ebt_entry *ebt_next_entry(const struct ebt_entry *entry) { return (void *)entry + entry->next_offset; } static inline const struct ebt_entry_target * ebt_get_target_c(const struct ebt_entry *e) { return ebt_get_target((struct ebt_entry *)e); } /* Do some firewalling */ unsigned int ebt_do_table(void *priv, struct sk_buff *skb, const struct nf_hook_state *state) { struct ebt_table *table = priv; unsigned int hook = state->hook; int i, nentries; struct ebt_entry *point; struct ebt_counter *counter_base, *cb_base; const struct ebt_entry_target *t; int verdict, sp = 0; struct ebt_chainstack *cs; struct ebt_entries *chaininfo; const char *base; const struct ebt_table_info *private; struct xt_action_param acpar; acpar.state = state; acpar.hotdrop = false; read_lock_bh(&table->lock); private = table->private; cb_base = COUNTER_BASE(private->counters, private->nentries, smp_processor_id()); if (private->chainstack) cs = private->chainstack[smp_processor_id()]; else cs = NULL; chaininfo = private->hook_entry[hook]; nentries = private->hook_entry[hook]->nentries; point = (struct ebt_entry 
*)(private->hook_entry[hook]->data); counter_base = cb_base + private->hook_entry[hook]->counter_offset; /* base for chain jumps */ base = private->entries; i = 0; while (i < nentries) { if (ebt_basic_match(point, skb, state->in, state->out)) goto letscontinue; if (EBT_MATCH_ITERATE(point, ebt_do_match, skb, &acpar) != 0) goto letscontinue; if (acpar.hotdrop) { read_unlock_bh(&table->lock); return NF_DROP; } ADD_COUNTER(*(counter_base + i), skb->len, 1); /* these should only watch: not modify, nor tell us * what to do with the packet */ EBT_WATCHER_ITERATE(point, ebt_do_watcher, skb, &acpar); t = ebt_get_target_c(point); /* standard target */ if (!t->u.target->target) verdict = ((struct ebt_standard_target *)t)->verdict; else { acpar.target = t->u.target; acpar.targinfo = t->data; verdict = t->u.target->target(skb, &acpar); } if (verdict == EBT_ACCEPT) { read_unlock_bh(&table->lock); return NF_ACCEPT; } if (verdict == EBT_DROP) { read_unlock_bh(&table->lock); return NF_DROP; } if (verdict == EBT_RETURN) { letsreturn: if (WARN(sp == 0, "RETURN on base chain")) { /* act like this is EBT_CONTINUE */ goto letscontinue; } sp--; /* put all the local variables right */ i = cs[sp].n; chaininfo = cs[sp].chaininfo; nentries = chaininfo->nentries; point = cs[sp].e; counter_base = cb_base + chaininfo->counter_offset; continue; } if (verdict == EBT_CONTINUE) goto letscontinue; if (WARN(verdict < 0, "bogus standard verdict\n")) { read_unlock_bh(&table->lock); return NF_DROP; } /* jump to a udc */ cs[sp].n = i + 1; cs[sp].chaininfo = chaininfo; cs[sp].e = ebt_next_entry(point); i = 0; chaininfo = (struct ebt_entries *) (base + verdict); if (WARN(chaininfo->distinguisher, "jump to non-chain\n")) { read_unlock_bh(&table->lock); return NF_DROP; } nentries = chaininfo->nentries; point = (struct ebt_entry *)chaininfo->data; counter_base = cb_base + chaininfo->counter_offset; sp++; continue; letscontinue: point = ebt_next_entry(point); i++; } /* I actually like this :) */ if (chaininfo->policy == EBT_RETURN) goto letsreturn; if (chaininfo->policy == EBT_ACCEPT) { read_unlock_bh(&table->lock); return NF_ACCEPT; } read_unlock_bh(&table->lock); return NF_DROP; } /* If it succeeds, returns element and locks mutex */ static inline void * find_inlist_lock_noload(struct net *net, const char *name, int *error, struct mutex *mutex) { struct ebt_pernet *ebt_net = net_generic(net, ebt_pernet_id); struct ebt_template *tmpl; struct ebt_table *table; mutex_lock(mutex); list_for_each_entry(table, &ebt_net->tables, list) { if (strcmp(table->name, name) == 0) return table; } list_for_each_entry(tmpl, &template_tables, list) { if (strcmp(name, tmpl->name) == 0) { struct module *owner = tmpl->owner; if (!try_module_get(owner)) goto out; mutex_unlock(mutex); *error = tmpl->table_init(net); if (*error) { module_put(owner); return NULL; } mutex_lock(mutex); module_put(owner); break; } } list_for_each_entry(table, &ebt_net->tables, list) { if (strcmp(table->name, name) == 0) return table; } out: *error = -ENOENT; mutex_unlock(mutex); return NULL; } static void * find_inlist_lock(struct net *net, const char *name, const char *prefix, int *error, struct mutex *mutex) { return try_then_request_module( find_inlist_lock_noload(net, name, error, mutex), "%s%s", prefix, name); } static inline struct ebt_table * find_table_lock(struct net *net, const char *name, int *error, struct mutex *mutex) { return find_inlist_lock(net, name, "ebtable_", error, mutex); } static inline void ebt_free_table_info(struct ebt_table_info *info) { int i; if 
(info->chainstack) { for_each_possible_cpu(i) vfree(info->chainstack[i]); vfree(info->chainstack); } } static inline int ebt_check_match(struct ebt_entry_match *m, struct xt_mtchk_param *par, unsigned int *cnt) { const struct ebt_entry *e = par->entryinfo; struct xt_match *match; size_t left = ((char *)e + e->watchers_offset) - (char *)m; int ret; if (left < sizeof(struct ebt_entry_match) || left - sizeof(struct ebt_entry_match) < m->match_size) return -EINVAL; match = xt_find_match(NFPROTO_BRIDGE, m->u.name, m->u.revision); if (IS_ERR(match) || match->family != NFPROTO_BRIDGE) { if (!IS_ERR(match)) module_put(match->me); request_module("ebt_%s", m->u.name); match = xt_find_match(NFPROTO_BRIDGE, m->u.name, m->u.revision); } if (IS_ERR(match)) return PTR_ERR(match); m->u.match = match; par->match = match; par->matchinfo = m->data; ret = xt_check_match(par, m->match_size, ntohs(e->ethproto), e->invflags & EBT_IPROTO); if (ret < 0) { module_put(match->me); return ret; } (*cnt)++; return 0; } static inline int ebt_check_watcher(struct ebt_entry_watcher *w, struct xt_tgchk_param *par, unsigned int *cnt) { const struct ebt_entry *e = par->entryinfo; struct xt_target *watcher; size_t left = ((char *)e + e->target_offset) - (char *)w; int ret; if (left < sizeof(struct ebt_entry_watcher) || left - sizeof(struct ebt_entry_watcher) < w->watcher_size) return -EINVAL; watcher = xt_request_find_target(NFPROTO_BRIDGE, w->u.name, 0); if (IS_ERR(watcher)) return PTR_ERR(watcher); if (watcher->family != NFPROTO_BRIDGE) { module_put(watcher->me); return -ENOENT; } w->u.watcher = watcher; par->target = watcher; par->targinfo = w->data; ret = xt_check_target(par, w->watcher_size, ntohs(e->ethproto), e->invflags & EBT_IPROTO); if (ret < 0) { module_put(watcher->me); return ret; } (*cnt)++; return 0; } static int ebt_verify_pointers(const struct ebt_replace *repl, struct ebt_table_info *newinfo) { unsigned int limit = repl->entries_size; unsigned int valid_hooks = repl->valid_hooks; unsigned int offset = 0; int i; for (i = 0; i < NF_BR_NUMHOOKS; i++) newinfo->hook_entry[i] = NULL; newinfo->entries_size = repl->entries_size; newinfo->nentries = repl->nentries; while (offset < limit) { size_t left = limit - offset; struct ebt_entry *e = (void *)newinfo->entries + offset; if (left < sizeof(unsigned int)) break; for (i = 0; i < NF_BR_NUMHOOKS; i++) { if ((valid_hooks & (1 << i)) == 0) continue; if ((char __user *)repl->hook_entry[i] == repl->entries + offset) break; } if (i != NF_BR_NUMHOOKS || !(e->bitmask & EBT_ENTRY_OR_ENTRIES)) { if (e->bitmask != 0) { /* we make userspace set this right, * so there is no misunderstanding */ return -EINVAL; } if (i != NF_BR_NUMHOOKS) newinfo->hook_entry[i] = (struct ebt_entries *)e; if (left < sizeof(struct ebt_entries)) break; offset += sizeof(struct ebt_entries); } else { if (left < sizeof(struct ebt_entry)) break; if (left < e->next_offset) break; if (e->next_offset < sizeof(struct ebt_entry)) return -EINVAL; offset += e->next_offset; } } if (offset != limit) return -EINVAL; /* check if all valid hooks have a chain */ for (i = 0; i < NF_BR_NUMHOOKS; i++) { if (!newinfo->hook_entry[i] && (valid_hooks & (1 << i))) return -EINVAL; } return 0; } /* this one is very careful, as it is the first function * to parse the userspace data */ static inline int ebt_check_entry_size_and_hooks(const struct ebt_entry *e, const struct ebt_table_info *newinfo, unsigned int *n, unsigned int *cnt, unsigned int *totalcnt, unsigned int *udc_cnt) { int i; for (i = 0; i < NF_BR_NUMHOOKS; i++) { if 
((void *)e == (void *)newinfo->hook_entry[i]) break; } /* beginning of a new chain * if i == NF_BR_NUMHOOKS it must be a user defined chain */ if (i != NF_BR_NUMHOOKS || !e->bitmask) { /* this checks if the previous chain has as many entries * as it said it has */ if (*n != *cnt) return -EINVAL; if (((struct ebt_entries *)e)->policy != EBT_DROP && ((struct ebt_entries *)e)->policy != EBT_ACCEPT) { /* only RETURN from udc */ if (i != NF_BR_NUMHOOKS || ((struct ebt_entries *)e)->policy != EBT_RETURN) return -EINVAL; } if (i == NF_BR_NUMHOOKS) /* it's a user defined chain */ (*udc_cnt)++; if (((struct ebt_entries *)e)->counter_offset != *totalcnt) return -EINVAL; *n = ((struct ebt_entries *)e)->nentries; *cnt = 0; return 0; } /* a plain old entry, heh */ if (sizeof(struct ebt_entry) > e->watchers_offset || e->watchers_offset > e->target_offset || e->target_offset >= e->next_offset) return -EINVAL; /* this is not checked anywhere else */ if (e->next_offset - e->target_offset < sizeof(struct ebt_entry_target)) return -EINVAL; (*cnt)++; (*totalcnt)++; return 0; } struct ebt_cl_stack { struct ebt_chainstack cs; int from; unsigned int hookmask; }; /* We need these positions to check that the jumps to a different part of the * entries is a jump to the beginning of a new chain. */ static inline int ebt_get_udc_positions(struct ebt_entry *e, struct ebt_table_info *newinfo, unsigned int *n, struct ebt_cl_stack *udc) { int i; /* we're only interested in chain starts */ if (e->bitmask) return 0; for (i = 0; i < NF_BR_NUMHOOKS; i++) { if (newinfo->hook_entry[i] == (struct ebt_entries *)e) break; } /* only care about udc */ if (i != NF_BR_NUMHOOKS) return 0; udc[*n].cs.chaininfo = (struct ebt_entries *)e; /* these initialisations are depended on later in check_chainloops() */ udc[*n].cs.n = 0; udc[*n].hookmask = 0; (*n)++; return 0; } static inline int ebt_cleanup_match(struct ebt_entry_match *m, struct net *net, unsigned int *i) { struct xt_mtdtor_param par; if (i && (*i)-- == 0) return 1; par.net = net; par.match = m->u.match; par.matchinfo = m->data; par.family = NFPROTO_BRIDGE; if (par.match->destroy != NULL) par.match->destroy(&par); module_put(par.match->me); return 0; } static inline int ebt_cleanup_watcher(struct ebt_entry_watcher *w, struct net *net, unsigned int *i) { struct xt_tgdtor_param par; if (i && (*i)-- == 0) return 1; par.net = net; par.target = w->u.watcher; par.targinfo = w->data; par.family = NFPROTO_BRIDGE; if (par.target->destroy != NULL) par.target->destroy(&par); module_put(par.target->me); return 0; } static inline int ebt_cleanup_entry(struct ebt_entry *e, struct net *net, unsigned int *cnt) { struct xt_tgdtor_param par; struct ebt_entry_target *t; if (e->bitmask == 0) return 0; /* we're done */ if (cnt && (*cnt)-- == 0) return 1; EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, NULL); EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, NULL); t = ebt_get_target(e); par.net = net; par.target = t->u.target; par.targinfo = t->data; par.family = NFPROTO_BRIDGE; if (par.target->destroy != NULL) par.target->destroy(&par); module_put(par.target->me); return 0; } static inline int ebt_check_entry(struct ebt_entry *e, struct net *net, const struct ebt_table_info *newinfo, const char *name, unsigned int *cnt, struct ebt_cl_stack *cl_s, unsigned int udc_cnt) { struct ebt_entry_target *t; struct xt_target *target; unsigned int i, j, hook = 0, hookmask = 0; size_t gap; int ret; struct xt_mtchk_param mtpar; struct xt_tgchk_param tgpar; /* don't mess with the struct ebt_entries */ if (e->bitmask == 
0) return 0; if (e->bitmask & ~EBT_F_MASK) return -EINVAL; if (e->invflags & ~EBT_INV_MASK) return -EINVAL; if ((e->bitmask & EBT_NOPROTO) && (e->bitmask & EBT_802_3)) return -EINVAL; /* what hook do we belong to? */ for (i = 0; i < NF_BR_NUMHOOKS; i++) { if (!newinfo->hook_entry[i]) continue; if ((char *)newinfo->hook_entry[i] < (char *)e) hook = i; else break; } /* (1 << NF_BR_NUMHOOKS) tells the check functions the rule is on * a base chain */ if (i < NF_BR_NUMHOOKS) hookmask = (1 << hook) | (1 << NF_BR_NUMHOOKS); else { for (i = 0; i < udc_cnt; i++) if ((char *)(cl_s[i].cs.chaininfo) > (char *)e) break; if (i == 0) hookmask = (1 << hook) | (1 << NF_BR_NUMHOOKS); else hookmask = cl_s[i - 1].hookmask; } i = 0; memset(&mtpar, 0, sizeof(mtpar)); memset(&tgpar, 0, sizeof(tgpar)); mtpar.net = tgpar.net = net; mtpar.table = tgpar.table = name; mtpar.entryinfo = tgpar.entryinfo = e; mtpar.hook_mask = tgpar.hook_mask = hookmask; mtpar.family = tgpar.family = NFPROTO_BRIDGE; ret = EBT_MATCH_ITERATE(e, ebt_check_match, &mtpar, &i); if (ret != 0) goto cleanup_matches; j = 0; ret = EBT_WATCHER_ITERATE(e, ebt_check_watcher, &tgpar, &j); if (ret != 0) goto cleanup_watchers; t = ebt_get_target(e); gap = e->next_offset - e->target_offset; target = xt_request_find_target(NFPROTO_BRIDGE, t->u.name, 0); if (IS_ERR(target)) { ret = PTR_ERR(target); goto cleanup_watchers; } /* Reject UNSPEC, xtables verdicts/return values are incompatible */ if (target->family != NFPROTO_BRIDGE) { module_put(target->me); ret = -ENOENT; goto cleanup_watchers; } t->u.target = target; if (t->u.target == &ebt_standard_target) { if (gap < sizeof(struct ebt_standard_target)) { ret = -EFAULT; goto cleanup_watchers; } if (((struct ebt_standard_target *)t)->verdict < -NUM_STANDARD_TARGETS) { ret = -EFAULT; goto cleanup_watchers; } } else if (t->target_size > gap - sizeof(struct ebt_entry_target)) { module_put(t->u.target->me); ret = -EFAULT; goto cleanup_watchers; } tgpar.target = target; tgpar.targinfo = t->data; ret = xt_check_target(&tgpar, t->target_size, ntohs(e->ethproto), e->invflags & EBT_IPROTO); if (ret < 0) { module_put(target->me); goto cleanup_watchers; } (*cnt)++; return 0; cleanup_watchers: EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, &j); cleanup_matches: EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, &i); return ret; } /* checks for loops and sets the hook mask for udc * the hook mask for udc tells us from which base chains the udc can be * accessed. 
This mask is a parameter to the check() functions of the extensions */ static int check_chainloops(const struct ebt_entries *chain, struct ebt_cl_stack *cl_s, unsigned int udc_cnt, unsigned int hooknr, char *base) { int i, chain_nr = -1, pos = 0, nentries = chain->nentries, verdict; const struct ebt_entry *e = (struct ebt_entry *)chain->data; const struct ebt_entry_target *t; while (pos < nentries || chain_nr != -1) { /* end of udc, go back one 'recursion' step */ if (pos == nentries) { /* put back values of the time when this chain was called */ e = cl_s[chain_nr].cs.e; if (cl_s[chain_nr].from != -1) nentries = cl_s[cl_s[chain_nr].from].cs.chaininfo->nentries; else nentries = chain->nentries; pos = cl_s[chain_nr].cs.n; /* make sure we won't see a loop that isn't one */ cl_s[chain_nr].cs.n = 0; chain_nr = cl_s[chain_nr].from; if (pos == nentries) continue; } t = ebt_get_target_c(e); if (strcmp(t->u.name, EBT_STANDARD_TARGET)) goto letscontinue; if (e->target_offset + sizeof(struct ebt_standard_target) > e->next_offset) return -1; verdict = ((struct ebt_standard_target *)t)->verdict; if (verdict >= 0) { /* jump to another chain */ struct ebt_entries *hlp2 = (struct ebt_entries *)(base + verdict); for (i = 0; i < udc_cnt; i++) if (hlp2 == cl_s[i].cs.chaininfo) break; /* bad destination or loop */ if (i == udc_cnt) return -1; if (cl_s[i].cs.n) return -1; if (cl_s[i].hookmask & (1 << hooknr)) goto letscontinue; /* this can't be 0, so the loop test is correct */ cl_s[i].cs.n = pos + 1; pos = 0; cl_s[i].cs.e = ebt_next_entry(e); e = (struct ebt_entry *)(hlp2->data); nentries = hlp2->nentries; cl_s[i].from = chain_nr; chain_nr = i; /* this udc is accessible from the base chain for hooknr */ cl_s[i].hookmask |= (1 << hooknr); continue; } letscontinue: e = ebt_next_entry(e); pos++; } return 0; } /* do the parsing of the table/chains/entries/matches/watchers/targets, heh */ static int translate_table(struct net *net, const char *name, struct ebt_table_info *newinfo) { unsigned int i, j, k, udc_cnt; int ret; struct ebt_cl_stack *cl_s = NULL; /* used in the checking for chain loops */ i = 0; while (i < NF_BR_NUMHOOKS && !newinfo->hook_entry[i]) i++; if (i == NF_BR_NUMHOOKS) return -EINVAL; if (newinfo->hook_entry[i] != (struct ebt_entries *)newinfo->entries) return -EINVAL; /* make sure chains are ordered after each other in same order * as their corresponding hooks */ for (j = i + 1; j < NF_BR_NUMHOOKS; j++) { if (!newinfo->hook_entry[j]) continue; if (newinfo->hook_entry[j] <= newinfo->hook_entry[i]) return -EINVAL; i = j; } /* do some early checkings and initialize some things */ i = 0; /* holds the expected nr. of entries for the chain */ j = 0; /* holds the up to now counted entries for the chain */ k = 0; /* holds the total nr. of entries, should equal * newinfo->nentries afterwards */ udc_cnt = 0; /* will hold the nr. 
of user defined chains (udc) */ ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size, ebt_check_entry_size_and_hooks, newinfo, &i, &j, &k, &udc_cnt); if (ret != 0) return ret; if (i != j) return -EINVAL; if (k != newinfo->nentries) return -EINVAL; /* get the location of the udc, put them in an array * while we're at it, allocate the chainstack */ if (udc_cnt) { /* this will get free'd in do_replace()/ebt_register_table() * if an error occurs */ newinfo->chainstack = vmalloc(array_size(nr_cpu_ids, sizeof(*(newinfo->chainstack)))); if (!newinfo->chainstack) return -ENOMEM; for_each_possible_cpu(i) { newinfo->chainstack[i] = vmalloc_node(array_size(udc_cnt, sizeof(*(newinfo->chainstack[0]))), cpu_to_node(i)); if (!newinfo->chainstack[i]) { while (i) vfree(newinfo->chainstack[--i]); vfree(newinfo->chainstack); newinfo->chainstack = NULL; return -ENOMEM; } } cl_s = vmalloc(array_size(udc_cnt, sizeof(*cl_s))); if (!cl_s) return -ENOMEM; i = 0; /* the i'th udc */ EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size, ebt_get_udc_positions, newinfo, &i, cl_s); /* sanity check */ if (i != udc_cnt) { vfree(cl_s); return -EFAULT; } } /* Check for loops */ for (i = 0; i < NF_BR_NUMHOOKS; i++) if (newinfo->hook_entry[i]) if (check_chainloops(newinfo->hook_entry[i], cl_s, udc_cnt, i, newinfo->entries)) { vfree(cl_s); return -EINVAL; } /* we now know the following (along with E=mc²): * - the nr of entries in each chain is right * - the size of the allocated space is right * - all valid hooks have a corresponding chain * - there are no loops * - wrong data can still be on the level of a single entry * - could be there are jumps to places that are not the * beginning of a chain. This can only occur in chains that * are not accessible from any base chains, so we don't care. 
*/ /* used to know what we need to clean up if something goes wrong */ i = 0; ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size, ebt_check_entry, net, newinfo, name, &i, cl_s, udc_cnt); if (ret != 0) { EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size, ebt_cleanup_entry, net, &i); } vfree(cl_s); return ret; } /* called under write_lock */ static void get_counters(const struct ebt_counter *oldcounters, struct ebt_counter *counters, unsigned int nentries) { int i, cpu; struct ebt_counter *counter_base; /* counters of cpu 0 */ memcpy(counters, oldcounters, sizeof(struct ebt_counter) * nentries); /* add other counters to those of cpu 0 */ for_each_possible_cpu(cpu) { if (cpu == 0) continue; counter_base = COUNTER_BASE(oldcounters, nentries, cpu); for (i = 0; i < nentries; i++) ADD_COUNTER(counters[i], counter_base[i].bcnt, counter_base[i].pcnt); } } static int do_replace_finish(struct net *net, struct ebt_replace *repl, struct ebt_table_info *newinfo) { int ret; struct ebt_counter *counterstmp = NULL; /* used to be able to unlock earlier */ struct ebt_table_info *table; struct ebt_table *t; /* the user wants counters back * the check on the size is done later, when we have the lock */ if (repl->num_counters) { unsigned long size = repl->num_counters * sizeof(*counterstmp); counterstmp = vmalloc(size); if (!counterstmp) return -ENOMEM; } newinfo->chainstack = NULL; ret = ebt_verify_pointers(repl, newinfo); if (ret != 0) goto free_counterstmp; ret = translate_table(net, repl->name, newinfo); if (ret != 0) goto free_counterstmp; t = find_table_lock(net, repl->name, &ret, &ebt_mutex); if (!t) { ret = -ENOENT; goto free_iterate; } if (repl->valid_hooks != t->valid_hooks) { ret = -EINVAL; goto free_unlock; } if (repl->num_counters && repl->num_counters != t->private->nentries) { ret = -EINVAL; goto free_unlock; } /* we have the mutex lock, so no danger in reading this pointer */ table = t->private; /* make sure the table can only be rmmod'ed if it contains no rules */ if (!table->nentries && newinfo->nentries && !try_module_get(t->me)) { ret = -ENOENT; goto free_unlock; } else if (table->nentries && !newinfo->nentries) module_put(t->me); /* we need an atomic snapshot of the counters */ write_lock_bh(&t->lock); if (repl->num_counters) get_counters(t->private->counters, counterstmp, t->private->nentries); t->private = newinfo; write_unlock_bh(&t->lock); mutex_unlock(&ebt_mutex); /* so, a user can change the chains while having messed up her counter * allocation. Only reason why this is done is because this way the lock * is held only once, while this doesn't bring the kernel into a * dangerous state. 
*/ if (repl->num_counters && copy_to_user(repl->counters, counterstmp, array_size(repl->num_counters, sizeof(struct ebt_counter)))) { /* Silent error, can't fail, new table is already in place */ net_warn_ratelimited("ebtables: counters copy to user failed while replacing table\n"); } /* decrease module count and free resources */ EBT_ENTRY_ITERATE(table->entries, table->entries_size, ebt_cleanup_entry, net, NULL); vfree(table->entries); ebt_free_table_info(table); vfree(table); vfree(counterstmp); audit_log_nfcfg(repl->name, AF_BRIDGE, repl->nentries, AUDIT_XT_OP_REPLACE, GFP_KERNEL); return 0; free_unlock: mutex_unlock(&ebt_mutex); free_iterate: EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size, ebt_cleanup_entry, net, NULL); free_counterstmp: vfree(counterstmp); /* can be initialized in translate_table() */ ebt_free_table_info(newinfo); return ret; } /* replace the table */ static int do_replace(struct net *net, sockptr_t arg, unsigned int len) { int ret, countersize; struct ebt_table_info *newinfo; struct ebt_replace tmp; if (len < sizeof(tmp)) return -EINVAL; if (copy_from_sockptr(&tmp, arg, sizeof(tmp)) != 0) return -EFAULT; if (len != sizeof(tmp) + tmp.entries_size) return -EINVAL; if (tmp.entries_size == 0) return -EINVAL; /* overflow check */ if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) / NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter)) return -ENOMEM; if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter)) return -ENOMEM; tmp.name[sizeof(tmp.name) - 1] = 0; countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids; newinfo = __vmalloc(sizeof(*newinfo) + countersize, GFP_KERNEL_ACCOUNT); if (!newinfo) return -ENOMEM; if (countersize) memset(newinfo->counters, 0, countersize); newinfo->entries = __vmalloc(tmp.entries_size, GFP_KERNEL_ACCOUNT); if (!newinfo->entries) { ret = -ENOMEM; goto free_newinfo; } if (copy_from_user( newinfo->entries, tmp.entries, tmp.entries_size) != 0) { ret = -EFAULT; goto free_entries; } ret = do_replace_finish(net, &tmp, newinfo); if (ret == 0) return ret; free_entries: vfree(newinfo->entries); free_newinfo: vfree(newinfo); return ret; } static void __ebt_unregister_table(struct net *net, struct ebt_table *table) { mutex_lock(&ebt_mutex); list_del(&table->list); mutex_unlock(&ebt_mutex); audit_log_nfcfg(table->name, AF_BRIDGE, table->private->nentries, AUDIT_XT_OP_UNREGISTER, GFP_KERNEL); EBT_ENTRY_ITERATE(table->private->entries, table->private->entries_size, ebt_cleanup_entry, net, NULL); if (table->private->nentries) module_put(table->me); vfree(table->private->entries); ebt_free_table_info(table->private); vfree(table->private); kfree(table->ops); kfree(table); } int ebt_register_table(struct net *net, const struct ebt_table *input_table, const struct nf_hook_ops *template_ops) { struct ebt_pernet *ebt_net = net_generic(net, ebt_pernet_id); struct ebt_table_info *newinfo; struct ebt_table *t, *table; struct nf_hook_ops *ops; unsigned int num_ops; struct ebt_replace_kernel *repl; int ret, i, countersize; void *p; if (input_table == NULL || (repl = input_table->table) == NULL || repl->entries == NULL || repl->entries_size == 0 || repl->counters != NULL || input_table->private != NULL) return -EINVAL; /* Don't add one table to multiple lists. 
*/ table = kmemdup(input_table, sizeof(struct ebt_table), GFP_KERNEL); if (!table) { ret = -ENOMEM; goto out; } countersize = COUNTER_OFFSET(repl->nentries) * nr_cpu_ids; newinfo = vmalloc(sizeof(*newinfo) + countersize); ret = -ENOMEM; if (!newinfo) goto free_table; p = vmalloc(repl->entries_size); if (!p) goto free_newinfo; memcpy(p, repl->entries, repl->entries_size); newinfo->entries = p; newinfo->entries_size = repl->entries_size; newinfo->nentries = repl->nentries; if (countersize) memset(newinfo->counters, 0, countersize); /* fill in newinfo and parse the entries */ newinfo->chainstack = NULL; for (i = 0; i < NF_BR_NUMHOOKS; i++) { if ((repl->valid_hooks & (1 << i)) == 0) newinfo->hook_entry[i] = NULL; else newinfo->hook_entry[i] = p + ((char *)repl->hook_entry[i] - repl->entries); } ret = translate_table(net, repl->name, newinfo); if (ret != 0) goto free_chainstack; table->private = newinfo; rwlock_init(&table->lock); mutex_lock(&ebt_mutex); list_for_each_entry(t, &ebt_net->tables, list) { if (strcmp(t->name, table->name) == 0) { ret = -EEXIST; goto free_unlock; } } /* Hold a reference count if the chains aren't empty */ if (newinfo->nentries && !try_module_get(table->me)) { ret = -ENOENT; goto free_unlock; } num_ops = hweight32(table->valid_hooks); if (num_ops == 0) { ret = -EINVAL; goto free_unlock; } ops = kmemdup_array(template_ops, num_ops, sizeof(*ops), GFP_KERNEL); if (!ops) { ret = -ENOMEM; if (newinfo->nentries) module_put(table->me); goto free_unlock; } for (i = 0; i < num_ops; i++) ops[i].priv = table; list_add(&table->list, &ebt_net->tables); mutex_unlock(&ebt_mutex); table->ops = ops; ret = nf_register_net_hooks(net, ops, num_ops); if (ret) __ebt_unregister_table(net, table); audit_log_nfcfg(repl->name, AF_BRIDGE, repl->nentries, AUDIT_XT_OP_REGISTER, GFP_KERNEL); return ret; free_unlock: mutex_unlock(&ebt_mutex); free_chainstack: ebt_free_table_info(newinfo); vfree(newinfo->entries); free_newinfo: vfree(newinfo); free_table: kfree(table); out: return ret; } int ebt_register_template(const struct ebt_table *t, int (*table_init)(struct net *net)) { struct ebt_template *tmpl; mutex_lock(&ebt_mutex); list_for_each_entry(tmpl, &template_tables, list) { if (WARN_ON_ONCE(strcmp(t->name, tmpl->name) == 0)) { mutex_unlock(&ebt_mutex); return -EEXIST; } } tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL); if (!tmpl) { mutex_unlock(&ebt_mutex); return -ENOMEM; } tmpl->table_init = table_init; strscpy(tmpl->name, t->name, sizeof(tmpl->name)); tmpl->owner = t->me; list_add(&tmpl->list, &template_tables); mutex_unlock(&ebt_mutex); return 0; } EXPORT_SYMBOL(ebt_register_template); void ebt_unregister_template(const struct ebt_table *t) { struct ebt_template *tmpl; mutex_lock(&ebt_mutex); list_for_each_entry(tmpl, &template_tables, list) { if (strcmp(t->name, tmpl->name)) continue; list_del(&tmpl->list); mutex_unlock(&ebt_mutex); kfree(tmpl); return; } mutex_unlock(&ebt_mutex); WARN_ON_ONCE(1); } EXPORT_SYMBOL(ebt_unregister_template); static struct ebt_table *__ebt_find_table(struct net *net, const char *name) { struct ebt_pernet *ebt_net = net_generic(net, ebt_pernet_id); struct ebt_table *t; mutex_lock(&ebt_mutex); list_for_each_entry(t, &ebt_net->tables, list) { if (strcmp(t->name, name) == 0) { mutex_unlock(&ebt_mutex); return t; } } mutex_unlock(&ebt_mutex); return NULL; } void ebt_unregister_table_pre_exit(struct net *net, const char *name) { struct ebt_table *table = __ebt_find_table(net, name); if (table) nf_unregister_net_hooks(net, table->ops, hweight32(table->valid_hooks)); } 
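/* Teardown note: in-tree table modules (e.g. ebtable_filter) call
 * ebt_unregister_table_pre_exit() from their pernet ->pre_exit handler so the
 * netfilter hooks are removed before other exit handlers run, and then call
 * ebt_unregister_table() from ->exit to unlink and free the table itself.
 */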
EXPORT_SYMBOL(ebt_unregister_table_pre_exit); void ebt_unregister_table(struct net *net, const char *name) { struct ebt_table *table = __ebt_find_table(net, name); if (table) __ebt_unregister_table(net, table); } /* userspace just supplied us with counters */ static int do_update_counters(struct net *net, const char *name, struct ebt_counter __user *counters, unsigned int num_counters, unsigned int len) { int i, ret; struct ebt_counter *tmp; struct ebt_table *t; if (num_counters == 0) return -EINVAL; tmp = vmalloc(array_size(num_counters, sizeof(*tmp))); if (!tmp) return -ENOMEM; t = find_table_lock(net, name, &ret, &ebt_mutex); if (!t) goto free_tmp; if (num_counters != t->private->nentries) { ret = -EINVAL; goto unlock_mutex; } if (copy_from_user(tmp, counters, array_size(num_counters, sizeof(*counters)))) { ret = -EFAULT; goto unlock_mutex; } /* we want an atomic add of the counters */ write_lock_bh(&t->lock); /* we add to the counters of the first cpu */ for (i = 0; i < num_counters; i++) ADD_COUNTER(t->private->counters[i], tmp[i].bcnt, tmp[i].pcnt); write_unlock_bh(&t->lock); ret = 0; unlock_mutex: mutex_unlock(&ebt_mutex); free_tmp: vfree(tmp); return ret; } static int update_counters(struct net *net, sockptr_t arg, unsigned int len) { struct ebt_replace hlp; if (len < sizeof(hlp)) return -EINVAL; if (copy_from_sockptr(&hlp, arg, sizeof(hlp))) return -EFAULT; if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter)) return -EINVAL; return do_update_counters(net, hlp.name, hlp.counters, hlp.num_counters, len); } static inline int ebt_obj_to_user(char __user *um, const char *_name, const char *data, int entrysize, int usersize, int datasize, u8 revision) { char name[EBT_EXTENSION_MAXNAMELEN] = {0}; /* ebtables expects 31 bytes long names but xt_match names are 29 bytes * long. Copy 29 bytes and fill remaining bytes with zeroes. 
*/ strscpy(name, _name, sizeof(name)); if (copy_to_user(um, name, EBT_EXTENSION_MAXNAMELEN) || put_user(revision, (u8 __user *)(um + EBT_EXTENSION_MAXNAMELEN)) || put_user(datasize, (int __user *)(um + EBT_EXTENSION_MAXNAMELEN + 1)) || xt_data_to_user(um + entrysize, data, usersize, datasize, XT_ALIGN(datasize))) return -EFAULT; return 0; } static inline int ebt_match_to_user(const struct ebt_entry_match *m, const char *base, char __user *ubase) { return ebt_obj_to_user(ubase + ((char *)m - base), m->u.match->name, m->data, sizeof(*m), m->u.match->usersize, m->match_size, m->u.match->revision); } static inline int ebt_watcher_to_user(const struct ebt_entry_watcher *w, const char *base, char __user *ubase) { return ebt_obj_to_user(ubase + ((char *)w - base), w->u.watcher->name, w->data, sizeof(*w), w->u.watcher->usersize, w->watcher_size, w->u.watcher->revision); } static inline int ebt_entry_to_user(struct ebt_entry *e, const char *base, char __user *ubase) { int ret; char __user *hlp; const struct ebt_entry_target *t; if (e->bitmask == 0) { /* special case !EBT_ENTRY_OR_ENTRIES */ if (copy_to_user(ubase + ((char *)e - base), e, sizeof(struct ebt_entries))) return -EFAULT; return 0; } if (copy_to_user(ubase + ((char *)e - base), e, sizeof(*e))) return -EFAULT; hlp = ubase + (((char *)e + e->target_offset) - base); t = ebt_get_target_c(e); ret = EBT_MATCH_ITERATE(e, ebt_match_to_user, base, ubase); if (ret != 0) return ret; ret = EBT_WATCHER_ITERATE(e, ebt_watcher_to_user, base, ubase); if (ret != 0) return ret; ret = ebt_obj_to_user(hlp, t->u.target->name, t->data, sizeof(*t), t->u.target->usersize, t->target_size, t->u.target->revision); if (ret != 0) return ret; return 0; } static int copy_counters_to_user(struct ebt_table *t, const struct ebt_counter *oldcounters, void __user *user, unsigned int num_counters, unsigned int nentries) { struct ebt_counter *counterstmp; int ret = 0; /* userspace might not need the counters */ if (num_counters == 0) return 0; if (num_counters != nentries) return -EINVAL; counterstmp = vmalloc(array_size(nentries, sizeof(*counterstmp))); if (!counterstmp) return -ENOMEM; write_lock_bh(&t->lock); get_counters(oldcounters, counterstmp, nentries); write_unlock_bh(&t->lock); if (copy_to_user(user, counterstmp, array_size(nentries, sizeof(struct ebt_counter)))) ret = -EFAULT; vfree(counterstmp); return ret; } /* called with ebt_mutex locked */ static int copy_everything_to_user(struct ebt_table *t, void __user *user, const int *len, int cmd) { struct ebt_replace tmp; const struct ebt_counter *oldcounters; unsigned int entries_size, nentries; int ret; char *entries; if (cmd == EBT_SO_GET_ENTRIES) { entries_size = t->private->entries_size; nentries = t->private->nentries; entries = t->private->entries; oldcounters = t->private->counters; } else { entries_size = t->table->entries_size; nentries = t->table->nentries; entries = t->table->entries; oldcounters = t->table->counters; } if (copy_from_user(&tmp, user, sizeof(tmp))) return -EFAULT; if (*len != sizeof(struct ebt_replace) + entries_size + (tmp.num_counters ? 
nentries * sizeof(struct ebt_counter) : 0)) return -EINVAL; if (tmp.nentries != nentries) return -EINVAL; if (tmp.entries_size != entries_size) return -EINVAL; ret = copy_counters_to_user(t, oldcounters, tmp.counters, tmp.num_counters, nentries); if (ret) return ret; /* set the match/watcher/target names right */ return EBT_ENTRY_ITERATE(entries, entries_size, ebt_entry_to_user, entries, tmp.entries); } #ifdef CONFIG_NETFILTER_XTABLES_COMPAT /* 32 bit-userspace compatibility definitions. */ struct compat_ebt_replace { char name[EBT_TABLE_MAXNAMELEN]; compat_uint_t valid_hooks; compat_uint_t nentries; compat_uint_t entries_size; /* start of the chains */ compat_uptr_t hook_entry[NF_BR_NUMHOOKS]; /* nr of counters userspace expects back */ compat_uint_t num_counters; /* where the kernel will put the old counters. */ compat_uptr_t counters; compat_uptr_t entries; }; /* struct ebt_entry_match, _target and _watcher have same layout */ struct compat_ebt_entry_mwt { union { struct { char name[EBT_EXTENSION_MAXNAMELEN]; u8 revision; }; compat_uptr_t ptr; } u; compat_uint_t match_size; compat_uint_t data[] __aligned(__alignof__(struct compat_ebt_replace)); }; /* account for possible padding between match_size and ->data */ static int ebt_compat_entry_padsize(void) { BUILD_BUG_ON(sizeof(struct ebt_entry_match) < sizeof(struct compat_ebt_entry_mwt)); return (int) sizeof(struct ebt_entry_match) - sizeof(struct compat_ebt_entry_mwt); } static int ebt_compat_match_offset(const struct xt_match *match, unsigned int userlen) { /* ebt_among needs special handling. The kernel .matchsize is * set to -1 at registration time; at runtime an EBT_ALIGN()ed * value is expected. * Example: userspace sends 4500, ebt_among.c wants 4504. */ if (unlikely(match->matchsize == -1)) return XT_ALIGN(userlen) - COMPAT_XT_ALIGN(userlen); return xt_compat_match_offset(match); } static int compat_match_to_user(struct ebt_entry_match *m, void __user **dstptr, unsigned int *size) { const struct xt_match *match = m->u.match; struct compat_ebt_entry_mwt __user *cm = *dstptr; int off = ebt_compat_match_offset(match, m->match_size); compat_uint_t msize = m->match_size - off; if (WARN_ON(off >= m->match_size)) return -EINVAL; if (copy_to_user(cm->u.name, match->name, strlen(match->name) + 1) || put_user(match->revision, &cm->u.revision) || put_user(msize, &cm->match_size)) return -EFAULT; if (match->compat_to_user) { if (match->compat_to_user(cm->data, m->data)) return -EFAULT; } else { if (xt_data_to_user(cm->data, m->data, match->usersize, msize, COMPAT_XT_ALIGN(msize))) return -EFAULT; } *size -= ebt_compat_entry_padsize() + off; *dstptr = cm->data; *dstptr += msize; return 0; } static int compat_target_to_user(struct ebt_entry_target *t, void __user **dstptr, unsigned int *size) { const struct xt_target *target = t->u.target; struct compat_ebt_entry_mwt __user *cm = *dstptr; int off = xt_compat_target_offset(target); compat_uint_t tsize = t->target_size - off; if (WARN_ON(off >= t->target_size)) return -EINVAL; if (copy_to_user(cm->u.name, target->name, strlen(target->name) + 1) || put_user(target->revision, &cm->u.revision) || put_user(tsize, &cm->match_size)) return -EFAULT; if (target->compat_to_user) { if (target->compat_to_user(cm->data, t->data)) return -EFAULT; } else { if (xt_data_to_user(cm->data, t->data, target->usersize, tsize, COMPAT_XT_ALIGN(tsize))) return -EFAULT; } *size -= ebt_compat_entry_padsize() + off; *dstptr = cm->data; *dstptr += tsize; return 0; } static int compat_watcher_to_user(struct ebt_entry_watcher 
*w, void __user **dstptr, unsigned int *size) { return compat_target_to_user((struct ebt_entry_target *)w, dstptr, size); } static int compat_copy_entry_to_user(struct ebt_entry *e, void __user **dstptr, unsigned int *size) { struct ebt_entry_target *t; struct ebt_entry __user *ce; u32 watchers_offset, target_offset, next_offset; compat_uint_t origsize; int ret; if (e->bitmask == 0) { if (*size < sizeof(struct ebt_entries)) return -EINVAL; if (copy_to_user(*dstptr, e, sizeof(struct ebt_entries))) return -EFAULT; *dstptr += sizeof(struct ebt_entries); *size -= sizeof(struct ebt_entries); return 0; } if (*size < sizeof(*ce)) return -EINVAL; ce = *dstptr; if (copy_to_user(ce, e, sizeof(*ce))) return -EFAULT; origsize = *size; *dstptr += sizeof(*ce); ret = EBT_MATCH_ITERATE(e, compat_match_to_user, dstptr, size); if (ret) return ret; watchers_offset = e->watchers_offset - (origsize - *size); ret = EBT_WATCHER_ITERATE(e, compat_watcher_to_user, dstptr, size); if (ret) return ret; target_offset = e->target_offset - (origsize - *size); t = ebt_get_target(e); ret = compat_target_to_user(t, dstptr, size); if (ret) return ret; next_offset = e->next_offset - (origsize - *size); if (put_user(watchers_offset, &ce->watchers_offset) || put_user(target_offset, &ce->target_offset) || put_user(next_offset, &ce->next_offset)) return -EFAULT; *size -= sizeof(*ce); return 0; } static int compat_calc_match(struct ebt_entry_match *m, int *off) { *off += ebt_compat_match_offset(m->u.match, m->match_size); *off += ebt_compat_entry_padsize(); return 0; } static int compat_calc_watcher(struct ebt_entry_watcher *w, int *off) { *off += xt_compat_target_offset(w->u.watcher); *off += ebt_compat_entry_padsize(); return 0; } static int compat_calc_entry(const struct ebt_entry *e, const struct ebt_table_info *info, const void *base, struct compat_ebt_replace *newinfo) { const struct ebt_entry_target *t; unsigned int entry_offset; int off, ret, i; if (e->bitmask == 0) return 0; off = 0; entry_offset = (void *)e - base; EBT_MATCH_ITERATE(e, compat_calc_match, &off); EBT_WATCHER_ITERATE(e, compat_calc_watcher, &off); t = ebt_get_target_c(e); off += xt_compat_target_offset(t->u.target); off += ebt_compat_entry_padsize(); newinfo->entries_size -= off; ret = xt_compat_add_offset(NFPROTO_BRIDGE, entry_offset, off); if (ret) return ret; for (i = 0; i < NF_BR_NUMHOOKS; i++) { const void *hookptr = info->hook_entry[i]; if (info->hook_entry[i] && (e < (struct ebt_entry *)(base - hookptr))) { newinfo->hook_entry[i] -= off; pr_debug("0x%08X -> 0x%08X\n", newinfo->hook_entry[i] + off, newinfo->hook_entry[i]); } } return 0; } static int ebt_compat_init_offsets(unsigned int number) { if (number > INT_MAX) return -EINVAL; /* also count the base chain policies */ number += NF_BR_NUMHOOKS; return xt_compat_init_offsets(NFPROTO_BRIDGE, number); } static int compat_table_info(const struct ebt_table_info *info, struct compat_ebt_replace *newinfo) { unsigned int size = info->entries_size; const void *entries = info->entries; int ret; newinfo->entries_size = size; ret = ebt_compat_init_offsets(info->nentries); if (ret) return ret; return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info, entries, newinfo); } static int compat_copy_everything_to_user(struct ebt_table *t, void __user *user, int *len, int cmd) { struct compat_ebt_replace repl, tmp; struct ebt_counter *oldcounters; struct ebt_table_info tinfo; int ret; void __user *pos; memset(&tinfo, 0, sizeof(tinfo)); if (cmd == EBT_SO_GET_ENTRIES) { tinfo.entries_size = 
t->private->entries_size; tinfo.nentries = t->private->nentries; tinfo.entries = t->private->entries; oldcounters = t->private->counters; } else { tinfo.entries_size = t->table->entries_size; tinfo.nentries = t->table->nentries; tinfo.entries = t->table->entries; oldcounters = t->table->counters; } if (copy_from_user(&tmp, user, sizeof(tmp))) return -EFAULT; if (tmp.nentries != tinfo.nentries || (tmp.num_counters && tmp.num_counters != tinfo.nentries)) return -EINVAL; memcpy(&repl, &tmp, sizeof(repl)); if (cmd == EBT_SO_GET_ENTRIES) ret = compat_table_info(t->private, &repl); else ret = compat_table_info(&tinfo, &repl); if (ret) return ret; if (*len != sizeof(tmp) + repl.entries_size + (tmp.num_counters? tinfo.nentries * sizeof(struct ebt_counter): 0)) { pr_err("wrong size: *len %d, entries_size %u, replsz %d\n", *len, tinfo.entries_size, repl.entries_size); return -EINVAL; } /* userspace might not need the counters */ ret = copy_counters_to_user(t, oldcounters, compat_ptr(tmp.counters), tmp.num_counters, tinfo.nentries); if (ret) return ret; pos = compat_ptr(tmp.entries); return EBT_ENTRY_ITERATE(tinfo.entries, tinfo.entries_size, compat_copy_entry_to_user, &pos, &tmp.entries_size); } struct ebt_entries_buf_state { char *buf_kern_start; /* kernel buffer to copy (translated) data to */ u32 buf_kern_len; /* total size of kernel buffer */ u32 buf_kern_offset; /* amount of data copied so far */ u32 buf_user_offset; /* read position in userspace buffer */ }; static int ebt_buf_count(struct ebt_entries_buf_state *state, unsigned int sz) { state->buf_kern_offset += sz; return state->buf_kern_offset >= sz ? 0 : -EINVAL; } static int ebt_buf_add(struct ebt_entries_buf_state *state, const void *data, unsigned int sz) { if (state->buf_kern_start == NULL) goto count_only; if (WARN_ON(state->buf_kern_offset + sz > state->buf_kern_len)) return -EINVAL; memcpy(state->buf_kern_start + state->buf_kern_offset, data, sz); count_only: state->buf_user_offset += sz; return ebt_buf_count(state, sz); } static int ebt_buf_add_pad(struct ebt_entries_buf_state *state, unsigned int sz) { char *b = state->buf_kern_start; if (WARN_ON(b && state->buf_kern_offset > state->buf_kern_len)) return -EINVAL; if (b != NULL && sz > 0) memset(b + state->buf_kern_offset, 0, sz); /* do not adjust ->buf_user_offset here, we added kernel-side padding */ return ebt_buf_count(state, sz); } enum compat_mwt { EBT_COMPAT_MATCH, EBT_COMPAT_WATCHER, EBT_COMPAT_TARGET, }; static int compat_mtw_from_user(const struct compat_ebt_entry_mwt *mwt, enum compat_mwt compat_mwt, struct ebt_entries_buf_state *state, const unsigned char *base) { char name[EBT_EXTENSION_MAXNAMELEN]; struct xt_match *match; struct xt_target *wt; void *dst = NULL; int off, pad = 0; unsigned int size_kern, match_size = mwt->match_size; if (strscpy(name, mwt->u.name, sizeof(name)) < 0) return -EINVAL; if (state->buf_kern_start) dst = state->buf_kern_start + state->buf_kern_offset; switch (compat_mwt) { case EBT_COMPAT_MATCH: match = xt_request_find_match(NFPROTO_BRIDGE, name, mwt->u.revision); if (IS_ERR(match)) return PTR_ERR(match); off = ebt_compat_match_offset(match, match_size); if (dst) { if (match->compat_from_user) match->compat_from_user(dst, mwt->data); else memcpy(dst, mwt->data, match_size); } size_kern = match->matchsize; if (unlikely(size_kern == -1)) size_kern = match_size; module_put(match->me); break; case EBT_COMPAT_WATCHER: case EBT_COMPAT_TARGET: wt = xt_request_find_target(NFPROTO_BRIDGE, name, mwt->u.revision); if (IS_ERR(wt)) return PTR_ERR(wt); off 
= xt_compat_target_offset(wt); if (dst) { if (wt->compat_from_user) wt->compat_from_user(dst, mwt->data); else memcpy(dst, mwt->data, match_size); } size_kern = wt->targetsize; module_put(wt->me); break; default: return -EINVAL; } state->buf_kern_offset += match_size + off; state->buf_user_offset += match_size; pad = XT_ALIGN(size_kern) - size_kern; if (pad > 0 && dst) { if (WARN_ON(state->buf_kern_len <= pad)) return -EINVAL; if (WARN_ON(state->buf_kern_offset - (match_size + off) + size_kern > state->buf_kern_len - pad)) return -EINVAL; memset(dst + size_kern, 0, pad); } return off + match_size; } /* return size of all matches, watchers or target, including necessary * alignment and padding. */ static int ebt_size_mwt(const struct compat_ebt_entry_mwt *match32, unsigned int size_left, enum compat_mwt type, struct ebt_entries_buf_state *state, const void *base) { const char *buf = (const char *)match32; int growth = 0; if (size_left == 0) return 0; do { struct ebt_entry_match *match_kern; int ret; if (size_left < sizeof(*match32)) return -EINVAL; match_kern = (struct ebt_entry_match *) state->buf_kern_start; if (match_kern) { char *tmp; tmp = state->buf_kern_start + state->buf_kern_offset; match_kern = (struct ebt_entry_match *) tmp; } ret = ebt_buf_add(state, buf, sizeof(*match32)); if (ret < 0) return ret; size_left -= sizeof(*match32); /* add padding before match->data (if any) */ ret = ebt_buf_add_pad(state, ebt_compat_entry_padsize()); if (ret < 0) return ret; if (match32->match_size > size_left) return -EINVAL; size_left -= match32->match_size; ret = compat_mtw_from_user(match32, type, state, base); if (ret < 0) return ret; if (WARN_ON(ret < match32->match_size)) return -EINVAL; growth += ret - match32->match_size; growth += ebt_compat_entry_padsize(); buf += sizeof(*match32); buf += match32->match_size; if (match_kern) match_kern->match_size = ret; match32 = (struct compat_ebt_entry_mwt *) buf; } while (size_left); return growth; } /* called for all ebt_entry structures. */ static int size_entry_mwt(const struct ebt_entry *entry, const unsigned char *base, unsigned int *total, struct ebt_entries_buf_state *state) { unsigned int i, j, startoff, next_expected_off, new_offset = 0; /* stores match/watchers/targets & offset of next struct ebt_entry: */ unsigned int offsets[4]; unsigned int *offsets_update = NULL; int ret; char *buf_start; if (*total < sizeof(struct ebt_entries)) return -EINVAL; if (!entry->bitmask) { *total -= sizeof(struct ebt_entries); return ebt_buf_add(state, entry, sizeof(struct ebt_entries)); } if (*total < sizeof(*entry) || entry->next_offset < sizeof(*entry)) return -EINVAL; startoff = state->buf_user_offset; /* pull in most part of ebt_entry, it does not need to be changed. */ ret = ebt_buf_add(state, entry, offsetof(struct ebt_entry, watchers_offset)); if (ret < 0) return ret; offsets[0] = sizeof(struct ebt_entry); /* matches come first */ memcpy(&offsets[1], &entry->offsets, sizeof(entry->offsets)); if (state->buf_kern_start) { buf_start = state->buf_kern_start + state->buf_kern_offset; offsets_update = (unsigned int *) buf_start; } ret = ebt_buf_add(state, &offsets[1], sizeof(offsets) - sizeof(offsets[0])); if (ret < 0) return ret; buf_start = (char *) entry; /* 0: matches offset, always follows ebt_entry. * 1: watchers offset, from ebt_entry structure * 2: target offset, from ebt_entry structure * 3: next ebt_entry offset, from ebt_entry structure * * offsets are relative to beginning of struct ebt_entry (i.e., 0). 
*/ for (i = 0; i < 4 ; ++i) { if (offsets[i] > *total) return -EINVAL; if (i < 3 && offsets[i] == *total) return -EINVAL; if (i == 0) continue; if (offsets[i-1] > offsets[i]) return -EINVAL; } for (i = 0, j = 1 ; j < 4 ; j++, i++) { struct compat_ebt_entry_mwt *match32; unsigned int size; char *buf = buf_start + offsets[i]; if (offsets[i] > offsets[j]) return -EINVAL; match32 = (struct compat_ebt_entry_mwt *) buf; size = offsets[j] - offsets[i]; ret = ebt_size_mwt(match32, size, i, state, base); if (ret < 0) return ret; new_offset += ret; if (offsets_update && new_offset) { pr_debug("change offset %d to %d\n", offsets_update[i], offsets[j] + new_offset); offsets_update[i] = offsets[j] + new_offset; } } if (state->buf_kern_start == NULL) { unsigned int offset = buf_start - (char *) base; ret = xt_compat_add_offset(NFPROTO_BRIDGE, offset, new_offset); if (ret < 0) return ret; } next_expected_off = state->buf_user_offset - startoff; if (next_expected_off != entry->next_offset) return -EINVAL; if (*total < entry->next_offset) return -EINVAL; *total -= entry->next_offset; return 0; } /* repl->entries_size is the size of the ebt_entry blob in userspace. * It might need more memory when copied to a 64 bit kernel in case * userspace is 32-bit. So, first task: find out how much memory is needed. * * Called before validation is performed. */ static int compat_copy_entries(unsigned char *data, unsigned int size_user, struct ebt_entries_buf_state *state) { unsigned int size_remaining = size_user; int ret; ret = EBT_ENTRY_ITERATE(data, size_user, size_entry_mwt, data, &size_remaining, state); if (ret < 0) return ret; if (size_remaining) return -EINVAL; return state->buf_kern_offset; } static int compat_copy_ebt_replace_from_user(struct ebt_replace *repl, sockptr_t arg, unsigned int len) { struct compat_ebt_replace tmp; int i; if (len < sizeof(tmp)) return -EINVAL; if (copy_from_sockptr(&tmp, arg, sizeof(tmp))) return -EFAULT; if (len != sizeof(tmp) + tmp.entries_size) return -EINVAL; if (tmp.entries_size == 0) return -EINVAL; if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) / NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter)) return -ENOMEM; if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter)) return -ENOMEM; memcpy(repl, &tmp, offsetof(struct ebt_replace, hook_entry)); /* starting with hook_entry, 32 vs. 
64 bit structures are different */ for (i = 0; i < NF_BR_NUMHOOKS; i++) repl->hook_entry[i] = compat_ptr(tmp.hook_entry[i]); repl->num_counters = tmp.num_counters; repl->counters = compat_ptr(tmp.counters); repl->entries = compat_ptr(tmp.entries); return 0; } static int compat_do_replace(struct net *net, sockptr_t arg, unsigned int len) { int ret, i, countersize, size64; struct ebt_table_info *newinfo; struct ebt_replace tmp; struct ebt_entries_buf_state state; void *entries_tmp; ret = compat_copy_ebt_replace_from_user(&tmp, arg, len); if (ret) { /* try real handler in case userland supplied needed padding */ if (ret == -EINVAL && do_replace(net, arg, len) == 0) ret = 0; return ret; } countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids; newinfo = vmalloc(sizeof(*newinfo) + countersize); if (!newinfo) return -ENOMEM; if (countersize) memset(newinfo->counters, 0, countersize); memset(&state, 0, sizeof(state)); newinfo->entries = vmalloc(tmp.entries_size); if (!newinfo->entries) { ret = -ENOMEM; goto free_newinfo; } if (copy_from_user( newinfo->entries, tmp.entries, tmp.entries_size) != 0) { ret = -EFAULT; goto free_entries; } entries_tmp = newinfo->entries; xt_compat_lock(NFPROTO_BRIDGE); ret = ebt_compat_init_offsets(tmp.nentries); if (ret < 0) goto out_unlock; ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state); if (ret < 0) goto out_unlock; pr_debug("tmp.entries_size %d, kern off %d, user off %d delta %d\n", tmp.entries_size, state.buf_kern_offset, state.buf_user_offset, xt_compat_calc_jump(NFPROTO_BRIDGE, tmp.entries_size)); size64 = ret; newinfo->entries = vmalloc(size64); if (!newinfo->entries) { vfree(entries_tmp); ret = -ENOMEM; goto out_unlock; } memset(&state, 0, sizeof(state)); state.buf_kern_start = newinfo->entries; state.buf_kern_len = size64; ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state); if (WARN_ON(ret < 0)) { vfree(entries_tmp); goto out_unlock; } vfree(entries_tmp); tmp.entries_size = size64; for (i = 0; i < NF_BR_NUMHOOKS; i++) { char __user *usrptr; if (tmp.hook_entry[i]) { unsigned int delta; usrptr = (char __user *) tmp.hook_entry[i]; delta = usrptr - tmp.entries; usrptr += xt_compat_calc_jump(NFPROTO_BRIDGE, delta); tmp.hook_entry[i] = (struct ebt_entries __user *)usrptr; } } xt_compat_flush_offsets(NFPROTO_BRIDGE); xt_compat_unlock(NFPROTO_BRIDGE); ret = do_replace_finish(net, &tmp, newinfo); if (ret == 0) return ret; free_entries: vfree(newinfo->entries); free_newinfo: vfree(newinfo); return ret; out_unlock: xt_compat_flush_offsets(NFPROTO_BRIDGE); xt_compat_unlock(NFPROTO_BRIDGE); goto free_entries; } static int compat_update_counters(struct net *net, sockptr_t arg, unsigned int len) { struct compat_ebt_replace hlp; if (len < sizeof(hlp)) return -EINVAL; if (copy_from_sockptr(&hlp, arg, sizeof(hlp))) return -EFAULT; /* try real handler in case userland supplied needed padding */ if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter)) return update_counters(net, arg, len); return do_update_counters(net, hlp.name, compat_ptr(hlp.counters), hlp.num_counters, len); } static int compat_do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) { int ret; struct compat_ebt_replace tmp; struct ebt_table *t; struct net *net = sock_net(sk); if ((cmd == EBT_SO_GET_INFO || cmd == EBT_SO_GET_INIT_INFO) && *len != sizeof(struct compat_ebt_replace)) return -EINVAL; if (copy_from_user(&tmp, user, sizeof(tmp))) return -EFAULT; tmp.name[sizeof(tmp.name) - 1] = '\0'; t = find_table_lock(net, tmp.name, &ret, &ebt_mutex); 
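/* find_table_lock() returns with ebt_mutex held on success; every path out of
 * the switch below therefore ends at the mutex_unlock(&ebt_mutex) following
 * the out: label.  On failure it returns NULL with *ret already set and the
 * mutex already released.
 */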
if (!t) return ret; xt_compat_lock(NFPROTO_BRIDGE); switch (cmd) { case EBT_SO_GET_INFO: tmp.nentries = t->private->nentries; ret = compat_table_info(t->private, &tmp); if (ret) goto out; tmp.valid_hooks = t->valid_hooks; if (copy_to_user(user, &tmp, *len) != 0) { ret = -EFAULT; break; } ret = 0; break; case EBT_SO_GET_INIT_INFO: tmp.nentries = t->table->nentries; tmp.entries_size = t->table->entries_size; tmp.valid_hooks = t->table->valid_hooks; if (copy_to_user(user, &tmp, *len) != 0) { ret = -EFAULT; break; } ret = 0; break; case EBT_SO_GET_ENTRIES: case EBT_SO_GET_INIT_ENTRIES: /* try real handler first in case of userland-side padding. * in case we are dealing with an 'ordinary' 32 bit binary * without 64bit compatibility padding, this will fail right * after copy_from_user when the *len argument is validated. * * the compat_ variant needs to do one pass over the kernel * data set to adjust for size differences before it the check. */ if (copy_everything_to_user(t, user, len, cmd) == 0) ret = 0; else ret = compat_copy_everything_to_user(t, user, len, cmd); break; default: ret = -EINVAL; } out: xt_compat_flush_offsets(NFPROTO_BRIDGE); xt_compat_unlock(NFPROTO_BRIDGE); mutex_unlock(&ebt_mutex); return ret; } #endif static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) { struct net *net = sock_net(sk); struct ebt_replace tmp; struct ebt_table *t; int ret; if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) return -EPERM; #ifdef CONFIG_NETFILTER_XTABLES_COMPAT /* try real handler in case userland supplied needed padding */ if (in_compat_syscall() && ((cmd != EBT_SO_GET_INFO && cmd != EBT_SO_GET_INIT_INFO) || *len != sizeof(tmp))) return compat_do_ebt_get_ctl(sk, cmd, user, len); #endif if (copy_from_user(&tmp, user, sizeof(tmp))) return -EFAULT; tmp.name[sizeof(tmp.name) - 1] = '\0'; t = find_table_lock(net, tmp.name, &ret, &ebt_mutex); if (!t) return ret; switch (cmd) { case EBT_SO_GET_INFO: case EBT_SO_GET_INIT_INFO: if (*len != sizeof(struct ebt_replace)) { ret = -EINVAL; mutex_unlock(&ebt_mutex); break; } if (cmd == EBT_SO_GET_INFO) { tmp.nentries = t->private->nentries; tmp.entries_size = t->private->entries_size; tmp.valid_hooks = t->valid_hooks; } else { tmp.nentries = t->table->nentries; tmp.entries_size = t->table->entries_size; tmp.valid_hooks = t->table->valid_hooks; } mutex_unlock(&ebt_mutex); if (copy_to_user(user, &tmp, *len) != 0) { ret = -EFAULT; break; } ret = 0; break; case EBT_SO_GET_ENTRIES: case EBT_SO_GET_INIT_ENTRIES: ret = copy_everything_to_user(t, user, len, cmd); mutex_unlock(&ebt_mutex); break; default: mutex_unlock(&ebt_mutex); ret = -EINVAL; } return ret; } static int do_ebt_set_ctl(struct sock *sk, int cmd, sockptr_t arg, unsigned int len) { struct net *net = sock_net(sk); int ret; if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) return -EPERM; switch (cmd) { case EBT_SO_SET_ENTRIES: #ifdef CONFIG_NETFILTER_XTABLES_COMPAT if (in_compat_syscall()) ret = compat_do_replace(net, arg, len); else #endif ret = do_replace(net, arg, len); break; case EBT_SO_SET_COUNTERS: #ifdef CONFIG_NETFILTER_XTABLES_COMPAT if (in_compat_syscall()) ret = compat_update_counters(net, arg, len); else #endif ret = update_counters(net, arg, len); break; default: ret = -EINVAL; } return ret; } static struct nf_sockopt_ops ebt_sockopts = { .pf = PF_INET, .set_optmin = EBT_BASE_CTL, .set_optmax = EBT_SO_SET_MAX + 1, .set = do_ebt_set_ctl, .get_optmin = EBT_BASE_CTL, .get_optmax = EBT_SO_GET_MAX + 1, .get = do_ebt_get_ctl, .owner = THIS_MODULE, }; static int __net_init 
ebt_pernet_init(struct net *net)
{
	struct ebt_pernet *ebt_net = net_generic(net, ebt_pernet_id);

	INIT_LIST_HEAD(&ebt_net->tables);
	return 0;
}

static struct pernet_operations ebt_net_ops = {
	.init = ebt_pernet_init,
	.id   = &ebt_pernet_id,
	.size = sizeof(struct ebt_pernet),
};

static int __init ebtables_init(void)
{
	int ret;

	ret = xt_register_target(&ebt_standard_target);
	if (ret < 0)
		return ret;

	ret = nf_register_sockopt(&ebt_sockopts);
	if (ret < 0) {
		xt_unregister_target(&ebt_standard_target);
		return ret;
	}

	ret = register_pernet_subsys(&ebt_net_ops);
	if (ret < 0) {
		nf_unregister_sockopt(&ebt_sockopts);
		xt_unregister_target(&ebt_standard_target);
		return ret;
	}

	return 0;
}

static void ebtables_fini(void)
{
	nf_unregister_sockopt(&ebt_sockopts);
	xt_unregister_target(&ebt_standard_target);
	unregister_pernet_subsys(&ebt_net_ops);
}

EXPORT_SYMBOL(ebt_register_table);
EXPORT_SYMBOL(ebt_unregister_table);
EXPORT_SYMBOL(ebt_do_table);
module_init(ebtables_init);
module_exit(ebtables_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("ebtables legacy core");
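/*
 * Illustrative sketch (not part of ebtables.c): how a table module consumes
 * the API exported above, modeled on the in-tree ebtable_filter module.  The
 * "demo" identifiers are invented for this example; a real table would pick
 * its own name, hooks and hook priority.
 */
#include <linux/module.h>
#include <linux/netfilter_bridge/ebtables.h>
#include <uapi/linux/netfilter_bridge.h>

#define DEMO_VALID_HOOKS (1 << NF_BR_LOCAL_IN)

/* one empty base chain with an ACCEPT policy */
static struct ebt_entries demo_initial_chain = {
	.name	= "INPUT",
	.policy	= EBT_ACCEPT,
};

static struct ebt_replace_kernel demo_initial_table = {
	.name		= "demo",
	.valid_hooks	= DEMO_VALID_HOOKS,
	.entries_size	= sizeof(struct ebt_entries),
	.hook_entry	= {
		[NF_BR_LOCAL_IN] = &demo_initial_chain,
	},
	.entries	= (char *)&demo_initial_chain,
};

static const struct ebt_table demo_table = {
	.name		= "demo",
	.table		= &demo_initial_table,
	.valid_hooks	= DEMO_VALID_HOOKS,
	.me		= THIS_MODULE,
};

/* one nf_hook_ops per bit set in valid_hooks; ebt_register_table() copies
 * these and points .priv at the per-netns table */
static const struct nf_hook_ops demo_ops = {
	.hook		= ebt_do_table,
	.pf		= NFPROTO_BRIDGE,
	.hooknum	= NF_BR_LOCAL_IN,
	.priority	= NF_BR_PRI_FILTER_BRIDGED,
};

static int demo_table_init(struct net *net)
{
	return ebt_register_table(net, &demo_table, &demo_ops);
}

static void __net_exit demo_net_pre_exit(struct net *net)
{
	ebt_unregister_table_pre_exit(net, "demo");
}

static void __net_exit demo_net_exit(struct net *net)
{
	ebt_unregister_table(net, "demo");
}

static struct pernet_operations demo_net_ops = {
	.pre_exit	= demo_net_pre_exit,
	.exit		= demo_net_exit,
};

static int __init demo_init(void)
{
	int ret;

	/* the template lets find_inlist_lock_noload() create the table on
	 * demand the first time it is looked up in a network namespace */
	ret = ebt_register_template(&demo_table, demo_table_init);
	if (ret)
		return ret;

	ret = register_pernet_subsys(&demo_net_ops);
	if (ret)
		ebt_unregister_template(&demo_table);
	return ret;
}

static void __exit demo_fini(void)
{
	unregister_pernet_subsys(&demo_net_ops);
	ebt_unregister_template(&demo_table);
}

module_init(demo_init);
module_exit(demo_fini);
MODULE_LICENSE("GPL");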
/* Key type used to cache DNS lookups made by the kernel
 *
 * See Documentation/networking/dns_resolver.rst
 *
 * Copyright (c) 2007 Igor Mammedov
 * Author(s): Igor Mammedov (niallain@gmail.com)
 *            Steve French (sfrench@us.ibm.com)
 *            Wang Lei (wang840925@gmail.com)
 *            David Howells (dhowells@redhat.com)
 *
 * This library is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/keyctl.h>
#include <linux/err.h>
#include <linux/seq_file.h>
#include <linux/dns_resolver.h>
#include <keys/dns_resolver-type.h>
#include <keys/user-type.h>
#include "internal.h"

MODULE_DESCRIPTION("DNS Resolver");
MODULE_AUTHOR("Wang Lei");
MODULE_LICENSE("GPL");

unsigned int dns_resolver_debug;
module_param_named(debug, dns_resolver_debug, uint, 0644);
MODULE_PARM_DESC(debug, "DNS Resolver debugging mask");

const struct cred *dns_resolver_cache;

#define DNS_ERRORNO_OPTION "dnserror"

/*
 * Preparse instantiation data for a dns_resolver key.
 *
 * For normal hostname lookups, the data must be a NUL-terminated string, with
 * the NUL char accounted in datalen.
 *
 * If the data contains '#' characters, then we take the clause after each
 * one to be an option of the form 'key=value'. The actual data of interest is
 * the string leading up to the first '#'. 
For instance: * * "ip1,ip2,...#foo=bar" * * For server list requests, the data must begin with a NUL char and be * followed by a byte indicating the version of the data format. Version 1 * looks something like (note this is packed): * * u8 Non-string marker (ie. 0) * u8 Content (DNS_PAYLOAD_IS_*) * u8 Version (e.g. 1) * u8 Source of server list * u8 Lookup status of server list * u8 Number of servers * foreach-server { * __le16 Name length * __le16 Priority (as per SRV record, low first) * __le16 Weight (as per SRV record, higher first) * __le16 Port * u8 Source of address list * u8 Lookup status of address list * u8 Protocol (DNS_SERVER_PROTOCOL_*) * u8 Number of addresses * char[] Name (not NUL-terminated) * foreach-address { * u8 Family (DNS_ADDRESS_IS_*) * union { * u8[4] ipv4_addr * u8[16] ipv6_addr * } * } * } * */ static int dns_resolver_preparse(struct key_preparsed_payload *prep) { struct user_key_payload *upayload; unsigned long derrno; int ret; int datalen = prep->datalen, result_len = 0; const char *data = prep->data, *end, *opt; if (datalen <= 1 || !data) return -EINVAL; if (data[0] == 0) { const struct dns_server_list_v1_header *v1; /* It may be a server list. */ if (datalen < sizeof(*v1)) return -EINVAL; v1 = (const struct dns_server_list_v1_header *)data; kenter("[%u,%u],%u", v1->hdr.content, v1->hdr.version, datalen); if (v1->hdr.content != DNS_PAYLOAD_IS_SERVER_LIST) { pr_warn_ratelimited( "dns_resolver: Unsupported content type (%u)\n", v1->hdr.content); return -EINVAL; } if (v1->hdr.version != 1) { pr_warn_ratelimited( "dns_resolver: Unsupported server list version (%u)\n", v1->hdr.version); return -EINVAL; } if ((v1->status != DNS_LOOKUP_GOOD && v1->status != DNS_LOOKUP_GOOD_WITH_BAD)) { if (prep->expiry == TIME64_MAX) prep->expiry = ktime_get_real_seconds() + 1; } result_len = datalen; goto store_result; } kenter("'%*.*s',%u", datalen, datalen, data, datalen); if (!data || data[datalen - 1] != '\0') return -EINVAL; datalen--; /* deal with any options embedded in the data */ end = data + datalen; opt = memchr(data, '#', datalen); if (!opt) { /* no options: the entire data is the result */ kdebug("no options"); result_len = datalen; } else { const char *next_opt; result_len = opt - data; opt++; kdebug("options: '%s'", opt); do { int opt_len, opt_nlen; const char *eq; char optval[128]; next_opt = memchr(opt, '#', end - opt) ?: end; opt_len = next_opt - opt; if (opt_len <= 0 || opt_len > sizeof(optval)) { pr_warn_ratelimited("Invalid option length (%d) for dns_resolver key\n", opt_len); return -EINVAL; } eq = memchr(opt, '=', opt_len); if (eq) { opt_nlen = eq - opt; eq++; memcpy(optval, eq, next_opt - eq); optval[next_opt - eq] = '\0'; } else { opt_nlen = opt_len; optval[0] = '\0'; } kdebug("option '%*.*s' val '%s'", opt_nlen, opt_nlen, opt, optval); /* see if it's an error number representing a DNS error * that's to be recorded as the result in this key */ if (opt_nlen == sizeof(DNS_ERRORNO_OPTION) - 1 && memcmp(opt, DNS_ERRORNO_OPTION, opt_nlen) == 0) { kdebug("dns error number option"); ret = kstrtoul(optval, 10, &derrno); if (ret < 0) goto bad_option_value; if (derrno < 1 || derrno > 511) goto bad_option_value; kdebug("dns error no. 
= %lu", derrno); prep->payload.data[dns_key_error] = ERR_PTR(-derrno); continue; } bad_option_value: pr_warn_ratelimited("Option '%*.*s' to dns_resolver key: bad/missing value\n", opt_nlen, opt_nlen, opt); return -EINVAL; } while (opt = next_opt + 1, opt < end); } /* don't cache the result if we're caching an error saying there's no * result */ if (prep->payload.data[dns_key_error]) { kleave(" = 0 [h_error %ld]", PTR_ERR(prep->payload.data[dns_key_error])); return 0; } store_result: kdebug("store result"); prep->quotalen = result_len; upayload = kmalloc(sizeof(*upayload) + result_len + 1, GFP_KERNEL); if (!upayload) { kleave(" = -ENOMEM"); return -ENOMEM; } upayload->datalen = result_len; memcpy(upayload->data, data, result_len); upayload->data[result_len] = '\0'; prep->payload.data[dns_key_data] = upayload; kleave(" = 0"); return 0; } /* * Clean up the preparse data */ static void dns_resolver_free_preparse(struct key_preparsed_payload *prep) { pr_devel("==>%s()\n", __func__); kfree(prep->payload.data[dns_key_data]); } /* * The description is of the form "[<type>:]<domain_name>" * * The domain name may be a simple name or an absolute domain name (which * should end with a period). The domain name is case-independent. */ static bool dns_resolver_cmp(const struct key *key, const struct key_match_data *match_data) { int slen, dlen, ret = 0; const char *src = key->description, *dsp = match_data->raw_data; kenter("%s,%s", src, dsp); if (!src || !dsp) goto no_match; if (strcasecmp(src, dsp) == 0) goto matched; slen = strlen(src); dlen = strlen(dsp); if (slen <= 0 || dlen <= 0) goto no_match; if (src[slen - 1] == '.') slen--; if (dsp[dlen - 1] == '.') dlen--; if (slen != dlen || strncasecmp(src, dsp, slen) != 0) goto no_match; matched: ret = 1; no_match: kleave(" = %d", ret); return ret; } /* * Preparse the match criterion. */ static int dns_resolver_match_preparse(struct key_match_data *match_data) { match_data->lookup_type = KEYRING_SEARCH_LOOKUP_ITERATE; match_data->cmp = dns_resolver_cmp; return 0; } /* * Describe a DNS key */ static void dns_resolver_describe(const struct key *key, struct seq_file *m) { seq_puts(m, key->description); if (key_is_positive(key)) { int err = PTR_ERR(key->payload.data[dns_key_error]); if (err) seq_printf(m, ": %d", err); else seq_printf(m, ": %u", key->datalen); } } /* * read the DNS data * - the key's semaphore is read-locked */ static long dns_resolver_read(const struct key *key, char *buffer, size_t buflen) { int err = PTR_ERR(key->payload.data[dns_key_error]); if (err) return err; return user_read(key, buffer, buflen); } struct key_type key_type_dns_resolver = { .name = "dns_resolver", .flags = KEY_TYPE_NET_DOMAIN | KEY_TYPE_INSTANT_REAP, .preparse = dns_resolver_preparse, .free_preparse = dns_resolver_free_preparse, .instantiate = generic_key_instantiate, .match_preparse = dns_resolver_match_preparse, .revoke = user_revoke, .destroy = user_destroy, .describe = dns_resolver_describe, .read = dns_resolver_read, }; static int __init init_dns_resolver(void) { struct cred *cred; struct key *keyring; int ret; /* create an override credential set with a special thread keyring in * which DNS requests are cached * * this is used to prevent malicious redirections from being installed * with add_key(). 
*/ cred = prepare_kernel_cred(&init_task); if (!cred) return -ENOMEM; keyring = keyring_alloc(".dns_resolver", GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, cred, (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_VIEW | KEY_USR_READ, KEY_ALLOC_NOT_IN_QUOTA, NULL, NULL); if (IS_ERR(keyring)) { ret = PTR_ERR(keyring); goto failed_put_cred; } ret = register_key_type(&key_type_dns_resolver); if (ret < 0) goto failed_put_key; /* instruct request_key() to use this special keyring as a cache for * the results it looks up */ set_bit(KEY_FLAG_ROOT_CAN_CLEAR, &keyring->flags); cred->thread_keyring = keyring; cred->jit_keyring = KEY_REQKEY_DEFL_THREAD_KEYRING; dns_resolver_cache = cred; kdebug("DNS resolver keyring: %d\n", key_serial(keyring)); return 0; failed_put_key: key_put(keyring); failed_put_cred: put_cred(cred); return ret; } static void __exit exit_dns_resolver(void) { key_revoke(dns_resolver_cache->thread_keyring); unregister_key_type(&key_type_dns_resolver); put_cred(dns_resolver_cache); } module_init(init_dns_resolver) module_exit(exit_dns_resolver) MODULE_LICENSE("GPL");
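/*
 * A userspace sketch of the payload grammar documented above for
 * dns_resolver_preparse(): the result string may be followed by
 * '#'-separated options, and the terminating NUL is counted in the payload
 * length. This uses the keyutils library (assumed available); whether a
 * given caller is permitted to create keys of this type depends on local
 * key-management policy.
 */
#include <keyutils.h>
#include <stdio.h>

int main(void)
{
	/* Result string; append "#key=value" clauses to pass options. */
	static const char payload[] = "203.0.113.1,203.0.113.2";
	key_serial_t key;

	key = add_key("dns_resolver", "example.com", payload, sizeof(payload),
		      KEY_SPEC_SESSION_KEYRING);
	if (key == -1) {
		perror("add_key");
		return 1;
	}
	printf("dns_resolver key serial: %d\n", key);
	return 0;
}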
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_PAGEMAP_H #define _LINUX_PAGEMAP_H /* * Copyright 1995 Linus Torvalds */ #include <linux/mm.h> #include <linux/fs.h> #include <linux/list.h> #include <linux/highmem.h> #include <linux/compiler.h> #include <linux/uaccess.h> #include <linux/gfp.h> #include <linux/bitops.h> #include <linux/hardirq.h> /* for in_interrupt() */ #include <linux/hugetlb_inline.h> struct folio_batch; unsigned long invalidate_mapping_pages(struct address_space *mapping, pgoff_t start, pgoff_t end); static inline void invalidate_remote_inode(struct inode *inode) { if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) invalidate_mapping_pages(inode->i_mapping, 0, -1); } int invalidate_inode_pages2(struct address_space *mapping); int invalidate_inode_pages2_range(struct address_space *mapping, pgoff_t start, pgoff_t end); int kiocb_invalidate_pages(struct kiocb *iocb, size_t count); void kiocb_invalidate_post_direct_write(struct kiocb *iocb, size_t count); int filemap_invalidate_pages(struct address_space *mapping, loff_t pos, loff_t end, bool nowait); int write_inode_now(struct inode *, int sync); int filemap_fdatawrite(struct address_space *); int filemap_flush(struct address_space *); int filemap_fdatawait_keep_errors(struct address_space *mapping); int filemap_fdatawait_range(struct address_space *, loff_t lstart, loff_t lend); int filemap_fdatawait_range_keep_errors(struct address_space *mapping, loff_t start_byte, loff_t end_byte); int filemap_invalidate_inode(struct inode *inode, bool flush, loff_t start, loff_t end); static inline int filemap_fdatawait(struct address_space *mapping) { return filemap_fdatawait_range(mapping, 0, LLONG_MAX); } bool filemap_range_has_page(struct address_space *, loff_t lstart, loff_t lend); int filemap_write_and_wait_range(struct address_space *mapping, loff_t lstart, loff_t lend); int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start, loff_t end, int sync_mode); int filemap_fdatawrite_range(struct address_space *mapping, loff_t start, loff_t end); int filemap_check_errors(struct address_space *mapping); void __filemap_set_wb_err(struct address_space *mapping, int err); int filemap_fdatawrite_wbc(struct address_space *mapping, struct writeback_control *wbc); int kiocb_write_and_wait(struct kiocb *iocb, size_t count); static inline int filemap_write_and_wait(struct address_space *mapping) { return filemap_write_and_wait_range(mapping, 0, LLONG_MAX); } /** * filemap_set_wb_err - set a writeback error on an address_space * @mapping: mapping in which to set writeback error * @err: error to be set in mapping * * When writeback fails in some way, we must record that error so that * userspace can be informed when fsync and the like are called. We endeavor * to report errors on any file that was open at the time of the error. Some * internal callers also need to know when writeback errors have occurred. * * When a writeback error occurs, most filesystems will want to call * filemap_set_wb_err to record the error in the mapping so that it will be * automatically reported whenever fsync is called on the file.
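*
* Writeback and fsync paths typically pair this with the sampling helpers
* defined below; a rough sketch (the myfs_writepages() call is hypothetical
* and stands in for whatever kicks off writeback):
*
*	errseq_t since = filemap_sample_wb_err(mapping);
*	int err;
*
*	myfs_writepages(mapping);
*	err = filemap_check_wb_err(mapping, since);
*
* Here err is 0 if no new error was recorded after the sample was taken,
* otherwise it is the most recently recorded writeback error.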
*/ static inline void filemap_set_wb_err(struct address_space *mapping, int err) { /* Fastpath for common case of no error */ if (unlikely(err)) __filemap_set_wb_err(mapping, err); } /** * filemap_check_wb_err - has an error occurred since the mark was sampled? * @mapping: mapping to check for writeback errors * @since: previously-sampled errseq_t * * Grab the errseq_t value from the mapping, and see if it has changed "since" * the given value was sampled. * * If it has then report the latest error set, otherwise return 0. */ static inline int filemap_check_wb_err(struct address_space *mapping, errseq_t since) { return errseq_check(&mapping->wb_err, since); } /** * filemap_sample_wb_err - sample the current errseq_t to test for later errors * @mapping: mapping to be sampled * * Writeback errors are always reported relative to a particular sample point * in the past. This function provides those sample points. */ static inline errseq_t filemap_sample_wb_err(struct address_space *mapping) { return errseq_sample(&mapping->wb_err); } /** * file_sample_sb_err - sample the current errseq_t to test for later errors * @file: file pointer to be sampled * * Grab the most current superblock-level errseq_t value for the given * struct file. */ static inline errseq_t file_sample_sb_err(struct file *file) { return errseq_sample(&file->f_path.dentry->d_sb->s_wb_err); } /* * Flush file data before changing attributes. Caller must hold any locks * required to prevent further writes to this file until we're done setting * flags. */ static inline int inode_drain_writes(struct inode *inode) { inode_dio_wait(inode); return filemap_write_and_wait(inode->i_mapping); } static inline bool mapping_empty(struct address_space *mapping) { return xa_empty(&mapping->i_pages); } /* * mapping_shrinkable - test if page cache state allows inode reclaim * @mapping: the page cache mapping * * This checks the mapping's cache state for the purpose of inode * reclaim and LRU management. * * The caller is expected to hold the i_lock, but is not required to * hold the i_pages lock, which usually protects cache state. That's * because the i_lock and the list_lru lock that protect the inode and * its LRU state don't nest inside the irq-safe i_pages lock. * * Cache deletions are performed under the i_lock, which ensures that * when an inode goes empty, it will reliably get queued on the LRU. * * Cache additions do not acquire the i_lock and may race with this * check, in which case we'll report the inode as shrinkable when it * has cache pages. This is okay: the shrinker also checks the * refcount and the referenced bit, which will be elevated or set in * the process of adding new cache pages to an inode. */ static inline bool mapping_shrinkable(struct address_space *mapping) { void *head; /* * On highmem systems, there could be lowmem pressure from the * inodes before there is highmem pressure from the page * cache. Make inodes shrinkable regardless of cache state. */ if (IS_ENABLED(CONFIG_HIGHMEM)) return true; /* Cache completely empty? Shrink away. */ head = rcu_access_pointer(mapping->i_pages.xa_head); if (!head) return true; /* * The xarray stores single offset-0 entries directly in the * head pointer, which allows non-resident page cache entries * to escape the shadow shrinker's list of xarray nodes. The * inode shrinker needs to pick them up under memory pressure. */ if (!xa_is_node(head) && xa_is_value(head)) return true; return false; } /* * Bits in mapping->flags.
*/ enum mapping_flags { AS_EIO = 0, /* IO error on async write */ AS_ENOSPC = 1, /* ENOSPC on async write */ AS_MM_ALL_LOCKS = 2, /* under mm_take_all_locks() */ AS_UNEVICTABLE = 3, /* e.g., ramdisk, SHM_LOCK */ AS_EXITING = 4, /* final truncate in progress */ /* writeback related tags are not used */ AS_NO_WRITEBACK_TAGS = 5, AS_RELEASE_ALWAYS = 6, /* Call ->release_folio(), even if no private data */ AS_STABLE_WRITES = 7, /* must wait for writeback before modifying folio contents */ AS_INACCESSIBLE = 8, /* Do not attempt direct R/W access to the mapping */ /* Bits 16-25 are used for FOLIO_ORDER */ AS_FOLIO_ORDER_BITS = 5, AS_FOLIO_ORDER_MIN = 16, AS_FOLIO_ORDER_MAX = AS_FOLIO_ORDER_MIN + AS_FOLIO_ORDER_BITS, }; #define AS_FOLIO_ORDER_BITS_MASK ((1u << AS_FOLIO_ORDER_BITS) - 1) #define AS_FOLIO_ORDER_MIN_MASK (AS_FOLIO_ORDER_BITS_MASK << AS_FOLIO_ORDER_MIN) #define AS_FOLIO_ORDER_MAX_MASK (AS_FOLIO_ORDER_BITS_MASK << AS_FOLIO_ORDER_MAX) #define AS_FOLIO_ORDER_MASK (AS_FOLIO_ORDER_MIN_MASK | AS_FOLIO_ORDER_MAX_MASK) /** * mapping_set_error - record a writeback error in the address_space * @mapping: the mapping in which an error should be set * @error: the error to set in the mapping * * When writeback fails in some way, we must record that error so that * userspace can be informed when fsync and the like are called. We endeavor * to report errors on any file that was open at the time of the error. Some * internal callers also need to know when writeback errors have occurred. * * When a writeback error occurs, most filesystems will want to call * mapping_set_error to record the error in the mapping so that it can be * reported when the application calls fsync(2). */ static inline void mapping_set_error(struct address_space *mapping, int error) { if (likely(!error)) return; /* Record in wb_err for checkers using errseq_t based tracking */ __filemap_set_wb_err(mapping, error); /* Record it in superblock */ if (mapping->host) errseq_set(&mapping->host->i_sb->s_wb_err, error); /* Record it in flags for now, for legacy callers */ if (error == -ENOSPC) set_bit(AS_ENOSPC, &mapping->flags); else set_bit(AS_EIO, &mapping->flags); } static inline void mapping_set_unevictable(struct address_space *mapping) { set_bit(AS_UNEVICTABLE, &mapping->flags); } static inline void mapping_clear_unevictable(struct address_space *mapping) { clear_bit(AS_UNEVICTABLE, &mapping->flags); } static inline bool mapping_unevictable(struct address_space *mapping) { return mapping && test_bit(AS_UNEVICTABLE, &mapping->flags); } static inline void mapping_set_exiting(struct address_space *mapping) { set_bit(AS_EXITING, &mapping->flags); } static inline int mapping_exiting(struct address_space *mapping) { return test_bit(AS_EXITING, &mapping->flags); } static inline void mapping_set_no_writeback_tags(struct address_space *mapping) { set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags); } static inline int mapping_use_writeback_tags(struct address_space *mapping) { return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags); } static inline bool mapping_release_always(const struct address_space *mapping) { return test_bit(AS_RELEASE_ALWAYS, &mapping->flags); } static inline void mapping_set_release_always(struct address_space *mapping) { set_bit(AS_RELEASE_ALWAYS, &mapping->flags); } static inline void mapping_clear_release_always(struct address_space *mapping) { clear_bit(AS_RELEASE_ALWAYS, &mapping->flags); } static inline bool mapping_stable_writes(const struct address_space *mapping) { return test_bit(AS_STABLE_WRITES, 
&mapping->flags); } static inline void mapping_set_stable_writes(struct address_space *mapping) { set_bit(AS_STABLE_WRITES, &mapping->flags); } static inline void mapping_clear_stable_writes(struct address_space *mapping) { clear_bit(AS_STABLE_WRITES, &mapping->flags); } static inline void mapping_set_inaccessible(struct address_space *mapping) { /* * It's expected inaccessible mappings are also unevictable. Compaction * migrate scanner (isolate_migratepages_block()) relies on this to * reduce page locking. */ set_bit(AS_UNEVICTABLE, &mapping->flags); set_bit(AS_INACCESSIBLE, &mapping->flags); } static inline bool mapping_inaccessible(struct address_space *mapping) { return test_bit(AS_INACCESSIBLE, &mapping->flags); } static inline gfp_t mapping_gfp_mask(struct address_space * mapping) { return mapping->gfp_mask; } /* Restricts the given gfp_mask to what the mapping allows. */ static inline gfp_t mapping_gfp_constraint(struct address_space *mapping, gfp_t gfp_mask) { return mapping_gfp_mask(mapping) & gfp_mask; } /* * This is non-atomic. Only to be used before the mapping is activated. * Probably needs a barrier... */ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask) { m->gfp_mask = mask; } /* * There are some parts of the kernel which assume that PMD entries * are exactly HPAGE_PMD_ORDER. Those should be fixed, but until then, * limit the maximum allocation order to PMD size. I'm not aware of any * assumptions about maximum order if THP are disabled, but 8 seems like * a good order (that's 1MB if you're using 4kB pages) */ #ifdef CONFIG_TRANSPARENT_HUGEPAGE #define PREFERRED_MAX_PAGECACHE_ORDER HPAGE_PMD_ORDER #else #define PREFERRED_MAX_PAGECACHE_ORDER 8 #endif /* * xas_split_alloc() does not support arbitrary orders. This implies no * 512MB THP on ARM64 with 64KB base page size. */ #define MAX_XAS_ORDER (XA_CHUNK_SHIFT * 2 - 1) #define MAX_PAGECACHE_ORDER min(MAX_XAS_ORDER, PREFERRED_MAX_PAGECACHE_ORDER) /* * mapping_max_folio_size_supported() - Check the max folio size supported * * The filesystem should call this function at mount time if there is a * requirement on the folio mapping size in the page cache. */ static inline size_t mapping_max_folio_size_supported(void) { if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) return 1U << (PAGE_SHIFT + MAX_PAGECACHE_ORDER); return PAGE_SIZE; } /* * mapping_set_folio_order_range() - Set the orders supported by a file. * @mapping: The address space of the file. * @min: Minimum folio order (between 0-MAX_PAGECACHE_ORDER inclusive). * @max: Maximum folio order (between @min-MAX_PAGECACHE_ORDER inclusive). * * The filesystem should call this function in its inode constructor to * indicate which base size (min) and maximum size (max) of folio the VFS * can use to cache the contents of the file. This should only be used * if the filesystem needs special handling of folio sizes (ie there is * something the core cannot know). * Do not tune it based on, eg, i_size. * * Context: This should not be called while the inode is active as it * is non-atomic. 
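*
* As a rough illustration (the "sbi" superblock-info fields are hypothetical),
* a filesystem whose block size exceeds PAGE_SIZE might set a matching minimum
* order from its inode initialisation path:
*
*	unsigned int min_order = sbi->blocksize_bits - PAGE_SHIFT;
*
*	mapping_set_folio_order_range(inode->i_mapping, min_order,
*				      MAX_PAGECACHE_ORDER);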
*/ static inline void mapping_set_folio_order_range(struct address_space *mapping, unsigned int min, unsigned int max) { if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) return; if (min > MAX_PAGECACHE_ORDER) min = MAX_PAGECACHE_ORDER; if (max > MAX_PAGECACHE_ORDER) max = MAX_PAGECACHE_ORDER; if (max < min) max = min; mapping->flags = (mapping->flags & ~AS_FOLIO_ORDER_MASK) | (min << AS_FOLIO_ORDER_MIN) | (max << AS_FOLIO_ORDER_MAX); } static inline void mapping_set_folio_min_order(struct address_space *mapping, unsigned int min) { mapping_set_folio_order_range(mapping, min, MAX_PAGECACHE_ORDER); } /** * mapping_set_large_folios() - Indicate the file supports large folios. * @mapping: The address space of the file. * * The filesystem should call this function in its inode constructor to * indicate that the VFS can use large folios to cache the contents of * the file. * * Context: This should not be called while the inode is active as it * is non-atomic. */ static inline void mapping_set_large_folios(struct address_space *mapping) { mapping_set_folio_order_range(mapping, 0, MAX_PAGECACHE_ORDER); } static inline unsigned int mapping_max_folio_order(const struct address_space *mapping) { if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) return 0; return (mapping->flags & AS_FOLIO_ORDER_MAX_MASK) >> AS_FOLIO_ORDER_MAX; } static inline unsigned int mapping_min_folio_order(const struct address_space *mapping) { if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) return 0; return (mapping->flags & AS_FOLIO_ORDER_MIN_MASK) >> AS_FOLIO_ORDER_MIN; } static inline unsigned long mapping_min_folio_nrpages(struct address_space *mapping) { return 1UL << mapping_min_folio_order(mapping); } /** * mapping_align_index() - Align index for this mapping. * @mapping: The address_space. * @index: The page index. * * The index of a folio must be naturally aligned. If you are adding a * new folio to the page cache and need to know what index to give it, * call this function. */ static inline pgoff_t mapping_align_index(struct address_space *mapping, pgoff_t index) { return round_down(index, mapping_min_folio_nrpages(mapping)); } /* * Large folio support currently depends on THP. These dependencies are * being worked on but are not yet fixed. */ static inline bool mapping_large_folio_support(struct address_space *mapping) { /* AS_FOLIO_ORDER is only reasonable for pagecache folios */ VM_WARN_ONCE((unsigned long)mapping & PAGE_MAPPING_ANON, "Anonymous mapping always supports large folio"); return mapping_max_folio_order(mapping) > 0; } /* Return the maximum folio size for this pagecache mapping, in bytes. 
*/ static inline size_t mapping_max_folio_size(const struct address_space *mapping) { return PAGE_SIZE << mapping_max_folio_order(mapping); } static inline int filemap_nr_thps(struct address_space *mapping) { #ifdef CONFIG_READ_ONLY_THP_FOR_FS return atomic_read(&mapping->nr_thps); #else return 0; #endif } static inline void filemap_nr_thps_inc(struct address_space *mapping) { #ifdef CONFIG_READ_ONLY_THP_FOR_FS if (!mapping_large_folio_support(mapping)) atomic_inc(&mapping->nr_thps); #else WARN_ON_ONCE(mapping_large_folio_support(mapping) == 0); #endif } static inline void filemap_nr_thps_dec(struct address_space *mapping) { #ifdef CONFIG_READ_ONLY_THP_FOR_FS if (!mapping_large_folio_support(mapping)) atomic_dec(&mapping->nr_thps); #else WARN_ON_ONCE(mapping_large_folio_support(mapping) == 0); #endif } struct address_space *folio_mapping(struct folio *); struct address_space *swapcache_mapping(struct folio *); /** * folio_file_mapping - Find the mapping this folio belongs to. * @folio: The folio. * * For folios which are in the page cache, return the mapping that this * page belongs to. Folios in the swap cache return the mapping of the * swap file or swap device where the data is stored. This is different * from the mapping returned by folio_mapping(). The only reason to * use it is if, like NFS, you return 0 from ->activate_swapfile. * * Do not call this for folios which aren't in the page cache or swap cache. */ static inline struct address_space *folio_file_mapping(struct folio *folio) { if (unlikely(folio_test_swapcache(folio))) return swapcache_mapping(folio); return folio->mapping; } /** * folio_flush_mapping - Find the file mapping this folio belongs to. * @folio: The folio. * * For folios which are in the page cache, return the mapping that this * page belongs to. Anonymous folios return NULL, even if they're in * the swap cache. Other kinds of folio also return NULL. * * This is ONLY used by architecture cache flushing code. If you aren't * writing cache flushing code, you want either folio_mapping() or * folio_file_mapping(). */ static inline struct address_space *folio_flush_mapping(struct folio *folio) { if (unlikely(folio_test_swapcache(folio))) return NULL; return folio_mapping(folio); } static inline struct address_space *page_file_mapping(struct page *page) { return folio_file_mapping(page_folio(page)); } /** * folio_inode - Get the host inode for this folio. * @folio: The folio. * * For folios which are in the page cache, return the inode that this folio * belongs to. * * Do not call this for folios which aren't in the page cache. */ static inline struct inode *folio_inode(struct folio *folio) { return folio->mapping->host; } /** * folio_attach_private - Attach private data to a folio. * @folio: Folio to attach data to. * @data: Data to attach to folio. * * Attaching private data to a folio increments the page's reference count. * The data must be detached before the folio will be freed. */ static inline void folio_attach_private(struct folio *folio, void *data) { folio_get(folio); folio->private = data; folio_set_private(folio); } /** * folio_change_private - Change private data on a folio. * @folio: Folio to change the data on. * @data: Data to set on the folio. * * Change the private data attached to a folio and return the old * data. The page must previously have had data attached and the data * must be detached before the folio will be freed. * * Return: Data that was previously attached to the folio. 
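*
* Together with folio_attach_private() and folio_detach_private() this forms
* the usual lifecycle for per-folio filesystem state; a rough sketch:
*
*	folio_attach_private(folio, state);
*	old = folio_change_private(folio, new_state);
*	state = folio_detach_private(folio);
*
* The attach takes an extra reference on the folio, the change leaves the
* reference count untouched, and the detach drops the extra reference again.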
*/ static inline void *folio_change_private(struct folio *folio, void *data) { void *old = folio_get_private(folio); folio->private = data; return old; } /** * folio_detach_private - Detach private data from a folio. * @folio: Folio to detach data from. * * Removes the data that was previously attached to the folio and decrements * the refcount on the page. * * Return: Data that was attached to the folio. */ static inline void *folio_detach_private(struct folio *folio) { void *data = folio_get_private(folio); if (!folio_test_private(folio)) return NULL; folio_clear_private(folio); folio->private = NULL; folio_put(folio); return data; } static inline void attach_page_private(struct page *page, void *data) { folio_attach_private(page_folio(page), data); } static inline void *detach_page_private(struct page *page) { return folio_detach_private(page_folio(page)); } #ifdef CONFIG_NUMA struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order); #else static inline struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order) { return folio_alloc_noprof(gfp, order); } #endif #define filemap_alloc_folio(...) \ alloc_hooks(filemap_alloc_folio_noprof(__VA_ARGS__)) static inline struct page *__page_cache_alloc(gfp_t gfp) { return &filemap_alloc_folio(gfp, 0)->page; } static inline gfp_t readahead_gfp_mask(struct address_space *x) { return mapping_gfp_mask(x) | __GFP_NORETRY | __GFP_NOWARN; } typedef int filler_t(struct file *, struct folio *); pgoff_t page_cache_next_miss(struct address_space *mapping, pgoff_t index, unsigned long max_scan); pgoff_t page_cache_prev_miss(struct address_space *mapping, pgoff_t index, unsigned long max_scan); /** * typedef fgf_t - Flags for getting folios from the page cache. * * Most users of the page cache will not need to use these flags; * there are convenience functions such as filemap_get_folio() and * filemap_lock_folio(). For users which need more control over exactly * what is done with the folios, these flags to __filemap_get_folio() * are available. * * * %FGP_ACCESSED - The folio will be marked accessed. * * %FGP_LOCK - The folio is returned locked. * * %FGP_CREAT - If no folio is present then a new folio is allocated, * added to the page cache and the VM's LRU list. The folio is * returned locked. * * %FGP_FOR_MMAP - The caller wants to do its own locking dance if the * folio is already in cache. If the folio was allocated, unlock it * before returning so the caller can do the same dance. * * %FGP_WRITE - The folio will be written to by the caller. * * %FGP_NOFS - __GFP_FS will get cleared in gfp. * * %FGP_NOWAIT - Don't block on the folio lock. * * %FGP_STABLE - Wait for the folio to be stable (finished writeback) * * %FGP_DONTCACHE - Uncached buffered IO * * %FGP_WRITEBEGIN - The flags to use in a filesystem write_begin() * implementation. 
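*
* A buffered-write path might combine these with fgf_set_order() (defined
* below); a rough sketch:
*
*	struct folio *folio;
*
*	folio = __filemap_get_folio(mapping, pos >> PAGE_SHIFT,
*				    FGP_WRITEBEGIN | fgf_set_order(len),
*				    mapping_gfp_mask(mapping));
*	if (IS_ERR(folio))
*		return PTR_ERR(folio);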
*/ typedef unsigned int __bitwise fgf_t; #define FGP_ACCESSED ((__force fgf_t)0x00000001) #define FGP_LOCK ((__force fgf_t)0x00000002) #define FGP_CREAT ((__force fgf_t)0x00000004) #define FGP_WRITE ((__force fgf_t)0x00000008) #define FGP_NOFS ((__force fgf_t)0x00000010) #define FGP_NOWAIT ((__force fgf_t)0x00000020) #define FGP_FOR_MMAP ((__force fgf_t)0x00000040) #define FGP_STABLE ((__force fgf_t)0x00000080) #define FGP_DONTCACHE ((__force fgf_t)0x00000100) #define FGF_GET_ORDER(fgf) (((__force unsigned)fgf) >> 26) /* top 6 bits */ #define FGP_WRITEBEGIN (FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE) static inline unsigned int filemap_get_order(size_t size) { unsigned int shift = ilog2(size); if (shift <= PAGE_SHIFT) return 0; return shift - PAGE_SHIFT; } /** * fgf_set_order - Encode a length in the fgf_t flags. * @size: The suggested size of the folio to create. * * The caller of __filemap_get_folio() can use this to suggest a preferred * size for the folio that is created. If there is already a folio at * the index, it will be returned, no matter what its size. If a folio * is freshly created, it may be of a different size than requested * due to alignment constraints, memory pressure, or the presence of * other folios at nearby indices. */ static inline fgf_t fgf_set_order(size_t size) { unsigned int order = filemap_get_order(size); if (!order) return 0; return (__force fgf_t)(order << 26); } void *filemap_get_entry(struct address_space *mapping, pgoff_t index); struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index, fgf_t fgp_flags, gfp_t gfp); struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index, fgf_t fgp_flags, gfp_t gfp); /** * filemap_get_folio - Find and get a folio. * @mapping: The address_space to search. * @index: The page index. * * Looks up the page cache entry at @mapping & @index. If a folio is * present, it is returned with an increased refcount. * * Return: A folio or ERR_PTR(-ENOENT) if there is no folio in the cache for * this index. Will not return a shadow, swap or DAX entry. */ static inline struct folio *filemap_get_folio(struct address_space *mapping, pgoff_t index) { return __filemap_get_folio(mapping, index, 0, 0); } /** * filemap_lock_folio - Find and lock a folio. * @mapping: The address_space to search. * @index: The page index. * * Looks up the page cache entry at @mapping & @index. If a folio is * present, it is returned locked with an increased refcount. * * Context: May sleep. * Return: A folio or ERR_PTR(-ENOENT) if there is no folio in the cache for * this index. Will not return a shadow, swap or DAX entry. */ static inline struct folio *filemap_lock_folio(struct address_space *mapping, pgoff_t index) { return __filemap_get_folio(mapping, index, FGP_LOCK, 0); } /** * filemap_grab_folio - grab a folio from the page cache * @mapping: The address space to search * @index: The page index * * Looks up the page cache entry at @mapping & @index. If no folio is found, * a new folio is created. The folio is locked, marked as accessed, and * returned. * * Return: A found or created folio. ERR_PTR(-ENOMEM) if no folio is found * and failed to create a folio. 
*/ static inline struct folio *filemap_grab_folio(struct address_space *mapping, pgoff_t index) { return __filemap_get_folio(mapping, index, FGP_LOCK | FGP_ACCESSED | FGP_CREAT, mapping_gfp_mask(mapping)); } /** * find_get_page - find and get a page reference * @mapping: the address_space to search * @offset: the page index * * Looks up the page cache slot at @mapping & @offset. If there is a * page cache page, it is returned with an increased refcount. * * Otherwise, %NULL is returned. */ static inline struct page *find_get_page(struct address_space *mapping, pgoff_t offset) { return pagecache_get_page(mapping, offset, 0, 0); } static inline struct page *find_get_page_flags(struct address_space *mapping, pgoff_t offset, fgf_t fgp_flags) { return pagecache_get_page(mapping, offset, fgp_flags, 0); } /** * find_lock_page - locate, pin and lock a pagecache page * @mapping: the address_space to search * @index: the page index * * Looks up the page cache entry at @mapping & @index. If there is a * page cache page, it is returned locked and with an increased * refcount. * * Context: May sleep. * Return: A struct page or %NULL if there is no page in the cache for this * index. */ static inline struct page *find_lock_page(struct address_space *mapping, pgoff_t index) { return pagecache_get_page(mapping, index, FGP_LOCK, 0); } /** * find_or_create_page - locate or add a pagecache page * @mapping: the page's address_space * @index: the page's index into the mapping * @gfp_mask: page allocation mode * * Looks up the page cache slot at @mapping & @offset. If there is a * page cache page, it is returned locked and with an increased * refcount. * * If the page is not present, a new page is allocated using @gfp_mask * and added to the page cache and the VM's LRU list. The page is * returned locked and with an increased refcount. * * On memory exhaustion, %NULL is returned. * * find_or_create_page() may sleep, even if @gfp_flags specifies an * atomic allocation! */ static inline struct page *find_or_create_page(struct address_space *mapping, pgoff_t index, gfp_t gfp_mask) { return pagecache_get_page(mapping, index, FGP_LOCK|FGP_ACCESSED|FGP_CREAT, gfp_mask); } /** * grab_cache_page_nowait - returns locked page at given index in given cache * @mapping: target address_space * @index: the page index * * Same as grab_cache_page(), but do not wait if the page is unavailable. * This is intended for speculative data generators, where the data can * be regenerated if the page couldn't be grabbed. This routine should * be safe to call while holding the lock for another page. * * Clear __GFP_FS when allocating the page to avoid recursion into the fs * and deadlock against the caller's locked page. */ static inline struct page *grab_cache_page_nowait(struct address_space *mapping, pgoff_t index) { return pagecache_get_page(mapping, index, FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT, mapping_gfp_mask(mapping)); } extern pgoff_t __folio_swap_cache_index(struct folio *folio); /** * folio_index - File index of a folio. * @folio: The folio. * * For a folio which is either in the page cache or the swap cache, * return its index within the address_space it belongs to. If you know * the page is definitely in the page cache, you can look at the folio's * index directly. * * Return: The index (offset in units of pages) of a folio in its file. 
*/ static inline pgoff_t folio_index(struct folio *folio) { if (unlikely(folio_test_swapcache(folio))) return __folio_swap_cache_index(folio); return folio->index; } /** * folio_next_index - Get the index of the next folio. * @folio: The current folio. * * Return: The index of the folio which follows this folio in the file. */ static inline pgoff_t folio_next_index(struct folio *folio) { return folio->index + folio_nr_pages(folio); } /** * folio_file_page - The page for a particular index. * @folio: The folio which contains this index. * @index: The index we want to look up. * * Sometimes after looking up a folio in the page cache, we need to * obtain the specific page for an index (eg a page fault). * * Return: The page containing the file data for this index. */ static inline struct page *folio_file_page(struct folio *folio, pgoff_t index) { return folio_page(folio, index & (folio_nr_pages(folio) - 1)); } /** * folio_contains - Does this folio contain this index? * @folio: The folio. * @index: The page index within the file. * * Context: The caller should have the page locked in order to prevent * (eg) shmem from moving the page between the page cache and swap cache * and changing its index in the middle of the operation. * Return: true or false. */ static inline bool folio_contains(struct folio *folio, pgoff_t index) { return index - folio_index(folio) < folio_nr_pages(folio); } /* * Given the page we found in the page cache, return the page corresponding * to this index in the file */ static inline struct page *find_subpage(struct page *head, pgoff_t index) { /* HugeTLBfs wants the head page regardless */ if (PageHuge(head)) return head; return head + (index & (thp_nr_pages(head) - 1)); } unsigned filemap_get_folios(struct address_space *mapping, pgoff_t *start, pgoff_t end, struct folio_batch *fbatch); unsigned filemap_get_folios_contig(struct address_space *mapping, pgoff_t *start, pgoff_t end, struct folio_batch *fbatch); unsigned filemap_get_folios_tag(struct address_space *mapping, pgoff_t *start, pgoff_t end, xa_mark_t tag, struct folio_batch *fbatch); struct page *grab_cache_page_write_begin(struct address_space *mapping, pgoff_t index); /* * Returns locked page at given index in given cache, creating it if needed. */ static inline struct page *grab_cache_page(struct address_space *mapping, pgoff_t index) { return find_or_create_page(mapping, index, mapping_gfp_mask(mapping)); } struct folio *read_cache_folio(struct address_space *, pgoff_t index, filler_t *filler, struct file *file); struct folio *mapping_read_folio_gfp(struct address_space *, pgoff_t index, gfp_t flags); struct page *read_cache_page(struct address_space *, pgoff_t index, filler_t *filler, struct file *file); extern struct page * read_cache_page_gfp(struct address_space *mapping, pgoff_t index, gfp_t gfp_mask); static inline struct page *read_mapping_page(struct address_space *mapping, pgoff_t index, struct file *file) { return read_cache_page(mapping, index, NULL, file); } static inline struct folio *read_mapping_folio(struct address_space *mapping, pgoff_t index, struct file *file) { return read_cache_folio(mapping, index, NULL, file); } /** * page_pgoff - Calculate the logical page offset of this page. * @folio: The folio containing this page. * @page: The page which we need the offset of. * * For file pages, this is the offset from the beginning of the file * in units of PAGE_SIZE. For anonymous pages, this is the offset from * the beginning of the anon_vma in units of PAGE_SIZE. 
This will * return nonsense for KSM pages. * * Context: Caller must have a reference on the folio or otherwise * prevent it from being split or freed. * * Return: The offset in units of PAGE_SIZE. */ static inline pgoff_t page_pgoff(const struct folio *folio, const struct page *page) { return folio->index + folio_page_idx(folio, page); } /* * Return byte-offset into filesystem object for page. */ static inline loff_t page_offset(struct page *page) { return ((loff_t)page->index) << PAGE_SHIFT; } /** * folio_pos - Returns the byte position of this folio in its file. * @folio: The folio. */ static inline loff_t folio_pos(struct folio *folio) { return page_offset(&folio->page); } /* * Get the offset in PAGE_SIZE (even for hugetlb folios). */ static inline pgoff_t folio_pgoff(struct folio *folio) { return folio->index; } static inline pgoff_t linear_page_index(struct vm_area_struct *vma, unsigned long address) { pgoff_t pgoff; pgoff = (address - vma->vm_start) >> PAGE_SHIFT; pgoff += vma->vm_pgoff; return pgoff; } struct wait_page_key { struct folio *folio; int bit_nr; int page_match; }; struct wait_page_queue { struct folio *folio; int bit_nr; wait_queue_entry_t wait; }; static inline bool wake_page_match(struct wait_page_queue *wait_page, struct wait_page_key *key) { if (wait_page->folio != key->folio) return false; key->page_match = 1; if (wait_page->bit_nr != key->bit_nr) return false; return true; } void __folio_lock(struct folio *folio); int __folio_lock_killable(struct folio *folio); vm_fault_t __folio_lock_or_retry(struct folio *folio, struct vm_fault *vmf); void unlock_page(struct page *page); void folio_unlock(struct folio *folio); /** * folio_trylock() - Attempt to lock a folio. * @folio: The folio to attempt to lock. * * Sometimes it is undesirable to wait for a folio to be unlocked (eg * when the locks are being taken in the wrong order, or if making * progress through a batch of folios is more important than processing * them in order). Usually folio_lock() is the correct function to call. * * Context: Any context. * Return: Whether the lock was successfully acquired. */ static inline bool folio_trylock(struct folio *folio) { return likely(!test_and_set_bit_lock(PG_locked, folio_flags(folio, 0))); } /* * Return true if the page was successfully locked */ static inline bool trylock_page(struct page *page) { return folio_trylock(page_folio(page)); } /** * folio_lock() - Lock this folio. * @folio: The folio to lock. * * The folio lock protects against many things, probably more than it * should. It is primarily held while a folio is being brought uptodate, * either from its backing file or from swap. It is also held while a * folio is being truncated from its address_space, so holding the lock * is sufficient to keep folio->mapping stable. * * The folio lock is also held while write() is modifying the page to * provide POSIX atomicity guarantees (as long as the write does not * cross a page boundary). Other modifications to the data in the folio * do not hold the folio lock and can race with writes, eg DMA and stores * to mapped pages. * * Context: May sleep. If you need to acquire the locks of two or * more folios, they must be in order of ascending index, if they are * in the same address_space. If they are in different address_spaces, * acquire the lock of the folio which belongs to the address_space which * has the lowest address in memory first. 
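*
* A common pattern is to look a folio up, lock it, and then recheck that it
* still belongs to the mapping, since it may have been truncated while the
* caller slept; a rough sketch:
*
*	folio = filemap_get_folio(mapping, index);
*	if (IS_ERR(folio))
*		return PTR_ERR(folio);
*	folio_lock(folio);
*	if (unlikely(folio->mapping != mapping)) {
*		folio_unlock(folio);
*		folio_put(folio);
*		goto retry;
*	}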
*/ static inline void folio_lock(struct folio *folio) { might_sleep(); if (!folio_trylock(folio)) __folio_lock(folio); } /** * lock_page() - Lock the folio containing this page. * @page: The page to lock. * * See folio_lock() for a description of what the lock protects. * This is a legacy function and new code should probably use folio_lock() * instead. * * Context: May sleep. Pages in the same folio share a lock, so do not * attempt to lock two pages which share a folio. */ static inline void lock_page(struct page *page) { struct folio *folio; might_sleep(); folio = page_folio(page); if (!folio_trylock(folio)) __folio_lock(folio); } /** * folio_lock_killable() - Lock this folio, interruptible by a fatal signal. * @folio: The folio to lock. * * Attempts to lock the folio, like folio_lock(), except that the sleep * to acquire the lock is interruptible by a fatal signal. * * Context: May sleep; see folio_lock(). * Return: 0 if the lock was acquired; -EINTR if a fatal signal was received. */ static inline int folio_lock_killable(struct folio *folio) { might_sleep(); if (!folio_trylock(folio)) return __folio_lock_killable(folio); return 0; } /* * folio_lock_or_retry - Lock the folio, unless this would block and the * caller indicated that it can handle a retry. * * Return value and mmap_lock implications depend on flags; see * __folio_lock_or_retry(). */ static inline vm_fault_t folio_lock_or_retry(struct folio *folio, struct vm_fault *vmf) { might_sleep(); if (!folio_trylock(folio)) return __folio_lock_or_retry(folio, vmf); return 0; } /* * This is exported only for folio_wait_locked/folio_wait_writeback, etc., * and should not be used directly. */ void folio_wait_bit(struct folio *folio, int bit_nr); int folio_wait_bit_killable(struct folio *folio, int bit_nr); /* * Wait for a folio to be unlocked. * * This must be called with the caller "holding" the folio, * ie with increased folio reference count so that the folio won't * go away during the wait. */ static inline void folio_wait_locked(struct folio *folio) { if (folio_test_locked(folio)) folio_wait_bit(folio, PG_locked); } static inline int folio_wait_locked_killable(struct folio *folio) { if (!folio_test_locked(folio)) return 0; return folio_wait_bit_killable(folio, PG_locked); } static inline void wait_on_page_locked(struct page *page) { folio_wait_locked(page_folio(page)); } void folio_end_read(struct folio *folio, bool success); void wait_on_page_writeback(struct page *page); void folio_wait_writeback(struct folio *folio); int folio_wait_writeback_killable(struct folio *folio); void end_page_writeback(struct page *page); void folio_end_writeback(struct folio *folio); void wait_for_stable_page(struct page *page); void folio_wait_stable(struct folio *folio); void __folio_mark_dirty(struct folio *folio, struct address_space *, int warn); void folio_account_cleaned(struct folio *folio, struct bdi_writeback *wb); void __folio_cancel_dirty(struct folio *folio); static inline void folio_cancel_dirty(struct folio *folio) { /* Avoid atomic ops, locking, etc. when not actually needed. 
*/ if (folio_test_dirty(folio)) __folio_cancel_dirty(folio); } bool folio_clear_dirty_for_io(struct folio *folio); bool clear_page_dirty_for_io(struct page *page); void folio_invalidate(struct folio *folio, size_t offset, size_t length); bool noop_dirty_folio(struct address_space *mapping, struct folio *folio); #ifdef CONFIG_MIGRATION int filemap_migrate_folio(struct address_space *mapping, struct folio *dst, struct folio *src, enum migrate_mode mode); #else #define filemap_migrate_folio NULL #endif void folio_end_private_2(struct folio *folio); void folio_wait_private_2(struct folio *folio); int folio_wait_private_2_killable(struct folio *folio); /* * Fault in userspace address range. */ size_t fault_in_writeable(char __user *uaddr, size_t size); size_t fault_in_subpage_writeable(char __user *uaddr, size_t size); size_t fault_in_safe_writeable(const char __user *uaddr, size_t size); size_t fault_in_readable(const char __user *uaddr, size_t size); int add_to_page_cache_lru(struct page *page, struct address_space *mapping, pgoff_t index, gfp_t gfp); int filemap_add_folio(struct address_space *mapping, struct folio *folio, pgoff_t index, gfp_t gfp); void filemap_remove_folio(struct folio *folio); void __filemap_remove_folio(struct folio *folio, void *shadow); void replace_page_cache_folio(struct folio *old, struct folio *new); void delete_from_page_cache_batch(struct address_space *mapping, struct folio_batch *fbatch); bool filemap_release_folio(struct folio *folio, gfp_t gfp); loff_t mapping_seek_hole_data(struct address_space *, loff_t start, loff_t end, int whence); /* Must be non-static for BPF error injection */ int __filemap_add_folio(struct address_space *mapping, struct folio *folio, pgoff_t index, gfp_t gfp, void **shadowp); bool filemap_range_has_writeback(struct address_space *mapping, loff_t start_byte, loff_t end_byte); /** * filemap_range_needs_writeback - check if range potentially needs writeback * @mapping: address space within which to check * @start_byte: offset in bytes where the range starts * @end_byte: offset in bytes where the range ends (inclusive) * * Find at least one page in the range supplied, usually used to check if * direct writing in this range will trigger a writeback. Used by O_DIRECT * read/write with IOCB_NOWAIT, to see if the caller needs to do * filemap_write_and_wait_range() before proceeding. * * Return: %true if the caller should do filemap_write_and_wait_range() before * doing O_DIRECT to a page in this range, %false otherwise. */ static inline bool filemap_range_needs_writeback(struct address_space *mapping, loff_t start_byte, loff_t end_byte) { if (!mapping->nrpages) return false; if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) && !mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK)) return false; return filemap_range_has_writeback(mapping, start_byte, end_byte); } /** * struct readahead_control - Describes a readahead request. * * A readahead request is for consecutive pages. Filesystems which * implement the ->readahead method should call readahead_page() or * readahead_page_batch() in a loop and attempt to start I/O against * each page in the request. * * Most of the fields in this struct are private and should be accessed * by the functions below. * * @file: The file, used primarily by network filesystems for authentication. * May be NULL if invoked internally by the filesystem. * @mapping: Readahead this filesystem object. * @ra: File readahead state. May be NULL. 
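*
* A filesystem's ->readahead() implementation normally consumes the request
* with readahead_folio(); a rough sketch (the myfs_* names are hypothetical):
*
*	static void myfs_readahead(struct readahead_control *ractl)
*	{
*		struct folio *folio;
*
*		while ((folio = readahead_folio(ractl)) != NULL)
*			myfs_submit_read(ractl->file, folio);
*	}
*
* Each folio comes back locked; the read completion is expected to unlock it
* (for example via folio_end_read()) once the I/O has finished.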
*/ struct readahead_control { struct file *file; struct address_space *mapping; struct file_ra_state *ra; /* private: use the readahead_* accessors instead */ pgoff_t _index; unsigned int _nr_pages; unsigned int _batch_count; bool dropbehind; bool _workingset; unsigned long _pflags; }; #define DEFINE_READAHEAD(ractl, f, r, m, i) \ struct readahead_control ractl = { \ .file = f, \ .mapping = m, \ .ra = r, \ ._index = i, \ } #define VM_READAHEAD_PAGES (SZ_128K / PAGE_SIZE) void page_cache_ra_unbounded(struct readahead_control *, unsigned long nr_to_read, unsigned long lookahead_count); void page_cache_sync_ra(struct readahead_control *, unsigned long req_count); void page_cache_async_ra(struct readahead_control *, struct folio *, unsigned long req_count); void readahead_expand(struct readahead_control *ractl, loff_t new_start, size_t new_len); /** * page_cache_sync_readahead - generic file readahead * @mapping: address_space which holds the pagecache and I/O vectors * @ra: file_ra_state which holds the readahead state * @file: Used by the filesystem for authentication. * @index: Index of first page to be read. * @req_count: Total number of pages being read by the caller. * * page_cache_sync_readahead() should be called when a cache miss happened: * it will submit the read. The readahead logic may decide to piggyback more * pages onto the read request if access patterns suggest it will improve * performance. */ static inline void page_cache_sync_readahead(struct address_space *mapping, struct file_ra_state *ra, struct file *file, pgoff_t index, unsigned long req_count) { DEFINE_READAHEAD(ractl, file, ra, mapping, index); page_cache_sync_ra(&ractl, req_count); } /** * page_cache_async_readahead - file readahead for marked pages * @mapping: address_space which holds the pagecache and I/O vectors * @ra: file_ra_state which holds the readahead state * @file: Used by the filesystem for authentication. * @folio: The folio which triggered the readahead call. * @req_count: Total number of pages being read by the caller. * * page_cache_async_readahead() should be called when a page is used which * is marked as PageReadahead; this is a marker to suggest that the application * has used up enough of the readahead window that we should start pulling in * more pages. */ static inline void page_cache_async_readahead(struct address_space *mapping, struct file_ra_state *ra, struct file *file, struct folio *folio, unsigned long req_count) { DEFINE_READAHEAD(ractl, file, ra, mapping, folio->index); page_cache_async_ra(&ractl, folio, req_count); } static inline struct folio *__readahead_folio(struct readahead_control *ractl) { struct folio *folio; BUG_ON(ractl->_batch_count > ractl->_nr_pages); ractl->_nr_pages -= ractl->_batch_count; ractl->_index += ractl->_batch_count; if (!ractl->_nr_pages) { ractl->_batch_count = 0; return NULL; } folio = xa_load(&ractl->mapping->i_pages, ractl->_index); VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); ractl->_batch_count = folio_nr_pages(folio); return folio; } /** * readahead_page - Get the next page to read. * @ractl: The current readahead request. * * Context: The page is locked and has an elevated refcount. The caller * should decreases the refcount once the page has been submitted for I/O * and unlock the page once all I/O to that page has completed. * Return: A pointer to the next page, or %NULL if we are done. 
*/ static inline struct page *readahead_page(struct readahead_control *ractl) { struct folio *folio = __readahead_folio(ractl); return &folio->page; } /** * readahead_folio - Get the next folio to read. * @ractl: The current readahead request. * * Context: The folio is locked. The caller should unlock the folio once * all I/O to that folio has completed. * Return: A pointer to the next folio, or %NULL if we are done. */ static inline struct folio *readahead_folio(struct readahead_control *ractl) { struct folio *folio = __readahead_folio(ractl); if (folio) folio_put(folio); return folio; } static inline unsigned int __readahead_batch(struct readahead_control *rac, struct page **array, unsigned int array_sz) { unsigned int i = 0; XA_STATE(xas, &rac->mapping->i_pages, 0); struct page *page; BUG_ON(rac->_batch_count > rac->_nr_pages); rac->_nr_pages -= rac->_batch_count; rac->_index += rac->_batch_count; rac->_batch_count = 0; xas_set(&xas, rac->_index); rcu_read_lock(); xas_for_each(&xas, page, rac->_index + rac->_nr_pages - 1) { if (xas_retry(&xas, page)) continue; VM_BUG_ON_PAGE(!PageLocked(page), page); VM_BUG_ON_PAGE(PageTail(page), page); array[i++] = page; rac->_batch_count += thp_nr_pages(page); if (i == array_sz) break; } rcu_read_unlock(); return i; } /** * readahead_page_batch - Get a batch of pages to read. * @rac: The current readahead request. * @array: An array of pointers to struct page. * * Context: The pages are locked and have an elevated refcount. The caller * should decreases the refcount once the page has been submitted for I/O * and unlock the page once all I/O to that page has completed. * Return: The number of pages placed in the array. 0 indicates the request * is complete. */ #define readahead_page_batch(rac, array) \ __readahead_batch(rac, array, ARRAY_SIZE(array)) /** * readahead_pos - The byte offset into the file of this readahead request. * @rac: The readahead request. */ static inline loff_t readahead_pos(struct readahead_control *rac) { return (loff_t)rac->_index * PAGE_SIZE; } /** * readahead_length - The number of bytes in this readahead request. * @rac: The readahead request. */ static inline size_t readahead_length(struct readahead_control *rac) { return rac->_nr_pages * PAGE_SIZE; } /** * readahead_index - The index of the first page in this readahead request. * @rac: The readahead request. */ static inline pgoff_t readahead_index(struct readahead_control *rac) { return rac->_index; } /** * readahead_count - The number of pages in this readahead request. * @rac: The readahead request. */ static inline unsigned int readahead_count(struct readahead_control *rac) { return rac->_nr_pages; } /** * readahead_batch_length - The number of bytes in the current batch. * @rac: The readahead request. */ static inline size_t readahead_batch_length(struct readahead_control *rac) { return rac->_batch_count * PAGE_SIZE; } static inline unsigned long dir_pages(struct inode *inode) { return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT; } /** * folio_mkwrite_check_truncate - check if folio was truncated * @folio: the folio to check * @inode: the inode to check the folio against * * Return: the number of bytes in the folio up to EOF, * or -EFAULT if the folio was truncated. 
*/ static inline ssize_t folio_mkwrite_check_truncate(struct folio *folio, struct inode *inode) { loff_t size = i_size_read(inode); pgoff_t index = size >> PAGE_SHIFT; size_t offset = offset_in_folio(folio, size); if (!folio->mapping) return -EFAULT; /* folio is wholly inside EOF */ if (folio_next_index(folio) - 1 < index) return folio_size(folio); /* folio is wholly past EOF */ if (folio->index > index || !offset) return -EFAULT; /* folio is partially inside EOF */ return offset; } /** * page_mkwrite_check_truncate - check if page was truncated * @page: the page to check * @inode: the inode to check the page against * * Returns the number of bytes in the page up to EOF, * or -EFAULT if the page was truncated. */ static inline int page_mkwrite_check_truncate(struct page *page, struct inode *inode) { loff_t size = i_size_read(inode); pgoff_t index = size >> PAGE_SHIFT; int offset = offset_in_page(size); if (page->mapping != inode->i_mapping) return -EFAULT; /* page is wholly inside EOF */ if (page->index < index) return PAGE_SIZE; /* page is wholly past EOF */ if (page->index > index || !offset) return -EFAULT; /* page is partially inside EOF */ return offset; } /** * i_blocks_per_folio - How many blocks fit in this folio. * @inode: The inode which contains the blocks. * @folio: The folio. * * If the block size is larger than the size of this folio, return zero. * * Context: The caller should hold a refcount on the folio to prevent it * from being split. * Return: The number of filesystem blocks covered by this folio. */ static inline unsigned int i_blocks_per_folio(struct inode *inode, struct folio *folio) { return folio_size(folio) >> inode->i_blkbits; } #endif /* _LINUX_PAGEMAP_H */ |
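/*
 * A minimal sketch (not part of the header above) of how a filesystem's
 * ->readahead method might consume a struct readahead_control using only the
 * accessors declared here.  "myfs" and the zero-fill "I/O" are hypothetical
 * placeholders; a real filesystem would submit bios and call folio_end_read()
 * from the I/O completion handler instead of doing it inline.
 */
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>

static void myfs_readahead(struct readahead_control *rac)
{
	struct folio *folio;

	pr_debug("myfs: readahead of %zu bytes at offset %lld\n",
		 readahead_length(rac), (long long)readahead_pos(rac));

	/*
	 * readahead_folio() hands back each folio locked; folio_end_read()
	 * marks it uptodate and unlocks it once the data is in place.
	 */
	while ((folio = readahead_folio(rac)) != NULL) {
		folio_zero_range(folio, 0, folio_size(folio));
		folio_end_read(folio, true);
	}
}

static const struct address_space_operations myfs_aops = {
	.readahead	= myfs_readahead,
};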
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_COREDUMP_H
#define _LINUX_SCHED_COREDUMP_H

#include <linux/mm_types.h>

#define SUID_DUMP_DISABLE	0	/* No setuid dumping */
#define SUID_DUMP_USER		1	/* Dump as user of process */
#define SUID_DUMP_ROOT		2	/* Dump as root */

extern void set_dumpable(struct mm_struct *mm, int value);
/*
 * This returns the actual value of the suid_dumpable flag. For things
 * that are using this for checking for privilege transitions, it must
 * test against SUID_DUMP_USER rather than treating it as a boolean
 * value.
 */
static inline int __get_dumpable(unsigned long mm_flags)
{
	return mm_flags & MMF_DUMPABLE_MASK;
}

static inline int get_dumpable(struct mm_struct *mm)
{
	return __get_dumpable(mm->flags);
}

#endif /* _LINUX_SCHED_COREDUMP_H */
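/*
 * A minimal sketch (not part of the header above) illustrating the point of
 * the comment on __get_dumpable(): the flag is tri-state, so callers making
 * privilege decisions compare against SUID_DUMP_USER instead of treating the
 * value as a boolean.  The helper name is hypothetical.
 */
#include <linux/sched/coredump.h>
#include <linux/mm_types.h>

static inline bool mm_user_dumpable(struct mm_struct *mm)
{
	/*
	 * SUID_DUMP_ROOT is also non-zero, but it must not be mistaken
	 * for an ordinary user-dumpable process.
	 */
	return get_dumpable(mm) == SUID_DUMP_USER;
}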
3 5 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 | /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Public Key Signature Algorithm * * Copyright (c) 2023 Herbert Xu <herbert@gondor.apana.org.au> */ #include <crypto/internal/sig.h> #include <linux/cryptouser.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/seq_file.h> #include <linux/string.h> #include <net/netlink.h> #include "internal.h" static void crypto_sig_exit_tfm(struct crypto_tfm *tfm) { struct crypto_sig *sig = __crypto_sig_tfm(tfm); struct sig_alg *alg = crypto_sig_alg(sig); alg->exit(sig); } static int crypto_sig_init_tfm(struct crypto_tfm *tfm) { struct crypto_sig *sig = __crypto_sig_tfm(tfm); struct sig_alg *alg = crypto_sig_alg(sig); if (alg->exit) sig->base.exit = crypto_sig_exit_tfm; if (alg->init) return alg->init(sig); return 0; } static void crypto_sig_free_instance(struct crypto_instance *inst) { struct sig_instance *sig = sig_instance(inst); sig->free(sig); } static void __maybe_unused crypto_sig_show(struct seq_file *m, struct crypto_alg *alg) { seq_puts(m, "type : sig\n"); } static int __maybe_unused crypto_sig_report(struct sk_buff *skb, struct crypto_alg *alg) { struct crypto_report_sig rsig = {}; strscpy(rsig.type, "sig", sizeof(rsig.type)); return nla_put(skb, CRYPTOCFGA_REPORT_SIG, sizeof(rsig), &rsig); } static const struct crypto_type crypto_sig_type = { .extsize = crypto_alg_extsize, .init_tfm = crypto_sig_init_tfm, .free = crypto_sig_free_instance, #ifdef CONFIG_PROC_FS .show = crypto_sig_show, #endif #if IS_ENABLED(CONFIG_CRYPTO_USER) .report = crypto_sig_report, #endif .maskclear = ~CRYPTO_ALG_TYPE_MASK, .maskset = CRYPTO_ALG_TYPE_MASK, .type = CRYPTO_ALG_TYPE_SIG, .tfmsize = offsetof(struct crypto_sig, base), }; struct crypto_sig *crypto_alloc_sig(const char *alg_name, u32 type, u32 mask) { return crypto_alloc_tfm(alg_name, &crypto_sig_type, type, mask); } EXPORT_SYMBOL_GPL(crypto_alloc_sig); static int sig_default_sign(struct crypto_sig *tfm, const void *src, unsigned int slen, void *dst, unsigned int dlen) { return -ENOSYS; } static int sig_default_verify(struct crypto_sig *tfm, const void *src, unsigned int slen, const void *dst, unsigned int dlen) { return -ENOSYS; } static int sig_default_set_key(struct crypto_sig *tfm, const void *key, unsigned int keylen) { return -ENOSYS; } static int sig_prepare_alg(struct sig_alg *alg) { struct crypto_alg *base = &alg->base; if (!alg->sign) alg->sign = sig_default_sign; if (!alg->verify) alg->verify = sig_default_verify; if (!alg->set_priv_key) alg->set_priv_key = sig_default_set_key; if (!alg->set_pub_key) return -EINVAL; if (!alg->key_size) return -EINVAL; if (!alg->max_size) alg->max_size = alg->key_size; if (!alg->digest_size) alg->digest_size = alg->key_size; base->cra_type = &crypto_sig_type; base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; base->cra_flags |= CRYPTO_ALG_TYPE_SIG; return 0; } int crypto_register_sig(struct sig_alg *alg) { struct crypto_alg *base = &alg->base; int err; err = 
sig_prepare_alg(alg); if (err) return err; return crypto_register_alg(base); } EXPORT_SYMBOL_GPL(crypto_register_sig); void crypto_unregister_sig(struct sig_alg *alg) { crypto_unregister_alg(&alg->base); } EXPORT_SYMBOL_GPL(crypto_unregister_sig); int sig_register_instance(struct crypto_template *tmpl, struct sig_instance *inst) { int err; if (WARN_ON(!inst->free)) return -EINVAL; err = sig_prepare_alg(&inst->alg); if (err) return err; return crypto_register_instance(tmpl, sig_crypto_instance(inst)); } EXPORT_SYMBOL_GPL(sig_register_instance); int crypto_grab_sig(struct crypto_sig_spawn *spawn, struct crypto_instance *inst, const char *name, u32 type, u32 mask) { spawn->base.frontend = &crypto_sig_type; return crypto_grab_spawn(&spawn->base, inst, name, type, mask); } EXPORT_SYMBOL_GPL(crypto_grab_sig); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Public Key Signature Algorithms"); |
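/*
 * A minimal sketch (not part of the file above) of what a backend must
 * provide before crypto_register_sig() will accept it: sig_prepare_alg()
 * insists on ->set_pub_key and ->key_size and fills in -ENOSYS stubs and
 * key-size based defaults for everything else.  The "dummy" names, the fixed
 * size and the module wrapper are hypothetical illustrations only.
 */
#include <crypto/sig.h>
#include <crypto/internal/sig.h>
#include <linux/module.h>

static int dummy_set_pub_key(struct crypto_sig *tfm,
			     const void *key, unsigned int keylen)
{
	return 0;	/* accept anything; a real backend parses the key */
}

static unsigned int dummy_key_size(struct crypto_sig *tfm)
{
	return 256;	/* fixed size; also used as max_size/digest_size default */
}

static struct sig_alg dummy_sig = {
	.set_pub_key	= dummy_set_pub_key,
	.key_size	= dummy_key_size,
	/* ->sign/->verify/->set_priv_key left NULL: callers get -ENOSYS */
	.base = {
		.cra_name		= "dummy-sig",
		.cra_driver_name	= "dummy-sig-generic",
		.cra_priority		= 100,
		.cra_module		= THIS_MODULE,
	},
};

static int __init dummy_sig_mod_init(void)
{
	return crypto_register_sig(&dummy_sig);
}

static void __exit dummy_sig_mod_exit(void)
{
	crypto_unregister_sig(&dummy_sig);
}

module_init(dummy_sig_mod_init);
module_exit(dummy_sig_mod_exit);
MODULE_LICENSE("GPL");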
17 17 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 | // SPDX-License-Identifier: GPL-2.0-only /* This is a module which is used to mark packets for tracing. */ #include <linux/module.h> #include <linux/skbuff.h> #include <linux/netfilter/x_tables.h> #include <net/netfilter/nf_log.h> MODULE_DESCRIPTION("Xtables: packet flow tracing"); MODULE_LICENSE("GPL"); MODULE_ALIAS("ipt_TRACE"); MODULE_ALIAS("ip6t_TRACE"); static int trace_tg_check(const struct xt_tgchk_param *par) { return nf_logger_find_get(par->family, NF_LOG_TYPE_LOG); } static void trace_tg_destroy(const struct xt_tgdtor_param *par) { nf_logger_put(par->family, NF_LOG_TYPE_LOG); } static unsigned int trace_tg(struct sk_buff *skb, const struct xt_action_param *par) { skb->nf_trace = 1; return XT_CONTINUE; } static struct xt_target trace_tg_reg[] __read_mostly = { { .name = "TRACE", .revision = 0, .family = NFPROTO_IPV4, .table = "raw", .target = trace_tg, .checkentry = trace_tg_check, .destroy = trace_tg_destroy, .me = THIS_MODULE, }, #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) { .name = "TRACE", .revision = 0, .family = NFPROTO_IPV6, .table = "raw", .target = trace_tg, .checkentry = trace_tg_check, .destroy = trace_tg_destroy, .me = THIS_MODULE, }, #endif }; static int __init trace_tg_init(void) { return xt_register_targets(trace_tg_reg, ARRAY_SIZE(trace_tg_reg)); } static void __exit trace_tg_exit(void) { xt_unregister_targets(trace_tg_reg, ARRAY_SIZE(trace_tg_reg)); } module_init(trace_tg_init); module_exit(trace_tg_exit); MODULE_SOFTDEP("pre: nf_log_syslog"); |
147 280 21 8 3 1251 113 1747 1739 81 84 82 83 84 84 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 | /* SPDX-License-Identifier: GPL-2.0 */ /* * include/linux/backing-dev.h * * low-level device information and state which is propagated up through * to high-level code. */ #ifndef _LINUX_BACKING_DEV_H #define _LINUX_BACKING_DEV_H #include <linux/kernel.h> #include <linux/fs.h> #include <linux/sched.h> #include <linux/device.h> #include <linux/writeback.h> #include <linux/backing-dev-defs.h> #include <linux/slab.h> static inline struct backing_dev_info *bdi_get(struct backing_dev_info *bdi) { kref_get(&bdi->refcnt); return bdi; } struct backing_dev_info *bdi_get_by_id(u64 id); void bdi_put(struct backing_dev_info *bdi); __printf(2, 3) int bdi_register(struct backing_dev_info *bdi, const char *fmt, ...); __printf(2, 0) int bdi_register_va(struct backing_dev_info *bdi, const char *fmt, va_list args); void bdi_set_owner(struct backing_dev_info *bdi, struct device *owner); void bdi_unregister(struct backing_dev_info *bdi); struct backing_dev_info *bdi_alloc(int node_id); void wb_start_background_writeback(struct bdi_writeback *wb); void wb_workfn(struct work_struct *work); void wb_wait_for_completion(struct wb_completion *done); extern spinlock_t bdi_lock; extern struct list_head bdi_list; extern struct workqueue_struct *bdi_wq; static inline bool wb_has_dirty_io(struct bdi_writeback *wb) { return test_bit(WB_has_dirty_io, &wb->state); } static inline bool bdi_has_dirty_io(struct backing_dev_info *bdi) { /* * @bdi->tot_write_bandwidth is guaranteed to be > 0 if there are * any dirty wbs. See wb_update_write_bandwidth(). 
*/ return atomic_long_read(&bdi->tot_write_bandwidth); } static inline void wb_stat_mod(struct bdi_writeback *wb, enum wb_stat_item item, s64 amount) { percpu_counter_add_batch(&wb->stat[item], amount, WB_STAT_BATCH); } static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item) { wb_stat_mod(wb, item, 1); } static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item) { wb_stat_mod(wb, item, -1); } static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item) { return percpu_counter_read_positive(&wb->stat[item]); } static inline s64 wb_stat_sum(struct bdi_writeback *wb, enum wb_stat_item item) { return percpu_counter_sum_positive(&wb->stat[item]); } extern void wb_writeout_inc(struct bdi_writeback *wb); /* * maximal error of a stat counter. */ static inline unsigned long wb_stat_error(void) { #ifdef CONFIG_SMP return nr_cpu_ids * WB_STAT_BATCH; #else return 1; #endif } /* BDI ratio is expressed as part per 1000000 for finer granularity. */ #define BDI_RATIO_SCALE 10000 u64 bdi_get_min_bytes(struct backing_dev_info *bdi); u64 bdi_get_max_bytes(struct backing_dev_info *bdi); int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio); int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio); int bdi_set_min_ratio_no_scale(struct backing_dev_info *bdi, unsigned int min_ratio); int bdi_set_max_ratio_no_scale(struct backing_dev_info *bdi, unsigned int max_ratio); int bdi_set_min_bytes(struct backing_dev_info *bdi, u64 min_bytes); int bdi_set_max_bytes(struct backing_dev_info *bdi, u64 max_bytes); int bdi_set_strict_limit(struct backing_dev_info *bdi, unsigned int strict_limit); /* * Flags in backing_dev_info::capability * * BDI_CAP_WRITEBACK: Supports dirty page writeback, and dirty pages * should contribute to accounting * BDI_CAP_WRITEBACK_ACCT: Automatically account writeback pages * BDI_CAP_STRICTLIMIT: Keep number of dirty pages below bdi threshold */ #define BDI_CAP_WRITEBACK (1 << 0) #define BDI_CAP_WRITEBACK_ACCT (1 << 1) #define BDI_CAP_STRICTLIMIT (1 << 2) extern struct backing_dev_info noop_backing_dev_info; int bdi_init(struct backing_dev_info *bdi); /** * writeback_in_progress - determine whether there is writeback in progress * @wb: bdi_writeback of interest * * Determine whether there is writeback waiting to be handled against a * bdi_writeback. */ static inline bool writeback_in_progress(struct bdi_writeback *wb) { return test_bit(WB_writeback_running, &wb->state); } struct backing_dev_info *inode_to_bdi(struct inode *inode); static inline bool mapping_can_writeback(struct address_space *mapping) { return inode_to_bdi(mapping->host)->capabilities & BDI_CAP_WRITEBACK; } #ifdef CONFIG_CGROUP_WRITEBACK struct bdi_writeback *wb_get_lookup(struct backing_dev_info *bdi, struct cgroup_subsys_state *memcg_css); struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi, struct cgroup_subsys_state *memcg_css, gfp_t gfp); void wb_memcg_offline(struct mem_cgroup *memcg); void wb_blkcg_offline(struct cgroup_subsys_state *css); /** * inode_cgwb_enabled - test whether cgroup writeback is enabled on an inode * @inode: inode of interest * * Cgroup writeback requires support from the filesystem. Also, both memcg and * iocg have to be on the default hierarchy. Test whether all conditions are * met. * * Note that the test result may change dynamically on the same inode * depending on how memcg and iocg are configured. 
*/ static inline bool inode_cgwb_enabled(struct inode *inode) { struct backing_dev_info *bdi = inode_to_bdi(inode); return cgroup_subsys_on_dfl(memory_cgrp_subsys) && cgroup_subsys_on_dfl(io_cgrp_subsys) && (bdi->capabilities & BDI_CAP_WRITEBACK) && (inode->i_sb->s_iflags & SB_I_CGROUPWB); } /** * wb_find_current - find wb for %current on a bdi * @bdi: bdi of interest * * Find the wb of @bdi which matches both the memcg and blkcg of %current. * Must be called under rcu_read_lock() which protects the returend wb. * NULL if not found. */ static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi) { struct cgroup_subsys_state *memcg_css; struct bdi_writeback *wb; memcg_css = task_css(current, memory_cgrp_id); if (!memcg_css->parent) return &bdi->wb; wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id); /* * %current's blkcg equals the effective blkcg of its memcg. No * need to use the relatively expensive cgroup_get_e_css(). */ if (likely(wb && wb->blkcg_css == task_css(current, io_cgrp_id))) return wb; return NULL; } /** * wb_get_create_current - get or create wb for %current on a bdi * @bdi: bdi of interest * @gfp: allocation mask * * Equivalent to wb_get_create() on %current's memcg. This function is * called from a relatively hot path and optimizes the common cases using * wb_find_current(). */ static inline struct bdi_writeback * wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp) { struct bdi_writeback *wb; rcu_read_lock(); wb = wb_find_current(bdi); if (wb && unlikely(!wb_tryget(wb))) wb = NULL; rcu_read_unlock(); if (unlikely(!wb)) { struct cgroup_subsys_state *memcg_css; memcg_css = task_get_css(current, memory_cgrp_id); wb = wb_get_create(bdi, memcg_css, gfp); css_put(memcg_css); } return wb; } /** * inode_to_wb - determine the wb of an inode * @inode: inode of interest * * Returns the wb @inode is currently associated with. The caller must be * holding either @inode->i_lock, the i_pages lock, or the * associated wb's list_lock. */ static inline struct bdi_writeback *inode_to_wb(const struct inode *inode) { #ifdef CONFIG_LOCKDEP WARN_ON_ONCE(debug_locks && (!lockdep_is_held(&inode->i_lock) && !lockdep_is_held(&inode->i_mapping->i_pages.xa_lock) && !lockdep_is_held(&inode->i_wb->list_lock))); #endif return inode->i_wb; } static inline struct bdi_writeback *inode_to_wb_wbc( struct inode *inode, struct writeback_control *wbc) { /* * If wbc does not have inode attached, it means cgroup writeback was * disabled when wbc started. Just use the default wb in that case. */ return wbc->wb ? wbc->wb : &inode_to_bdi(inode)->wb; } /** * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction * @inode: target inode * @cookie: output param, to be passed to the end function * * The caller wants to access the wb associated with @inode but isn't * holding inode->i_lock, the i_pages lock or wb->list_lock. This * function determines the wb associated with @inode and ensures that the * association doesn't change until the transaction is finished with * unlocked_inode_to_wb_end(). * * The caller must call unlocked_inode_to_wb_end() with *@cookie afterwards and * can't sleep during the transaction. IRQs may or may not be disabled on * return. */ static inline struct bdi_writeback * unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie) { rcu_read_lock(); /* * Paired with store_release in inode_switch_wbs_work_fn() and * ensures that we see the new wb if we see cleared I_WB_SWITCH. 
*/ cookie->locked = smp_load_acquire(&inode->i_state) & I_WB_SWITCH; if (unlikely(cookie->locked)) xa_lock_irqsave(&inode->i_mapping->i_pages, cookie->flags); /* * Protected by either !I_WB_SWITCH + rcu_read_lock() or the i_pages * lock. inode_to_wb() will bark. Deref directly. */ return inode->i_wb; } /** * unlocked_inode_to_wb_end - end inode wb access transaction * @inode: target inode * @cookie: @cookie from unlocked_inode_to_wb_begin() */ static inline void unlocked_inode_to_wb_end(struct inode *inode, struct wb_lock_cookie *cookie) { if (unlikely(cookie->locked)) xa_unlock_irqrestore(&inode->i_mapping->i_pages, cookie->flags); rcu_read_unlock(); } #else /* CONFIG_CGROUP_WRITEBACK */ static inline bool inode_cgwb_enabled(struct inode *inode) { return false; } static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi) { return &bdi->wb; } static inline struct bdi_writeback * wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp) { return &bdi->wb; } static inline struct bdi_writeback *inode_to_wb(struct inode *inode) { return &inode_to_bdi(inode)->wb; } static inline struct bdi_writeback *inode_to_wb_wbc( struct inode *inode, struct writeback_control *wbc) { return inode_to_wb(inode); } static inline struct bdi_writeback * unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie) { return inode_to_wb(inode); } static inline void unlocked_inode_to_wb_end(struct inode *inode, struct wb_lock_cookie *cookie) { } static inline void wb_memcg_offline(struct mem_cgroup *memcg) { } static inline void wb_blkcg_offline(struct cgroup_subsys_state *css) { } #endif /* CONFIG_CGROUP_WRITEBACK */ const char *bdi_dev_name(struct backing_dev_info *bdi); #endif /* _LINUX_BACKING_DEV_H */ |
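/*
 * A minimal sketch (not part of the header above) of the lockless wb access
 * transaction documented for unlocked_inode_to_wb_begin()/_end().  The helper
 * name and the statistic being sampled are arbitrary; the point is the
 * begin/end pairing and the "no sleeping inside the transaction" rule.
 */
#include <linux/backing-dev.h>

static s64 inode_wb_reclaimable_pages(struct inode *inode)
{
	struct wb_lock_cookie cookie = {};
	struct bdi_writeback *wb;
	s64 nr;

	/*
	 * Pins the inode->i_wb association without taking i_lock,
	 * the i_pages lock or wb->list_lock.
	 */
	wb = unlocked_inode_to_wb_begin(inode, &cookie);
	nr = wb_stat(wb, WB_RECLAIMABLE);	/* must not sleep here */
	unlocked_inode_to_wb_end(inode, &cookie);

	return nr;
}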
10 9 324 36 132 226 226 10 5 2 5 5 6 5 4 1 5 423 6 6 424 1 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 | // SPDX-License-Identifier: GPL-2.0 #ifndef IOU_KBUF_H #define IOU_KBUF_H #include <uapi/linux/io_uring.h> #include <linux/io_uring_types.h> enum { /* ring mapped provided buffers */ IOBL_BUF_RING = 1, /* buffers are consumed incrementally rather than always fully */ IOBL_INC = 2, }; struct io_buffer_list { /* * If ->buf_nr_pages is set, then buf_pages/buf_ring are used. If not, * then these are classic provided buffers and ->buf_list is used. */ union { struct list_head buf_list; struct io_uring_buf_ring *buf_ring; }; __u16 bgid; /* below is for ring provided buffers */ __u16 buf_nr_pages; __u16 nr_entries; __u16 head; __u16 mask; __u16 flags; struct io_mapped_region region; }; struct io_buffer { struct list_head list; __u64 addr; __u32 len; __u16 bid; __u16 bgid; }; enum { /* can alloc a bigger vec */ KBUF_MODE_EXPAND = 1, /* if bigger vec allocated, free old one */ KBUF_MODE_FREE = 2, }; struct buf_sel_arg { struct iovec *iovs; size_t out_len; size_t max_len; unsigned short nr_iovs; unsigned short mode; }; void __user *io_buffer_select(struct io_kiocb *req, size_t *len, unsigned int issue_flags); int io_buffers_select(struct io_kiocb *req, struct buf_sel_arg *arg, unsigned int issue_flags); int io_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg); void io_destroy_buffers(struct io_ring_ctx *ctx); int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe); int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags); int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe); int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags); int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg); int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg); int io_register_pbuf_status(struct io_ring_ctx *ctx, void __user *arg); void __io_put_kbuf(struct io_kiocb *req, int len, unsigned issue_flags); bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags); struct io_mapped_region *io_pbuf_get_region(struct io_ring_ctx *ctx, unsigned int bgid); static inline bool io_kbuf_recycle_ring(struct io_kiocb *req) { /* * We don't need to recycle for REQ_F_BUFFER_RING, we can just clear * the flag and hence ensure that bl->head doesn't get incremented. * If the tail has already been incremented, hang on to it. * The exception is partial io, that case we should increment bl->head * to monopolize the buffer. 
*/ if (req->buf_list) { req->buf_index = req->buf_list->bgid; req->flags &= ~(REQ_F_BUFFER_RING|REQ_F_BUFFERS_COMMIT); return true; } return false; } static inline bool io_do_buffer_select(struct io_kiocb *req) { if (!(req->flags & REQ_F_BUFFER_SELECT)) return false; return !(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)); } static inline bool io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags) { if (req->flags & REQ_F_BL_NO_RECYCLE) return false; if (req->flags & REQ_F_BUFFER_SELECTED) return io_kbuf_recycle_legacy(req, issue_flags); if (req->flags & REQ_F_BUFFER_RING) return io_kbuf_recycle_ring(req); return false; } /* Mapped buffer ring, return io_uring_buf from head */ #define io_ring_head_to_buf(br, head, mask) &(br)->bufs[(head) & (mask)] static inline bool io_kbuf_commit(struct io_kiocb *req, struct io_buffer_list *bl, int len, int nr) { if (unlikely(!(req->flags & REQ_F_BUFFERS_COMMIT))) return true; req->flags &= ~REQ_F_BUFFERS_COMMIT; if (unlikely(len < 0)) return true; if (bl->flags & IOBL_INC) { struct io_uring_buf *buf; buf = io_ring_head_to_buf(bl->buf_ring, bl->head, bl->mask); if (WARN_ON_ONCE(len > buf->len)) len = buf->len; buf->len -= len; if (buf->len) { buf->addr += len; return false; } } bl->head += nr; return true; } static inline bool __io_put_kbuf_ring(struct io_kiocb *req, int len, int nr) { struct io_buffer_list *bl = req->buf_list; bool ret = true; if (bl) { ret = io_kbuf_commit(req, bl, len, nr); req->buf_index = bl->bgid; } req->flags &= ~REQ_F_BUFFER_RING; return ret; } static inline void __io_put_kbuf_list(struct io_kiocb *req, int len, struct list_head *list) { if (req->flags & REQ_F_BUFFER_RING) { __io_put_kbuf_ring(req, len, 1); } else { req->buf_index = req->kbuf->bgid; list_add(&req->kbuf->list, list); req->flags &= ~REQ_F_BUFFER_SELECTED; } } static inline void io_kbuf_drop(struct io_kiocb *req) { lockdep_assert_held(&req->ctx->completion_lock); if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING))) return; /* len == 0 is fine here, non-ring will always drop all of it */ __io_put_kbuf_list(req, 0, &req->ctx->io_buffers_comp); } static inline unsigned int __io_put_kbufs(struct io_kiocb *req, int len, int nbufs, unsigned issue_flags) { unsigned int ret; if (!(req->flags & (REQ_F_BUFFER_RING | REQ_F_BUFFER_SELECTED))) return 0; ret = IORING_CQE_F_BUFFER | (req->buf_index << IORING_CQE_BUFFER_SHIFT); if (req->flags & REQ_F_BUFFER_RING) { if (!__io_put_kbuf_ring(req, len, nbufs)) ret |= IORING_CQE_F_BUF_MORE; } else { __io_put_kbuf(req, len, issue_flags); } return ret; } static inline unsigned int io_put_kbuf(struct io_kiocb *req, int len, unsigned issue_flags) { return __io_put_kbufs(req, len, 1, issue_flags); } static inline unsigned int io_put_kbufs(struct io_kiocb *req, int len, int nbufs, unsigned issue_flags) { return __io_put_kbufs(req, len, nbufs, issue_flags); } #endif |
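/*
 * Userspace counterpart sketch (not part of the kernel header above):
 * registering a ring-mapped provided-buffer group (what IOBL_BUF_RING refers
 * to) and letting a recv pick from it.  Assumes liburing >= 2.4 and its
 * io_uring_setup_buf_ring()/io_uring_buf_ring_add() helpers; the buffer group
 * id, sizes and error handling are illustrative only.
 */
#include <liburing.h>
#include <stdlib.h>

#define BGID	7
#define NR_BUFS	8
#define BUF_SZ	4096

static struct io_uring_buf_ring *setup_bufs(struct io_uring *ring, char **pool)
{
	struct io_uring_buf_ring *br;
	int i, err;

	*pool = malloc(NR_BUFS * BUF_SZ);
	if (!*pool)
		return NULL;
	br = io_uring_setup_buf_ring(ring, NR_BUFS, BGID, 0, &err);
	if (!br) {
		free(*pool);
		return NULL;
	}

	/* Publish each buffer; its bid comes back in cqe->flags on completion. */
	for (i = 0; i < NR_BUFS; i++)
		io_uring_buf_ring_add(br, *pool + i * BUF_SZ, BUF_SZ, i,
				      io_uring_buf_ring_mask(NR_BUFS), i);
	io_uring_buf_ring_advance(br, NR_BUFS);
	return br;
}

static void queue_recv(struct io_uring *ring, int sockfd)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	/*
	 * Let the kernel select a buffer from group BGID at completion time;
	 * IORING_CQE_F_BUFFER in cqe->flags then carries the chosen bid.
	 */
	io_uring_prep_recv(sqe, sockfd, NULL, BUF_SZ, 0);
	sqe->flags |= IOSQE_BUFFER_SELECT;
	sqe->buf_group = BGID;
}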
50 50 50 58 59 59 59 50 50 49 50 4 4 4 4 4 4 4 4 4 1 3 3 4 4 4 1 4 4 4 4 4 10 10 10 10 10 1 9 4 3 2 3 1 7 5 4 5 5 3 5 10 10 31 7 5 30 2 23 5 24 25 3 15 12 4 13 14 31 8 1 1 21 60 60 6 8 4 3 1 1 1 1 3 12 12 11 1 10 32 60 31 2 1 2 1 10 10 10 10 1 49 59 58 59 59 58 59 59 50 50 50 50 60 59 50 51 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 | // SPDX-License-Identifier: GPL-2.0-or-later /* i2c-dev.c - i2c-bus driver, char device interface Copyright (C) 1995-97 Simon G. 
Vogl Copyright (C) 1998-99 Frodo Looijaard <frodol@dds.nl> Copyright (C) 2003 Greg Kroah-Hartman <greg@kroah.com> */ /* Note that this is a complete rewrite of Simon Vogl's i2c-dev module. But I have used so much of his original code and ideas that it seems only fair to recognize him as co-author -- Frodo */ /* The I2C_RDWR ioctl code is written by Kolja Waschk <waschk@telos.de> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/cdev.h> #include <linux/compat.h> #include <linux/device.h> #include <linux/fs.h> #include <linux/i2c-dev.h> #include <linux/i2c.h> #include <linux/init.h> #include <linux/jiffies.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/module.h> #include <linux/notifier.h> #include <linux/slab.h> #include <linux/uaccess.h> /* * An i2c_dev represents an i2c_adapter ... an I2C or SMBus master, not a * slave (i2c_client) with which messages will be exchanged. It's coupled * with a character special file which is accessed by user mode drivers. * * The list of i2c_dev structures is parallel to the i2c_adapter lists * maintained by the driver model, and is updated using bus notifications. */ struct i2c_dev { struct list_head list; struct i2c_adapter *adap; struct device dev; struct cdev cdev; }; #define I2C_MINORS (MINORMASK + 1) static LIST_HEAD(i2c_dev_list); static DEFINE_SPINLOCK(i2c_dev_list_lock); static struct i2c_dev *i2c_dev_get_by_minor(unsigned index) { struct i2c_dev *i2c_dev; spin_lock(&i2c_dev_list_lock); list_for_each_entry(i2c_dev, &i2c_dev_list, list) { if (i2c_dev->adap->nr == index) goto found; } i2c_dev = NULL; found: spin_unlock(&i2c_dev_list_lock); return i2c_dev; } static struct i2c_dev *get_free_i2c_dev(struct i2c_adapter *adap) { struct i2c_dev *i2c_dev; if (adap->nr >= I2C_MINORS) { pr_err("Out of device minors (%d)\n", adap->nr); return ERR_PTR(-ENODEV); } i2c_dev = kzalloc(sizeof(*i2c_dev), GFP_KERNEL); if (!i2c_dev) return ERR_PTR(-ENOMEM); i2c_dev->adap = adap; spin_lock(&i2c_dev_list_lock); list_add_tail(&i2c_dev->list, &i2c_dev_list); spin_unlock(&i2c_dev_list_lock); return i2c_dev; } static void put_i2c_dev(struct i2c_dev *i2c_dev, bool del_cdev) { spin_lock(&i2c_dev_list_lock); list_del(&i2c_dev->list); spin_unlock(&i2c_dev_list_lock); if (del_cdev) cdev_device_del(&i2c_dev->cdev, &i2c_dev->dev); put_device(&i2c_dev->dev); } static ssize_t name_show(struct device *dev, struct device_attribute *attr, char *buf) { struct i2c_dev *i2c_dev = i2c_dev_get_by_minor(MINOR(dev->devt)); if (!i2c_dev) return -ENODEV; return sysfs_emit(buf, "%s\n", i2c_dev->adap->name); } static DEVICE_ATTR_RO(name); static struct attribute *i2c_attrs[] = { &dev_attr_name.attr, NULL, }; ATTRIBUTE_GROUPS(i2c); /* ------------------------------------------------------------------------- */ /* * After opening an instance of this character special file, a file * descriptor starts out associated only with an i2c_adapter (and bus). * * Using the I2C_RDWR ioctl(), you can then *immediately* issue i2c_msg * traffic to any devices on the bus used by that adapter. That's because * the i2c_msg vectors embed all the addressing information they need, and * are submitted directly to an i2c_adapter. However, SMBus-only adapters * don't support that interface. * * To use read()/write() system calls on that file descriptor, or to use * SMBus interfaces (and work with SMBus-only hosts!), you must first issue * an I2C_SLAVE (or I2C_SLAVE_FORCE) ioctl. 
That configures an anonymous * (never registered) i2c_client so it holds the addressing information * needed by those system calls and by this SMBus interface. */ static ssize_t i2cdev_read(struct file *file, char __user *buf, size_t count, loff_t *offset) { char *tmp; int ret; struct i2c_client *client = file->private_data; /* Adapter must support I2C transfers */ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) return -EOPNOTSUPP; if (count > 8192) count = 8192; tmp = kzalloc(count, GFP_KERNEL); if (tmp == NULL) return -ENOMEM; pr_debug("i2c-%d reading %zu bytes.\n", iminor(file_inode(file)), count); ret = i2c_master_recv(client, tmp, count); if (ret >= 0) if (copy_to_user(buf, tmp, ret)) ret = -EFAULT; kfree(tmp); return ret; } static ssize_t i2cdev_write(struct file *file, const char __user *buf, size_t count, loff_t *offset) { int ret; char *tmp; struct i2c_client *client = file->private_data; /* Adapter must support I2C transfers */ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) return -EOPNOTSUPP; if (count > 8192) count = 8192; tmp = memdup_user(buf, count); if (IS_ERR(tmp)) return PTR_ERR(tmp); pr_debug("i2c-%d writing %zu bytes.\n", iminor(file_inode(file)), count); ret = i2c_master_send(client, tmp, count); kfree(tmp); return ret; } static int i2cdev_check(struct device *dev, void *addrp) { struct i2c_client *client = i2c_verify_client(dev); if (!client || client->addr != *(unsigned int *)addrp) return 0; return dev->driver ? -EBUSY : 0; } /* walk up mux tree */ static int i2cdev_check_mux_parents(struct i2c_adapter *adapter, int addr) { struct i2c_adapter *parent = i2c_parent_is_i2c_adapter(adapter); int result; result = device_for_each_child(&adapter->dev, &addr, i2cdev_check); if (!result && parent) result = i2cdev_check_mux_parents(parent, addr); return result; } /* recurse down mux tree */ static int i2cdev_check_mux_children(struct device *dev, void *addrp) { int result; if (dev->type == &i2c_adapter_type) result = device_for_each_child(dev, addrp, i2cdev_check_mux_children); else result = i2cdev_check(dev, addrp); return result; } /* This address checking function differs from the one in i2c-core in that it considers an address with a registered device, but no driver bound to it, as NOT busy. 
*/ static int i2cdev_check_addr(struct i2c_adapter *adapter, unsigned int addr) { struct i2c_adapter *parent = i2c_parent_is_i2c_adapter(adapter); int result = 0; if (parent) result = i2cdev_check_mux_parents(parent, addr); if (!result) result = device_for_each_child(&adapter->dev, &addr, i2cdev_check_mux_children); return result; } static noinline int i2cdev_ioctl_rdwr(struct i2c_client *client, unsigned nmsgs, struct i2c_msg *msgs) { u8 __user **data_ptrs; int i, res; /* Adapter must support I2C transfers */ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) return -EOPNOTSUPP; data_ptrs = kmalloc_array(nmsgs, sizeof(u8 __user *), GFP_KERNEL); if (!data_ptrs) return -ENOMEM; res = 0; for (i = 0; i < nmsgs; i++) { /* Limit the size of the message to a sane amount */ if (msgs[i].len > 8192) { res = -EINVAL; break; } data_ptrs[i] = (u8 __user *)msgs[i].buf; msgs[i].buf = memdup_user(data_ptrs[i], msgs[i].len); if (IS_ERR(msgs[i].buf)) { res = PTR_ERR(msgs[i].buf); break; } /* memdup_user allocates with GFP_KERNEL, so DMA is ok */ msgs[i].flags |= I2C_M_DMA_SAFE; /* * If the message length is received from the slave (similar * to SMBus block read), we must ensure that the buffer will * be large enough to cope with a message length of * I2C_SMBUS_BLOCK_MAX as this is the maximum underlying bus * drivers allow. The first byte in the buffer must be * pre-filled with the number of extra bytes, which must be * at least one to hold the message length, but can be * greater (for example to account for a checksum byte at * the end of the message.) */ if (msgs[i].flags & I2C_M_RECV_LEN) { if (!(msgs[i].flags & I2C_M_RD) || msgs[i].len < 1 || msgs[i].buf[0] < 1 || msgs[i].len < msgs[i].buf[0] + I2C_SMBUS_BLOCK_MAX) { i++; res = -EINVAL; break; } msgs[i].len = msgs[i].buf[0]; } } if (res < 0) { int j; for (j = 0; j < i; ++j) kfree(msgs[j].buf); kfree(data_ptrs); return res; } res = i2c_transfer(client->adapter, msgs, nmsgs); while (i-- > 0) { if (res >= 0 && (msgs[i].flags & I2C_M_RD)) { if (copy_to_user(data_ptrs[i], msgs[i].buf, msgs[i].len)) res = -EFAULT; } kfree(msgs[i].buf); } kfree(data_ptrs); return res; } static noinline int i2cdev_ioctl_smbus(struct i2c_client *client, u8 read_write, u8 command, u32 size, union i2c_smbus_data __user *data) { union i2c_smbus_data temp = {}; int datasize, res; if ((size != I2C_SMBUS_BYTE) && (size != I2C_SMBUS_QUICK) && (size != I2C_SMBUS_BYTE_DATA) && (size != I2C_SMBUS_WORD_DATA) && (size != I2C_SMBUS_PROC_CALL) && (size != I2C_SMBUS_BLOCK_DATA) && (size != I2C_SMBUS_I2C_BLOCK_BROKEN) && (size != I2C_SMBUS_I2C_BLOCK_DATA) && (size != I2C_SMBUS_BLOCK_PROC_CALL)) { dev_dbg(&client->adapter->dev, "size out of range (%x) in ioctl I2C_SMBUS.\n", size); return -EINVAL; } /* Note that I2C_SMBUS_READ and I2C_SMBUS_WRITE are 0 and 1, so the check is valid if size==I2C_SMBUS_QUICK too. */ if ((read_write != I2C_SMBUS_READ) && (read_write != I2C_SMBUS_WRITE)) { dev_dbg(&client->adapter->dev, "read_write out of range (%x) in ioctl I2C_SMBUS.\n", read_write); return -EINVAL; } /* Note that command values are always valid! 
*/ if ((size == I2C_SMBUS_QUICK) || ((size == I2C_SMBUS_BYTE) && (read_write == I2C_SMBUS_WRITE))) /* These are special: we do not use data */ return i2c_smbus_xfer(client->adapter, client->addr, client->flags, read_write, command, size, NULL); if (data == NULL) { dev_dbg(&client->adapter->dev, "data is NULL pointer in ioctl I2C_SMBUS.\n"); return -EINVAL; } if ((size == I2C_SMBUS_BYTE_DATA) || (size == I2C_SMBUS_BYTE)) datasize = sizeof(data->byte); else if ((size == I2C_SMBUS_WORD_DATA) || (size == I2C_SMBUS_PROC_CALL)) datasize = sizeof(data->word); else /* size == smbus block, i2c block, or block proc. call */ datasize = sizeof(data->block); if ((size == I2C_SMBUS_PROC_CALL) || (size == I2C_SMBUS_BLOCK_PROC_CALL) || (size == I2C_SMBUS_I2C_BLOCK_DATA) || (read_write == I2C_SMBUS_WRITE)) { if (copy_from_user(&temp, data, datasize)) return -EFAULT; } if (size == I2C_SMBUS_I2C_BLOCK_BROKEN) { /* Convert old I2C block commands to the new convention. This preserves binary compatibility. */ size = I2C_SMBUS_I2C_BLOCK_DATA; if (read_write == I2C_SMBUS_READ) temp.block[0] = I2C_SMBUS_BLOCK_MAX; } res = i2c_smbus_xfer(client->adapter, client->addr, client->flags, read_write, command, size, &temp); if (!res && ((size == I2C_SMBUS_PROC_CALL) || (size == I2C_SMBUS_BLOCK_PROC_CALL) || (read_write == I2C_SMBUS_READ))) { if (copy_to_user(data, &temp, datasize)) return -EFAULT; } return res; } static long i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct i2c_client *client = file->private_data; unsigned long funcs; dev_dbg(&client->adapter->dev, "ioctl, cmd=0x%02x, arg=0x%02lx\n", cmd, arg); switch (cmd) { case I2C_SLAVE: case I2C_SLAVE_FORCE: if ((arg > 0x3ff) || (((client->flags & I2C_M_TEN) == 0) && arg > 0x7f)) return -EINVAL; if (cmd == I2C_SLAVE && i2cdev_check_addr(client->adapter, arg)) return -EBUSY; /* REVISIT: address could become busy later */ client->addr = arg; return 0; case I2C_TENBIT: if (arg) client->flags |= I2C_M_TEN; else client->flags &= ~I2C_M_TEN; return 0; case I2C_PEC: /* * Setting the PEC flag here won't affect kernel drivers, * which will be using the i2c_client node registered with * the driver model core. Likewise, when that client has * the PEC flag already set, the i2c-dev driver won't see * (or use) this setting. 
*/ if (arg) client->flags |= I2C_CLIENT_PEC; else client->flags &= ~I2C_CLIENT_PEC; return 0; case I2C_FUNCS: funcs = i2c_get_functionality(client->adapter); return put_user(funcs, (unsigned long __user *)arg); case I2C_RDWR: { struct i2c_rdwr_ioctl_data rdwr_arg; struct i2c_msg *rdwr_pa; int res; if (copy_from_user(&rdwr_arg, (struct i2c_rdwr_ioctl_data __user *)arg, sizeof(rdwr_arg))) return -EFAULT; if (!rdwr_arg.msgs || rdwr_arg.nmsgs == 0) return -EINVAL; /* * Put an arbitrary limit on the number of messages that can * be sent at once */ if (rdwr_arg.nmsgs > I2C_RDWR_IOCTL_MAX_MSGS) return -EINVAL; rdwr_pa = memdup_array_user(rdwr_arg.msgs, rdwr_arg.nmsgs, sizeof(struct i2c_msg)); if (IS_ERR(rdwr_pa)) return PTR_ERR(rdwr_pa); res = i2cdev_ioctl_rdwr(client, rdwr_arg.nmsgs, rdwr_pa); kfree(rdwr_pa); return res; } case I2C_SMBUS: { struct i2c_smbus_ioctl_data data_arg; if (copy_from_user(&data_arg, (struct i2c_smbus_ioctl_data __user *) arg, sizeof(struct i2c_smbus_ioctl_data))) return -EFAULT; return i2cdev_ioctl_smbus(client, data_arg.read_write, data_arg.command, data_arg.size, data_arg.data); } case I2C_RETRIES: if (arg > INT_MAX) return -EINVAL; client->adapter->retries = arg; break; case I2C_TIMEOUT: if (arg > INT_MAX) return -EINVAL; /* For historical reasons, user-space sets the timeout * value in units of 10 ms. */ client->adapter->timeout = msecs_to_jiffies(arg * 10); break; default: /* NOTE: returning a fault code here could cause trouble * in buggy userspace code. Some old kernel bugs returned * zero in this case, and userspace code might accidentally * have depended on that bug. */ return -ENOTTY; } return 0; } #ifdef CONFIG_COMPAT struct i2c_smbus_ioctl_data32 { u8 read_write; u8 command; u32 size; compat_caddr_t data; /* union i2c_smbus_data *data */ }; struct i2c_msg32 { u16 addr; u16 flags; u16 len; compat_caddr_t buf; }; struct i2c_rdwr_ioctl_data32 { compat_caddr_t msgs; /* struct i2c_msg __user *msgs */ u32 nmsgs; }; static long compat_i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct i2c_client *client = file->private_data; unsigned long funcs; switch (cmd) { case I2C_FUNCS: funcs = i2c_get_functionality(client->adapter); return put_user(funcs, (compat_ulong_t __user *)arg); case I2C_RDWR: { struct i2c_rdwr_ioctl_data32 rdwr_arg; struct i2c_msg32 __user *p; struct i2c_msg *rdwr_pa; int i, res; if (copy_from_user(&rdwr_arg, (struct i2c_rdwr_ioctl_data32 __user *)arg, sizeof(rdwr_arg))) return -EFAULT; if (!rdwr_arg.msgs || rdwr_arg.nmsgs == 0) return -EINVAL; if (rdwr_arg.nmsgs > I2C_RDWR_IOCTL_MAX_MSGS) return -EINVAL; rdwr_pa = kmalloc_array(rdwr_arg.nmsgs, sizeof(struct i2c_msg), GFP_KERNEL); if (!rdwr_pa) return -ENOMEM; p = compat_ptr(rdwr_arg.msgs); for (i = 0; i < rdwr_arg.nmsgs; i++) { struct i2c_msg32 umsg; if (copy_from_user(&umsg, p + i, sizeof(umsg))) { kfree(rdwr_pa); return -EFAULT; } rdwr_pa[i] = (struct i2c_msg) { .addr = umsg.addr, .flags = umsg.flags, .len = umsg.len, .buf = (__force __u8 *)compat_ptr(umsg.buf), }; } res = i2cdev_ioctl_rdwr(client, rdwr_arg.nmsgs, rdwr_pa); kfree(rdwr_pa); return res; } case I2C_SMBUS: { struct i2c_smbus_ioctl_data32 data32; if (copy_from_user(&data32, (void __user *) arg, sizeof(data32))) return -EFAULT; return i2cdev_ioctl_smbus(client, data32.read_write, data32.command, data32.size, compat_ptr(data32.data)); } default: return i2cdev_ioctl(file, cmd, arg); } } #else #define compat_i2cdev_ioctl NULL #endif static int i2cdev_open(struct inode *inode, struct file *file) { unsigned int 
minor = iminor(inode); struct i2c_client *client; struct i2c_adapter *adap; adap = i2c_get_adapter(minor); if (!adap) return -ENODEV; /* This creates an anonymous i2c_client, which may later be * pointed to some address using I2C_SLAVE or I2C_SLAVE_FORCE. * * This client is ** NEVER REGISTERED ** with the driver model * or I2C core code!! It just holds private copies of addressing * information and maybe a PEC flag. */ client = kzalloc(sizeof(*client), GFP_KERNEL); if (!client) { i2c_put_adapter(adap); return -ENOMEM; } snprintf(client->name, I2C_NAME_SIZE, "i2c-dev %d", adap->nr); client->adapter = adap; file->private_data = client; return 0; } static int i2cdev_release(struct inode *inode, struct file *file) { struct i2c_client *client = file->private_data; i2c_put_adapter(client->adapter); kfree(client); file->private_data = NULL; return 0; } static const struct file_operations i2cdev_fops = { .owner = THIS_MODULE, .read = i2cdev_read, .write = i2cdev_write, .unlocked_ioctl = i2cdev_ioctl, .compat_ioctl = compat_i2cdev_ioctl, .open = i2cdev_open, .release = i2cdev_release, }; /* ------------------------------------------------------------------------- */ static const struct class i2c_dev_class = { .name = "i2c-dev", .dev_groups = i2c_groups, }; static void i2cdev_dev_release(struct device *dev) { struct i2c_dev *i2c_dev; i2c_dev = container_of(dev, struct i2c_dev, dev); kfree(i2c_dev); } static int i2cdev_attach_adapter(struct device *dev) { struct i2c_adapter *adap; struct i2c_dev *i2c_dev; int res; if (dev->type != &i2c_adapter_type) return NOTIFY_DONE; adap = to_i2c_adapter(dev); i2c_dev = get_free_i2c_dev(adap); if (IS_ERR(i2c_dev)) return NOTIFY_DONE; cdev_init(&i2c_dev->cdev, &i2cdev_fops); i2c_dev->cdev.owner = THIS_MODULE; device_initialize(&i2c_dev->dev); i2c_dev->dev.devt = MKDEV(I2C_MAJOR, adap->nr); i2c_dev->dev.class = &i2c_dev_class; i2c_dev->dev.parent = &adap->dev; i2c_dev->dev.release = i2cdev_dev_release; res = dev_set_name(&i2c_dev->dev, "i2c-%d", adap->nr); if (res) goto err_put_i2c_dev; res = cdev_device_add(&i2c_dev->cdev, &i2c_dev->dev); if (res) goto err_put_i2c_dev; pr_debug("adapter [%s] registered as minor %d\n", adap->name, adap->nr); return NOTIFY_OK; err_put_i2c_dev: put_i2c_dev(i2c_dev, false); return NOTIFY_DONE; } static int i2cdev_detach_adapter(struct device *dev) { struct i2c_adapter *adap; struct i2c_dev *i2c_dev; if (dev->type != &i2c_adapter_type) return NOTIFY_DONE; adap = to_i2c_adapter(dev); i2c_dev = i2c_dev_get_by_minor(adap->nr); if (!i2c_dev) /* attach_adapter must have failed */ return NOTIFY_DONE; put_i2c_dev(i2c_dev, true); pr_debug("adapter [%s] unregistered\n", adap->name); return NOTIFY_OK; } static int i2cdev_notifier_call(struct notifier_block *nb, unsigned long action, void *data) { struct device *dev = data; switch (action) { case BUS_NOTIFY_ADD_DEVICE: return i2cdev_attach_adapter(dev); case BUS_NOTIFY_DEL_DEVICE: return i2cdev_detach_adapter(dev); } return NOTIFY_DONE; } static struct notifier_block i2cdev_notifier = { .notifier_call = i2cdev_notifier_call, }; /* ------------------------------------------------------------------------- */ static int __init i2c_dev_attach_adapter(struct device *dev, void *dummy) { i2cdev_attach_adapter(dev); return 0; } static int __exit i2c_dev_detach_adapter(struct device *dev, void *dummy) { i2cdev_detach_adapter(dev); return 0; } /* * module load/unload record keeping */ static int __init i2c_dev_init(void) { int res; pr_info("i2c /dev entries driver\n"); res = 
register_chrdev_region(MKDEV(I2C_MAJOR, 0), I2C_MINORS, "i2c"); if (res) goto out; res = class_register(&i2c_dev_class); if (res) goto out_unreg_chrdev; /* Keep track of adapters which will be added or removed later */ res = bus_register_notifier(&i2c_bus_type, &i2cdev_notifier); if (res) goto out_unreg_class; /* Bind to already existing adapters right away */ i2c_for_each_dev(NULL, i2c_dev_attach_adapter); return 0; out_unreg_class: class_unregister(&i2c_dev_class); out_unreg_chrdev: unregister_chrdev_region(MKDEV(I2C_MAJOR, 0), I2C_MINORS); out: pr_err("Driver Initialisation failed\n"); return res; } static void __exit i2c_dev_exit(void) { bus_unregister_notifier(&i2c_bus_type, &i2cdev_notifier); i2c_for_each_dev(NULL, i2c_dev_detach_adapter); class_unregister(&i2c_dev_class); unregister_chrdev_region(MKDEV(I2C_MAJOR, 0), I2C_MINORS); } MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl>"); MODULE_AUTHOR("Simon G. Vogl <simon@tk.uni-linz.ac.at>"); MODULE_DESCRIPTION("I2C /dev entries driver"); MODULE_LICENSE("GPL"); module_init(i2c_dev_init); module_exit(i2c_dev_exit); |
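/*
 * Userspace sketch (not part of the driver above) of the character-device
 * interface it implements, as described in the comments earlier in the file:
 * bind the fd to a slave address with I2C_SLAVE, then use plain
 * read()/write() (or the I2C_RDWR ioctl for combined transfers).  The device
 * path, slave address and register layout are made up for illustration.
 */
#include <fcntl.h>
#include <unistd.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>

int main(void)
{
	unsigned char reg = 0x00, val;
	int fd = open("/dev/i2c-1", O_RDWR);

	if (fd < 0)
		return 1;

	/*
	 * Associate this fd with slave 0x50.  This fails with -EBUSY if a
	 * kernel driver is already bound; I2C_SLAVE_FORCE would override it.
	 */
	if (ioctl(fd, I2C_SLAVE, 0x50) < 0)
		goto out;

	/* Simple register read: write the register number, read one byte. */
	if (write(fd, &reg, 1) == 1 && read(fd, &val, 1) == 1)
		printf("reg 0x%02x = 0x%02x\n", reg, val);
out:
	close(fd);
	return 0;
}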
2347 2348 2349 2350 2351 2352 2353 2354 2355 2356 2357 2358 2359 2360 2361 2362 2363 2364 2365 2366 2367 2368 2369 2370 2371 2372 2373 2374 2375 2376 2377 2378 2379 2380 2381 2382 2383 2384 2385 2386 2387 2388 2389 2390 2391 2392 2393 2394 2395 2396 2397 2398 2399 2400 2401 2402 2403 2404 2405 2406 2407 2408 2409 2410 2411 2412 2413 2414 2415 2416 2417 2418 2419 2420 2421 2422 2423 2424 2425 2426 2427 2428 2429 2430 2431 2432 2433 2434 2435 2436 2437 2438 2439 2440 2441 2442 2443 2444 2445 2446 2447 2448 2449 2450 2451 2452 2453 2454 2455 2456 2457 2458 2459 2460 2461 2462 2463 2464 2465 2466 2467 2468 2469 2470 2471 2472 2473 2474 2475 2476 2477 2478 2479 2480 2481 2482 2483 2484 2485 2486 2487 2488 2489 2490 2491 2492 2493 2494 2495 2496 2497 2498 2499 2500 2501 2502 2503 2504 2505 2506 2507 2508 2509 2510 2511 2512 2513 2514 2515 2516 2517 2518 2519 2520 2521 2522 2523 2524 2525 2526 2527 2528 2529 2530 2531 2532 2533 2534 2535 2536 2537 2538 2539 2540 2541 2542 2543 2544 2545 2546 2547 2548 2549 2550 2551 2552 2553 2554 2555 2556 2557 2558 2559 2560 2561 2562 2563 2564 2565 2566 2567 2568 2569 2570 2571 2572 2573 2574 2575 2576 2577 2578 2579 2580 2581 2582 2583 2584 2585 2586 2587 2588 2589 2590 2591 2592 2593 2594 2595 2596 2597 2598 2599 2600 2601 2602 2603 2604 2605 2606 2607 2608 2609 2610 2611 2612 2613 2614 2615 2616 2617 2618 2619 2620 2621 2622 2623 2624 2625 2626 2627 2628 2629 2630 2631 2632 2633 2634 2635 2636 2637 2638 2639 2640 2641 2642 2643 2644 2645 2646 2647 2648 2649 2650 2651 2652 2653 2654 2655 2656 2657 2658 2659 2660 2661 2662 2663 2664 2665 2666 2667 2668 2669 2670 2671 2672 2673 2674 2675 2676 2677 2678 2679 2680 2681 2682 2683 2684 2685 2686 2687 2688 2689 2690 2691 2692 2693 2694 2695 2696 2697 2698 2699 2700 2701 2702 2703 2704 2705 2706 2707 2708 2709 2710 2711 2712 2713 2714 2715 2716 2717 2718 2719 2720 2721 2722 2723 2724 2725 2726 2727 2728 2729 2730 2731 2732 2733 2734 2735 2736 2737 2738 2739 2740 2741 2742 2743 2744 2745 2746 2747 2748 2749 2750 2751 2752 2753 2754 2755 2756 2757 2758 2759 2760 2761 2762 2763 2764 2765 2766 2767 2768 2769 2770 2771 2772 2773 2774 2775 2776 2777 2778 2779 2780 2781 2782 2783 2784 2785 2786 2787 2788 2789 2790 2791 2792 2793 2794 2795 2796 2797 2798 2799 2800 2801 2802 2803 2804 2805 2806 2807 2808 2809 2810 2811 2812 2813 2814 2815 2816 2817 2818 2819 2820 2821 2822 2823 2824 2825 2826 2827 2828 2829 2830 2831 2832 2833 2834 2835 2836 2837 2838 2839 2840 2841 2842 2843 2844 2845 2846 2847 2848 2849 2850 2851 2852 2853 2854 2855 2856 2857 2858 2859 2860 2861 2862 2863 2864 2865 2866 2867 2868 2869 2870 2871 2872 2873 2874 2875 2876 2877 2878 2879 2880 2881 2882 2883 2884 2885 2886 2887 2888 2889 2890 2891 2892 2893 2894 2895 2896 2897 2898 2899 2900 2901 2902 2903 2904 2905 2906 2907 2908 2909 2910 2911 2912 2913 2914 2915 2916 2917 2918 2919 2920 2921 2922 2923 2924 2925 2926 2927 2928 2929 2930 2931 2932 2933 2934 2935 2936 2937 2938 2939 2940 2941 2942 2943 2944 2945 2946 2947 2948 2949 2950 2951 2952 2953 2954 2955 2956 2957 2958 2959 2960 2961 2962 2963 2964 2965 2966 2967 2968 2969 2970 2971 2972 2973 2974 2975 2976 2977 2978 2979 2980 2981 2982 2983 2984 2985 2986 2987 2988 2989 2990 2991 2992 2993 2994 2995 2996 2997 2998 2999 3000 3001 3002 3003 3004 3005 3006 3007 3008 3009 3010 3011 3012 3013 3014 3015 3016 3017 3018 3019 3020 3021 3022 3023 3024 3025 3026 3027 3028 3029 3030 3031 3032 3033 3034 3035 3036 3037 3038 3039 3040 3041 3042 3043 3044 3045 3046 3047 3048 3049 3050 3051 3052 3053 3054 3055 3056 3057 
// SPDX-License-Identifier: GPL-2.0-or-later /* cx231xx_avcore.c - driver for Conexant Cx23100/101/102 USB video capture devices Copyright (C) 2008 <srinivasa.deevi at conexant dot com> This program contains the specific code to control the avdecoder chip and other related usb control functions for cx231xx based chipset. */ #include "cx231xx.h" #include <linux/init.h> #include <linux/list.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/bitmap.h> #include <linux/i2c.h> #include <linux/mm.h> #include <linux/mutex.h> #include <media/tuner.h> #include <media/v4l2-common.h> #include <media/v4l2-ioctl.h> #include "cx231xx-dif.h" #define TUNER_MODE_FM_RADIO 0 /****************************************************************************** -: BLOCK ARRANGEMENT :- I2S block ----------------------| [I2S audio] | | Analog Front End --> Direct IF -|-> Cx25840 --> Audio [video & audio] | [Audio] | |-> Cx25840 --> Video [Video] *******************************************************************************/ /****************************************************************************** * VERVE REGISTER * * * ******************************************************************************/ static int verve_write_byte(struct cx231xx *dev, u8 saddr, u8 data) { return cx231xx_write_i2c_data(dev, VERVE_I2C_ADDRESS, saddr, 1, data, 1); } static int verve_read_byte(struct cx231xx *dev, u8 saddr, u8 *data) { int status; u32 temp = 0; status = cx231xx_read_i2c_data(dev, VERVE_I2C_ADDRESS, saddr, 1, &temp, 1); *data = (u8) temp; return status; } void initGPIO(struct cx231xx *dev) { u32 _gpio_direction = 0; u32 value = 0; u8 val = 0; _gpio_direction = _gpio_direction & 0xFC0003FF; _gpio_direction = _gpio_direction | 0x03FDFC00; cx231xx_send_gpio_cmd(dev, _gpio_direction, (u8 *)&value, 4, 0, 0); verve_read_byte(dev, 0x07, &val); dev_dbg(dev->dev, "verve_read_byte address0x07=0x%x\n", val); verve_write_byte(dev, 0x07, 0xF4); verve_read_byte(dev, 0x07, &val); dev_dbg(dev->dev, "verve_read_byte address0x07=0x%x\n", val); cx231xx_capture_start(dev, 1, Vbi); cx231xx_mode_register(dev, EP_MODE_SET, 0x0500FE00); cx231xx_mode_register(dev, GBULK_BIT_EN, 0xFFFDFFFF); } void uninitGPIO(struct cx231xx *dev) { u8 value[4] = { 0, 0, 0, 0 }; cx231xx_capture_start(dev, 0, Vbi); verve_write_byte(dev, 0x07, 0x14); cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, 0x68, value, 4); } /****************************************************************************** * A F E - B L O C K C O N T R O L functions * * [ANALOG FRONT END] * ******************************************************************************/ static int afe_write_byte(struct cx231xx *dev, u16 saddr, u8 data) { return cx231xx_write_i2c_data(dev, AFE_DEVICE_ADDRESS, saddr, 2, data, 1); } static int afe_read_byte(struct cx231xx *dev, u16 saddr, u8 *data) { int status; u32 temp = 0; status = cx231xx_read_i2c_data(dev, AFE_DEVICE_ADDRESS, saddr, 2, &temp, 1); *data = (u8) temp; return status; } int cx231xx_afe_init_super_block(struct cx231xx *dev, u32 ref_count) { int status = 0; u8 temp = 0; u8 afe_power_status = 0; int i = 0; /* super block initialize */ temp = (u8) (ref_count & 0xff); status = afe_write_byte(dev, SUP_BLK_TUNE2, temp); if (status < 0) return status; status = afe_read_byte(dev, SUP_BLK_TUNE2, &afe_power_status); if (status < 0) return status; temp = (u8) ((ref_count & 0x300) >> 8); temp |= 0x40; status = afe_write_byte(dev, SUP_BLK_TUNE1, temp); if (status < 0) return status; status
= afe_write_byte(dev, SUP_BLK_PLL2, 0x0f); if (status < 0) return status; /* enable pll */ while (afe_power_status != 0x18) { status = afe_write_byte(dev, SUP_BLK_PWRDN, 0x18); if (status < 0) { dev_dbg(dev->dev, "%s: Init Super Block failed in send cmd\n", __func__); break; } status = afe_read_byte(dev, SUP_BLK_PWRDN, &afe_power_status); afe_power_status &= 0xff; if (status < 0) { dev_dbg(dev->dev, "%s: Init Super Block failed in receive cmd\n", __func__); break; } i++; if (i == 10) { dev_dbg(dev->dev, "%s: Init Super Block force break in loop !!!!\n", __func__); status = -1; break; } } if (status < 0) return status; /* start tuning filter */ status = afe_write_byte(dev, SUP_BLK_TUNE3, 0x40); if (status < 0) return status; msleep(5); /* exit tuning */ status = afe_write_byte(dev, SUP_BLK_TUNE3, 0x00); return status; } int cx231xx_afe_init_channels(struct cx231xx *dev) { int status = 0; /* power up all 3 channels, clear pd_buffer */ status = afe_write_byte(dev, ADC_PWRDN_CLAMP_CH1, 0x00); status = afe_write_byte(dev, ADC_PWRDN_CLAMP_CH2, 0x00); status = afe_write_byte(dev, ADC_PWRDN_CLAMP_CH3, 0x00); /* Enable quantizer calibration */ status = afe_write_byte(dev, ADC_COM_QUANT, 0x02); /* channel initialize, force modulator (fb) reset */ status = afe_write_byte(dev, ADC_FB_FRCRST_CH1, 0x17); status = afe_write_byte(dev, ADC_FB_FRCRST_CH2, 0x17); status = afe_write_byte(dev, ADC_FB_FRCRST_CH3, 0x17); /* start quantilizer calibration */ status = afe_write_byte(dev, ADC_CAL_ATEST_CH1, 0x10); status = afe_write_byte(dev, ADC_CAL_ATEST_CH2, 0x10); status = afe_write_byte(dev, ADC_CAL_ATEST_CH3, 0x10); msleep(5); /* exit modulator (fb) reset */ status = afe_write_byte(dev, ADC_FB_FRCRST_CH1, 0x07); status = afe_write_byte(dev, ADC_FB_FRCRST_CH2, 0x07); status = afe_write_byte(dev, ADC_FB_FRCRST_CH3, 0x07); /* enable the pre_clamp in each channel for single-ended input */ status = afe_write_byte(dev, ADC_NTF_PRECLMP_EN_CH1, 0xf0); status = afe_write_byte(dev, ADC_NTF_PRECLMP_EN_CH2, 0xf0); status = afe_write_byte(dev, ADC_NTF_PRECLMP_EN_CH3, 0xf0); /* use diode instead of resistor, so set term_en to 0, res_en to 0 */ status = cx231xx_reg_mask_write(dev, AFE_DEVICE_ADDRESS, 8, ADC_QGAIN_RES_TRM_CH1, 3, 7, 0x00); status = cx231xx_reg_mask_write(dev, AFE_DEVICE_ADDRESS, 8, ADC_QGAIN_RES_TRM_CH2, 3, 7, 0x00); status = cx231xx_reg_mask_write(dev, AFE_DEVICE_ADDRESS, 8, ADC_QGAIN_RES_TRM_CH3, 3, 7, 0x00); /* dynamic element matching off */ status = afe_write_byte(dev, ADC_DCSERVO_DEM_CH1, 0x03); status = afe_write_byte(dev, ADC_DCSERVO_DEM_CH2, 0x03); status = afe_write_byte(dev, ADC_DCSERVO_DEM_CH3, 0x03); return status; } int cx231xx_afe_setup_AFE_for_baseband(struct cx231xx *dev) { u8 c_value = 0; int status = 0; status = afe_read_byte(dev, ADC_PWRDN_CLAMP_CH2, &c_value); c_value &= (~(0x50)); status = afe_write_byte(dev, ADC_PWRDN_CLAMP_CH2, c_value); return status; } /* The Analog Front End in Cx231xx has 3 channels. These channels are used to share between different inputs like tuner, s-video and composite inputs. 
channel 1 ----- pin 1 to pin4(in reg is 1-4) channel 2 ----- pin 5 to pin8(in reg is 5-8) channel 3 ----- pin 9 to pin 12(in reg is 9-11) */ int cx231xx_afe_set_input_mux(struct cx231xx *dev, u32 input_mux) { u8 ch1_setting = (u8) input_mux; u8 ch2_setting = (u8) (input_mux >> 8); u8 ch3_setting = (u8) (input_mux >> 16); int status = 0; u8 value = 0; if (ch1_setting != 0) { status = afe_read_byte(dev, ADC_INPUT_CH1, &value); value &= ~INPUT_SEL_MASK; value |= (ch1_setting - 1) << 4; value &= 0xff; status = afe_write_byte(dev, ADC_INPUT_CH1, value); } if (ch2_setting != 0) { status = afe_read_byte(dev, ADC_INPUT_CH2, &value); value &= ~INPUT_SEL_MASK; value |= (ch2_setting - 1) << 4; value &= 0xff; status = afe_write_byte(dev, ADC_INPUT_CH2, value); } /* For ch3_setting, the value to put in the register is 7 less than the input number */ if (ch3_setting != 0) { status = afe_read_byte(dev, ADC_INPUT_CH3, &value); value &= ~INPUT_SEL_MASK; value |= (ch3_setting - 1) << 4; value &= 0xff; status = afe_write_byte(dev, ADC_INPUT_CH3, value); } return status; } int cx231xx_afe_set_mode(struct cx231xx *dev, enum AFE_MODE mode) { int status = 0; /* * FIXME: We need to implement the AFE code for LOW IF and for HI IF. * Currently, only baseband works. */ switch (mode) { case AFE_MODE_LOW_IF: cx231xx_Setup_AFE_for_LowIF(dev); break; case AFE_MODE_BASEBAND: status = cx231xx_afe_setup_AFE_for_baseband(dev); break; case AFE_MODE_EU_HI_IF: /* SetupAFEforEuHiIF(); */ break; case AFE_MODE_US_HI_IF: /* SetupAFEforUsHiIF(); */ break; case AFE_MODE_JAPAN_HI_IF: /* SetupAFEforJapanHiIF(); */ break; } if ((mode != dev->afe_mode) && (dev->video_input == CX231XX_VMUX_TELEVISION)) status = cx231xx_afe_adjust_ref_count(dev, CX231XX_VMUX_TELEVISION); dev->afe_mode = mode; return status; } int cx231xx_afe_update_power_control(struct cx231xx *dev, enum AV_MODE avmode) { u8 afe_power_status = 0; int status = 0; switch (dev->model) { case CX231XX_BOARD_CNXT_CARRAERA: case CX231XX_BOARD_CNXT_RDE_250: case CX231XX_BOARD_CNXT_SHELBY: case CX231XX_BOARD_CNXT_RDU_250: case CX231XX_BOARD_CNXT_RDE_253S: case CX231XX_BOARD_CNXT_RDU_253S: case CX231XX_BOARD_CNXT_VIDEO_GRABBER: case CX231XX_BOARD_HAUPPAUGE_EXETER: case CX231XX_BOARD_HAUPPAUGE_930C_HD_1113xx: case CX231XX_BOARD_HAUPPAUGE_USBLIVE2: case CX231XX_BOARD_PV_PLAYTV_USB_HYBRID: case CX231XX_BOARD_HAUPPAUGE_USB2_FM_PAL: case CX231XX_BOARD_HAUPPAUGE_USB2_FM_NTSC: case CX231XX_BOARD_OTG102: if (avmode == POLARIS_AVMODE_ANALOGT_TV) { while (afe_power_status != (FLD_PWRDN_TUNING_BIAS | FLD_PWRDN_ENABLE_PLL)) { status = afe_write_byte(dev, SUP_BLK_PWRDN, FLD_PWRDN_TUNING_BIAS | FLD_PWRDN_ENABLE_PLL); status |= afe_read_byte(dev, SUP_BLK_PWRDN, &afe_power_status); if (status < 0) break; } status = afe_write_byte(dev, ADC_PWRDN_CLAMP_CH1, 0x00); status |= afe_write_byte(dev, ADC_PWRDN_CLAMP_CH2, 0x00); status |= afe_write_byte(dev, ADC_PWRDN_CLAMP_CH3, 0x00); } else if (avmode == POLARIS_AVMODE_DIGITAL) { status = afe_write_byte(dev, ADC_PWRDN_CLAMP_CH1, 0x70); status |= afe_write_byte(dev, ADC_PWRDN_CLAMP_CH2, 0x70); status |= afe_write_byte(dev, ADC_PWRDN_CLAMP_CH3, 0x70); status |= afe_read_byte(dev, SUP_BLK_PWRDN, &afe_power_status); afe_power_status |= FLD_PWRDN_PD_BANDGAP | FLD_PWRDN_PD_BIAS | FLD_PWRDN_PD_TUNECK; status |= afe_write_byte(dev, SUP_BLK_PWRDN, afe_power_status); } else if (avmode == POLARIS_AVMODE_ENXTERNAL_AV) { while (afe_power_status != (FLD_PWRDN_TUNING_BIAS | FLD_PWRDN_ENABLE_PLL)) { status = afe_write_byte(dev, SUP_BLK_PWRDN, FLD_PWRDN_TUNING_BIAS | 
FLD_PWRDN_ENABLE_PLL); status |= afe_read_byte(dev, SUP_BLK_PWRDN, &afe_power_status); if (status < 0) break; } status |= afe_write_byte(dev, ADC_PWRDN_CLAMP_CH1, 0x00); status |= afe_write_byte(dev, ADC_PWRDN_CLAMP_CH2, 0x00); status |= afe_write_byte(dev, ADC_PWRDN_CLAMP_CH3, 0x00); } else { dev_dbg(dev->dev, "Invalid AV mode input\n"); status = -1; } break; default: if (avmode == POLARIS_AVMODE_ANALOGT_TV) { while (afe_power_status != (FLD_PWRDN_TUNING_BIAS | FLD_PWRDN_ENABLE_PLL)) { status = afe_write_byte(dev, SUP_BLK_PWRDN, FLD_PWRDN_TUNING_BIAS | FLD_PWRDN_ENABLE_PLL); status |= afe_read_byte(dev, SUP_BLK_PWRDN, &afe_power_status); if (status < 0) break; } status |= afe_write_byte(dev, ADC_PWRDN_CLAMP_CH1, 0x40); status |= afe_write_byte(dev, ADC_PWRDN_CLAMP_CH2, 0x40); status |= afe_write_byte(dev, ADC_PWRDN_CLAMP_CH3, 0x00); } else if (avmode == POLARIS_AVMODE_DIGITAL) { status = afe_write_byte(dev, ADC_PWRDN_CLAMP_CH1, 0x70); status |= afe_write_byte(dev, ADC_PWRDN_CLAMP_CH2, 0x70); status |= afe_write_byte(dev, ADC_PWRDN_CLAMP_CH3, 0x70); status |= afe_read_byte(dev, SUP_BLK_PWRDN, &afe_power_status); afe_power_status |= FLD_PWRDN_PD_BANDGAP | FLD_PWRDN_PD_BIAS | FLD_PWRDN_PD_TUNECK; status |= afe_write_byte(dev, SUP_BLK_PWRDN, afe_power_status); } else if (avmode == POLARIS_AVMODE_ENXTERNAL_AV) { while (afe_power_status != (FLD_PWRDN_TUNING_BIAS | FLD_PWRDN_ENABLE_PLL)) { status = afe_write_byte(dev, SUP_BLK_PWRDN, FLD_PWRDN_TUNING_BIAS | FLD_PWRDN_ENABLE_PLL); status |= afe_read_byte(dev, SUP_BLK_PWRDN, &afe_power_status); if (status < 0) break; } status |= afe_write_byte(dev, ADC_PWRDN_CLAMP_CH1, 0x00); status |= afe_write_byte(dev, ADC_PWRDN_CLAMP_CH2, 0x00); status |= afe_write_byte(dev, ADC_PWRDN_CLAMP_CH3, 0x40); } else { dev_dbg(dev->dev, "Invalid AV mode input\n"); status = -1; } } /* switch */ return status; } int cx231xx_afe_adjust_ref_count(struct cx231xx *dev, u32 video_input) { u8 input_mode = 0; u8 ntf_mode = 0; int status = 0; dev->video_input = video_input; if (video_input == CX231XX_VMUX_TELEVISION) { status = afe_read_byte(dev, ADC_INPUT_CH3, &input_mode); status = afe_read_byte(dev, ADC_NTF_PRECLMP_EN_CH3, &ntf_mode); } else { status = afe_read_byte(dev, ADC_INPUT_CH1, &input_mode); status = afe_read_byte(dev, ADC_NTF_PRECLMP_EN_CH1, &ntf_mode); } input_mode = (ntf_mode & 0x3) | ((input_mode & 0x6) << 1); switch (input_mode) { case SINGLE_ENDED: dev->afe_ref_count = 0x23C; break; case LOW_IF: dev->afe_ref_count = 0x24C; break; case EU_IF: dev->afe_ref_count = 0x258; break; case US_IF: dev->afe_ref_count = 0x260; break; default: break; } status = cx231xx_afe_init_super_block(dev, dev->afe_ref_count); return status; } /****************************************************************************** * V I D E O / A U D I O D E C O D E R C O N T R O L functions * ******************************************************************************/ static int vid_blk_write_byte(struct cx231xx *dev, u16 saddr, u8 data) { return cx231xx_write_i2c_data(dev, VID_BLK_I2C_ADDRESS, saddr, 2, data, 1); } static int vid_blk_read_byte(struct cx231xx *dev, u16 saddr, u8 *data) { int status; u32 temp = 0; status = cx231xx_read_i2c_data(dev, VID_BLK_I2C_ADDRESS, saddr, 2, &temp, 1); *data = (u8) temp; return status; } static int vid_blk_write_word(struct cx231xx *dev, u16 saddr, u32 data) { return cx231xx_write_i2c_data(dev, VID_BLK_I2C_ADDRESS, saddr, 2, data, 4); } static int vid_blk_read_word(struct cx231xx *dev, u16 saddr, u32 *data) { return cx231xx_read_i2c_data(dev, 
VID_BLK_I2C_ADDRESS, saddr, 2, data, 4); } int cx231xx_check_fw(struct cx231xx *dev) { u8 temp = 0; int status = 0; status = vid_blk_read_byte(dev, DL_CTL_ADDRESS_LOW, &temp); if (status < 0) return status; else return temp; } int cx231xx_set_video_input_mux(struct cx231xx *dev, u8 input) { int status = 0; switch (INPUT(input)->type) { case CX231XX_VMUX_COMPOSITE1: case CX231XX_VMUX_SVIDEO: if ((dev->current_pcb_config.type == USB_BUS_POWER) && (dev->power_mode != POLARIS_AVMODE_ENXTERNAL_AV)) { /* External AV */ status = cx231xx_set_power_mode(dev, POLARIS_AVMODE_ENXTERNAL_AV); if (status < 0) { dev_err(dev->dev, "%s: Failed to set Power - errCode [%d]!\n", __func__, status); return status; } } status = cx231xx_set_decoder_video_input(dev, INPUT(input)->type, INPUT(input)->vmux); break; case CX231XX_VMUX_TELEVISION: case CX231XX_VMUX_CABLE: if ((dev->current_pcb_config.type == USB_BUS_POWER) && (dev->power_mode != POLARIS_AVMODE_ANALOGT_TV)) { /* Tuner */ status = cx231xx_set_power_mode(dev, POLARIS_AVMODE_ANALOGT_TV); if (status < 0) { dev_err(dev->dev, "%s: Failed to set Power - errCode [%d]!\n", __func__, status); return status; } } switch (dev->model) { /* i2c device tuners */ case CX231XX_BOARD_HAUPPAUGE_930C_HD_1114xx: case CX231XX_BOARD_HAUPPAUGE_935C: case CX231XX_BOARD_HAUPPAUGE_955Q: case CX231XX_BOARD_HAUPPAUGE_975: case CX231XX_BOARD_EVROMEDIA_FULL_HYBRID_FULLHD: status = cx231xx_set_decoder_video_input(dev, CX231XX_VMUX_TELEVISION, INPUT(input)->vmux); break; default: if (dev->tuner_type == TUNER_NXP_TDA18271) status = cx231xx_set_decoder_video_input(dev, CX231XX_VMUX_TELEVISION, INPUT(input)->vmux); else status = cx231xx_set_decoder_video_input(dev, CX231XX_VMUX_COMPOSITE1, INPUT(input)->vmux); break; } break; default: dev_err(dev->dev, "%s: Unknown Input %d !\n", __func__, INPUT(input)->type); break; } /* save the selection */ dev->video_input = input; return status; } int cx231xx_set_decoder_video_input(struct cx231xx *dev, u8 pin_type, u32 input) { int status = 0; u32 value = 0; if (pin_type != dev->video_input) { status = cx231xx_afe_adjust_ref_count(dev, pin_type); if (status < 0) { dev_err(dev->dev, "%s: adjust_ref_count :Failed to set AFE input mux - errCode [%d]!\n", __func__, status); return status; } } /* call afe block to set video inputs */ status = cx231xx_afe_set_input_mux(dev, input); if (status < 0) { dev_err(dev->dev, "%s: set_input_mux :Failed to set AFE input mux - errCode [%d]!\n", __func__, status); return status; } switch (pin_type) { case CX231XX_VMUX_COMPOSITE1: status = vid_blk_read_word(dev, AFE_CTRL, &value); value |= (0 << 13) | (1 << 4); value &= ~(1 << 5); /* set [24:23] [22:15] to 0 */ value &= (~(0x1ff8000)); /* set FUNC_MODE[24:23] = 2 IF_MOD[22:15] = 0 */ value |= 0x1000000; status = vid_blk_write_word(dev, AFE_CTRL, value); status = vid_blk_read_word(dev, OUT_CTRL1, &value); value |= (1 << 7); status = vid_blk_write_word(dev, OUT_CTRL1, value); /* Set output mode */ status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, OUT_CTRL1, FLD_OUT_MODE, dev->board.output_mode); /* Tell DIF object to go to baseband mode */ status = cx231xx_dif_set_standard(dev, DIF_USE_BASEBAND); if (status < 0) { dev_err(dev->dev, "%s: cx231xx_dif set to By pass mode- errCode [%d]!\n", __func__, status); return status; } /* Read the DFE_CTRL1 register */ status = vid_blk_read_word(dev, DFE_CTRL1, &value); /* enable the VBI_GATE_EN */ value |= FLD_VBI_GATE_EN; /* Enable the auto-VGA enable */ value |= FLD_VGA_AUTO_EN; /* Write it back */ status = 
vid_blk_write_word(dev, DFE_CTRL1, value); /* Disable auto config of registers */ status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, MODE_CTRL, FLD_ACFG_DIS, cx231xx_set_field(FLD_ACFG_DIS, 1)); /* Set CVBS input mode */ status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, MODE_CTRL, FLD_INPUT_MODE, cx231xx_set_field(FLD_INPUT_MODE, INPUT_MODE_CVBS_0)); break; case CX231XX_VMUX_SVIDEO: /* Disable the use of DIF */ status = vid_blk_read_word(dev, AFE_CTRL, &value); /* set [24:23] [22:15] to 0 */ value &= (~(0x1ff8000)); /* set FUNC_MODE[24:23] = 2 IF_MOD[22:15] = 0 DCR_BYP_CH2[4:4] = 1; */ value |= 0x1000010; status = vid_blk_write_word(dev, AFE_CTRL, value); /* Tell DIF object to go to baseband mode */ status = cx231xx_dif_set_standard(dev, DIF_USE_BASEBAND); if (status < 0) { dev_err(dev->dev, "%s: cx231xx_dif set to By pass mode- errCode [%d]!\n", __func__, status); return status; } /* Read the DFE_CTRL1 register */ status = vid_blk_read_word(dev, DFE_CTRL1, &value); /* enable the VBI_GATE_EN */ value |= FLD_VBI_GATE_EN; /* Enable the auto-VGA enable */ value |= FLD_VGA_AUTO_EN; /* Write it back */ status = vid_blk_write_word(dev, DFE_CTRL1, value); /* Disable auto config of registers */ status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, MODE_CTRL, FLD_ACFG_DIS, cx231xx_set_field(FLD_ACFG_DIS, 1)); /* Set YC input mode */ status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, MODE_CTRL, FLD_INPUT_MODE, cx231xx_set_field(FLD_INPUT_MODE, INPUT_MODE_YC_1)); /* Chroma to ADC2 */ status = vid_blk_read_word(dev, AFE_CTRL, &value); value |= FLD_CHROMA_IN_SEL; /* set the chroma in select */ /* Clear VGA_SEL_CH2 and VGA_SEL_CH3 (bits 7 and 8) This sets them to use video rather than audio. Only one of the two will be in use. 
*/ value &= ~(FLD_VGA_SEL_CH2 | FLD_VGA_SEL_CH3); status = vid_blk_write_word(dev, AFE_CTRL, value); status = cx231xx_afe_set_mode(dev, AFE_MODE_BASEBAND); break; case CX231XX_VMUX_TELEVISION: case CX231XX_VMUX_CABLE: default: /* TODO: Test if this is also needed for xc2028/xc3028 */ if (dev->board.tuner_type == TUNER_XC5000) { /* Disable the use of DIF */ status = vid_blk_read_word(dev, AFE_CTRL, &value); value |= (0 << 13) | (1 << 4); value &= ~(1 << 5); /* set [24:23] [22:15] to 0 */ value &= (~(0x1FF8000)); /* set FUNC_MODE[24:23] = 2 IF_MOD[22:15] = 0 */ value |= 0x1000000; status = vid_blk_write_word(dev, AFE_CTRL, value); status = vid_blk_read_word(dev, OUT_CTRL1, &value); value |= (1 << 7); status = vid_blk_write_word(dev, OUT_CTRL1, value); /* Set output mode */ status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, OUT_CTRL1, FLD_OUT_MODE, dev->board.output_mode); /* Tell DIF object to go to baseband mode */ status = cx231xx_dif_set_standard(dev, DIF_USE_BASEBAND); if (status < 0) { dev_err(dev->dev, "%s: cx231xx_dif set to By pass mode- errCode [%d]!\n", __func__, status); return status; } /* Read the DFE_CTRL1 register */ status = vid_blk_read_word(dev, DFE_CTRL1, &value); /* enable the VBI_GATE_EN */ value |= FLD_VBI_GATE_EN; /* Enable the auto-VGA enable */ value |= FLD_VGA_AUTO_EN; /* Write it back */ status = vid_blk_write_word(dev, DFE_CTRL1, value); /* Disable auto config of registers */ status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, MODE_CTRL, FLD_ACFG_DIS, cx231xx_set_field(FLD_ACFG_DIS, 1)); /* Set CVBS input mode */ status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, MODE_CTRL, FLD_INPUT_MODE, cx231xx_set_field(FLD_INPUT_MODE, INPUT_MODE_CVBS_0)); } else { /* Enable the DIF for the tuner */ /* Reinitialize the DIF */ status = cx231xx_dif_set_standard(dev, dev->norm); if (status < 0) { dev_err(dev->dev, "%s: cx231xx_dif set to By pass mode- errCode [%d]!\n", __func__, status); return status; } /* Make sure bypass is cleared */ status = vid_blk_read_word(dev, DIF_MISC_CTRL, &value); /* Clear the bypass bit */ value &= ~FLD_DIF_DIF_BYPASS; /* Enable the use of the DIF block */ status = vid_blk_write_word(dev, DIF_MISC_CTRL, value); /* Read the DFE_CTRL1 register */ status = vid_blk_read_word(dev, DFE_CTRL1, &value); /* Disable the VBI_GATE_EN */ value &= ~FLD_VBI_GATE_EN; /* Enable the auto-VGA enable, AGC, and set the skip count to 2 */ value |= FLD_VGA_AUTO_EN | FLD_AGC_AUTO_EN | 0x00200000; /* Write it back */ status = vid_blk_write_word(dev, DFE_CTRL1, value); /* Wait until AGC locks up */ msleep(1); /* Disable the auto-VGA enable AGC */ value &= ~(FLD_VGA_AUTO_EN); /* Write it back */ status = vid_blk_write_word(dev, DFE_CTRL1, value); /* Enable Polaris B0 AGC output */ status = vid_blk_read_word(dev, PIN_CTRL, &value); value |= (FLD_OEF_AGC_RF) | (FLD_OEF_AGC_IFVGA) | (FLD_OEF_AGC_IF); status = vid_blk_write_word(dev, PIN_CTRL, value); /* Set output mode */ status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, OUT_CTRL1, FLD_OUT_MODE, dev->board.output_mode); /* Disable auto config of registers */ status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, MODE_CTRL, FLD_ACFG_DIS, cx231xx_set_field(FLD_ACFG_DIS, 1)); /* Set CVBS input mode */ status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, MODE_CTRL, FLD_INPUT_MODE, cx231xx_set_field(FLD_INPUT_MODE, INPUT_MODE_CVBS_0)); /* Set some bits in AFE_CTRL so that channel 2 or 3 * is ready to receive audio */ /* 
Clear clamp for channels 2 and 3 (bit 16-17) */ /* Clear droop comp (bit 19-20) */ /* Set VGA_SEL (for audio control) (bit 7-8) */ status = vid_blk_read_word(dev, AFE_CTRL, &value); /*Set Func mode:01-DIF 10-baseband 11-YUV*/ value &= (~(FLD_FUNC_MODE)); value |= 0x800000; value |= FLD_VGA_SEL_CH3 | FLD_VGA_SEL_CH2; status = vid_blk_write_word(dev, AFE_CTRL, value); if (dev->tuner_type == TUNER_NXP_TDA18271) { status = vid_blk_read_word(dev, PIN_CTRL, &value); status = vid_blk_write_word(dev, PIN_CTRL, (value & 0xFFFFFFEF)); } break; } break; } /* Set raw VBI mode */ status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, OUT_CTRL1, FLD_VBIHACTRAW_EN, cx231xx_set_field(FLD_VBIHACTRAW_EN, 1)); status = vid_blk_read_word(dev, OUT_CTRL1, &value); if (value & 0x02) { value |= (1 << 19); status = vid_blk_write_word(dev, OUT_CTRL1, value); } return status; } void cx231xx_enable656(struct cx231xx *dev) { u8 temp = 0; /*enable TS1 data[0:7] as output to export 656*/ vid_blk_write_byte(dev, TS1_PIN_CTL0, 0xFF); /*enable TS1 clock as output to export 656*/ vid_blk_read_byte(dev, TS1_PIN_CTL1, &temp); temp = temp|0x04; vid_blk_write_byte(dev, TS1_PIN_CTL1, temp); } EXPORT_SYMBOL_GPL(cx231xx_enable656); void cx231xx_disable656(struct cx231xx *dev) { u8 temp = 0; vid_blk_write_byte(dev, TS1_PIN_CTL0, 0x00); vid_blk_read_byte(dev, TS1_PIN_CTL1, &temp); temp = temp&0xFB; vid_blk_write_byte(dev, TS1_PIN_CTL1, temp); } EXPORT_SYMBOL_GPL(cx231xx_disable656); /* * Handle any video-mode specific overrides that are different * on a per video standards basis after touching the MODE_CTRL * register which resets many values for autodetect */ int cx231xx_do_mode_ctrl_overrides(struct cx231xx *dev) { int status = 0; dev_dbg(dev->dev, "%s: 0x%x\n", __func__, (unsigned int)dev->norm); /* Change the DFE_CTRL3 bp_percent to fix flagging */ status = vid_blk_write_word(dev, DFE_CTRL3, 0xCD3F0280); if (dev->norm & (V4L2_STD_NTSC | V4L2_STD_PAL_M)) { dev_dbg(dev->dev, "%s: NTSC\n", __func__); /* Move the close caption lines out of active video, adjust the active video start point */ status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, VERT_TIM_CTRL, FLD_VBLANK_CNT, 0x18); status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, VERT_TIM_CTRL, FLD_VACTIVE_CNT, 0x1E7000); status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, VERT_TIM_CTRL, FLD_V656BLANK_CNT, 0x1C000000); status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, HORIZ_TIM_CTRL, FLD_HBLANK_CNT, cx231xx_set_field (FLD_HBLANK_CNT, 0x79)); } else if (dev->norm & V4L2_STD_SECAM) { dev_dbg(dev->dev, "%s: SECAM\n", __func__); status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, VERT_TIM_CTRL, FLD_VBLANK_CNT, 0x20); status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, VERT_TIM_CTRL, FLD_VACTIVE_CNT, cx231xx_set_field (FLD_VACTIVE_CNT, 0x244)); status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, VERT_TIM_CTRL, FLD_V656BLANK_CNT, cx231xx_set_field (FLD_V656BLANK_CNT, 0x24)); /* Adjust the active video horizontal start point */ status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, HORIZ_TIM_CTRL, FLD_HBLANK_CNT, cx231xx_set_field (FLD_HBLANK_CNT, 0x85)); } else { dev_dbg(dev->dev, "%s: PAL\n", __func__); status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, VERT_TIM_CTRL, FLD_VBLANK_CNT, 0x20); status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, VERT_TIM_CTRL, FLD_VACTIVE_CNT, 
cx231xx_set_field (FLD_VACTIVE_CNT, 0x244)); status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, VERT_TIM_CTRL, FLD_V656BLANK_CNT, cx231xx_set_field (FLD_V656BLANK_CNT, 0x24)); /* Adjust the active video horizontal start point */ status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, HORIZ_TIM_CTRL, FLD_HBLANK_CNT, cx231xx_set_field (FLD_HBLANK_CNT, 0x85)); } return status; } int cx231xx_unmute_audio(struct cx231xx *dev) { return vid_blk_write_byte(dev, PATH1_VOL_CTL, 0x24); } EXPORT_SYMBOL_GPL(cx231xx_unmute_audio); static int stopAudioFirmware(struct cx231xx *dev) { return vid_blk_write_byte(dev, DL_CTL_CONTROL, 0x03); } static int restartAudioFirmware(struct cx231xx *dev) { return vid_blk_write_byte(dev, DL_CTL_CONTROL, 0x13); } int cx231xx_set_audio_input(struct cx231xx *dev, u8 input) { int status = 0; enum AUDIO_INPUT ainput = AUDIO_INPUT_LINE; switch (INPUT(input)->amux) { case CX231XX_AMUX_VIDEO: ainput = AUDIO_INPUT_TUNER_TV; break; case CX231XX_AMUX_LINE_IN: status = cx231xx_i2s_blk_set_audio_input(dev, input); ainput = AUDIO_INPUT_LINE; break; default: break; } status = cx231xx_set_audio_decoder_input(dev, ainput); return status; } int cx231xx_set_audio_decoder_input(struct cx231xx *dev, enum AUDIO_INPUT audio_input) { u32 dwval; int status; u8 gen_ctrl; u32 value = 0; /* Put it in soft reset */ status = vid_blk_read_byte(dev, GENERAL_CTL, &gen_ctrl); gen_ctrl |= 1; status = vid_blk_write_byte(dev, GENERAL_CTL, gen_ctrl); switch (audio_input) { case AUDIO_INPUT_LINE: /* setup AUD_IO control from Merlin paralle output */ value = cx231xx_set_field(FLD_AUD_CHAN1_SRC, AUD_CHAN_SRC_PARALLEL); status = vid_blk_write_word(dev, AUD_IO_CTRL, value); /* setup input to Merlin, SRC2 connect to AC97 bypass upsample-by-2, slave mode, sony mode, left justify adr 091c, dat 01000000 */ status = vid_blk_read_word(dev, AC97_CTL, &dwval); status = vid_blk_write_word(dev, AC97_CTL, (dwval | FLD_AC97_UP2X_BYPASS)); /* select the parallel1 and SRC3 */ status = vid_blk_write_word(dev, BAND_OUT_SEL, cx231xx_set_field(FLD_SRC3_IN_SEL, 0x0) | cx231xx_set_field(FLD_SRC3_CLK_SEL, 0x0) | cx231xx_set_field(FLD_PARALLEL1_SRC_SEL, 0x0)); /* unmute all, AC97 in, independence mode adr 08d0, data 0x00063073 */ status = vid_blk_write_word(dev, DL_CTL, 0x3000001); status = vid_blk_write_word(dev, PATH1_CTL1, 0x00063073); /* set AVC maximum threshold, adr 08d4, dat ffff0024 */ status = vid_blk_read_word(dev, PATH1_VOL_CTL, &dwval); status = vid_blk_write_word(dev, PATH1_VOL_CTL, (dwval | FLD_PATH1_AVC_THRESHOLD)); /* set SC maximum threshold, adr 08ec, dat ffffb3a3 */ status = vid_blk_read_word(dev, PATH1_SC_CTL, &dwval); status = vid_blk_write_word(dev, PATH1_SC_CTL, (dwval | FLD_PATH1_SC_THRESHOLD)); break; case AUDIO_INPUT_TUNER_TV: default: status = stopAudioFirmware(dev); /* Setup SRC sources and clocks */ status = vid_blk_write_word(dev, BAND_OUT_SEL, cx231xx_set_field(FLD_SRC6_IN_SEL, 0x00) | cx231xx_set_field(FLD_SRC6_CLK_SEL, 0x01) | cx231xx_set_field(FLD_SRC5_IN_SEL, 0x00) | cx231xx_set_field(FLD_SRC5_CLK_SEL, 0x02) | cx231xx_set_field(FLD_SRC4_IN_SEL, 0x02) | cx231xx_set_field(FLD_SRC4_CLK_SEL, 0x03) | cx231xx_set_field(FLD_SRC3_IN_SEL, 0x00) | cx231xx_set_field(FLD_SRC3_CLK_SEL, 0x00) | cx231xx_set_field(FLD_BASEBAND_BYPASS_CTL, 0x00) | cx231xx_set_field(FLD_AC97_SRC_SEL, 0x03) | cx231xx_set_field(FLD_I2S_SRC_SEL, 0x00) | cx231xx_set_field(FLD_PARALLEL2_SRC_SEL, 0x02) | cx231xx_set_field(FLD_PARALLEL1_SRC_SEL, 0x01)); /* Setup the AUD_IO control */ status = 
vid_blk_write_word(dev, AUD_IO_CTRL, cx231xx_set_field(FLD_I2S_PORT_DIR, 0x00) | cx231xx_set_field(FLD_I2S_OUT_SRC, 0x00) | cx231xx_set_field(FLD_AUD_CHAN3_SRC, 0x00) | cx231xx_set_field(FLD_AUD_CHAN2_SRC, 0x00) | cx231xx_set_field(FLD_AUD_CHAN1_SRC, 0x03)); status = vid_blk_write_word(dev, PATH1_CTL1, 0x1F063870); /* setAudioStandard(_audio_standard); */ status = vid_blk_write_word(dev, PATH1_CTL1, 0x00063870); status = restartAudioFirmware(dev); switch (dev->board.tuner_type) { case TUNER_XC5000: /* SIF passthrough at 28.6363 MHz sample rate */ status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, CHIP_CTRL, FLD_SIF_EN, cx231xx_set_field(FLD_SIF_EN, 1)); break; case TUNER_NXP_TDA18271: /* Normal mode: SIF passthrough at 14.32 MHz */ status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, CHIP_CTRL, FLD_SIF_EN, cx231xx_set_field(FLD_SIF_EN, 0)); break; default: switch (dev->model) { /* i2c device tuners */ case CX231XX_BOARD_HAUPPAUGE_930C_HD_1114xx: case CX231XX_BOARD_HAUPPAUGE_935C: case CX231XX_BOARD_HAUPPAUGE_955Q: case CX231XX_BOARD_HAUPPAUGE_975: case CX231XX_BOARD_EVROMEDIA_FULL_HYBRID_FULLHD: /* TODO: Normal mode: SIF passthrough at 14.32 MHz?? */ break; default: /* This is just a casual suggestion to people adding new boards in case they use a tuner type we don't currently know about */ dev_info(dev->dev, "Unknown tuner type configuring SIF"); break; } } break; case AUDIO_INPUT_TUNER_FM: /* use SIF for FM radio setupFM(); setAudioStandard(_audio_standard); */ break; case AUDIO_INPUT_MUTE: status = vid_blk_write_word(dev, PATH1_CTL1, 0x1F011012); break; } /* Take it out of soft reset */ status = vid_blk_read_byte(dev, GENERAL_CTL, &gen_ctrl); gen_ctrl &= ~1; status = vid_blk_write_byte(dev, GENERAL_CTL, gen_ctrl); return status; } /****************************************************************************** * C H I P Specific C O N T R O L functions * ******************************************************************************/ int cx231xx_init_ctrl_pin_status(struct cx231xx *dev) { u32 value; int status = 0; status = vid_blk_read_word(dev, PIN_CTRL, &value); value |= (~dev->board.ctl_pin_status_mask); status = vid_blk_write_word(dev, PIN_CTRL, value); return status; } int cx231xx_set_agc_analog_digital_mux_select(struct cx231xx *dev, u8 analog_or_digital) { int status; /* first set the direction to output */ status = cx231xx_set_gpio_direction(dev, dev->board. agc_analog_digital_select_gpio, 1); /* 0 - demod ; 1 - Analog mode */ status = cx231xx_set_gpio_value(dev, dev->board.agc_analog_digital_select_gpio, analog_or_digital); if (status < 0) return status; return 0; } int cx231xx_enable_i2c_port_3(struct cx231xx *dev, bool is_port_3) { u8 value[4] = { 0, 0, 0, 0 }; int status = 0; bool current_is_port_3; /* * Should this code check dev->port_3_switch_enabled first * to skip unnecessary reading of the register? * If yes, the flag dev->port_3_switch_enabled must be initialized * correctly. */ status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, PWR_CTL_EN, value, 4); if (status < 0) return status; current_is_port_3 = value[0] & I2C_DEMOD_EN ? 
true : false; /* Just return, if already using the right port */ if (current_is_port_3 == is_port_3) return 0; if (is_port_3) value[0] |= I2C_DEMOD_EN; else value[0] &= ~I2C_DEMOD_EN; status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, PWR_CTL_EN, value, 4); /* remember status of the switch for usage in is_tuner */ if (status >= 0) dev->port_3_switch_enabled = is_port_3; return status; } EXPORT_SYMBOL_GPL(cx231xx_enable_i2c_port_3); void update_HH_register_after_set_DIF(struct cx231xx *dev) { /* u8 status = 0; u32 value = 0; vid_blk_write_word(dev, PIN_CTRL, 0xA0FFF82F); vid_blk_write_word(dev, DIF_MISC_CTRL, 0x0A203F11); vid_blk_write_word(dev, DIF_SRC_PHASE_INC, 0x1BEFBF06); status = vid_blk_read_word(dev, AFE_CTRL_C2HH_SRC_CTRL, &value); vid_blk_write_word(dev, AFE_CTRL_C2HH_SRC_CTRL, 0x4485D390); status = vid_blk_read_word(dev, AFE_CTRL_C2HH_SRC_CTRL, &value); */ } #if 0 static void cx231xx_dump_SC_reg(struct cx231xx *dev) { u8 value[4] = { 0, 0, 0, 0 }; dev_dbg(dev->dev, "%s!\n", __func__); cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, BOARD_CFG_STAT, value, 4); dev_dbg(dev->dev, "reg0x%x=0x%x 0x%x 0x%x 0x%x\n", BOARD_CFG_STAT, value[0], value[1], value[2], value[3]); cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, TS_MODE_REG, value, 4); dev_dbg(dev->dev, "reg0x%x=0x%x 0x%x 0x%x 0x%x\n", TS_MODE_REG, value[0], value[1], value[2], value[3]); cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, TS1_CFG_REG, value, 4); dev_dbg(dev->dev, "reg0x%x=0x%x 0x%x 0x%x 0x%x\n", TS1_CFG_REG, value[0], value[1], value[2], value[3]); cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, TS1_LENGTH_REG, value, 4); dev_dbg(dev->dev, "reg0x%x=0x%x 0x%x 0x%x 0x%x\n", TS1_LENGTH_REG, value[0], value[1], value[2], value[3]); cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, TS2_CFG_REG, value, 4); dev_dbg(dev->dev, "reg0x%x=0x%x 0x%x 0x%x 0x%x\n", TS2_CFG_REG, value[0], value[1], value[2], value[3]); cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, TS2_LENGTH_REG, value, 4); dev_dbg(dev->dev, "reg0x%x=0x%x 0x%x 0x%x 0x%x\n", TS2_LENGTH_REG, value[0], value[1], value[2], value[3]); cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, EP_MODE_SET, value, 4); dev_dbg(dev->dev, "reg0x%x=0x%x 0x%x 0x%x 0x%x\n", EP_MODE_SET, value[0], value[1], value[2], value[3]); cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_PWR_PTN1, value, 4); dev_dbg(dev->dev, "reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_PWR_PTN1, value[0], value[1], value[2], value[3]); cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_PWR_PTN2, value, 4); dev_dbg(dev->dev, "reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_PWR_PTN2, value[0], value[1], value[2], value[3]); cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_PWR_PTN3, value, 4); dev_dbg(dev->dev, "reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_PWR_PTN3, value[0], value[1], value[2], value[3]); cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_PWR_MASK0, value, 4); dev_dbg(dev->dev, "reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_PWR_MASK0, value[0], value[1], value[2], value[3]); cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_PWR_MASK1, value, 4); dev_dbg(dev->dev, "reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_PWR_MASK1, value[0], value[1], value[2], value[3]); cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_PWR_MASK2, value, 4); dev_dbg(dev->dev, "reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_PWR_MASK2, value[0], value[1], value[2], value[3]); cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_GAIN, value, 4); dev_dbg(dev->dev, "reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_GAIN, value[0], value[1], value[2], value[3]); cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_CAR_REG, value, 4); 
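/* Remaining dumps in this disabled debug helper: CIR_CAR_REG (read just above), CIR_OT_CFG1, CIR_OT_CFG2 and PWR_CTL_EN. */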
dev_dbg(dev->dev, "reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_CAR_REG, value[0], value[1], value[2], value[3]); cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_OT_CFG1, value, 4); dev_dbg(dev->dev, "reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_OT_CFG1, value[0], value[1], value[2], value[3]); cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_OT_CFG2, value, 4); dev_dbg(dev->dev, "reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_OT_CFG2, value[0], value[1], value[2], value[3]); cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, PWR_CTL_EN, value, 4); dev_dbg(dev->dev, "reg0x%x=0x%x 0x%x 0x%x 0x%x\n", PWR_CTL_EN, value[0], value[1], value[2], value[3]); } #endif void cx231xx_Setup_AFE_for_LowIF(struct cx231xx *dev) { u8 value = 0; afe_read_byte(dev, ADC_STATUS2_CH3, &value); value = (value & 0xFE)|0x01; afe_write_byte(dev, ADC_STATUS2_CH3, value); afe_read_byte(dev, ADC_STATUS2_CH3, &value); value = (value & 0xFE)|0x00; afe_write_byte(dev, ADC_STATUS2_CH3, value); /* config colibri to lo-if mode FIXME: ntf_mode = 2'b00 by default. But set 0x1 would reduce the diff IF input by half, for low-if agc defect */ afe_read_byte(dev, ADC_NTF_PRECLMP_EN_CH3, &value); value = (value & 0xFC)|0x00; afe_write_byte(dev, ADC_NTF_PRECLMP_EN_CH3, value); afe_read_byte(dev, ADC_INPUT_CH3, &value); value = (value & 0xF9)|0x02; afe_write_byte(dev, ADC_INPUT_CH3, value); afe_read_byte(dev, ADC_FB_FRCRST_CH3, &value); value = (value & 0xFB)|0x04; afe_write_byte(dev, ADC_FB_FRCRST_CH3, value); afe_read_byte(dev, ADC_DCSERVO_DEM_CH3, &value); value = (value & 0xFC)|0x03; afe_write_byte(dev, ADC_DCSERVO_DEM_CH3, value); afe_read_byte(dev, ADC_CTRL_DAC1_CH3, &value); value = (value & 0xFB)|0x04; afe_write_byte(dev, ADC_CTRL_DAC1_CH3, value); afe_read_byte(dev, ADC_CTRL_DAC23_CH3, &value); value = (value & 0xF8)|0x06; afe_write_byte(dev, ADC_CTRL_DAC23_CH3, value); afe_read_byte(dev, ADC_CTRL_DAC23_CH3, &value); value = (value & 0x8F)|0x40; afe_write_byte(dev, ADC_CTRL_DAC23_CH3, value); afe_read_byte(dev, ADC_PWRDN_CLAMP_CH3, &value); value = (value & 0xDF)|0x20; afe_write_byte(dev, ADC_PWRDN_CLAMP_CH3, value); } void cx231xx_set_Colibri_For_LowIF(struct cx231xx *dev, u32 if_freq, u8 spectral_invert, u32 mode) { u32 colibri_carrier_offset = 0; u32 func_mode = 0x01; /* Device has a DIF if this function is called */ u32 standard = 0; u8 value[4] = { 0, 0, 0, 0 }; dev_dbg(dev->dev, "Enter cx231xx_set_Colibri_For_LowIF()\n"); value[0] = (u8) 0x6F; value[1] = (u8) 0x6F; value[2] = (u8) 0x6F; value[3] = (u8) 0x6F; cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, PWR_CTL_EN, value, 4); /*Set colibri for low IF*/ cx231xx_afe_set_mode(dev, AFE_MODE_LOW_IF); /* Set C2HH for low IF operation.*/ standard = dev->norm; cx231xx_dif_configure_C2HH_for_low_IF(dev, dev->active_mode, func_mode, standard); /* Get colibri offsets.*/ colibri_carrier_offset = cx231xx_Get_Colibri_CarrierOffset(mode, standard); dev_dbg(dev->dev, "colibri_carrier_offset=%d, standard=0x%x\n", colibri_carrier_offset, standard); /* Set the band Pass filter for DIF*/ cx231xx_set_DIF_bandpass(dev, (if_freq+colibri_carrier_offset), spectral_invert, mode); } u32 cx231xx_Get_Colibri_CarrierOffset(u32 mode, u32 standerd) { u32 colibri_carrier_offset = 0; if (mode == TUNER_MODE_FM_RADIO) { colibri_carrier_offset = 1100000; } else if (standerd & (V4L2_STD_MN | V4L2_STD_NTSC_M_JP)) { colibri_carrier_offset = 4832000; /*4.83MHz */ } else if (standerd & (V4L2_STD_PAL_B | V4L2_STD_PAL_G)) { colibri_carrier_offset = 2700000; /*2.70MHz */ } else if (standerd & (V4L2_STD_PAL_D | V4L2_STD_PAL_I | V4L2_STD_SECAM)) { 
colibri_carrier_offset = 2100000; /*2.10MHz */ } return colibri_carrier_offset; } void cx231xx_set_DIF_bandpass(struct cx231xx *dev, u32 if_freq, u8 spectral_invert, u32 mode) { unsigned long pll_freq_word; u32 dif_misc_ctrl_value = 0; u64 pll_freq_u64 = 0; u32 i = 0; dev_dbg(dev->dev, "if_freq=%d;spectral_invert=0x%x;mode=0x%x\n", if_freq, spectral_invert, mode); if (mode == TUNER_MODE_FM_RADIO) { pll_freq_word = 0x905A1CAC; vid_blk_write_word(dev, DIF_PLL_FREQ_WORD, pll_freq_word); } else /*KSPROPERTY_TUNER_MODE_TV*/{ /* Calculate the PLL frequency word based on the adjusted if_freq*/ pll_freq_word = if_freq; pll_freq_u64 = (u64)pll_freq_word << 28L; do_div(pll_freq_u64, 50000000); pll_freq_word = (u32)pll_freq_u64; /*pll_freq_word = 0x3463497;*/ vid_blk_write_word(dev, DIF_PLL_FREQ_WORD, pll_freq_word); if (spectral_invert) { if_freq -= 400000; /* Enable Spectral Invert*/ vid_blk_read_word(dev, DIF_MISC_CTRL, &dif_misc_ctrl_value); dif_misc_ctrl_value = dif_misc_ctrl_value | 0x00200000; vid_blk_write_word(dev, DIF_MISC_CTRL, dif_misc_ctrl_value); } else { if_freq += 400000; /* Disable Spectral Invert*/ vid_blk_read_word(dev, DIF_MISC_CTRL, &dif_misc_ctrl_value); dif_misc_ctrl_value = dif_misc_ctrl_value & 0xFFDFFFFF; vid_blk_write_word(dev, DIF_MISC_CTRL, dif_misc_ctrl_value); } if_freq = (if_freq / 100000) * 100000; if (if_freq < 3000000) if_freq = 3000000; if (if_freq > 16000000) if_freq = 16000000; } dev_dbg(dev->dev, "Enter IF=%zu\n", ARRAY_SIZE(Dif_set_array)); for (i = 0; i < ARRAY_SIZE(Dif_set_array); i++) { if (Dif_set_array[i].if_freq == if_freq) { vid_blk_write_word(dev, Dif_set_array[i].register_address, Dif_set_array[i].value); } } } /****************************************************************************** * D I F - B L O C K C O N T R O L functions * ******************************************************************************/ int cx231xx_dif_configure_C2HH_for_low_IF(struct cx231xx *dev, u32 mode, u32 function_mode, u32 standard) { int status = 0; if (mode == V4L2_TUNER_RADIO) { /* C2HH */ /* lo if big signal */ status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, AFE_CTRL_C2HH_SRC_CTRL, 30, 31, 0x1); /* FUNC_MODE = DIF */ status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, AFE_CTRL_C2HH_SRC_CTRL, 23, 24, function_mode); /* IF_MODE */ status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, AFE_CTRL_C2HH_SRC_CTRL, 15, 22, 0xFF); /* no inv */ status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, AFE_CTRL_C2HH_SRC_CTRL, 9, 9, 0x1); } else if (standard != DIF_USE_BASEBAND) { if (standard & V4L2_STD_MN) { /* lo if big signal */ status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, AFE_CTRL_C2HH_SRC_CTRL, 30, 31, 0x1); /* FUNC_MODE = DIF */ status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, AFE_CTRL_C2HH_SRC_CTRL, 23, 24, function_mode); /* IF_MODE */ status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, AFE_CTRL_C2HH_SRC_CTRL, 15, 22, 0xb); /* no inv */ status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, AFE_CTRL_C2HH_SRC_CTRL, 9, 9, 0x1); /* 0x124, AUD_CHAN1_SRC = 0x3 */ status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, AUD_IO_CTRL, 0, 31, 0x00000003); } else if ((standard == V4L2_STD_PAL_I) | (standard & V4L2_STD_PAL_D) | (standard & V4L2_STD_SECAM)) { /* C2HH setup */ /* lo if big signal */ status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, AFE_CTRL_C2HH_SRC_CTRL, 30, 31, 0x1); /* FUNC_MODE = DIF */ status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, 
AFE_CTRL_C2HH_SRC_CTRL, 23, 24, function_mode); /* IF_MODE */ status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, AFE_CTRL_C2HH_SRC_CTRL, 15, 22, 0xF); /* no inv */ status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, AFE_CTRL_C2HH_SRC_CTRL, 9, 9, 0x1); } else { /* default PAL BG */ /* C2HH setup */ /* lo if big signal */ status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, AFE_CTRL_C2HH_SRC_CTRL, 30, 31, 0x1); /* FUNC_MODE = DIF */ status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, AFE_CTRL_C2HH_SRC_CTRL, 23, 24, function_mode); /* IF_MODE */ status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, AFE_CTRL_C2HH_SRC_CTRL, 15, 22, 0xE); /* no inv */ status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, AFE_CTRL_C2HH_SRC_CTRL, 9, 9, 0x1); } } return status; } int cx231xx_dif_set_standard(struct cx231xx *dev, u32 standard) { int status = 0; u32 dif_misc_ctrl_value = 0; u32 func_mode = 0; dev_dbg(dev->dev, "%s: setStandard to %x\n", __func__, standard); status = vid_blk_read_word(dev, DIF_MISC_CTRL, &dif_misc_ctrl_value); if (standard != DIF_USE_BASEBAND) dev->norm = standard; switch (dev->model) { case CX231XX_BOARD_CNXT_CARRAERA: case CX231XX_BOARD_CNXT_RDE_250: case CX231XX_BOARD_CNXT_SHELBY: case CX231XX_BOARD_CNXT_RDU_250: case CX231XX_BOARD_CNXT_VIDEO_GRABBER: case CX231XX_BOARD_HAUPPAUGE_EXETER: case CX231XX_BOARD_OTG102: func_mode = 0x03; break; case CX231XX_BOARD_CNXT_RDE_253S: case CX231XX_BOARD_CNXT_RDU_253S: case CX231XX_BOARD_HAUPPAUGE_USB2_FM_PAL: case CX231XX_BOARD_HAUPPAUGE_USB2_FM_NTSC: func_mode = 0x01; break; default: func_mode = 0x01; } status = cx231xx_dif_configure_C2HH_for_low_IF(dev, dev->active_mode, func_mode, standard); if (standard == DIF_USE_BASEBAND) { /* base band */ /* There is a different SRC_PHASE_INC value for baseband vs. 
DIF */ status = vid_blk_write_word(dev, DIF_SRC_PHASE_INC, 0xDF7DF83); status = vid_blk_read_word(dev, DIF_MISC_CTRL, &dif_misc_ctrl_value); dif_misc_ctrl_value |= FLD_DIF_DIF_BYPASS; status = vid_blk_write_word(dev, DIF_MISC_CTRL, dif_misc_ctrl_value); } else if (standard & V4L2_STD_PAL_D) { status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_PLL_CTRL, 0, 31, 0x6503bc0c); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_PLL_CTRL1, 0, 31, 0xbd038c85); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_PLL_CTRL2, 0, 31, 0x1db4640a); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_PLL_CTRL3, 0, 31, 0x00008800); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_IF_REF, 0, 31, 0x444C1380); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_CTRL_IF, 0, 31, 0xDA302600); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_CTRL_INT, 0, 31, 0xDA261700); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_CTRL_RF, 0, 31, 0xDA262600); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_IF_INT_CURRENT, 0, 31, 0x26001700); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_RF_CURRENT, 0, 31, 0x00002660); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_VIDEO_AGC_CTRL, 0, 31, 0x72500800); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_VID_AUD_OVERRIDE, 0, 31, 0x27000100); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AV_SEP_CTRL, 0, 31, 0x3F3934EA); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_COMP_FLT_CTRL, 0, 31, 0x00000000); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_SRC_PHASE_INC, 0, 31, 0x1befbf06); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_SRC_GAIN_CONTROL, 0, 31, 0x000035e8); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_RPT_VARIANCE, 0, 31, 0x00000000); /* Save the Spec Inversion value */ dif_misc_ctrl_value &= FLD_DIF_SPEC_INV; dif_misc_ctrl_value |= 0x3a023F11; } else if (standard & V4L2_STD_PAL_I) { status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_PLL_CTRL, 0, 31, 0x6503bc0c); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_PLL_CTRL1, 0, 31, 0xbd038c85); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_PLL_CTRL2, 0, 31, 0x1db4640a); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_PLL_CTRL3, 0, 31, 0x00008800); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_IF_REF, 0, 31, 0x444C1380); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_CTRL_IF, 0, 31, 0xDA302600); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_CTRL_INT, 0, 31, 0xDA261700); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_CTRL_RF, 0, 31, 0xDA262600); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_IF_INT_CURRENT, 0, 31, 0x26001700); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_RF_CURRENT, 0, 31, 0x00002660); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_VIDEO_AGC_CTRL, 0, 31, 0x72500800); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_VID_AUD_OVERRIDE, 0, 31, 0x27000100); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AV_SEP_CTRL, 0, 31, 0x5F39A934); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_COMP_FLT_CTRL, 0, 31, 0x00000000); status = 
cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_SRC_PHASE_INC, 0, 31, 0x1befbf06); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_SRC_GAIN_CONTROL, 0, 31, 0x000035e8); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_RPT_VARIANCE, 0, 31, 0x00000000); /* Save the Spec Inversion value */ dif_misc_ctrl_value &= FLD_DIF_SPEC_INV; dif_misc_ctrl_value |= 0x3a033F11; } else if (standard & V4L2_STD_PAL_M) { /* improved Low Frequency Phase Noise */ status = vid_blk_write_word(dev, DIF_PLL_CTRL, 0xFF01FF0C); status = vid_blk_write_word(dev, DIF_PLL_CTRL1, 0xbd038c85); status = vid_blk_write_word(dev, DIF_PLL_CTRL2, 0x1db4640a); status = vid_blk_write_word(dev, DIF_PLL_CTRL3, 0x00008800); status = vid_blk_write_word(dev, DIF_AGC_IF_REF, 0x444C1380); status = vid_blk_write_word(dev, DIF_AGC_IF_INT_CURRENT, 0x26001700); status = vid_blk_write_word(dev, DIF_AGC_RF_CURRENT, 0x00002660); status = vid_blk_write_word(dev, DIF_VIDEO_AGC_CTRL, 0x72500800); status = vid_blk_write_word(dev, DIF_VID_AUD_OVERRIDE, 0x27000100); status = vid_blk_write_word(dev, DIF_AV_SEP_CTRL, 0x012c405d); status = vid_blk_write_word(dev, DIF_COMP_FLT_CTRL, 0x009f50c1); status = vid_blk_write_word(dev, DIF_SRC_PHASE_INC, 0x1befbf06); status = vid_blk_write_word(dev, DIF_SRC_GAIN_CONTROL, 0x000035e8); status = vid_blk_write_word(dev, DIF_SOFT_RST_CTRL_REVB, 0x00000000); /* Save the Spec Inversion value */ dif_misc_ctrl_value &= FLD_DIF_SPEC_INV; dif_misc_ctrl_value |= 0x3A0A3F10; } else if (standard & (V4L2_STD_PAL_N | V4L2_STD_PAL_Nc)) { /* improved Low Frequency Phase Noise */ status = vid_blk_write_word(dev, DIF_PLL_CTRL, 0xFF01FF0C); status = vid_blk_write_word(dev, DIF_PLL_CTRL1, 0xbd038c85); status = vid_blk_write_word(dev, DIF_PLL_CTRL2, 0x1db4640a); status = vid_blk_write_word(dev, DIF_PLL_CTRL3, 0x00008800); status = vid_blk_write_word(dev, DIF_AGC_IF_REF, 0x444C1380); status = vid_blk_write_word(dev, DIF_AGC_IF_INT_CURRENT, 0x26001700); status = vid_blk_write_word(dev, DIF_AGC_RF_CURRENT, 0x00002660); status = vid_blk_write_word(dev, DIF_VIDEO_AGC_CTRL, 0x72500800); status = vid_blk_write_word(dev, DIF_VID_AUD_OVERRIDE, 0x27000100); status = vid_blk_write_word(dev, DIF_AV_SEP_CTRL, 0x012c405d); status = vid_blk_write_word(dev, DIF_COMP_FLT_CTRL, 0x009f50c1); status = vid_blk_write_word(dev, DIF_SRC_PHASE_INC, 0x1befbf06); status = vid_blk_write_word(dev, DIF_SRC_GAIN_CONTROL, 0x000035e8); status = vid_blk_write_word(dev, DIF_SOFT_RST_CTRL_REVB, 0x00000000); /* Save the Spec Inversion value */ dif_misc_ctrl_value &= FLD_DIF_SPEC_INV; dif_misc_ctrl_value = 0x3A093F10; } else if (standard & (V4L2_STD_SECAM_B | V4L2_STD_SECAM_D | V4L2_STD_SECAM_G | V4L2_STD_SECAM_K | V4L2_STD_SECAM_K1)) { status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_PLL_CTRL, 0, 31, 0x6503bc0c); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_PLL_CTRL1, 0, 31, 0xbd038c85); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_PLL_CTRL2, 0, 31, 0x1db4640a); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_PLL_CTRL3, 0, 31, 0x00008800); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_IF_REF, 0, 31, 0x888C0380); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_CTRL_IF, 0, 31, 0xe0262600); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_CTRL_INT, 0, 31, 0xc2171700); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_CTRL_RF, 0, 31, 0xc2262600); status = 
cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_IF_INT_CURRENT, 0, 31, 0x26001700); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_RF_CURRENT, 0, 31, 0x00002660); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_VID_AUD_OVERRIDE, 0, 31, 0x27000100); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AV_SEP_CTRL, 0, 31, 0x3F3530ec); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_COMP_FLT_CTRL, 0, 31, 0x00000000); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_SRC_PHASE_INC, 0, 31, 0x1befbf06); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_SRC_GAIN_CONTROL, 0, 31, 0x000035e8); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_RPT_VARIANCE, 0, 31, 0x00000000); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_VIDEO_AGC_CTRL, 0, 31, 0xf4000000); /* Save the Spec Inversion value */ dif_misc_ctrl_value &= FLD_DIF_SPEC_INV; dif_misc_ctrl_value |= 0x3a023F11; } else if (standard & (V4L2_STD_SECAM_L | V4L2_STD_SECAM_LC)) { /* Is it SECAM_L1? */ status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_PLL_CTRL, 0, 31, 0x6503bc0c); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_PLL_CTRL1, 0, 31, 0xbd038c85); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_PLL_CTRL2, 0, 31, 0x1db4640a); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_PLL_CTRL3, 0, 31, 0x00008800); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_IF_REF, 0, 31, 0x888C0380); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_CTRL_IF, 0, 31, 0xe0262600); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_CTRL_INT, 0, 31, 0xc2171700); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_CTRL_RF, 0, 31, 0xc2262600); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_IF_INT_CURRENT, 0, 31, 0x26001700); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_RF_CURRENT, 0, 31, 0x00002660); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_VID_AUD_OVERRIDE, 0, 31, 0x27000100); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AV_SEP_CTRL, 0, 31, 0x3F3530ec); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_COMP_FLT_CTRL, 0, 31, 0x00000000); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_SRC_PHASE_INC, 0, 31, 0x1befbf06); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_SRC_GAIN_CONTROL, 0, 31, 0x000035e8); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_RPT_VARIANCE, 0, 31, 0x00000000); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_VIDEO_AGC_CTRL, 0, 31, 0xf2560000); /* Save the Spec Inversion value */ dif_misc_ctrl_value &= FLD_DIF_SPEC_INV; dif_misc_ctrl_value |= 0x3a023F11; } else if (standard & V4L2_STD_NTSC_M) { /* V4L2_STD_NTSC_M (75 IRE Setup) Or V4L2_STD_NTSC_M_JP (Japan, 0 IRE Setup) */ /* For NTSC the centre frequency of video coming out of sidewinder is around 7.1MHz or 3.6MHz depending on the spectral inversion. 
so for a non spectrally inverted channel the pll freq word is 0x03420c49 */ status = vid_blk_write_word(dev, DIF_PLL_CTRL, 0x6503BC0C); status = vid_blk_write_word(dev, DIF_PLL_CTRL1, 0xBD038C85); status = vid_blk_write_word(dev, DIF_PLL_CTRL2, 0x1DB4640A); status = vid_blk_write_word(dev, DIF_PLL_CTRL3, 0x00008800); status = vid_blk_write_word(dev, DIF_AGC_IF_REF, 0x444C0380); status = vid_blk_write_word(dev, DIF_AGC_IF_INT_CURRENT, 0x26001700); status = vid_blk_write_word(dev, DIF_AGC_RF_CURRENT, 0x00002660); status = vid_blk_write_word(dev, DIF_VIDEO_AGC_CTRL, 0x04000800); status = vid_blk_write_word(dev, DIF_VID_AUD_OVERRIDE, 0x27000100); status = vid_blk_write_word(dev, DIF_AV_SEP_CTRL, 0x01296e1f); status = vid_blk_write_word(dev, DIF_COMP_FLT_CTRL, 0x009f50c1); status = vid_blk_write_word(dev, DIF_SRC_PHASE_INC, 0x1befbf06); status = vid_blk_write_word(dev, DIF_SRC_GAIN_CONTROL, 0x000035e8); status = vid_blk_write_word(dev, DIF_AGC_CTRL_IF, 0xC2262600); status = vid_blk_write_word(dev, DIF_AGC_CTRL_INT, 0xC2262600); status = vid_blk_write_word(dev, DIF_AGC_CTRL_RF, 0xC2262600); /* Save the Spec Inversion value */ dif_misc_ctrl_value &= FLD_DIF_SPEC_INV; dif_misc_ctrl_value |= 0x3a003F10; } else { /* default PAL BG */ status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_PLL_CTRL, 0, 31, 0x6503bc0c); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_PLL_CTRL1, 0, 31, 0xbd038c85); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_PLL_CTRL2, 0, 31, 0x1db4640a); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_PLL_CTRL3, 0, 31, 0x00008800); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_IF_REF, 0, 31, 0x444C1380); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_CTRL_IF, 0, 31, 0xDA302600); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_CTRL_INT, 0, 31, 0xDA261700); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_CTRL_RF, 0, 31, 0xDA262600); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_IF_INT_CURRENT, 0, 31, 0x26001700); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_RF_CURRENT, 0, 31, 0x00002660); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_VIDEO_AGC_CTRL, 0, 31, 0x72500800); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_VID_AUD_OVERRIDE, 0, 31, 0x27000100); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AV_SEP_CTRL, 0, 31, 0x3F3530EC); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_COMP_FLT_CTRL, 0, 31, 0x00A653A8); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_SRC_PHASE_INC, 0, 31, 0x1befbf06); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_SRC_GAIN_CONTROL, 0, 31, 0x000035e8); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_RPT_VARIANCE, 0, 31, 0x00000000); /* Save the Spec Inversion value */ dif_misc_ctrl_value &= FLD_DIF_SPEC_INV; dif_misc_ctrl_value |= 0x3a013F11; } /* The AGC values should be the same for all standards, AUD_SRC_SEL[19] should always be disabled */ dif_misc_ctrl_value &= ~FLD_DIF_AUD_SRC_SEL; /* It is still possible to get Set Standard calls even when we are in FM mode. This is done to override the value for FM. 
*/ if (dev->active_mode == V4L2_TUNER_RADIO) dif_misc_ctrl_value = 0x7a080000; /* Write the calculated value for misc ontrol register */ status = vid_blk_write_word(dev, DIF_MISC_CTRL, dif_misc_ctrl_value); return status; } int cx231xx_tuner_pre_channel_change(struct cx231xx *dev) { int status = 0; u32 dwval; /* Set the RF and IF k_agc values to 3 */ status = vid_blk_read_word(dev, DIF_AGC_IF_REF, &dwval); dwval &= ~(FLD_DIF_K_AGC_RF | FLD_DIF_K_AGC_IF); dwval |= 0x33000000; status = vid_blk_write_word(dev, DIF_AGC_IF_REF, dwval); return status; } int cx231xx_tuner_post_channel_change(struct cx231xx *dev) { int status = 0; u32 dwval; dev_dbg(dev->dev, "%s: dev->tuner_type =0%d\n", __func__, dev->tuner_type); /* Set the RF and IF k_agc values to 4 for PAL/NTSC and 8 for * SECAM L/B/D standards */ status = vid_blk_read_word(dev, DIF_AGC_IF_REF, &dwval); dwval &= ~(FLD_DIF_K_AGC_RF | FLD_DIF_K_AGC_IF); if (dev->norm & (V4L2_STD_SECAM_L | V4L2_STD_SECAM_B | V4L2_STD_SECAM_D)) { if (dev->tuner_type == TUNER_NXP_TDA18271) { dwval &= ~FLD_DIF_IF_REF; dwval |= 0x88000300; } else dwval |= 0x88000000; } else { if (dev->tuner_type == TUNER_NXP_TDA18271) { dwval &= ~FLD_DIF_IF_REF; dwval |= 0xCC000300; } else dwval |= 0x44000000; } status = vid_blk_write_word(dev, DIF_AGC_IF_REF, dwval); return status == sizeof(dwval) ? 0 : -EIO; } /****************************************************************************** * I 2 S - B L O C K C O N T R O L functions * ******************************************************************************/ int cx231xx_i2s_blk_initialize(struct cx231xx *dev) { int status = 0; u32 value; status = cx231xx_read_i2c_data(dev, I2S_BLK_DEVICE_ADDRESS, CH_PWR_CTRL1, 1, &value, 1); /* enables clock to delta-sigma and decimation filter */ value |= 0x80; status = cx231xx_write_i2c_data(dev, I2S_BLK_DEVICE_ADDRESS, CH_PWR_CTRL1, 1, value, 1); /* power up all channel */ status = cx231xx_write_i2c_data(dev, I2S_BLK_DEVICE_ADDRESS, CH_PWR_CTRL2, 1, 0x00, 1); return status; } int cx231xx_i2s_blk_update_power_control(struct cx231xx *dev, enum AV_MODE avmode) { int status = 0; u32 value = 0; if (avmode != POLARIS_AVMODE_ENXTERNAL_AV) { status = cx231xx_read_i2c_data(dev, I2S_BLK_DEVICE_ADDRESS, CH_PWR_CTRL2, 1, &value, 1); value |= 0xfe; status = cx231xx_write_i2c_data(dev, I2S_BLK_DEVICE_ADDRESS, CH_PWR_CTRL2, 1, value, 1); } else { status = cx231xx_write_i2c_data(dev, I2S_BLK_DEVICE_ADDRESS, CH_PWR_CTRL2, 1, 0x00, 1); } return status; } /* set i2s_blk for audio input types */ int cx231xx_i2s_blk_set_audio_input(struct cx231xx *dev, u8 audio_input) { int status = 0; switch (audio_input) { case CX231XX_AMUX_LINE_IN: status = cx231xx_write_i2c_data(dev, I2S_BLK_DEVICE_ADDRESS, CH_PWR_CTRL2, 1, 0x00, 1); status = cx231xx_write_i2c_data(dev, I2S_BLK_DEVICE_ADDRESS, CH_PWR_CTRL1, 1, 0x80, 1); break; case CX231XX_AMUX_VIDEO: default: break; } dev->ctl_ainput = audio_input; return status; } /****************************************************************************** * P O W E R C O N T R O L functions * ******************************************************************************/ int cx231xx_set_power_mode(struct cx231xx *dev, enum AV_MODE mode) { u8 value[4] = { 0, 0, 0, 0 }; u32 tmp = 0; int status = 0; if (dev->power_mode != mode) dev->power_mode = mode; else { dev_dbg(dev->dev, "%s: mode = %d, No Change req.\n", __func__, mode); return 0; } status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, PWR_CTL_EN, value, 4); if (status < 0) return status; tmp = le32_to_cpu(*((__le32 *) value)); 
switch (mode) { case POLARIS_AVMODE_ENXTERNAL_AV: tmp &= (~PWR_MODE_MASK); tmp |= PWR_AV_EN; value[0] = (u8) tmp; value[1] = (u8) (tmp >> 8); value[2] = (u8) (tmp >> 16); value[3] = (u8) (tmp >> 24); status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, PWR_CTL_EN, value, 4); msleep(PWR_SLEEP_INTERVAL); tmp |= PWR_ISO_EN; value[0] = (u8) tmp; value[1] = (u8) (tmp >> 8); value[2] = (u8) (tmp >> 16); value[3] = (u8) (tmp >> 24); status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, PWR_CTL_EN, value, 4); msleep(PWR_SLEEP_INTERVAL); tmp |= POLARIS_AVMODE_ENXTERNAL_AV; value[0] = (u8) tmp; value[1] = (u8) (tmp >> 8); value[2] = (u8) (tmp >> 16); value[3] = (u8) (tmp >> 24); status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, PWR_CTL_EN, value, 4); /* reset state of xceive tuner */ dev->xc_fw_load_done = 0; break; case POLARIS_AVMODE_ANALOGT_TV: tmp |= PWR_DEMOD_EN; value[0] = (u8) tmp; value[1] = (u8) (tmp >> 8); value[2] = (u8) (tmp >> 16); value[3] = (u8) (tmp >> 24); status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, PWR_CTL_EN, value, 4); msleep(PWR_SLEEP_INTERVAL); if (!(tmp & PWR_TUNER_EN)) { tmp |= (PWR_TUNER_EN); value[0] = (u8) tmp; value[1] = (u8) (tmp >> 8); value[2] = (u8) (tmp >> 16); value[3] = (u8) (tmp >> 24); status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, PWR_CTL_EN, value, 4); msleep(PWR_SLEEP_INTERVAL); } if (!(tmp & PWR_AV_EN)) { tmp |= PWR_AV_EN; value[0] = (u8) tmp; value[1] = (u8) (tmp >> 8); value[2] = (u8) (tmp >> 16); value[3] = (u8) (tmp >> 24); status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, PWR_CTL_EN, value, 4); msleep(PWR_SLEEP_INTERVAL); } if (!(tmp & PWR_ISO_EN)) { tmp |= PWR_ISO_EN; value[0] = (u8) tmp; value[1] = (u8) (tmp >> 8); value[2] = (u8) (tmp >> 16); value[3] = (u8) (tmp >> 24); status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, PWR_CTL_EN, value, 4); msleep(PWR_SLEEP_INTERVAL); } if (!(tmp & POLARIS_AVMODE_ANALOGT_TV)) { tmp |= POLARIS_AVMODE_ANALOGT_TV; value[0] = (u8) tmp; value[1] = (u8) (tmp >> 8); value[2] = (u8) (tmp >> 16); value[3] = (u8) (tmp >> 24); status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, PWR_CTL_EN, value, 4); msleep(PWR_SLEEP_INTERVAL); } if (dev->board.tuner_type != TUNER_ABSENT) { /* reset the Tuner */ if (dev->board.tuner_gpio) cx231xx_gpio_set(dev, dev->board.tuner_gpio); if (dev->cx231xx_reset_analog_tuner) dev->cx231xx_reset_analog_tuner(dev); } break; case POLARIS_AVMODE_DIGITAL: if (!(tmp & PWR_TUNER_EN)) { tmp |= (PWR_TUNER_EN); value[0] = (u8) tmp; value[1] = (u8) (tmp >> 8); value[2] = (u8) (tmp >> 16); value[3] = (u8) (tmp >> 24); status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, PWR_CTL_EN, value, 4); msleep(PWR_SLEEP_INTERVAL); } if (!(tmp & PWR_AV_EN)) { tmp |= PWR_AV_EN; value[0] = (u8) tmp; value[1] = (u8) (tmp >> 8); value[2] = (u8) (tmp >> 16); value[3] = (u8) (tmp >> 24); status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, PWR_CTL_EN, value, 4); msleep(PWR_SLEEP_INTERVAL); } if (!(tmp & PWR_ISO_EN)) { tmp |= PWR_ISO_EN; value[0] = (u8) tmp; value[1] = (u8) (tmp >> 8); value[2] = (u8) (tmp >> 16); value[3] = (u8) (tmp >> 24); status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, PWR_CTL_EN, value, 4); msleep(PWR_SLEEP_INTERVAL); } tmp &= (~PWR_AV_MODE); tmp |= POLARIS_AVMODE_DIGITAL; value[0] = (u8) tmp; value[1] = (u8) (tmp >> 8); value[2] = (u8) (tmp >> 16); value[3] = (u8) (tmp >> 24); status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, PWR_CTL_EN, value, 4); msleep(PWR_SLEEP_INTERVAL); if (!(tmp & PWR_DEMOD_EN)) { tmp |= PWR_DEMOD_EN; value[0] = (u8) tmp; 
value[1] = (u8) (tmp >> 8); value[2] = (u8) (tmp >> 16); value[3] = (u8) (tmp >> 24); status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, PWR_CTL_EN, value, 4); msleep(PWR_SLEEP_INTERVAL); } if (dev->board.tuner_type != TUNER_ABSENT) { /* reset the Tuner */ if (dev->board.tuner_gpio) cx231xx_gpio_set(dev, dev->board.tuner_gpio); if (dev->cx231xx_reset_analog_tuner) dev->cx231xx_reset_analog_tuner(dev); } break; default: break; } msleep(PWR_SLEEP_INTERVAL); /* For power saving, only enable Pwr_resetout_n when digital TV is selected. */ if (mode == POLARIS_AVMODE_DIGITAL) { tmp |= PWR_RESETOUT_EN; value[0] = (u8) tmp; value[1] = (u8) (tmp >> 8); value[2] = (u8) (tmp >> 16); value[3] = (u8) (tmp >> 24); status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, PWR_CTL_EN, value, 4); msleep(PWR_SLEEP_INTERVAL); } /* update power control for afe */ status = cx231xx_afe_update_power_control(dev, mode); /* update power control for i2s_blk */ status = cx231xx_i2s_blk_update_power_control(dev, mode); status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, PWR_CTL_EN, value, 4); return status; } /****************************************************************************** * S T R E A M C O N T R O L functions * ******************************************************************************/ int cx231xx_start_stream(struct cx231xx *dev, u32 ep_mask) { u8 value[4] = { 0x0, 0x0, 0x0, 0x0 }; u32 tmp = 0; int status = 0; dev_dbg(dev->dev, "%s: ep_mask = %x\n", __func__, ep_mask); status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, EP_MODE_SET, value, 4); if (status < 0) return status; tmp = le32_to_cpu(*((__le32 *) value)); tmp |= ep_mask; value[0] = (u8) tmp; value[1] = (u8) (tmp >> 8); value[2] = (u8) (tmp >> 16); value[3] = (u8) (tmp >> 24); status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, EP_MODE_SET, value, 4); return status; } int cx231xx_stop_stream(struct cx231xx *dev, u32 ep_mask) { u8 value[4] = { 0x0, 0x0, 0x0, 0x0 }; u32 tmp = 0; int status = 0; dev_dbg(dev->dev, "%s: ep_mask = %x\n", __func__, ep_mask); status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, EP_MODE_SET, value, 4); if (status < 0) return status; tmp = le32_to_cpu(*((__le32 *) value)); tmp &= (~ep_mask); value[0] = (u8) tmp; value[1] = (u8) (tmp >> 8); value[2] = (u8) (tmp >> 16); value[3] = (u8) (tmp >> 24); status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, EP_MODE_SET, value, 4); return status; } int cx231xx_initialize_stream_xfer(struct cx231xx *dev, u32 media_type) { int status = 0; u32 value = 0; u8 val[4] = { 0, 0, 0, 0 }; if (dev->udev->speed == USB_SPEED_HIGH) { switch (media_type) { case Audio: dev_dbg(dev->dev, "%s: Audio enter HANC\n", __func__); status = cx231xx_mode_register(dev, TS_MODE_REG, 0x9300); break; case Vbi: dev_dbg(dev->dev, "%s: set vanc registers\n", __func__); status = cx231xx_mode_register(dev, TS_MODE_REG, 0x300); break; case Sliced_cc: dev_dbg(dev->dev, "%s: set hanc registers\n", __func__); status = cx231xx_mode_register(dev, TS_MODE_REG, 0x1300); break; case Raw_Video: dev_dbg(dev->dev, "%s: set video registers\n", __func__); status = cx231xx_mode_register(dev, TS_MODE_REG, 0x100); break; case TS1_serial_mode: dev_dbg(dev->dev, "%s: set ts1 registers", __func__); if (dev->board.has_417) { dev_dbg(dev->dev, "%s: MPEG\n", __func__); value &= 0xFFFFFFFC; value |= 0x3; status = cx231xx_mode_register(dev, TS_MODE_REG, value); val[0] = 0x04; val[1] = 0xA3; val[2] = 0x3B; val[3] = 0x00; status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, TS1_CFG_REG, val, 4); val[0] = 0x00; val[1] = 0x08; 
val[2] = 0x00; val[3] = 0x08; status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, TS1_LENGTH_REG, val, 4); } else { dev_dbg(dev->dev, "%s: BDA\n", __func__); status = cx231xx_mode_register(dev, TS_MODE_REG, 0x101); status = cx231xx_mode_register(dev, TS1_CFG_REG, 0x010); } break; case TS1_parallel_mode: dev_dbg(dev->dev, "%s: set ts1 parallel mode registers\n", __func__); status = cx231xx_mode_register(dev, TS_MODE_REG, 0x100); status = cx231xx_mode_register(dev, TS1_CFG_REG, 0x400); break; } } else { status = cx231xx_mode_register(dev, TS_MODE_REG, 0x101); } return status; } int cx231xx_capture_start(struct cx231xx *dev, int start, u8 media_type) { int rc = -1; u32 ep_mask = -1; struct pcb_config *pcb_config; /* get EP for media type */ pcb_config = (struct pcb_config *)&dev->current_pcb_config; if (pcb_config->config_num) { switch (media_type) { case Raw_Video: ep_mask = ENABLE_EP4; /* ep4 [00:1000] */ break; case Audio: ep_mask = ENABLE_EP3; /* ep3 [00:0100] */ break; case Vbi: ep_mask = ENABLE_EP5; /* ep5 [01:0000] */ break; case Sliced_cc: ep_mask = ENABLE_EP6; /* ep6 [10:0000] */ break; case TS1_serial_mode: case TS1_parallel_mode: ep_mask = ENABLE_EP1; /* ep1 [00:0001] */ break; case TS2: ep_mask = ENABLE_EP2; /* ep2 [00:0010] */ break; } } if (start) { rc = cx231xx_initialize_stream_xfer(dev, media_type); if (rc < 0) return rc; /* enable video capture */ if (ep_mask > 0) rc = cx231xx_start_stream(dev, ep_mask); } else { /* disable video capture */ if (ep_mask > 0) rc = cx231xx_stop_stream(dev, ep_mask); } return rc; } EXPORT_SYMBOL_GPL(cx231xx_capture_start); /***************************************************************************** * G P I O B I T control functions * ******************************************************************************/ static int cx231xx_set_gpio_bit(struct cx231xx *dev, u32 gpio_bit, u32 gpio_val) { int status = 0; gpio_val = (__force u32)cpu_to_le32(gpio_val); status = cx231xx_send_gpio_cmd(dev, gpio_bit, (u8 *)&gpio_val, 4, 0, 0); return status; } static int cx231xx_get_gpio_bit(struct cx231xx *dev, u32 gpio_bit, u32 *gpio_val) { __le32 tmp; int status = 0; status = cx231xx_send_gpio_cmd(dev, gpio_bit, (u8 *)&tmp, 4, 0, 1); *gpio_val = le32_to_cpu(tmp); return status; } /* * cx231xx_set_gpio_direction * Sets the direction of the GPIO pin to input or output * * Parameters : * pin_number : The GPIO Pin number to program the direction for * from 0 to 31 * pin_value : The Direction of the GPIO Pin under reference. * 0 = Input direction * 1 = Output direction */ int cx231xx_set_gpio_direction(struct cx231xx *dev, int pin_number, int pin_value) { int status = 0; u32 value = 0; /* Check for valid pin_number - if 32 , bail out */ if (pin_number >= 32) return -EINVAL; /* input */ if (pin_value == 0) value = dev->gpio_dir & (~(1 << pin_number)); /* clear */ else value = dev->gpio_dir | (1 << pin_number); status = cx231xx_set_gpio_bit(dev, value, dev->gpio_val); /* cache the value for future */ dev->gpio_dir = value; return status; } /* * cx231xx_set_gpio_value * Sets the value of the GPIO pin to Logic high or low. The Pin under * reference should ALREADY BE SET IN OUTPUT MODE !!!!!!!!! * * Parameters : * pin_number : The GPIO Pin number to program the direction for * pin_value : The value of the GPIO Pin under reference. 
* 0 = set it to 0 * 1 = set it to 1 */ int cx231xx_set_gpio_value(struct cx231xx *dev, int pin_number, int pin_value) { int status = 0; u32 value = 0; /* Check for valid pin_number - if 0xFF , bail out */ if (pin_number >= 32) return -EINVAL; /* first do a sanity check - if the Pin is not output, make it output */ if ((dev->gpio_dir & (1 << pin_number)) == 0x00) { /* It was in input mode */ value = dev->gpio_dir | (1 << pin_number); dev->gpio_dir = value; status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); } if (pin_value == 0) value = dev->gpio_val & (~(1 << pin_number)); else value = dev->gpio_val | (1 << pin_number); /* store the value */ dev->gpio_val = value; /* toggle bit0 of GP_IO */ status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); return status; } /***************************************************************************** * G P I O I2C related functions * ******************************************************************************/ int cx231xx_gpio_i2c_start(struct cx231xx *dev) { int status = 0; /* set SCL to output 1 ; set SDA to output 1 */ dev->gpio_dir |= 1 << dev->board.tuner_scl_gpio; dev->gpio_dir |= 1 << dev->board.tuner_sda_gpio; dev->gpio_val |= 1 << dev->board.tuner_scl_gpio; dev->gpio_val |= 1 << dev->board.tuner_sda_gpio; status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); if (status < 0) return -EINVAL; /* set SCL to output 1; set SDA to output 0 */ dev->gpio_val |= 1 << dev->board.tuner_scl_gpio; dev->gpio_val &= ~(1 << dev->board.tuner_sda_gpio); status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); if (status < 0) return -EINVAL; /* set SCL to output 0; set SDA to output 0 */ dev->gpio_val &= ~(1 << dev->board.tuner_scl_gpio); dev->gpio_val &= ~(1 << dev->board.tuner_sda_gpio); status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); if (status < 0) return -EINVAL; return status; } int cx231xx_gpio_i2c_end(struct cx231xx *dev) { int status = 0; /* set SCL to output 0; set SDA to output 0 */ dev->gpio_dir |= 1 << dev->board.tuner_scl_gpio; dev->gpio_dir |= 1 << dev->board.tuner_sda_gpio; dev->gpio_val &= ~(1 << dev->board.tuner_scl_gpio); dev->gpio_val &= ~(1 << dev->board.tuner_sda_gpio); status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); if (status < 0) return -EINVAL; /* set SCL to output 1; set SDA to output 0 */ dev->gpio_val |= 1 << dev->board.tuner_scl_gpio; dev->gpio_val &= ~(1 << dev->board.tuner_sda_gpio); status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); if (status < 0) return -EINVAL; /* set SCL to input ,release SCL cable control set SDA to input ,release SDA cable control */ dev->gpio_dir &= ~(1 << dev->board.tuner_scl_gpio); dev->gpio_dir &= ~(1 << dev->board.tuner_sda_gpio); status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); if (status < 0) return -EINVAL; return status; } int cx231xx_gpio_i2c_write_byte(struct cx231xx *dev, u8 data) { int status = 0; u8 i; /* set SCL to output ; set SDA to output */ dev->gpio_dir |= 1 << dev->board.tuner_scl_gpio; dev->gpio_dir |= 1 << dev->board.tuner_sda_gpio; for (i = 0; i < 8; i++) { if (((data << i) & 0x80) == 0) { /* set SCL to output 0; set SDA to output 0 */ dev->gpio_val &= ~(1 << dev->board.tuner_scl_gpio); dev->gpio_val &= ~(1 << dev->board.tuner_sda_gpio); status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); /* set SCL to output 1; set SDA to output 0 */ dev->gpio_val |= 1 << dev->board.tuner_scl_gpio; status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); /* set 
SCL to output 0; set SDA to output 0 */ dev->gpio_val &= ~(1 << dev->board.tuner_scl_gpio); status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); } else { /* set SCL to output 0; set SDA to output 1 */ dev->gpio_val &= ~(1 << dev->board.tuner_scl_gpio); dev->gpio_val |= 1 << dev->board.tuner_sda_gpio; status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); /* set SCL to output 1; set SDA to output 1 */ dev->gpio_val |= 1 << dev->board.tuner_scl_gpio; status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); /* set SCL to output 0; set SDA to output 1 */ dev->gpio_val &= ~(1 << dev->board.tuner_scl_gpio); status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); } } return status; } int cx231xx_gpio_i2c_read_byte(struct cx231xx *dev, u8 *buf) { u8 value = 0; int status = 0; u32 gpio_logic_value = 0; u8 i; /* read byte */ for (i = 0; i < 8; i++) { /* send write I2c addr */ /* set SCL to output 0; set SDA to input */ dev->gpio_val &= ~(1 << dev->board.tuner_scl_gpio); status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); /* set SCL to output 1; set SDA to input */ dev->gpio_val |= 1 << dev->board.tuner_scl_gpio; status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); /* get SDA data bit */ gpio_logic_value = dev->gpio_val; status = cx231xx_get_gpio_bit(dev, dev->gpio_dir, &dev->gpio_val); if ((dev->gpio_val & (1 << dev->board.tuner_sda_gpio)) != 0) value |= (1 << (8 - i - 1)); dev->gpio_val = gpio_logic_value; } /* set SCL to output 0,finish the read latest SCL signal. !!!set SDA to input, never to modify SDA direction at the same times */ dev->gpio_val &= ~(1 << dev->board.tuner_scl_gpio); status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); /* store the value */ *buf = value & 0xff; return status; } int cx231xx_gpio_i2c_read_ack(struct cx231xx *dev) { int status = 0; u32 gpio_logic_value = 0; int nCnt = 10; int nInit = nCnt; /* clock stretch; set SCL to input; set SDA to input; get SCL value till SCL = 1 */ dev->gpio_dir &= ~(1 << dev->board.tuner_sda_gpio); dev->gpio_dir &= ~(1 << dev->board.tuner_scl_gpio); gpio_logic_value = dev->gpio_val; status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); do { msleep(2); status = cx231xx_get_gpio_bit(dev, dev->gpio_dir, &dev->gpio_val); nCnt--; } while (((dev->gpio_val & (1 << dev->board.tuner_scl_gpio)) == 0) && (nCnt > 0)); if (nCnt == 0) dev_dbg(dev->dev, "No ACK after %d msec -GPIO I2C failed!", nInit * 10); /* * readAck * through clock stretch, slave has given a SCL signal, * so the SDA data can be directly read. */ status = cx231xx_get_gpio_bit(dev, dev->gpio_dir, &dev->gpio_val); if ((dev->gpio_val & 1 << dev->board.tuner_sda_gpio) == 0) { dev->gpio_val = gpio_logic_value; dev->gpio_val &= ~(1 << dev->board.tuner_sda_gpio); status = 0; } else { dev->gpio_val = gpio_logic_value; dev->gpio_val |= (1 << dev->board.tuner_sda_gpio); } /* read SDA end, set the SCL to output 0, after this operation, SDA direction can be changed. 
*/ dev->gpio_val = gpio_logic_value; dev->gpio_dir |= (1 << dev->board.tuner_scl_gpio); dev->gpio_val &= ~(1 << dev->board.tuner_scl_gpio); status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); return status; } int cx231xx_gpio_i2c_write_ack(struct cx231xx *dev) { int status = 0; /* set SDA to output */ dev->gpio_dir |= 1 << dev->board.tuner_sda_gpio; status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); /* set SCL = 0 (output); set SDA = 0 (output) */ dev->gpio_val &= ~(1 << dev->board.tuner_sda_gpio); dev->gpio_val &= ~(1 << dev->board.tuner_scl_gpio); status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); /* set SCL = 1 (output); set SDA = 0 (output) */ dev->gpio_val |= 1 << dev->board.tuner_scl_gpio; status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); /* set SCL = 0 (output); set SDA = 0 (output) */ dev->gpio_val &= ~(1 << dev->board.tuner_scl_gpio); status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); /* set SDA to input,and then the slave will read data from SDA. */ dev->gpio_dir &= ~(1 << dev->board.tuner_sda_gpio); status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); return status; } int cx231xx_gpio_i2c_write_nak(struct cx231xx *dev) { int status = 0; /* set scl to output ; set sda to input */ dev->gpio_dir |= 1 << dev->board.tuner_scl_gpio; dev->gpio_dir &= ~(1 << dev->board.tuner_sda_gpio); status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); /* set scl to output 0; set sda to input */ dev->gpio_val &= ~(1 << dev->board.tuner_scl_gpio); status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); /* set scl to output 1; set sda to input */ dev->gpio_val |= 1 << dev->board.tuner_scl_gpio; status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); return status; } /***************************************************************************** * G P I O I2C related functions * ******************************************************************************/ /* cx231xx_gpio_i2c_read * Function to read data from gpio based I2C interface */ int cx231xx_gpio_i2c_read(struct cx231xx *dev, u8 dev_addr, u8 *buf, u8 len) { int status = 0; int i = 0; /* get the lock */ mutex_lock(&dev->gpio_i2c_lock); /* start */ status = cx231xx_gpio_i2c_start(dev); /* write dev_addr */ status = cx231xx_gpio_i2c_write_byte(dev, (dev_addr << 1) + 1); /* readAck */ status = cx231xx_gpio_i2c_read_ack(dev); /* read data */ for (i = 0; i < len; i++) { /* read data */ buf[i] = 0; status = cx231xx_gpio_i2c_read_byte(dev, &buf[i]); if ((i + 1) != len) { /* only do write ack if we more length */ status = cx231xx_gpio_i2c_write_ack(dev); } } /* write NAK - inform reads are complete */ status = cx231xx_gpio_i2c_write_nak(dev); /* write end */ status = cx231xx_gpio_i2c_end(dev); /* release the lock */ mutex_unlock(&dev->gpio_i2c_lock); return status; } /* cx231xx_gpio_i2c_write * Function to write data to gpio based I2C interface */ int cx231xx_gpio_i2c_write(struct cx231xx *dev, u8 dev_addr, u8 *buf, u8 len) { int i = 0; /* get the lock */ mutex_lock(&dev->gpio_i2c_lock); /* start */ cx231xx_gpio_i2c_start(dev); /* write dev_addr */ cx231xx_gpio_i2c_write_byte(dev, dev_addr << 1); /* read Ack */ cx231xx_gpio_i2c_read_ack(dev); for (i = 0; i < len; i++) { /* Write data */ cx231xx_gpio_i2c_write_byte(dev, buf[i]); /* read Ack */ cx231xx_gpio_i2c_read_ack(dev); } /* write End */ cx231xx_gpio_i2c_end(dev); /* release the lock */ mutex_unlock(&dev->gpio_i2c_lock); return 0; } |
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2017-2018 HUAWEI, Inc.
* https://www.huawei.com/ * Copyright (C) 2021, Alibaba Cloud */ #include "internal.h" #include <linux/sched/mm.h> #include <trace/events/erofs.h> void erofs_unmap_metabuf(struct erofs_buf *buf) { if (!buf->base) return; kunmap_local(buf->base); buf->base = NULL; } void erofs_put_metabuf(struct erofs_buf *buf) { if (!buf->page) return; erofs_unmap_metabuf(buf); folio_put(page_folio(buf->page)); buf->page = NULL; } void *erofs_bread(struct erofs_buf *buf, erofs_off_t offset, enum erofs_kmap_type type) { pgoff_t index = offset >> PAGE_SHIFT; struct folio *folio = NULL; if (buf->page) { folio = page_folio(buf->page); if (folio_file_page(folio, index) != buf->page) erofs_unmap_metabuf(buf); } if (!folio || !folio_contains(folio, index)) { erofs_put_metabuf(buf); folio = read_mapping_folio(buf->mapping, index, buf->file); if (IS_ERR(folio)) return folio; } buf->page = folio_file_page(folio, index); if (!buf->base && type == EROFS_KMAP) buf->base = kmap_local_page(buf->page); if (type == EROFS_NO_KMAP) return NULL; return buf->base + (offset & ~PAGE_MASK); } void erofs_init_metabuf(struct erofs_buf *buf, struct super_block *sb) { struct erofs_sb_info *sbi = EROFS_SB(sb); buf->file = NULL; if (erofs_is_fileio_mode(sbi)) { buf->file = sbi->dif0.file; /* some fs like FUSE needs it */ buf->mapping = buf->file->f_mapping; } else if (erofs_is_fscache_mode(sb)) buf->mapping = sbi->dif0.fscache->inode->i_mapping; else buf->mapping = sb->s_bdev->bd_mapping; } void *erofs_read_metabuf(struct erofs_buf *buf, struct super_block *sb, erofs_off_t offset, enum erofs_kmap_type type) { erofs_init_metabuf(buf, sb); return erofs_bread(buf, offset, type); } static int erofs_map_blocks_flatmode(struct inode *inode, struct erofs_map_blocks *map) { struct erofs_inode *vi = EROFS_I(inode); struct super_block *sb = inode->i_sb; bool tailendpacking = (vi->datalayout == EROFS_INODE_FLAT_INLINE); erofs_blk_t lastblk = erofs_iblks(inode) - tailendpacking; map->m_flags = EROFS_MAP_MAPPED; /* no hole in flat inodes */ if (map->m_la < erofs_pos(sb, lastblk)) { map->m_pa = erofs_pos(sb, vi->raw_blkaddr) + map->m_la; map->m_plen = erofs_pos(sb, lastblk) - map->m_la; } else { DBG_BUGON(!tailendpacking); map->m_pa = erofs_iloc(inode) + vi->inode_isize + vi->xattr_isize + erofs_blkoff(sb, map->m_la); map->m_plen = inode->i_size - map->m_la; /* inline data should be located in the same meta block */ if (erofs_blkoff(sb, map->m_pa) + map->m_plen > sb->s_blocksize) { erofs_err(sb, "inline data across blocks @ nid %llu", vi->nid); DBG_BUGON(1); return -EFSCORRUPTED; } map->m_flags |= EROFS_MAP_META; } return 0; } int erofs_map_blocks(struct inode *inode, struct erofs_map_blocks *map) { struct super_block *sb = inode->i_sb; struct erofs_inode *vi = EROFS_I(inode); struct erofs_inode_chunk_index *idx; struct erofs_buf buf = __EROFS_BUF_INITIALIZER; u64 chunknr; unsigned int unit; erofs_off_t pos; void *kaddr; int err = 0; trace_erofs_map_blocks_enter(inode, map, 0); map->m_deviceid = 0; if (map->m_la >= inode->i_size) { /* leave out-of-bound access unmapped */ map->m_flags = 0; map->m_plen = map->m_llen; goto out; } if (vi->datalayout != EROFS_INODE_CHUNK_BASED) { err = erofs_map_blocks_flatmode(inode, map); goto out; } if (vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES) unit = sizeof(*idx); /* chunk index */ else unit = EROFS_BLOCK_MAP_ENTRY_SIZE; /* block map */ chunknr = map->m_la >> vi->chunkbits; pos = ALIGN(erofs_iloc(inode) + vi->inode_isize + vi->xattr_isize, unit) + unit * chunknr; kaddr = erofs_read_metabuf(&buf, sb, pos, 
EROFS_KMAP); if (IS_ERR(kaddr)) { err = PTR_ERR(kaddr); goto out; } map->m_la = chunknr << vi->chunkbits; map->m_plen = min_t(erofs_off_t, 1UL << vi->chunkbits, round_up(inode->i_size - map->m_la, sb->s_blocksize)); /* handle block map */ if (!(vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES)) { __le32 *blkaddr = kaddr; if (le32_to_cpu(*blkaddr) == EROFS_NULL_ADDR) { map->m_flags = 0; } else { map->m_pa = erofs_pos(sb, le32_to_cpu(*blkaddr)); map->m_flags = EROFS_MAP_MAPPED; } goto out_unlock; } /* parse chunk indexes */ idx = kaddr; switch (le32_to_cpu(idx->blkaddr)) { case EROFS_NULL_ADDR: map->m_flags = 0; break; default: map->m_deviceid = le16_to_cpu(idx->device_id) & EROFS_SB(sb)->device_id_mask; map->m_pa = erofs_pos(sb, le32_to_cpu(idx->blkaddr)); map->m_flags = EROFS_MAP_MAPPED; break; } out_unlock: erofs_put_metabuf(&buf); out: if (!err) map->m_llen = map->m_plen; trace_erofs_map_blocks_exit(inode, map, 0, err); return err; } static void erofs_fill_from_devinfo(struct erofs_map_dev *map, struct super_block *sb, struct erofs_device_info *dif) { map->m_sb = sb; map->m_dif = dif; map->m_bdev = NULL; if (dif->file && S_ISBLK(file_inode(dif->file)->i_mode)) map->m_bdev = file_bdev(dif->file); } int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map) { struct erofs_dev_context *devs = EROFS_SB(sb)->devs; struct erofs_device_info *dif; erofs_off_t startoff, length; int id; erofs_fill_from_devinfo(map, sb, &EROFS_SB(sb)->dif0); map->m_bdev = sb->s_bdev; /* use s_bdev for the primary device */ if (map->m_deviceid) { down_read(&devs->rwsem); dif = idr_find(&devs->tree, map->m_deviceid - 1); if (!dif) { up_read(&devs->rwsem); return -ENODEV; } if (devs->flatdev) { map->m_pa += erofs_pos(sb, dif->mapped_blkaddr); up_read(&devs->rwsem); return 0; } erofs_fill_from_devinfo(map, sb, dif); up_read(&devs->rwsem); } else if (devs->extra_devices && !devs->flatdev) { down_read(&devs->rwsem); idr_for_each_entry(&devs->tree, dif, id) { if (!dif->mapped_blkaddr) continue; startoff = erofs_pos(sb, dif->mapped_blkaddr); length = erofs_pos(sb, dif->blocks); if (map->m_pa >= startoff && map->m_pa < startoff + length) { map->m_pa -= startoff; erofs_fill_from_devinfo(map, sb, dif); break; } } up_read(&devs->rwsem); } return 0; } /* * bit 30: I/O error occurred on this folio * bit 0 - 29: remaining parts to complete this folio */ #define EROFS_ONLINEFOLIO_EIO (1 << 30) void erofs_onlinefolio_init(struct folio *folio) { union { atomic_t o; void *v; } u = { .o = ATOMIC_INIT(1) }; folio->private = u.v; /* valid only if file-backed folio is locked */ } void erofs_onlinefolio_split(struct folio *folio) { atomic_inc((atomic_t *)&folio->private); } void erofs_onlinefolio_end(struct folio *folio, int err) { int orig, v; do { orig = atomic_read((atomic_t *)&folio->private); v = (orig - 1) | (err ? 
EROFS_ONLINEFOLIO_EIO : 0); } while (atomic_cmpxchg((atomic_t *)&folio->private, orig, v) != orig); if (v & ~EROFS_ONLINEFOLIO_EIO) return; folio->private = 0; folio_end_read(folio, !(v & EROFS_ONLINEFOLIO_EIO)); } static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length, unsigned int flags, struct iomap *iomap, struct iomap *srcmap) { int ret; struct super_block *sb = inode->i_sb; struct erofs_map_blocks map; struct erofs_map_dev mdev; map.m_la = offset; map.m_llen = length; ret = erofs_map_blocks(inode, &map); if (ret < 0) return ret; mdev = (struct erofs_map_dev) { .m_deviceid = map.m_deviceid, .m_pa = map.m_pa, }; ret = erofs_map_dev(sb, &mdev); if (ret) return ret; iomap->offset = map.m_la; if (flags & IOMAP_DAX) iomap->dax_dev = mdev.m_dif->dax_dev; else iomap->bdev = mdev.m_bdev; iomap->length = map.m_llen; iomap->flags = 0; iomap->private = NULL; if (!(map.m_flags & EROFS_MAP_MAPPED)) { iomap->type = IOMAP_HOLE; iomap->addr = IOMAP_NULL_ADDR; if (!iomap->length) iomap->length = length; return 0; } if (map.m_flags & EROFS_MAP_META) { void *ptr; struct erofs_buf buf = __EROFS_BUF_INITIALIZER; iomap->type = IOMAP_INLINE; ptr = erofs_read_metabuf(&buf, sb, mdev.m_pa, EROFS_KMAP); if (IS_ERR(ptr)) return PTR_ERR(ptr); iomap->inline_data = ptr; iomap->private = buf.base; } else { iomap->type = IOMAP_MAPPED; iomap->addr = mdev.m_pa; if (flags & IOMAP_DAX) iomap->addr += mdev.m_dif->dax_part_off; } return 0; } static int erofs_iomap_end(struct inode *inode, loff_t pos, loff_t length, ssize_t written, unsigned int flags, struct iomap *iomap) { void *ptr = iomap->private; if (ptr) { struct erofs_buf buf = { .page = kmap_to_page(ptr), .base = ptr, }; DBG_BUGON(iomap->type != IOMAP_INLINE); erofs_put_metabuf(&buf); } else { DBG_BUGON(iomap->type == IOMAP_INLINE); } return written; } static const struct iomap_ops erofs_iomap_ops = { .iomap_begin = erofs_iomap_begin, .iomap_end = erofs_iomap_end, }; int erofs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, u64 start, u64 len) { if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout)) { #ifdef CONFIG_EROFS_FS_ZIP return iomap_fiemap(inode, fieinfo, start, len, &z_erofs_iomap_report_ops); #else return -EOPNOTSUPP; #endif } return iomap_fiemap(inode, fieinfo, start, len, &erofs_iomap_ops); } /* * since we dont have write or truncate flows, so no inode * locking needs to be held at the moment. 
*/ static int erofs_read_folio(struct file *file, struct folio *folio) { return iomap_read_folio(folio, &erofs_iomap_ops); } static void erofs_readahead(struct readahead_control *rac) { return iomap_readahead(rac, &erofs_iomap_ops); } static sector_t erofs_bmap(struct address_space *mapping, sector_t block) { return iomap_bmap(mapping, block, &erofs_iomap_ops); } static ssize_t erofs_file_read_iter(struct kiocb *iocb, struct iov_iter *to) { struct inode *inode = file_inode(iocb->ki_filp); /* no need taking (shared) inode lock since it's a ro filesystem */ if (!iov_iter_count(to)) return 0; #ifdef CONFIG_FS_DAX if (IS_DAX(inode)) return dax_iomap_rw(iocb, to, &erofs_iomap_ops); #endif if ((iocb->ki_flags & IOCB_DIRECT) && inode->i_sb->s_bdev) return iomap_dio_rw(iocb, to, &erofs_iomap_ops, NULL, 0, NULL, 0); return filemap_read(iocb, to, 0); } /* for uncompressed (aligned) files and raw access for other files */ const struct address_space_operations erofs_aops = { .read_folio = erofs_read_folio, .readahead = erofs_readahead, .bmap = erofs_bmap, .direct_IO = noop_direct_IO, .release_folio = iomap_release_folio, .invalidate_folio = iomap_invalidate_folio, }; #ifdef CONFIG_FS_DAX static vm_fault_t erofs_dax_huge_fault(struct vm_fault *vmf, unsigned int order) { return dax_iomap_fault(vmf, order, NULL, NULL, &erofs_iomap_ops); } static vm_fault_t erofs_dax_fault(struct vm_fault *vmf) { return erofs_dax_huge_fault(vmf, 0); } static const struct vm_operations_struct erofs_dax_vm_ops = { .fault = erofs_dax_fault, .huge_fault = erofs_dax_huge_fault, }; static int erofs_file_mmap(struct file *file, struct vm_area_struct *vma) { if (!IS_DAX(file_inode(file))) return generic_file_readonly_mmap(file, vma); if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) return -EINVAL; vma->vm_ops = &erofs_dax_vm_ops; vm_flags_set(vma, VM_HUGEPAGE); return 0; } #else #define erofs_file_mmap generic_file_readonly_mmap #endif static loff_t erofs_file_llseek(struct file *file, loff_t offset, int whence) { struct inode *inode = file->f_mapping->host; const struct iomap_ops *ops = &erofs_iomap_ops; if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout)) #ifdef CONFIG_EROFS_FS_ZIP ops = &z_erofs_iomap_report_ops; #else return generic_file_llseek(file, offset, whence); #endif if (whence == SEEK_HOLE) offset = iomap_seek_hole(inode, offset, ops); else if (whence == SEEK_DATA) offset = iomap_seek_data(inode, offset, ops); else return generic_file_llseek(file, offset, whence); if (offset < 0) return offset; return vfs_setpos(file, offset, inode->i_sb->s_maxbytes); } const struct file_operations erofs_file_fops = { .llseek = erofs_file_llseek, .read_iter = erofs_file_read_iter, .mmap = erofs_file_mmap, .get_unmapped_area = thp_get_unmapped_area, .splice_read = filemap_splice_read, }; |
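/*
 * Illustrative usage sketch (not part of the original fs/erofs/data.c above):
 * the typical pattern for the metadata buffer helpers -- map a metadata range
 * with erofs_read_metabuf(), use the returned pointer, then release it with
 * erofs_put_metabuf().  The function name example_peek_metadata and the choice
 * of EROFS_SUPER_OFFSET as the offset to read are assumptions made only for
 * this example.
 */
#include "internal.h"

static int example_peek_metadata(struct super_block *sb)
{
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	void *ptr;

	/* map the metadata block that contains the requested offset */
	ptr = erofs_read_metabuf(&buf, sb, EROFS_SUPER_OFFSET, EROFS_KMAP);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	/* ... parse the on-disk metadata at "ptr" here ... */

	/* unmap and drop the page reference taken by erofs_bread() */
	erofs_put_metabuf(&buf);
	return 0;
}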
/* SPDX-License-Identifier: GPL-2.0 */ /* * Dynamic queue limits (dql) - Definitions * * Copyright (c) 2011, Tom Herbert <therbert@google.com> * * This header file contains the definitions for dynamic queue limits (dql). * dql would be used in conjunction with a producer/consumer type queue * (possibly a HW queue). Such a queue would have these general properties: * * 1) Objects are queued up to some limit specified as number of objects. * 2) Periodically a completion process executes which retires consumed * objects. * 3) Starvation occurs when limit has been reached, all queued data has * actually been consumed, but completion processing has not yet run * so queuing new data is blocked. * 4) Minimizing the amount of queued data is desirable. * * The goal of dql is to calculate the limit as the minimum number of objects * needed to prevent starvation. * * The primary functions of dql are: * dql_queued - called when objects are enqueued to record number of objects * dql_avail - returns how many objects are available to be queued based * on the object limit and how many objects are already enqueued * dql_completed - called at completion time to indicate how many objects * were retired from the queue * * The dql implementation does not implement any locking for the dql data * structures; the higher layer should provide this. dql_queued should * be serialized to prevent concurrent execution of the function; this * is also true for dql_completed. However, dql_queued and dql_completed can * be executed concurrently (i.e. they can be protected by different locks).
*/ #ifndef _LINUX_DQL_H #define _LINUX_DQL_H #ifdef __KERNEL__ #include <linux/bitops.h> #include <asm/bug.h> #define DQL_HIST_LEN 4 #define DQL_HIST_ENT(dql, idx) ((dql)->history[(idx) % DQL_HIST_LEN]) struct dql { /* Fields accessed in enqueue path (dql_queued) */ unsigned int num_queued; /* Total ever queued */ unsigned int adj_limit; /* limit + num_completed */ unsigned int last_obj_cnt; /* Count at last queuing */ /* Stall threshold (in jiffies), defined by user */ unsigned short stall_thrs; unsigned long history_head; /* top 58 bits of jiffies */ /* stall entries, a bit per entry */ unsigned long history[DQL_HIST_LEN]; /* Fields accessed only by completion path (dql_completed) */ unsigned int limit ____cacheline_aligned_in_smp; /* Current limit */ unsigned int num_completed; /* Total ever completed */ unsigned int prev_ovlimit; /* Previous over limit */ unsigned int prev_num_queued; /* Previous queue total */ unsigned int prev_last_obj_cnt; /* Previous queuing cnt */ unsigned int lowest_slack; /* Lowest slack found */ unsigned long slack_start_time; /* Time slacks seen */ /* Configuration */ unsigned int max_limit; /* Max limit */ unsigned int min_limit; /* Minimum limit */ unsigned int slack_hold_time; /* Time to measure slack */ /* Longest stall detected, reported to user */ unsigned short stall_max; unsigned long last_reap; /* Last reap (in jiffies) */ unsigned long stall_cnt; /* Number of stalls */ }; /* Set some static maximums */ #define DQL_MAX_OBJECT (UINT_MAX / 16) #define DQL_MAX_LIMIT ((UINT_MAX / 2) - DQL_MAX_OBJECT) /* Populate the bitmap to be processed later in dql_check_stall() */ static inline void dql_queue_stall(struct dql *dql) { unsigned long map, now, now_hi, i; now = jiffies; now_hi = now / BITS_PER_LONG; /* The following code sets a bit in the ring buffer, where each * bit tracks the time a packet was queued. The dql->history buffer * tracks DQL_HIST_LEN * BITS_PER_LONG time slots (in jiffies). */ if (unlikely(now_hi != dql->history_head)) { /* About to reuse slots, clear them */ for (i = 0; i < DQL_HIST_LEN; i++) { /* Multiplication masks high bits */ if (now_hi * BITS_PER_LONG == (dql->history_head + i) * BITS_PER_LONG) break; DQL_HIST_ENT(dql, dql->history_head + i + 1) = 0; } /* pairs with smp_rmb() in dql_check_stall() */ smp_wmb(); WRITE_ONCE(dql->history_head, now_hi); } /* __set_bit() does not guarantee WRITE_ONCE() semantics */ map = DQL_HIST_ENT(dql, now_hi); /* Populate the history with an entry (bit) per queued */ if (!(map & BIT_MASK(now))) WRITE_ONCE(DQL_HIST_ENT(dql, now_hi), map | BIT_MASK(now)); } /* * Record number of objects queued. Assumes that caller has already checked * availability in the queue with dql_avail. */ static inline void dql_queued(struct dql *dql, unsigned int count) { if (WARN_ON_ONCE(count > DQL_MAX_OBJECT)) return; WRITE_ONCE(dql->last_obj_cnt, count); /* We want to force a write first, so that the CPU does not attempt * to get the cache line containing last_obj_cnt, num_queued, adj_limit * in Shared state, but directly does a Request For Ownership. * It is only a hint; we use barrier() only. */ barrier(); dql->num_queued += count; /* Only populate stall information if the threshold is set */ if (READ_ONCE(dql->stall_thrs)) dql_queue_stall(dql); } /* Returns how many objects can be queued, < 0 indicates over limit. */ static inline int dql_avail(const struct dql *dql) { return READ_ONCE(dql->adj_limit) - READ_ONCE(dql->num_queued); } /* Record number of completed objects and recalculate the limit.
*/ void dql_completed(struct dql *dql, unsigned int count); /* Reset dql state */ void dql_reset(struct dql *dql); /* Initialize dql state */ void dql_init(struct dql *dql, unsigned int hold_time); #endif /* __KERNEL__ */ #endif /* _LINUX_DQL_H */
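/*
 * Illustrative usage sketch (not part of the original include/linux/dql.h
 * above): how a producer/consumer queue might drive the dql API declared
 * above.  The function names example_queue_objects and
 * example_complete_objects are assumptions made only for this example; real
 * users (e.g. the networking byte queue limits layer) wrap these calls in
 * their own queue start/stop logic.
 */
#include <linux/dql.h>

/* Producer path: record the enqueue, then check whether to throttle. */
static int example_queue_objects(struct dql *dql, unsigned int objs)
{
	dql_queued(dql, objs);
	return dql_avail(dql);	/* < 0: stop producing until completions arrive */
}

/* Completion path: retire objects so dql can recompute the limit. */
static int example_complete_objects(struct dql *dql, unsigned int objs)
{
	dql_completed(dql, objs);
	return dql_avail(dql);	/* >= 0: safe to let the producer run again */
}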
// SPDX-License-Identifier: GPL-2.0-only
/*
 * fs/kernfs/file.c - kernfs file implementation
 *
 * Copyright (c) 2001-3 Patrick Mochel
 * Copyright (c) 2007 SUSE Linux Products GmbH
 * Copyright (c) 2007, 2013 Tejun Heo <tj@kernel.org>
 */

#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/pagemap.h>
#include <linux/sched/mm.h>
#include <linux/fsnotify.h>
#include <linux/uio.h>

#include "kernfs-internal.h"

struct kernfs_open_node {
	struct rcu_head		rcu_head;
	atomic_t		event;
	wait_queue_head_t	poll;
	struct list_head	files; /* goes through kernfs_open_file.list */
	unsigned int		nr_mmapped;
	unsigned int		nr_to_release;
};

/*
 * kernfs_notify() may be called from any context and bounces notifications
 * through a work item. To minimize space overhead in kernfs_node, the
 * pending queue is implemented as a singly linked list of kernfs_nodes.
 * The list is terminated with the self pointer so that whether a
 * kernfs_node is on the list or not can be determined by testing the next
 * pointer for %NULL.
 */
#define KERNFS_NOTIFY_EOL		((void *)&kernfs_notify_list)

static DEFINE_SPINLOCK(kernfs_notify_lock);
static struct kernfs_node *kernfs_notify_list = KERNFS_NOTIFY_EOL;

static inline struct mutex *kernfs_open_file_mutex_ptr(struct kernfs_node *kn)
{
	int idx = hash_ptr(kn, NR_KERNFS_LOCK_BITS);

	return &kernfs_locks->open_file_mutex[idx];
}

static inline struct mutex *kernfs_open_file_mutex_lock(struct kernfs_node *kn)
{
	struct mutex *lock;

	lock = kernfs_open_file_mutex_ptr(kn);

	mutex_lock(lock);

	return lock;
}

/**
 * of_on - Get the kernfs_open_node of the specified kernfs_open_file
 * @of: target kernfs_open_file
 *
 * Return: the kernfs_open_node of the kernfs_open_file
 */
static struct kernfs_open_node *of_on(struct kernfs_open_file *of)
{
	return rcu_dereference_protected(of->kn->attr.open,
					 !list_empty(&of->list));
}

/**
 * kernfs_deref_open_node_locked - Get kernfs_open_node corresponding to @kn
 *
 * @kn: target kernfs_node.
 *
 * Fetch and return ->attr.open of @kn when caller holds the
 * kernfs_open_file_mutex_ptr(kn).
 *
 * Update of ->attr.open happens under kernfs_open_file_mutex_ptr(kn).
So when * the caller guarantees that this mutex is being held, other updaters can't * change ->attr.open and this means that we can safely deref ->attr.open * outside RCU read-side critical section. * * The caller needs to make sure that kernfs_open_file_mutex is held. * * Return: @kn->attr.open when kernfs_open_file_mutex is held. */ static struct kernfs_open_node * kernfs_deref_open_node_locked(struct kernfs_node *kn) { return rcu_dereference_protected(kn->attr.open, lockdep_is_held(kernfs_open_file_mutex_ptr(kn))); } static struct kernfs_open_file *kernfs_of(struct file *file) { return ((struct seq_file *)file->private_data)->private; } /* * Determine the kernfs_ops for the given kernfs_node. This function must * be called while holding an active reference. */ static const struct kernfs_ops *kernfs_ops(struct kernfs_node *kn) { if (kn->flags & KERNFS_LOCKDEP) lockdep_assert_held(kn); return kn->attr.ops; } /* * As kernfs_seq_stop() is also called after kernfs_seq_start() or * kernfs_seq_next() failure, it needs to distinguish whether it's stopping * a seq_file iteration which is fully initialized with an active reference * or an aborted kernfs_seq_start() due to get_active failure. The * position pointer is the only context for each seq_file iteration and * thus the stop condition should be encoded in it. As the return value is * directly visible to userland, ERR_PTR(-ENODEV) is the only acceptable * choice to indicate get_active failure. * * Unfortunately, this is complicated due to the optional custom seq_file * operations which may return ERR_PTR(-ENODEV) too. kernfs_seq_stop() * can't distinguish whether ERR_PTR(-ENODEV) is from get_active failure or * custom seq_file operations and thus can't decide whether put_active * should be performed or not only on ERR_PTR(-ENODEV). * * This is worked around by factoring out the custom seq_stop() and * put_active part into kernfs_seq_stop_active(), skipping it from * kernfs_seq_stop() if ERR_PTR(-ENODEV) while invoking it directly after * custom seq_file operations fail with ERR_PTR(-ENODEV) - this ensures * that kernfs_seq_stop_active() is skipped only after get_active failure. */ static void kernfs_seq_stop_active(struct seq_file *sf, void *v) { struct kernfs_open_file *of = sf->private; const struct kernfs_ops *ops = kernfs_ops(of->kn); if (ops->seq_stop) ops->seq_stop(sf, v); kernfs_put_active(of->kn); } static void *kernfs_seq_start(struct seq_file *sf, loff_t *ppos) { struct kernfs_open_file *of = sf->private; const struct kernfs_ops *ops; /* * @of->mutex nests outside active ref and is primarily to ensure that * the ops aren't called concurrently for the same open file. */ mutex_lock(&of->mutex); if (!kernfs_get_active(of->kn)) return ERR_PTR(-ENODEV); ops = kernfs_ops(of->kn); if (ops->seq_start) { void *next = ops->seq_start(sf, ppos); /* see the comment above kernfs_seq_stop_active() */ if (next == ERR_PTR(-ENODEV)) kernfs_seq_stop_active(sf, next); return next; } return single_start(sf, ppos); } static void *kernfs_seq_next(struct seq_file *sf, void *v, loff_t *ppos) { struct kernfs_open_file *of = sf->private; const struct kernfs_ops *ops = kernfs_ops(of->kn); if (ops->seq_next) { void *next = ops->seq_next(sf, v, ppos); /* see the comment above kernfs_seq_stop_active() */ if (next == ERR_PTR(-ENODEV)) kernfs_seq_stop_active(sf, next); return next; } else { /* * The same behavior and code as single_open(), always * terminate after the initial read. 
*/ ++*ppos; return NULL; } } static void kernfs_seq_stop(struct seq_file *sf, void *v) { struct kernfs_open_file *of = sf->private; if (v != ERR_PTR(-ENODEV)) kernfs_seq_stop_active(sf, v); mutex_unlock(&of->mutex); } static int kernfs_seq_show(struct seq_file *sf, void *v) { struct kernfs_open_file *of = sf->private; of->event = atomic_read(&of_on(of)->event); return of->kn->attr.ops->seq_show(sf, v); } static const struct seq_operations kernfs_seq_ops = { .start = kernfs_seq_start, .next = kernfs_seq_next, .stop = kernfs_seq_stop, .show = kernfs_seq_show, }; /* * As reading a bin file can have side-effects, the exact offset and bytes * specified in read(2) call should be passed to the read callback making * it difficult to use seq_file. Implement simplistic custom buffering for * bin files. */ static ssize_t kernfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter) { struct kernfs_open_file *of = kernfs_of(iocb->ki_filp); ssize_t len = min_t(size_t, iov_iter_count(iter), PAGE_SIZE); const struct kernfs_ops *ops; char *buf; buf = of->prealloc_buf; if (buf) mutex_lock(&of->prealloc_mutex); else buf = kmalloc(len, GFP_KERNEL); if (!buf) return -ENOMEM; /* * @of->mutex nests outside active ref and is used both to ensure that * the ops aren't called concurrently for the same open file. */ mutex_lock(&of->mutex); if (!kernfs_get_active(of->kn)) { len = -ENODEV; mutex_unlock(&of->mutex); goto out_free; } of->event = atomic_read(&of_on(of)->event); ops = kernfs_ops(of->kn); if (ops->read) len = ops->read(of, buf, len, iocb->ki_pos); else len = -EINVAL; kernfs_put_active(of->kn); mutex_unlock(&of->mutex); if (len < 0) goto out_free; if (copy_to_iter(buf, len, iter) != len) { len = -EFAULT; goto out_free; } iocb->ki_pos += len; out_free: if (buf == of->prealloc_buf) mutex_unlock(&of->prealloc_mutex); else kfree(buf); return len; } static ssize_t kernfs_fop_read_iter(struct kiocb *iocb, struct iov_iter *iter) { if (kernfs_of(iocb->ki_filp)->kn->flags & KERNFS_HAS_SEQ_SHOW) return seq_read_iter(iocb, iter); return kernfs_file_read_iter(iocb, iter); } /* * Copy data in from userland and pass it to the matching kernfs write * operation. * * There is no easy way for us to know if userspace is only doing a partial * write, so we don't support them. We expect the entire buffer to come on * the first write. Hint: if you're writing a value, first read the file, * modify only the value you're changing, then write entire buffer * back. */ static ssize_t kernfs_fop_write_iter(struct kiocb *iocb, struct iov_iter *iter) { struct kernfs_open_file *of = kernfs_of(iocb->ki_filp); ssize_t len = iov_iter_count(iter); const struct kernfs_ops *ops; char *buf; if (of->atomic_write_len) { if (len > of->atomic_write_len) return -E2BIG; } else { len = min_t(size_t, len, PAGE_SIZE); } buf = of->prealloc_buf; if (buf) mutex_lock(&of->prealloc_mutex); else buf = kmalloc(len + 1, GFP_KERNEL); if (!buf) return -ENOMEM; if (copy_from_iter(buf, len, iter) != len) { len = -EFAULT; goto out_free; } buf[len] = '\0'; /* guarantee string termination */ /* * @of->mutex nests outside active ref and is used both to ensure that * the ops aren't called concurrently for the same open file. 
*/ mutex_lock(&of->mutex); if (!kernfs_get_active(of->kn)) { mutex_unlock(&of->mutex); len = -ENODEV; goto out_free; } ops = kernfs_ops(of->kn); if (ops->write) len = ops->write(of, buf, len, iocb->ki_pos); else len = -EINVAL; kernfs_put_active(of->kn); mutex_unlock(&of->mutex); if (len > 0) iocb->ki_pos += len; out_free: if (buf == of->prealloc_buf) mutex_unlock(&of->prealloc_mutex); else kfree(buf); return len; } static void kernfs_vma_open(struct vm_area_struct *vma) { struct file *file = vma->vm_file; struct kernfs_open_file *of = kernfs_of(file); if (!of->vm_ops) return; if (!kernfs_get_active(of->kn)) return; if (of->vm_ops->open) of->vm_ops->open(vma); kernfs_put_active(of->kn); } static vm_fault_t kernfs_vma_fault(struct vm_fault *vmf) { struct file *file = vmf->vma->vm_file; struct kernfs_open_file *of = kernfs_of(file); vm_fault_t ret; if (!of->vm_ops) return VM_FAULT_SIGBUS; if (!kernfs_get_active(of->kn)) return VM_FAULT_SIGBUS; ret = VM_FAULT_SIGBUS; if (of->vm_ops->fault) ret = of->vm_ops->fault(vmf); kernfs_put_active(of->kn); return ret; } static vm_fault_t kernfs_vma_page_mkwrite(struct vm_fault *vmf) { struct file *file = vmf->vma->vm_file; struct kernfs_open_file *of = kernfs_of(file); vm_fault_t ret; if (!of->vm_ops) return VM_FAULT_SIGBUS; if (!kernfs_get_active(of->kn)) return VM_FAULT_SIGBUS; ret = 0; if (of->vm_ops->page_mkwrite) ret = of->vm_ops->page_mkwrite(vmf); else file_update_time(file); kernfs_put_active(of->kn); return ret; } static int kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr, void *buf, int len, int write) { struct file *file = vma->vm_file; struct kernfs_open_file *of = kernfs_of(file); int ret; if (!of->vm_ops) return -EINVAL; if (!kernfs_get_active(of->kn)) return -EINVAL; ret = -EINVAL; if (of->vm_ops->access) ret = of->vm_ops->access(vma, addr, buf, len, write); kernfs_put_active(of->kn); return ret; } static const struct vm_operations_struct kernfs_vm_ops = { .open = kernfs_vma_open, .fault = kernfs_vma_fault, .page_mkwrite = kernfs_vma_page_mkwrite, .access = kernfs_vma_access, }; static int kernfs_fop_mmap(struct file *file, struct vm_area_struct *vma) { struct kernfs_open_file *of = kernfs_of(file); const struct kernfs_ops *ops; int rc; /* * mmap path and of->mutex are prone to triggering spurious lockdep * warnings and we don't want to add spurious locking dependency * between the two. Check whether mmap is actually implemented * without grabbing @of->mutex by testing HAS_MMAP flag. See the * comment in kernfs_fop_open() for more details. */ if (!(of->kn->flags & KERNFS_HAS_MMAP)) return -ENODEV; mutex_lock(&of->mutex); rc = -ENODEV; if (!kernfs_get_active(of->kn)) goto out_unlock; ops = kernfs_ops(of->kn); rc = ops->mmap(of, vma); if (rc) goto out_put; /* * PowerPC's pci_mmap of legacy_mem uses shmem_zero_setup() * to satisfy versions of X which crash if the mmap fails: that * substitutes a new vm_file, and we don't then want bin_vm_ops. */ if (vma->vm_file != file) goto out_put; rc = -EINVAL; if (of->mmapped && of->vm_ops != vma->vm_ops) goto out_put; /* * It is not possible to successfully wrap close. * So error if someone is trying to use close. 
*/ if (vma->vm_ops && vma->vm_ops->close) goto out_put; rc = 0; if (!of->mmapped) { of->mmapped = true; of_on(of)->nr_mmapped++; of->vm_ops = vma->vm_ops; } vma->vm_ops = &kernfs_vm_ops; out_put: kernfs_put_active(of->kn); out_unlock: mutex_unlock(&of->mutex); return rc; } /** * kernfs_get_open_node - get or create kernfs_open_node * @kn: target kernfs_node * @of: kernfs_open_file for this instance of open * * If @kn->attr.open exists, increment its reference count; otherwise, * create one. @of is chained to the files list. * * Locking: * Kernel thread context (may sleep). * * Return: * %0 on success, -errno on failure. */ static int kernfs_get_open_node(struct kernfs_node *kn, struct kernfs_open_file *of) { struct kernfs_open_node *on; struct mutex *mutex; mutex = kernfs_open_file_mutex_lock(kn); on = kernfs_deref_open_node_locked(kn); if (!on) { /* not there, initialize a new one */ on = kzalloc(sizeof(*on), GFP_KERNEL); if (!on) { mutex_unlock(mutex); return -ENOMEM; } atomic_set(&on->event, 1); init_waitqueue_head(&on->poll); INIT_LIST_HEAD(&on->files); rcu_assign_pointer(kn->attr.open, on); } list_add_tail(&of->list, &on->files); if (kn->flags & KERNFS_HAS_RELEASE) on->nr_to_release++; mutex_unlock(mutex); return 0; } /** * kernfs_unlink_open_file - Unlink @of from @kn. * * @kn: target kernfs_node * @of: associated kernfs_open_file * @open_failed: ->open() failed, cancel ->release() * * Unlink @of from list of @kn's associated open files. If list of * associated open files becomes empty, disassociate and free * kernfs_open_node. * * LOCKING: * None. */ static void kernfs_unlink_open_file(struct kernfs_node *kn, struct kernfs_open_file *of, bool open_failed) { struct kernfs_open_node *on; struct mutex *mutex; mutex = kernfs_open_file_mutex_lock(kn); on = kernfs_deref_open_node_locked(kn); if (!on) { mutex_unlock(mutex); return; } if (of) { if (kn->flags & KERNFS_HAS_RELEASE) { WARN_ON_ONCE(of->released == open_failed); if (open_failed) on->nr_to_release--; } if (of->mmapped) on->nr_mmapped--; list_del(&of->list); } if (list_empty(&on->files)) { rcu_assign_pointer(kn->attr.open, NULL); kfree_rcu(on, rcu_head); } mutex_unlock(mutex); } static int kernfs_fop_open(struct inode *inode, struct file *file) { struct kernfs_node *kn = inode->i_private; struct kernfs_root *root = kernfs_root(kn); const struct kernfs_ops *ops; struct kernfs_open_file *of; bool has_read, has_write, has_mmap; int error = -EACCES; if (!kernfs_get_active(kn)) return -ENODEV; ops = kernfs_ops(kn); has_read = ops->seq_show || ops->read || ops->mmap; has_write = ops->write || ops->mmap; has_mmap = ops->mmap; /* see the flag definition for details */ if (root->flags & KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK) { if ((file->f_mode & FMODE_WRITE) && (!(inode->i_mode & S_IWUGO) || !has_write)) goto err_out; if ((file->f_mode & FMODE_READ) && (!(inode->i_mode & S_IRUGO) || !has_read)) goto err_out; } /* allocate a kernfs_open_file for the file */ error = -ENOMEM; of = kzalloc(sizeof(struct kernfs_open_file), GFP_KERNEL); if (!of) goto err_out; /* * The following is done to give a different lockdep key to * @of->mutex for files which implement mmap. This is a rather * crude way to avoid false positive lockdep warning around * mm->mmap_lock - mmap nests @of->mutex under mm->mmap_lock and * reading /sys/block/sda/trace/act_mask grabs sr_mutex, under * which mm->mmap_lock nests, while holding @of->mutex. As each * open file has a separate mutex, it's okay as long as those don't * happen on the same file. 
At this point, we can't easily give * each file a separate locking class. Let's differentiate on * whether the file has mmap or not for now. * * For similar reasons, writable and readonly files are given different * lockdep key, because the writable file /sys/power/resume may call vfs * lookup helpers for arbitrary paths and readonly files can be read by * overlayfs from vfs helpers when sysfs is a lower layer of overalyfs. * * All three cases look the same. They're supposed to * look that way and give @of->mutex different static lockdep keys. */ if (has_mmap) mutex_init(&of->mutex); else if (file->f_mode & FMODE_WRITE) mutex_init(&of->mutex); else mutex_init(&of->mutex); of->kn = kn; of->file = file; /* * Write path needs to atomic_write_len outside active reference. * Cache it in open_file. See kernfs_fop_write_iter() for details. */ of->atomic_write_len = ops->atomic_write_len; error = -EINVAL; /* * ->seq_show is incompatible with ->prealloc, * as seq_read does its own allocation. * ->read must be used instead. */ if (ops->prealloc && ops->seq_show) goto err_free; if (ops->prealloc) { int len = of->atomic_write_len ?: PAGE_SIZE; of->prealloc_buf = kmalloc(len + 1, GFP_KERNEL); error = -ENOMEM; if (!of->prealloc_buf) goto err_free; mutex_init(&of->prealloc_mutex); } /* * Always instantiate seq_file even if read access doesn't use * seq_file or is not requested. This unifies private data access * and readable regular files are the vast majority anyway. */ if (ops->seq_show) error = seq_open(file, &kernfs_seq_ops); else error = seq_open(file, NULL); if (error) goto err_free; of->seq_file = file->private_data; of->seq_file->private = of; /* seq_file clears PWRITE unconditionally, restore it if WRITE */ if (file->f_mode & FMODE_WRITE) file->f_mode |= FMODE_PWRITE; /* make sure we have open node struct */ error = kernfs_get_open_node(kn, of); if (error) goto err_seq_release; if (ops->open) { /* nobody has access to @of yet, skip @of->mutex */ error = ops->open(of); if (error) goto err_put_node; } /* open succeeded, put active references */ kernfs_put_active(kn); return 0; err_put_node: kernfs_unlink_open_file(kn, of, true); err_seq_release: seq_release(inode, file); err_free: kfree(of->prealloc_buf); kfree(of); err_out: kernfs_put_active(kn); return error; } /* used from release/drain to ensure that ->release() is called exactly once */ static void kernfs_release_file(struct kernfs_node *kn, struct kernfs_open_file *of) { /* * @of is guaranteed to have no other file operations in flight and * we just want to synchronize release and drain paths. * @kernfs_open_file_mutex_ptr(kn) is enough. @of->mutex can't be used * here because drain path may be called from places which can * cause circular dependency. */ lockdep_assert_held(kernfs_open_file_mutex_ptr(kn)); if (!of->released) { /* * A file is never detached without being released and we * need to be able to release files which are deactivated * and being drained. Don't use kernfs_ops(). 
*/ kn->attr.ops->release(of); of->released = true; of_on(of)->nr_to_release--; } } static int kernfs_fop_release(struct inode *inode, struct file *filp) { struct kernfs_node *kn = inode->i_private; struct kernfs_open_file *of = kernfs_of(filp); if (kn->flags & KERNFS_HAS_RELEASE) { struct mutex *mutex; mutex = kernfs_open_file_mutex_lock(kn); kernfs_release_file(kn, of); mutex_unlock(mutex); } kernfs_unlink_open_file(kn, of, false); seq_release(inode, filp); kfree(of->prealloc_buf); kfree(of); return 0; } bool kernfs_should_drain_open_files(struct kernfs_node *kn) { struct kernfs_open_node *on; bool ret; /* * @kn being deactivated guarantees that @kn->attr.open can't change * beneath us making the lockless test below safe. */ WARN_ON_ONCE(atomic_read(&kn->active) != KN_DEACTIVATED_BIAS); rcu_read_lock(); on = rcu_dereference(kn->attr.open); ret = on && (on->nr_mmapped || on->nr_to_release); rcu_read_unlock(); return ret; } void kernfs_drain_open_files(struct kernfs_node *kn) { struct kernfs_open_node *on; struct kernfs_open_file *of; struct mutex *mutex; mutex = kernfs_open_file_mutex_lock(kn); on = kernfs_deref_open_node_locked(kn); if (!on) { mutex_unlock(mutex); return; } list_for_each_entry(of, &on->files, list) { struct inode *inode = file_inode(of->file); if (of->mmapped) { unmap_mapping_range(inode->i_mapping, 0, 0, 1); of->mmapped = false; on->nr_mmapped--; } if (kn->flags & KERNFS_HAS_RELEASE) kernfs_release_file(kn, of); } WARN_ON_ONCE(on->nr_mmapped || on->nr_to_release); mutex_unlock(mutex); } /* * Kernfs attribute files are pollable. The idea is that you read * the content and then you use 'poll' or 'select' to wait for * the content to change. When the content changes (assuming the * manager for the kobject supports notification), poll will * return EPOLLERR|EPOLLPRI, and select will return the fd whether * it is waiting for read, write, or exceptions. * Once poll/select indicates that the value has changed, you * need to close and re-open the file, or seek to 0 and read again. * Reminder: this only works for attributes which actively support * it, and it is not possible to test an attribute from userspace * to see if it supports poll (Neither 'poll' nor 'select' return * an appropriate error code). When in doubt, set a suitable timeout value. */ __poll_t kernfs_generic_poll(struct kernfs_open_file *of, poll_table *wait) { struct kernfs_open_node *on = of_on(of); poll_wait(of->file, &on->poll, wait); if (of->event != atomic_read(&on->event)) return DEFAULT_POLLMASK|EPOLLERR|EPOLLPRI; return DEFAULT_POLLMASK; } static __poll_t kernfs_fop_poll(struct file *filp, poll_table *wait) { struct kernfs_open_file *of = kernfs_of(filp); struct kernfs_node *kn = kernfs_dentry_node(filp->f_path.dentry); __poll_t ret; if (!kernfs_get_active(kn)) return DEFAULT_POLLMASK|EPOLLERR|EPOLLPRI; if (kn->attr.ops->poll) ret = kn->attr.ops->poll(of, wait); else ret = kernfs_generic_poll(of, wait); kernfs_put_active(kn); return ret; } static loff_t kernfs_fop_llseek(struct file *file, loff_t offset, int whence) { struct kernfs_open_file *of = kernfs_of(file); const struct kernfs_ops *ops; loff_t ret; /* * @of->mutex nests outside active ref and is primarily to ensure that * the ops aren't called concurrently for the same open file. 
*/ mutex_lock(&of->mutex); if (!kernfs_get_active(of->kn)) { mutex_unlock(&of->mutex); return -ENODEV; } ops = kernfs_ops(of->kn); if (ops->llseek) ret = ops->llseek(of, offset, whence); else ret = generic_file_llseek(file, offset, whence); kernfs_put_active(of->kn); mutex_unlock(&of->mutex); return ret; } static void kernfs_notify_workfn(struct work_struct *work) { struct kernfs_node *kn; struct kernfs_super_info *info; struct kernfs_root *root; repeat: /* pop one off the notify_list */ spin_lock_irq(&kernfs_notify_lock); kn = kernfs_notify_list; if (kn == KERNFS_NOTIFY_EOL) { spin_unlock_irq(&kernfs_notify_lock); return; } kernfs_notify_list = kn->attr.notify_next; kn->attr.notify_next = NULL; spin_unlock_irq(&kernfs_notify_lock); root = kernfs_root(kn); /* kick fsnotify */ down_read(&root->kernfs_supers_rwsem); list_for_each_entry(info, &kernfs_root(kn)->supers, node) { struct kernfs_node *parent; struct inode *p_inode = NULL; struct inode *inode; struct qstr name; /* * We want fsnotify_modify() on @kn but as the * modifications aren't originating from userland don't * have the matching @file available. Look up the inodes * and generate the events manually. */ inode = ilookup(info->sb, kernfs_ino(kn)); if (!inode) continue; name = QSTR(kn->name); parent = kernfs_get_parent(kn); if (parent) { p_inode = ilookup(info->sb, kernfs_ino(parent)); if (p_inode) { fsnotify(FS_MODIFY | FS_EVENT_ON_CHILD, inode, FSNOTIFY_EVENT_INODE, p_inode, &name, inode, 0); iput(p_inode); } kernfs_put(parent); } if (!p_inode) fsnotify_inode(inode, FS_MODIFY); iput(inode); } up_read(&root->kernfs_supers_rwsem); kernfs_put(kn); goto repeat; } /** * kernfs_notify - notify a kernfs file * @kn: file to notify * * Notify @kn such that poll(2) on @kn wakes up. Maybe be called from any * context. */ void kernfs_notify(struct kernfs_node *kn) { static DECLARE_WORK(kernfs_notify_work, kernfs_notify_workfn); unsigned long flags; struct kernfs_open_node *on; if (WARN_ON(kernfs_type(kn) != KERNFS_FILE)) return; /* kick poll immediately */ rcu_read_lock(); on = rcu_dereference(kn->attr.open); if (on) { atomic_inc(&on->event); wake_up_interruptible(&on->poll); } rcu_read_unlock(); /* schedule work to kick fsnotify */ spin_lock_irqsave(&kernfs_notify_lock, flags); if (!kn->attr.notify_next) { kernfs_get(kn); kn->attr.notify_next = kernfs_notify_list; kernfs_notify_list = kn; schedule_work(&kernfs_notify_work); } spin_unlock_irqrestore(&kernfs_notify_lock, flags); } EXPORT_SYMBOL_GPL(kernfs_notify); const struct file_operations kernfs_file_fops = { .read_iter = kernfs_fop_read_iter, .write_iter = kernfs_fop_write_iter, .llseek = kernfs_fop_llseek, .mmap = kernfs_fop_mmap, .open = kernfs_fop_open, .release = kernfs_fop_release, .poll = kernfs_fop_poll, .fsync = noop_fsync, .splice_read = copy_splice_read, .splice_write = iter_file_splice_write, }; /** * __kernfs_create_file - kernfs internal function to create a file * @parent: directory to create the file in * @name: name of the file * @mode: mode of the file * @uid: uid of the file * @gid: gid of the file * @size: size of the file * @ops: kernfs operations for the file * @priv: private data for the file * @ns: optional namespace tag of the file * @key: lockdep key for the file's active_ref, %NULL to disable lockdep * * Return: the created node on success, ERR_PTR() value on error. 
*/ struct kernfs_node *__kernfs_create_file(struct kernfs_node *parent, const char *name, umode_t mode, kuid_t uid, kgid_t gid, loff_t size, const struct kernfs_ops *ops, void *priv, const void *ns, struct lock_class_key *key) { struct kernfs_node *kn; unsigned flags; int rc; flags = KERNFS_FILE; kn = kernfs_new_node(parent, name, (mode & S_IALLUGO) | S_IFREG, uid, gid, flags); if (!kn) return ERR_PTR(-ENOMEM); kn->attr.ops = ops; kn->attr.size = size; kn->ns = ns; kn->priv = priv; #ifdef CONFIG_DEBUG_LOCK_ALLOC if (key) { lockdep_init_map(&kn->dep_map, "kn->active", key, 0); kn->flags |= KERNFS_LOCKDEP; } #endif /* * kn->attr.ops is accessible only while holding active ref. We * need to know whether some ops are implemented outside active * ref. Cache their existence in flags. */ if (ops->seq_show) kn->flags |= KERNFS_HAS_SEQ_SHOW; if (ops->mmap) kn->flags |= KERNFS_HAS_MMAP; if (ops->release) kn->flags |= KERNFS_HAS_RELEASE; rc = kernfs_add_one(kn); if (rc) { kernfs_put(kn); return ERR_PTR(rc); } return kn; } |
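/*
 * Illustrative sketch, not part of kernfs: how a hypothetical caller could
 * publish a read-only attribute through the seq_show path implemented above.
 * The names example_show(), example_ops and example_create_file() are made
 * up for illustration; only struct kernfs_ops, struct kernfs_open_file and
 * __kernfs_create_file() come from this file, and GLOBAL_ROOT_UID/GID come
 * from <linux/uidgid.h>.
 */
static int example_show(struct seq_file *sf, void *v)
{
	/* sf->private is the kernfs_open_file set up by kernfs_fop_open() */
	struct kernfs_open_file *of = sf->private;

	seq_printf(sf, "%s\n", (const char *)of->kn->priv);
	return 0;
}

static const struct kernfs_ops example_ops = {
	.seq_show	= example_show,	/* marks the node KERNFS_HAS_SEQ_SHOW */
};

static struct kernfs_node *example_create_file(struct kernfs_node *parent)
{
	/* world-readable file, no namespace tag, no lockdep key */
	return __kernfs_create_file(parent, "example", 0444,
				    GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, 0,
				    &example_ops, "hello world", NULL, NULL);
}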
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * GSPCA Endpoints (formerly known as AOX) se401 USB Camera sub Driver
 *
 * Copyright (C) 2011 Hans de Goede <hdegoede@redhat.com>
 *
 * Based on the v4l1 se401 driver which is:
 *
 * Copyright (c) 2000 Jeroen B. Vreeken (pe1rxq@amsat.org)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define MODULE_NAME "se401"

#define BULK_SIZE 4096
#define PACKET_SIZE 1024
#define READ_REQ_SIZE 64
#define MAX_MODES ((READ_REQ_SIZE - 6) / 4)
/* The se401 compression algorithm uses a fixed quant factor, which
   can be configured by setting the high nibble of the SE401_OPERATINGMODE
   feature. This needs to exactly match what is in libv4l!
*/ #define SE401_QUANT_FACT 8 #include <linux/input.h> #include <linux/slab.h> #include "gspca.h" #include "se401.h" MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>"); MODULE_DESCRIPTION("Endpoints se401"); MODULE_LICENSE("GPL"); /* exposure change state machine states */ enum { EXPO_CHANGED, EXPO_DROP_FRAME, EXPO_NO_CHANGE, }; /* specific webcam descriptor */ struct sd { struct gspca_dev gspca_dev; /* !! must be the first item */ struct { /* exposure/freq control cluster */ struct v4l2_ctrl *exposure; struct v4l2_ctrl *freq; }; bool has_brightness; struct v4l2_pix_format fmts[MAX_MODES]; int pixels_read; int packet_read; u8 packet[PACKET_SIZE]; u8 restart_stream; u8 button_state; u8 resetlevel; u8 resetlevel_frame_count; int resetlevel_adjust_dir; int expo_change_state; }; static void se401_write_req(struct gspca_dev *gspca_dev, u16 req, u16 value, int silent) { int err; if (gspca_dev->usb_err < 0) return; err = usb_control_msg(gspca_dev->dev, usb_sndctrlpipe(gspca_dev->dev, 0), req, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, value, 0, NULL, 0, 1000); if (err < 0) { if (!silent) pr_err("write req failed req %#04x val %#04x error %d\n", req, value, err); gspca_dev->usb_err = err; } } static void se401_read_req(struct gspca_dev *gspca_dev, u16 req, int silent) { int err; if (gspca_dev->usb_err < 0) return; if (USB_BUF_SZ < READ_REQ_SIZE) { pr_err("USB_BUF_SZ too small!!\n"); gspca_dev->usb_err = -ENOBUFS; return; } err = usb_control_msg(gspca_dev->dev, usb_rcvctrlpipe(gspca_dev->dev, 0), req, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, 0, gspca_dev->usb_buf, READ_REQ_SIZE, 1000); if (err < 0) { if (!silent) pr_err("read req failed req %#04x error %d\n", req, err); gspca_dev->usb_err = err; /* * Make sure the buffer is zeroed to avoid uninitialized * values. 
*/ memset(gspca_dev->usb_buf, 0, READ_REQ_SIZE); } } static void se401_set_feature(struct gspca_dev *gspca_dev, u16 selector, u16 param) { int err; if (gspca_dev->usb_err < 0) return; err = usb_control_msg(gspca_dev->dev, usb_sndctrlpipe(gspca_dev->dev, 0), SE401_REQ_SET_EXT_FEATURE, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, param, selector, NULL, 0, 1000); if (err < 0) { pr_err("set feature failed sel %#04x param %#04x error %d\n", selector, param, err); gspca_dev->usb_err = err; } } static int se401_get_feature(struct gspca_dev *gspca_dev, u16 selector) { int err; if (gspca_dev->usb_err < 0) return gspca_dev->usb_err; if (USB_BUF_SZ < 2) { pr_err("USB_BUF_SZ too small!!\n"); gspca_dev->usb_err = -ENOBUFS; return gspca_dev->usb_err; } err = usb_control_msg(gspca_dev->dev, usb_rcvctrlpipe(gspca_dev->dev, 0), SE401_REQ_GET_EXT_FEATURE, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, selector, gspca_dev->usb_buf, 2, 1000); if (err < 0) { pr_err("get feature failed sel %#04x error %d\n", selector, err); gspca_dev->usb_err = err; return err; } return gspca_dev->usb_buf[0] | (gspca_dev->usb_buf[1] << 8); } static void setbrightness(struct gspca_dev *gspca_dev, s32 val) { /* HDG: this does not seem to do anything on my cam */ se401_write_req(gspca_dev, SE401_REQ_SET_BRT, val, 0); } static void setgain(struct gspca_dev *gspca_dev, s32 val) { u16 gain = 63 - val; /* red color gain */ se401_set_feature(gspca_dev, HV7131_REG_ARCG, gain); /* green color gain */ se401_set_feature(gspca_dev, HV7131_REG_AGCG, gain); /* blue color gain */ se401_set_feature(gspca_dev, HV7131_REG_ABCG, gain); } static void setexposure(struct gspca_dev *gspca_dev, s32 val, s32 freq) { struct sd *sd = (struct sd *) gspca_dev; int integration = val << 6; u8 expose_h, expose_m, expose_l; /* Do this before the set_feature calls, for proper timing wrt the interrupt driven pkt_scan. 
Note we may still race but that is not a big issue, the expo change state machine is merely for avoiding underexposed frames getting send out, if one sneaks through so be it */ sd->expo_change_state = EXPO_CHANGED; if (freq == V4L2_CID_POWER_LINE_FREQUENCY_50HZ) integration = integration - integration % 106667; if (freq == V4L2_CID_POWER_LINE_FREQUENCY_60HZ) integration = integration - integration % 88889; expose_h = (integration >> 16); expose_m = (integration >> 8); expose_l = integration; /* integration time low */ se401_set_feature(gspca_dev, HV7131_REG_TITL, expose_l); /* integration time mid */ se401_set_feature(gspca_dev, HV7131_REG_TITM, expose_m); /* integration time high */ se401_set_feature(gspca_dev, HV7131_REG_TITU, expose_h); } static int sd_config(struct gspca_dev *gspca_dev, const struct usb_device_id *id) { struct sd *sd = (struct sd *)gspca_dev; struct cam *cam = &gspca_dev->cam; u8 *cd = gspca_dev->usb_buf; int i, j, n; int widths[MAX_MODES], heights[MAX_MODES]; /* Read the camera descriptor */ se401_read_req(gspca_dev, SE401_REQ_GET_CAMERA_DESCRIPTOR, 1); if (gspca_dev->usb_err) { /* Sometimes after being idle for a while the se401 won't respond and needs a good kicking */ usb_reset_device(gspca_dev->dev); gspca_dev->usb_err = 0; se401_read_req(gspca_dev, SE401_REQ_GET_CAMERA_DESCRIPTOR, 0); } /* Some cameras start with their LED on */ se401_write_req(gspca_dev, SE401_REQ_LED_CONTROL, 0, 0); if (gspca_dev->usb_err) return gspca_dev->usb_err; if (cd[1] != 0x41) { pr_err("Wrong descriptor type\n"); return -ENODEV; } if (!(cd[2] & SE401_FORMAT_BAYER)) { pr_err("Bayer format not supported!\n"); return -ENODEV; } if (cd[3]) pr_info("ExtraFeatures: %d\n", cd[3]); n = cd[4] | (cd[5] << 8); if (n > MAX_MODES) { pr_err("Too many frame sizes\n"); return -ENODEV; } for (i = 0; i < n ; i++) { widths[i] = cd[6 + i * 4 + 0] | (cd[6 + i * 4 + 1] << 8); heights[i] = cd[6 + i * 4 + 2] | (cd[6 + i * 4 + 3] << 8); } for (i = 0; i < n ; i++) { sd->fmts[i].width = widths[i]; sd->fmts[i].height = heights[i]; sd->fmts[i].field = V4L2_FIELD_NONE; sd->fmts[i].colorspace = V4L2_COLORSPACE_SRGB; sd->fmts[i].priv = 1; /* janggu compression only works for 1/4th or 1/16th res */ for (j = 0; j < n; j++) { if (widths[j] / 2 == widths[i] && heights[j] / 2 == heights[i]) { sd->fmts[i].priv = 2; break; } } /* 1/16th if available too is better then 1/4th, because we then use a larger area of the sensor */ for (j = 0; j < n; j++) { if (widths[j] / 4 == widths[i] && heights[j] / 4 == heights[i]) { sd->fmts[i].priv = 4; break; } } if (sd->fmts[i].priv == 1) { /* Not a 1/4th or 1/16th res, use bayer */ sd->fmts[i].pixelformat = V4L2_PIX_FMT_SBGGR8; sd->fmts[i].bytesperline = widths[i]; sd->fmts[i].sizeimage = widths[i] * heights[i]; pr_info("Frame size: %dx%d bayer\n", widths[i], heights[i]); } else { /* Found a match use janggu compression */ sd->fmts[i].pixelformat = V4L2_PIX_FMT_SE401; sd->fmts[i].bytesperline = 0; sd->fmts[i].sizeimage = widths[i] * heights[i] * 3; pr_info("Frame size: %dx%d 1/%dth janggu\n", widths[i], heights[i], sd->fmts[i].priv * sd->fmts[i].priv); } } cam->cam_mode = sd->fmts; cam->nmodes = n; cam->bulk = 1; cam->bulk_size = BULK_SIZE; cam->bulk_nurbs = 4; sd->resetlevel = 0x2d; /* Set initial resetlevel */ /* See if the camera supports brightness */ se401_read_req(gspca_dev, SE401_REQ_GET_BRT, 1); sd->has_brightness = !!gspca_dev->usb_err; gspca_dev->usb_err = 0; return 0; } /* this function is called at probe and resume time */ static int sd_init(struct gspca_dev *gspca_dev) { 
return 0; } /* function called at start time before URB creation */ static int sd_isoc_init(struct gspca_dev *gspca_dev) { gspca_dev->alt = 1; /* Ignore the bogus isoc alt settings */ return gspca_dev->usb_err; } /* -- start the camera -- */ static int sd_start(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *)gspca_dev; int mult = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv; int mode = 0; se401_write_req(gspca_dev, SE401_REQ_CAMERA_POWER, 1, 1); if (gspca_dev->usb_err) { /* Sometimes after being idle for a while the se401 won't respond and needs a good kicking */ usb_reset_device(gspca_dev->dev); gspca_dev->usb_err = 0; se401_write_req(gspca_dev, SE401_REQ_CAMERA_POWER, 1, 0); } se401_write_req(gspca_dev, SE401_REQ_LED_CONTROL, 1, 0); se401_set_feature(gspca_dev, HV7131_REG_MODE_B, 0x05); /* set size + mode */ se401_write_req(gspca_dev, SE401_REQ_SET_WIDTH, gspca_dev->pixfmt.width * mult, 0); se401_write_req(gspca_dev, SE401_REQ_SET_HEIGHT, gspca_dev->pixfmt.height * mult, 0); /* * HDG: disabled this as it does not seem to do anything * se401_write_req(gspca_dev, SE401_REQ_SET_OUTPUT_MODE, * SE401_FORMAT_BAYER, 0); */ switch (mult) { case 1: /* Raw bayer */ mode = 0x03; break; case 2: /* 1/4th janggu */ mode = SE401_QUANT_FACT << 4; break; case 4: /* 1/16th janggu */ mode = (SE401_QUANT_FACT << 4) | 0x02; break; } se401_set_feature(gspca_dev, SE401_OPERATINGMODE, mode); se401_set_feature(gspca_dev, HV7131_REG_ARLV, sd->resetlevel); sd->packet_read = 0; sd->pixels_read = 0; sd->restart_stream = 0; sd->resetlevel_frame_count = 0; sd->resetlevel_adjust_dir = 0; sd->expo_change_state = EXPO_NO_CHANGE; se401_write_req(gspca_dev, SE401_REQ_START_CONTINUOUS_CAPTURE, 0, 0); return gspca_dev->usb_err; } static void sd_stopN(struct gspca_dev *gspca_dev) { se401_write_req(gspca_dev, SE401_REQ_STOP_CONTINUOUS_CAPTURE, 0, 0); se401_write_req(gspca_dev, SE401_REQ_LED_CONTROL, 0, 0); se401_write_req(gspca_dev, SE401_REQ_CAMERA_POWER, 0, 0); } static void sd_dq_callback(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *)gspca_dev; unsigned int ahrc, alrc; int oldreset, adjust_dir; /* Restart the stream if requested do so by pkt_scan */ if (sd->restart_stream) { sd_stopN(gspca_dev); sd_start(gspca_dev); sd->restart_stream = 0; } /* Automatically adjust sensor reset level Hyundai have some really nice docs about this and other sensor related stuff on their homepage: www.hei.co.kr */ sd->resetlevel_frame_count++; if (sd->resetlevel_frame_count < 20) return; /* For some reason this normally read-only register doesn't get reset to zero after reading them just once... */ se401_get_feature(gspca_dev, HV7131_REG_HIREFNOH); se401_get_feature(gspca_dev, HV7131_REG_HIREFNOL); se401_get_feature(gspca_dev, HV7131_REG_LOREFNOH); se401_get_feature(gspca_dev, HV7131_REG_LOREFNOL); ahrc = 256*se401_get_feature(gspca_dev, HV7131_REG_HIREFNOH) + se401_get_feature(gspca_dev, HV7131_REG_HIREFNOL); alrc = 256*se401_get_feature(gspca_dev, HV7131_REG_LOREFNOH) + se401_get_feature(gspca_dev, HV7131_REG_LOREFNOL); /* Not an exact science, but it seems to work pretty well... 
*/ oldreset = sd->resetlevel; if (alrc > 10) { while (alrc >= 10 && sd->resetlevel < 63) { sd->resetlevel++; alrc /= 2; } } else if (ahrc > 20) { while (ahrc >= 20 && sd->resetlevel > 0) { sd->resetlevel--; ahrc /= 2; } } /* Detect ping-pong-ing and halve adjustment to avoid overshoot */ if (sd->resetlevel > oldreset) adjust_dir = 1; else adjust_dir = -1; if (sd->resetlevel_adjust_dir && sd->resetlevel_adjust_dir != adjust_dir) sd->resetlevel = oldreset + (sd->resetlevel - oldreset) / 2; if (sd->resetlevel != oldreset) { sd->resetlevel_adjust_dir = adjust_dir; se401_set_feature(gspca_dev, HV7131_REG_ARLV, sd->resetlevel); } sd->resetlevel_frame_count = 0; } static void sd_complete_frame(struct gspca_dev *gspca_dev, u8 *data, int len) { struct sd *sd = (struct sd *)gspca_dev; switch (sd->expo_change_state) { case EXPO_CHANGED: /* The exposure was changed while this frame was being send, so this frame is ok */ sd->expo_change_state = EXPO_DROP_FRAME; break; case EXPO_DROP_FRAME: /* The exposure was changed while this frame was being captured, drop it! */ gspca_dev->last_packet_type = DISCARD_PACKET; sd->expo_change_state = EXPO_NO_CHANGE; break; case EXPO_NO_CHANGE: break; } gspca_frame_add(gspca_dev, LAST_PACKET, data, len); } static void sd_pkt_scan_janggu(struct gspca_dev *gspca_dev, u8 *data, int len) { struct sd *sd = (struct sd *)gspca_dev; int imagesize = gspca_dev->pixfmt.width * gspca_dev->pixfmt.height; int i, plen, bits, pixels, info, count; if (sd->restart_stream) return; /* Sometimes a 1024 bytes garbage bulk packet is send between frames */ if (gspca_dev->last_packet_type == LAST_PACKET && len == 1024) { gspca_dev->last_packet_type = DISCARD_PACKET; return; } i = 0; while (i < len) { /* Read header if not already be present from prev bulk pkt */ if (sd->packet_read < 4) { count = 4 - sd->packet_read; if (count > len - i) count = len - i; memcpy(&sd->packet[sd->packet_read], &data[i], count); sd->packet_read += count; i += count; if (sd->packet_read < 4) break; } bits = sd->packet[3] + (sd->packet[2] << 8); pixels = sd->packet[1] + ((sd->packet[0] & 0x3f) << 8); info = (sd->packet[0] & 0xc0) >> 6; plen = ((bits + 47) >> 4) << 1; /* Sanity checks */ if (plen > 1024) { pr_err("invalid packet len %d restarting stream\n", plen); goto error; } if (info == 3) { pr_err("unknown frame info value restarting stream\n"); goto error; } /* Read (remainder of) packet contents */ count = plen - sd->packet_read; if (count > len - i) count = len - i; memcpy(&sd->packet[sd->packet_read], &data[i], count); sd->packet_read += count; i += count; if (sd->packet_read < plen) break; sd->pixels_read += pixels; sd->packet_read = 0; switch (info) { case 0: /* Frame data */ gspca_frame_add(gspca_dev, INTER_PACKET, sd->packet, plen); break; case 1: /* EOF */ if (sd->pixels_read != imagesize) { pr_err("frame size %d expected %d\n", sd->pixels_read, imagesize); goto error; } sd_complete_frame(gspca_dev, sd->packet, plen); return; /* Discard the rest of the bulk packet !! 
*/ case 2: /* SOF */ gspca_frame_add(gspca_dev, FIRST_PACKET, sd->packet, plen); sd->pixels_read = pixels; break; } } return; error: sd->restart_stream = 1; /* Give userspace a 0 bytes frame, so our dq callback gets called and it can restart the stream */ gspca_frame_add(gspca_dev, FIRST_PACKET, NULL, 0); gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0); } static void sd_pkt_scan_bayer(struct gspca_dev *gspca_dev, u8 *data, int len) { struct cam *cam = &gspca_dev->cam; int imagesize = cam->cam_mode[gspca_dev->curr_mode].sizeimage; if (gspca_dev->image_len == 0) { gspca_frame_add(gspca_dev, FIRST_PACKET, data, len); return; } if (gspca_dev->image_len + len >= imagesize) { sd_complete_frame(gspca_dev, data, len); return; } gspca_frame_add(gspca_dev, INTER_PACKET, data, len); } static void sd_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, int len) { int mult = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv; if (len == 0) return; if (mult == 1) /* mult == 1 means raw bayer */ sd_pkt_scan_bayer(gspca_dev, data, len); else sd_pkt_scan_janggu(gspca_dev, data, len); } #if IS_ENABLED(CONFIG_INPUT) static int sd_int_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, int len) { struct sd *sd = (struct sd *)gspca_dev; u8 state; if (len != 2) return -EINVAL; switch (data[0]) { case 0: case 1: state = data[0]; break; default: return -EINVAL; } if (sd->button_state != state) { input_report_key(gspca_dev->input_dev, KEY_CAMERA, state); input_sync(gspca_dev->input_dev); sd->button_state = state; } return 0; } #endif static int sd_s_ctrl(struct v4l2_ctrl *ctrl) { struct gspca_dev *gspca_dev = container_of(ctrl->handler, struct gspca_dev, ctrl_handler); struct sd *sd = (struct sd *)gspca_dev; gspca_dev->usb_err = 0; if (!gspca_dev->streaming) return 0; switch (ctrl->id) { case V4L2_CID_BRIGHTNESS: setbrightness(gspca_dev, ctrl->val); break; case V4L2_CID_GAIN: setgain(gspca_dev, ctrl->val); break; case V4L2_CID_EXPOSURE: setexposure(gspca_dev, ctrl->val, sd->freq->val); break; } return gspca_dev->usb_err; } static const struct v4l2_ctrl_ops sd_ctrl_ops = { .s_ctrl = sd_s_ctrl, }; static int sd_init_controls(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *)gspca_dev; struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler; gspca_dev->vdev.ctrl_handler = hdl; v4l2_ctrl_handler_init(hdl, 4); if (sd->has_brightness) v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_BRIGHTNESS, 0, 255, 1, 15); /* max is really 63 but > 50 is not pretty */ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_GAIN, 0, 50, 1, 25); sd->exposure = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_EXPOSURE, 0, 32767, 1, 15000); sd->freq = v4l2_ctrl_new_std_menu(hdl, &sd_ctrl_ops, V4L2_CID_POWER_LINE_FREQUENCY, V4L2_CID_POWER_LINE_FREQUENCY_60HZ, 0, 0); if (hdl->error) { pr_err("Could not initialize controls\n"); return hdl->error; } v4l2_ctrl_cluster(2, &sd->exposure); return 0; } /* sub-driver description */ static const struct sd_desc sd_desc = { .name = MODULE_NAME, .config = sd_config, .init = sd_init, .init_controls = sd_init_controls, .isoc_init = sd_isoc_init, .start = sd_start, .stopN = sd_stopN, .dq_callback = sd_dq_callback, .pkt_scan = sd_pkt_scan, #if IS_ENABLED(CONFIG_INPUT) .int_pkt_scan = sd_int_pkt_scan, #endif }; /* -- module initialisation -- */ static const struct usb_device_id device_table[] = { {USB_DEVICE(0x03e8, 0x0004)}, /* Endpoints/Aox SE401 */ {USB_DEVICE(0x0471, 0x030b)}, /* Philips PCVC665K */ {USB_DEVICE(0x047d, 0x5001)}, /* Kensington 67014 */ {USB_DEVICE(0x047d, 0x5002)}, /* Kensington 6701(5/7) */ 
{USB_DEVICE(0x047d, 0x5003)}, /* Kensington 67016 */ {} }; MODULE_DEVICE_TABLE(usb, device_table); /* -- device connect -- */ static int sd_probe(struct usb_interface *intf, const struct usb_device_id *id) { return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd), THIS_MODULE); } static int sd_pre_reset(struct usb_interface *intf) { return 0; } static int sd_post_reset(struct usb_interface *intf) { return 0; } static struct usb_driver sd_driver = { .name = MODULE_NAME, .id_table = device_table, .probe = sd_probe, .disconnect = gspca_disconnect, #ifdef CONFIG_PM .suspend = gspca_suspend, .resume = gspca_resume, .reset_resume = gspca_resume, #endif .pre_reset = sd_pre_reset, .post_reset = sd_post_reset, }; module_usb_driver(sd_driver); |
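/*
 * Illustrative sketch, not part of the driver: a stand-alone user-space
 * decoder for the 4-byte janggu packet header, mirroring the field
 * extraction done in sd_pkt_scan_janggu() above. struct janggu_hdr and
 * decode_janggu_hdr() are made-up names; the bit layout is taken from the
 * driver code.
 */
#include <stdio.h>

struct janggu_hdr {
	int bits;	/* payload length in bits */
	int pixels;	/* number of pixels covered by this packet */
	int info;	/* 0 = frame data, 1 = EOF, 2 = SOF, 3 = invalid */
	int plen;	/* whole packet length in bytes, header included */
};

static struct janggu_hdr decode_janggu_hdr(const unsigned char p[4])
{
	struct janggu_hdr h;

	h.bits   = p[3] + (p[2] << 8);
	h.pixels = p[1] + ((p[0] & 0x3f) << 8);
	h.info   = (p[0] & 0xc0) >> 6;
	/* the +47 seems to cover the 32-bit header plus rounding up to 16-bit words */
	h.plen   = ((h.bits + 47) >> 4) << 1;
	return h;
}

int main(void)
{
	const unsigned char hdr[4] = { 0x81, 0x40, 0x02, 0x30 };
	struct janggu_hdr h = decode_janggu_hdr(hdr);

	printf("bits=%d pixels=%d info=%d plen=%d\n",
	       h.bits, h.pixels, h.info, h.plen);
	return 0;
}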
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * An extensible bitmap is a bitmap that supports an
 * arbitrary number of bits. Extensible bitmaps are
 * used to represent sets of values, such as types,
 * roles, categories, and classes.
 *
 * Each extensible bitmap is implemented as a linked
 * list of bitmap nodes, where each bitmap node has
 * an explicitly specified starting bit position within
 * the total bitmap.
 *
 * Author : Stephen Smalley, <stephen.smalley.work@gmail.com>
 */
#ifndef _SS_EBITMAP_H_
#define _SS_EBITMAP_H_

#include <net/netlabel.h>

#ifdef CONFIG_64BIT
#define EBITMAP_NODE_SIZE 64
#else
#define EBITMAP_NODE_SIZE 32
#endif

#define EBITMAP_UNIT_NUMS \
	((EBITMAP_NODE_SIZE - sizeof(void *) - sizeof(u32)) / \
	 sizeof(unsigned long))
#define EBITMAP_UNIT_SIZE BITS_PER_LONG
#define EBITMAP_SIZE (EBITMAP_UNIT_NUMS * EBITMAP_UNIT_SIZE)
#define EBITMAP_BIT 1UL
#define EBITMAP_SHIFT_UNIT_SIZE(x) \
	(((x) >> EBITMAP_UNIT_SIZE / 2) >> EBITMAP_UNIT_SIZE / 2)

struct ebitmap_node {
	struct ebitmap_node *next;
	unsigned long maps[EBITMAP_UNIT_NUMS];
	u32 startbit;
};

struct ebitmap {
	struct ebitmap_node *node; /* first node in the bitmap */
	u32 highbit;	/* highest position in the total bitmap */
};

#define ebitmap_length(e) ((e)->highbit)

static inline u32 ebitmap_start_positive(const struct ebitmap *e,
					 struct ebitmap_node **n)
{
	u32 ofs;

	for (*n = e->node; *n; *n = (*n)->next) {
		ofs = find_first_bit((*n)->maps, EBITMAP_SIZE);
		if (ofs < EBITMAP_SIZE)
			return (*n)->startbit + ofs;
	}
	return ebitmap_length(e);
}

static inline void ebitmap_init(struct ebitmap *e)
{
	memset(e, 0, sizeof(*e));
}

static inline u32 ebitmap_next_positive(const struct ebitmap *e,
					struct ebitmap_node **n, u32 bit)
{
	u32 ofs;

	ofs = find_next_bit((*n)->maps, EBITMAP_SIZE, bit - (*n)->startbit + 1);
	if (ofs < EBITMAP_SIZE)
		return ofs + (*n)->startbit;

	for (*n = (*n)->next; *n; *n = (*n)->next) {
		ofs = find_first_bit((*n)->maps, EBITMAP_SIZE);
		if (ofs < EBITMAP_SIZE)
			return ofs + (*n)->startbit;
	}
	return ebitmap_length(e);
}

#define EBITMAP_NODE_INDEX(node, bit) \
	(((bit) - (node)->startbit) / EBITMAP_UNIT_SIZE)
#define EBITMAP_NODE_OFFSET(node, bit) \
	(((bit) - (node)->startbit) % EBITMAP_UNIT_SIZE)

static inline int ebitmap_node_get_bit(const struct ebitmap_node *n, u32 bit)
{
	u32 index = EBITMAP_NODE_INDEX(n, bit);
	u32 ofs = EBITMAP_NODE_OFFSET(n, bit);

	BUG_ON(index >= EBITMAP_UNIT_NUMS);
	if ((n->maps[index] & (EBITMAP_BIT << ofs)))
		return 1;
	return 0;
}

static inline void ebitmap_node_set_bit(struct ebitmap_node *n, u32 bit)
{
	u32 index = EBITMAP_NODE_INDEX(n, bit);
	u32 ofs = EBITMAP_NODE_OFFSET(n, bit);

	BUG_ON(index >= EBITMAP_UNIT_NUMS);
	n->maps[index] |= (EBITMAP_BIT << ofs);
}

static inline void ebitmap_node_clr_bit(struct ebitmap_node *n, u32 bit)
{
	u32 index = EBITMAP_NODE_INDEX(n, bit);
	u32 ofs = EBITMAP_NODE_OFFSET(n, bit);

	BUG_ON(index >= EBITMAP_UNIT_NUMS);
	n->maps[index] &= ~(EBITMAP_BIT << ofs);
}

#define ebitmap_for_each_positive_bit(e, n,
bit) \ for ((bit) = ebitmap_start_positive(e, &(n)); \ (bit) < ebitmap_length(e); \ (bit) = ebitmap_next_positive(e, &(n), bit)) bool ebitmap_equal(const struct ebitmap *e1, const struct ebitmap *e2); int ebitmap_cpy(struct ebitmap *dst, const struct ebitmap *src); int ebitmap_and(struct ebitmap *dst, const struct ebitmap *e1, const struct ebitmap *e2); int ebitmap_contains(const struct ebitmap *e1, const struct ebitmap *e2, u32 last_e2bit); int ebitmap_get_bit(const struct ebitmap *e, u32 bit); int ebitmap_set_bit(struct ebitmap *e, u32 bit, int value); void ebitmap_destroy(struct ebitmap *e); struct policy_file; int ebitmap_read(struct ebitmap *e, struct policy_file *fp); int ebitmap_write(const struct ebitmap *e, struct policy_file *fp); u32 ebitmap_hash(const struct ebitmap *e, u32 hash); #ifdef CONFIG_NETLABEL int ebitmap_netlbl_export(struct ebitmap *ebmap, struct netlbl_lsm_catmap **catmap); int ebitmap_netlbl_import(struct ebitmap *ebmap, struct netlbl_lsm_catmap *catmap); #else static inline int ebitmap_netlbl_export(struct ebitmap *ebmap, struct netlbl_lsm_catmap **catmap) { return -ENOMEM; } static inline int ebitmap_netlbl_import(struct ebitmap *ebmap, struct netlbl_lsm_catmap *catmap) { return -ENOMEM; } #endif #endif /* _SS_EBITMAP_H_ */ |
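/*
 * Illustrative sketch, not part of this header: walking every set bit of an
 * ebitmap with the iterator defined above. example_count_bits() is a made-up
 * name for illustration; only the ebitmap API itself comes from this header.
 */
static inline u32 example_count_bits(const struct ebitmap *e)
{
	struct ebitmap_node *n;
	u32 bit, count = 0;

	ebitmap_for_each_positive_bit(e, n, bit)
		count++;
	return count;
}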
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _UFS_UFS_H
#define _UFS_UFS_H 1

#ifdef pr_fmt
#undef pr_fmt
#endif

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define UFS_MAX_GROUP_LOADED 8
#define UFS_CGNO_EMPTY ((unsigned)-1)

struct ufs_sb_private_info;
struct ufs_cg_private_info;
struct ufs_csum;

struct ufs_sb_info {
	struct ufs_sb_private_info * s_uspi;
	struct ufs_csum * s_csp;
	unsigned s_bytesex;
	unsigned s_flags;
	struct buffer_head ** s_ucg;
	struct ufs_cg_private_info * s_ucpi[UFS_MAX_GROUP_LOADED];
	unsigned s_cgno[UFS_MAX_GROUP_LOADED];
	unsigned short s_cg_loaded;
	unsigned s_mount_opt;
	struct super_block *sb;
	int work_queued;		/* non-zero if the delayed work is queued */
	struct delayed_work sync_work;	/* FS sync delayed work */
	spinlock_t work_lock;		/* protects sync_work and work_queued */
	struct mutex s_lock;
};

struct ufs_inode_info {
	union {
		__fs32	i_data[15];
		__u8	i_symlink[2 * 4 * 15];
		__fs64	u2_i_data[15];
	} i_u1;
	__u32	i_flags;
	__u32	i_shadow;
	__u32	i_unused1;
	__u32	i_unused2;
	__u32	i_oeftflag;
	__u16	i_osync;
	__u64	i_lastfrag;
	seqlock_t meta_lock;
	struct mutex	truncate_mutex;
	__u32	i_dir_start_lookup;
	struct inode vfs_inode;
};

/* mount options */
#define UFS_MOUNT_ONERROR		0x0000000F
#define UFS_MOUNT_ONERROR_PANIC		0x00000001
#define UFS_MOUNT_ONERROR_LOCK		0x00000002
#define UFS_MOUNT_ONERROR_UMOUNT	0x00000004
#define UFS_MOUNT_ONERROR_REPAIR	0x00000008
#define UFS_MOUNT_UFSTYPE		0x0000FFF0
#define UFS_MOUNT_UFSTYPE_OLD		0x00000010
#define UFS_MOUNT_UFSTYPE_44BSD		0x00000020
#define UFS_MOUNT_UFSTYPE_SUN		0x00000040
#define UFS_MOUNT_UFSTYPE_NEXTSTEP	0x00000080
#define UFS_MOUNT_UFSTYPE_NEXTSTEP_CD	0x00000100
#define UFS_MOUNT_UFSTYPE_OPENSTEP	0x00000200
#define UFS_MOUNT_UFSTYPE_SUNx86	0x00000400
#define UFS_MOUNT_UFSTYPE_HP		0x00000800
#define UFS_MOUNT_UFSTYPE_UFS2		0x00001000
#define UFS_MOUNT_UFSTYPE_SUNOS		0x00002000

#define ufs_clear_opt(o,opt)	o &= ~UFS_MOUNT_##opt
#define ufs_set_opt(o,opt)	o |= UFS_MOUNT_##opt
#define ufs_test_opt(o,opt)	((o) & UFS_MOUNT_##opt)

/*
 * Debug code
 */
#ifdef CONFIG_UFS_DEBUG
# define UFSD(f, a...) { \
	pr_debug("UFSD (%s, %d): %s:", \
		__FILE__, __LINE__, __func__); \
	pr_debug(f, ## a); \
}
#else
# define UFSD(f, a...)
/**/ #endif /* balloc.c */ void ufs_free_fragments (struct inode *, u64 fragment, unsigned count); void ufs_free_blocks (struct inode *, u64 fragment, unsigned count); u64 ufs_new_fragments(struct inode *, void *, u64 fragment, u64 goal, unsigned count, int *err, struct folio *); /* cylinder.c */ extern struct ufs_cg_private_info * ufs_load_cylinder (struct super_block *, unsigned); extern void ufs_put_cylinder (struct super_block *, unsigned); /* dir.c */ extern const struct inode_operations ufs_dir_inode_operations; int ufs_add_link(struct dentry *, struct inode *); ino_t ufs_inode_by_name(struct inode *, const struct qstr *); int ufs_make_empty(struct inode *, struct inode *); struct ufs_dir_entry *ufs_find_entry(struct inode *, const struct qstr *, struct folio **); int ufs_delete_entry(struct inode *, struct ufs_dir_entry *, struct folio *); int ufs_empty_dir(struct inode *); struct ufs_dir_entry *ufs_dotdot(struct inode *, struct folio **); int ufs_set_link(struct inode *dir, struct ufs_dir_entry *de, struct folio *folio, struct inode *inode, bool update_times); /* file.c */ extern const struct inode_operations ufs_file_inode_operations; extern const struct file_operations ufs_file_operations; extern const struct address_space_operations ufs_aops; /* ialloc.c */ extern void ufs_free_inode (struct inode *inode); extern struct inode * ufs_new_inode (struct inode *, umode_t); /* inode.c */ extern struct inode *ufs_iget(struct super_block *, unsigned long); extern int ufs_write_inode (struct inode *, struct writeback_control *); extern int ufs_sync_inode (struct inode *); extern void ufs_evict_inode (struct inode *); extern int ufs_setattr(struct mnt_idmap *idmap, struct dentry *dentry, struct iattr *attr); /* namei.c */ extern const struct file_operations ufs_dir_operations; /* super.c */ extern __printf(3, 4) void ufs_warning(struct super_block *, const char *, const char *, ...); extern __printf(3, 4) void ufs_error(struct super_block *, const char *, const char *, ...); extern __printf(3, 4) void ufs_panic(struct super_block *, const char *, const char *, ...); void ufs_mark_sb_dirty(struct super_block *sb); static inline struct ufs_sb_info *UFS_SB(struct super_block *sb) { return sb->s_fs_info; } static inline struct ufs_inode_info *UFS_I(struct inode *inode) { return container_of(inode, struct ufs_inode_info, vfs_inode); } /* * Give cylinder group number for a file system block. * Give cylinder group block number for a file system block. */ /* #define ufs_dtog(d) ((d) / uspi->s_fpg) */ static inline u64 ufs_dtog(struct ufs_sb_private_info * uspi, u64 b) { do_div(b, uspi->s_fpg); return b; } /* #define ufs_dtogd(d) ((d) % uspi->s_fpg) */ static inline u32 ufs_dtogd(struct ufs_sb_private_info * uspi, u64 b) { return do_div(b, uspi->s_fpg); } #endif /* _UFS_UFS_H */ |
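/*
 * Illustrative sketch, not part of this header: splitting a fragment number
 * into its cylinder group and the offset inside that group using the two
 * helpers above (the commented-out macro forms of ufs_dtog()/ufs_dtogd()).
 * example_locate_fragment() is a made-up name for illustration.
 */
static inline void example_locate_fragment(struct ufs_sb_private_info *uspi,
					   u64 fragment, u64 *cg, u32 *cg_off)
{
	*cg     = ufs_dtog(uspi, fragment);	/* fragment / uspi->s_fpg */
	*cg_off = ufs_dtogd(uspi, fragment);	/* fragment % uspi->s_fpg */
}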
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Landlock LSM - System call implementations and user space interfaces
 *
 * Copyright © 2016-2020 Mickaël Salaün <mic@digikod.net>
 * Copyright © 2018-2020 ANSSI
 */

#include <asm/current.h>
#include <linux/anon_inodes.h>
#include <linux/build_bug.h>
#include <linux/capability.h>
#include <linux/cleanup.h>
#include <linux/compiler_types.h>
#include <linux/dcache.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/limits.h>
#include <linux/mount.h>
#include <linux/path.h>
#include <linux/sched.h>
#include <linux/security.h>
#include <linux/stddef.h>
#include <linux/syscalls.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <uapi/linux/landlock.h>

#include "cred.h"
#include "fs.h"
#include "limits.h"
#include "net.h"
#include "ruleset.h"
#include "setup.h"

static bool is_initialized(void)
{
    if (likely(landlock_initialized))
        return true;

    pr_warn_once(
        "Disabled but requested by user space. "
        "You should enable Landlock at boot time: "
        "https://docs.kernel.org/userspace-api/landlock.html#boot-time-configuration\n");
    return false;
}

/**
 * copy_min_struct_from_user - Safe future-proof argument copying
 *
 * Extend copy_struct_from_user() to check for consistent user buffer.
 *
 * @dst: Kernel space pointer or NULL.
 * @ksize: Actual size of the data pointed to by @dst.
 * @ksize_min: Minimal required size to be copied.
 * @src: User space pointer or NULL.
* @usize: (Alleged) size of the data pointed to by @src. */ static __always_inline int copy_min_struct_from_user(void *const dst, const size_t ksize, const size_t ksize_min, const void __user *const src, const size_t usize) { /* Checks buffer inconsistencies. */ BUILD_BUG_ON(!dst); if (!src) return -EFAULT; /* Checks size ranges. */ BUILD_BUG_ON(ksize <= 0); BUILD_BUG_ON(ksize < ksize_min); if (usize < ksize_min) return -EINVAL; if (usize > PAGE_SIZE) return -E2BIG; /* Copies user buffer and fills with zeros. */ return copy_struct_from_user(dst, ksize, src, usize); } /* * This function only contains arithmetic operations with constants, leading to * BUILD_BUG_ON(). The related code is evaluated and checked at build time, * but it is then ignored thanks to compiler optimizations. */ static void build_check_abi(void) { struct landlock_ruleset_attr ruleset_attr; struct landlock_path_beneath_attr path_beneath_attr; struct landlock_net_port_attr net_port_attr; size_t ruleset_size, path_beneath_size, net_port_size; /* * For each user space ABI structures, first checks that there is no * hole in them, then checks that all architectures have the same * struct size. */ ruleset_size = sizeof(ruleset_attr.handled_access_fs); ruleset_size += sizeof(ruleset_attr.handled_access_net); ruleset_size += sizeof(ruleset_attr.scoped); BUILD_BUG_ON(sizeof(ruleset_attr) != ruleset_size); BUILD_BUG_ON(sizeof(ruleset_attr) != 24); path_beneath_size = sizeof(path_beneath_attr.allowed_access); path_beneath_size += sizeof(path_beneath_attr.parent_fd); BUILD_BUG_ON(sizeof(path_beneath_attr) != path_beneath_size); BUILD_BUG_ON(sizeof(path_beneath_attr) != 12); net_port_size = sizeof(net_port_attr.allowed_access); net_port_size += sizeof(net_port_attr.port); BUILD_BUG_ON(sizeof(net_port_attr) != net_port_size); BUILD_BUG_ON(sizeof(net_port_attr) != 16); } /* Ruleset handling */ static int fop_ruleset_release(struct inode *const inode, struct file *const filp) { struct landlock_ruleset *ruleset = filp->private_data; landlock_put_ruleset(ruleset); return 0; } static ssize_t fop_dummy_read(struct file *const filp, char __user *const buf, const size_t size, loff_t *const ppos) { /* Dummy handler to enable FMODE_CAN_READ. */ return -EINVAL; } static ssize_t fop_dummy_write(struct file *const filp, const char __user *const buf, const size_t size, loff_t *const ppos) { /* Dummy handler to enable FMODE_CAN_WRITE. */ return -EINVAL; } /* * A ruleset file descriptor enables to build a ruleset by adding (i.e. * writing) rule after rule, without relying on the task's context. This * reentrant design is also used in a read way to enforce the ruleset on the * current task. */ static const struct file_operations ruleset_fops = { .release = fop_ruleset_release, .read = fop_dummy_read, .write = fop_dummy_write, }; #define LANDLOCK_ABI_VERSION 6 /** * sys_landlock_create_ruleset - Create a new ruleset * * @attr: Pointer to a &struct landlock_ruleset_attr identifying the scope of * the new ruleset. * @size: Size of the pointed &struct landlock_ruleset_attr (needed for * backward and forward compatibility). * @flags: Supported value: %LANDLOCK_CREATE_RULESET_VERSION. * * This system call enables to create a new Landlock ruleset, and returns the * related file descriptor on success. * * If @flags is %LANDLOCK_CREATE_RULESET_VERSION and @attr is NULL and @size is * 0, then the returned value is the highest supported Landlock ABI version * (starting at 1). 
* * Possible returned errors are: * * - %EOPNOTSUPP: Landlock is supported by the kernel but disabled at boot time; * - %EINVAL: unknown @flags, or unknown access, or unknown scope, or too small @size; * - %E2BIG: @attr or @size inconsistencies; * - %EFAULT: @attr or @size inconsistencies; * - %ENOMSG: empty &landlock_ruleset_attr.handled_access_fs. */ SYSCALL_DEFINE3(landlock_create_ruleset, const struct landlock_ruleset_attr __user *const, attr, const size_t, size, const __u32, flags) { struct landlock_ruleset_attr ruleset_attr; struct landlock_ruleset *ruleset; int err, ruleset_fd; /* Build-time checks. */ build_check_abi(); if (!is_initialized()) return -EOPNOTSUPP; if (flags) { if ((flags == LANDLOCK_CREATE_RULESET_VERSION) && !attr && !size) return LANDLOCK_ABI_VERSION; return -EINVAL; } /* Copies raw user space buffer. */ err = copy_min_struct_from_user(&ruleset_attr, sizeof(ruleset_attr), offsetofend(typeof(ruleset_attr), handled_access_fs), attr, size); if (err) return err; /* Checks content (and 32-bits cast). */ if ((ruleset_attr.handled_access_fs | LANDLOCK_MASK_ACCESS_FS) != LANDLOCK_MASK_ACCESS_FS) return -EINVAL; /* Checks network content (and 32-bits cast). */ if ((ruleset_attr.handled_access_net | LANDLOCK_MASK_ACCESS_NET) != LANDLOCK_MASK_ACCESS_NET) return -EINVAL; /* Checks IPC scoping content (and 32-bits cast). */ if ((ruleset_attr.scoped | LANDLOCK_MASK_SCOPE) != LANDLOCK_MASK_SCOPE) return -EINVAL; /* Checks arguments and transforms to kernel struct. */ ruleset = landlock_create_ruleset(ruleset_attr.handled_access_fs, ruleset_attr.handled_access_net, ruleset_attr.scoped); if (IS_ERR(ruleset)) return PTR_ERR(ruleset); /* Creates anonymous FD referring to the ruleset. */ ruleset_fd = anon_inode_getfd("[landlock-ruleset]", &ruleset_fops, ruleset, O_RDWR | O_CLOEXEC); if (ruleset_fd < 0) landlock_put_ruleset(ruleset); return ruleset_fd; } /* * Returns an owned ruleset from a FD. It is thus needed to call * landlock_put_ruleset() on the return value. */ static struct landlock_ruleset *get_ruleset_from_fd(const int fd, const fmode_t mode) { CLASS(fd, ruleset_f)(fd); struct landlock_ruleset *ruleset; if (fd_empty(ruleset_f)) return ERR_PTR(-EBADF); /* Checks FD type and access right. */ if (fd_file(ruleset_f)->f_op != &ruleset_fops) return ERR_PTR(-EBADFD); if (!(fd_file(ruleset_f)->f_mode & mode)) return ERR_PTR(-EPERM); ruleset = fd_file(ruleset_f)->private_data; if (WARN_ON_ONCE(ruleset->num_layers != 1)) return ERR_PTR(-EINVAL); landlock_get_ruleset(ruleset); return ruleset; } /* Path handling */ /* * @path: Must call put_path(@path) after the call if it succeeded. */ static int get_path_from_fd(const s32 fd, struct path *const path) { CLASS(fd_raw, f)(fd); BUILD_BUG_ON(!__same_type( fd, ((struct landlock_path_beneath_attr *)NULL)->parent_fd)); if (fd_empty(f)) return -EBADF; /* * Forbids ruleset FDs, internal filesystems (e.g. nsfs), including * pseudo filesystems that will never be mountable (e.g. sockfs, * pipefs). 
*/ if ((fd_file(f)->f_op == &ruleset_fops) || (fd_file(f)->f_path.mnt->mnt_flags & MNT_INTERNAL) || (fd_file(f)->f_path.dentry->d_sb->s_flags & SB_NOUSER) || d_is_negative(fd_file(f)->f_path.dentry) || IS_PRIVATE(d_backing_inode(fd_file(f)->f_path.dentry))) return -EBADFD; *path = fd_file(f)->f_path; path_get(path); return 0; } static int add_rule_path_beneath(struct landlock_ruleset *const ruleset, const void __user *const rule_attr) { struct landlock_path_beneath_attr path_beneath_attr; struct path path; int res, err; access_mask_t mask; /* Copies raw user space buffer. */ res = copy_from_user(&path_beneath_attr, rule_attr, sizeof(path_beneath_attr)); if (res) return -EFAULT; /* * Informs about useless rule: empty allowed_access (i.e. deny rules) * are ignored in path walks. */ if (!path_beneath_attr.allowed_access) return -ENOMSG; /* Checks that allowed_access matches the @ruleset constraints. */ mask = ruleset->access_masks[0].fs; if ((path_beneath_attr.allowed_access | mask) != mask) return -EINVAL; /* Gets and checks the new rule. */ err = get_path_from_fd(path_beneath_attr.parent_fd, &path); if (err) return err; /* Imports the new rule. */ err = landlock_append_fs_rule(ruleset, &path, path_beneath_attr.allowed_access); path_put(&path); return err; } static int add_rule_net_port(struct landlock_ruleset *ruleset, const void __user *const rule_attr) { struct landlock_net_port_attr net_port_attr; int res; access_mask_t mask; /* Copies raw user space buffer. */ res = copy_from_user(&net_port_attr, rule_attr, sizeof(net_port_attr)); if (res) return -EFAULT; /* * Informs about useless rule: empty allowed_access (i.e. deny rules) * are ignored by network actions. */ if (!net_port_attr.allowed_access) return -ENOMSG; /* Checks that allowed_access matches the @ruleset constraints. */ mask = landlock_get_net_access_mask(ruleset, 0); if ((net_port_attr.allowed_access | mask) != mask) return -EINVAL; /* Denies inserting a rule with port greater than 65535. */ if (net_port_attr.port > U16_MAX) return -EINVAL; /* Imports the new rule. */ return landlock_append_net_rule(ruleset, net_port_attr.port, net_port_attr.allowed_access); } /** * sys_landlock_add_rule - Add a new rule to a ruleset * * @ruleset_fd: File descriptor tied to the ruleset that should be extended * with the new rule. * @rule_type: Identify the structure type pointed to by @rule_attr: * %LANDLOCK_RULE_PATH_BENEATH or %LANDLOCK_RULE_NET_PORT. * @rule_attr: Pointer to a rule (matching the @rule_type). * @flags: Must be 0. * * This system call enables to define a new rule and add it to an existing * ruleset. * * Possible returned errors are: * * - %EOPNOTSUPP: Landlock is supported by the kernel but disabled at boot time; * - %EAFNOSUPPORT: @rule_type is %LANDLOCK_RULE_NET_PORT but TCP/IP is not * supported by the running kernel; * - %EINVAL: @flags is not 0; * - %EINVAL: The rule accesses are inconsistent (i.e. * &landlock_path_beneath_attr.allowed_access or * &landlock_net_port_attr.allowed_access is not a subset of the ruleset * handled accesses) * - %EINVAL: &landlock_net_port_attr.port is greater than 65535; * - %ENOMSG: Empty accesses (e.g. 
&landlock_path_beneath_attr.allowed_access is * 0); * - %EBADF: @ruleset_fd is not a file descriptor for the current thread, or a * member of @rule_attr is not a file descriptor as expected; * - %EBADFD: @ruleset_fd is not a ruleset file descriptor, or a member of * @rule_attr is not the expected file descriptor type; * - %EPERM: @ruleset_fd has no write access to the underlying ruleset; * - %EFAULT: @rule_attr was not a valid address. */ SYSCALL_DEFINE4(landlock_add_rule, const int, ruleset_fd, const enum landlock_rule_type, rule_type, const void __user *const, rule_attr, const __u32, flags) { struct landlock_ruleset *ruleset __free(landlock_put_ruleset) = NULL; if (!is_initialized()) return -EOPNOTSUPP; /* No flag for now. */ if (flags) return -EINVAL; /* Gets and checks the ruleset. */ ruleset = get_ruleset_from_fd(ruleset_fd, FMODE_CAN_WRITE); if (IS_ERR(ruleset)) return PTR_ERR(ruleset); switch (rule_type) { case LANDLOCK_RULE_PATH_BENEATH: return add_rule_path_beneath(ruleset, rule_attr); case LANDLOCK_RULE_NET_PORT: return add_rule_net_port(ruleset, rule_attr); default: return -EINVAL; } } /* Enforcement */ /** * sys_landlock_restrict_self - Enforce a ruleset on the calling thread * * @ruleset_fd: File descriptor tied to the ruleset to merge with the target. * @flags: Must be 0. * * This system call enables to enforce a Landlock ruleset on the current * thread. Enforcing a ruleset requires that the task has %CAP_SYS_ADMIN in its * namespace or is running with no_new_privs. This avoids scenarios where * unprivileged tasks can affect the behavior of privileged children. * * Possible returned errors are: * * - %EOPNOTSUPP: Landlock is supported by the kernel but disabled at boot time; * - %EINVAL: @flags is not 0. * - %EBADF: @ruleset_fd is not a file descriptor for the current thread; * - %EBADFD: @ruleset_fd is not a ruleset file descriptor; * - %EPERM: @ruleset_fd has no read access to the underlying ruleset, or the * current thread is not running with no_new_privs, or it doesn't have * %CAP_SYS_ADMIN in its namespace. * - %E2BIG: The maximum number of stacked rulesets is reached for the current * thread. */ SYSCALL_DEFINE2(landlock_restrict_self, const int, ruleset_fd, const __u32, flags) { struct landlock_ruleset *new_dom, *ruleset __free(landlock_put_ruleset) = NULL; struct cred *new_cred; struct landlock_cred_security *new_llcred; if (!is_initialized()) return -EOPNOTSUPP; /* * Similar checks as for seccomp(2), except that an -EPERM may be * returned. */ if (!task_no_new_privs(current) && !ns_capable_noaudit(current_user_ns(), CAP_SYS_ADMIN)) return -EPERM; /* No flag for now. */ if (flags) return -EINVAL; /* Gets and checks the ruleset. */ ruleset = get_ruleset_from_fd(ruleset_fd, FMODE_CAN_READ); if (IS_ERR(ruleset)) return PTR_ERR(ruleset); /* Prepares new credentials. */ new_cred = prepare_creds(); if (!new_cred) return -ENOMEM; new_llcred = landlock_cred(new_cred); /* * There is no possible race condition while copying and manipulating * the current credentials because they are dedicated per thread. */ new_dom = landlock_merge_ruleset(new_llcred->domain, ruleset); if (IS_ERR(new_dom)) { abort_creds(new_cred); return PTR_ERR(new_dom); } /* Replaces the old (prepared) domain. */ landlock_put_ruleset(new_llcred->domain); new_llcred->domain = new_dom; return commit_creds(new_cred); } |
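The three syscalls above are meant to be driven from user space in sequence: create a ruleset, add rules to it, drop privileges, then enforce it on the calling thread. The sketch below shows that flow under stated assumptions: it assumes the toolchain's <linux/landlock.h> and <sys/syscall.h> provide the Landlock UAPI definitions and SYS_landlock_* syscall numbers, it only handles two read accesses, and "/usr" is an arbitrary illustrative path with minimal error handling. Note that the ruleset FD returned by landlock_create_ruleset() is opened O_RDWR, matching the FMODE_CAN_WRITE/FMODE_CAN_READ checks in get_ruleset_from_fd().

#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/landlock.h>
#include <stdio.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
    /* Handle (and thus deny by default) a couple of FS accesses. */
    struct landlock_ruleset_attr ruleset_attr = {
        .handled_access_fs = LANDLOCK_ACCESS_FS_READ_FILE |
                             LANDLOCK_ACCESS_FS_READ_DIR,
    };
    struct landlock_path_beneath_attr path_beneath = {
        .allowed_access = LANDLOCK_ACCESS_FS_READ_FILE |
                          LANDLOCK_ACCESS_FS_READ_DIR,
    };
    int ruleset_fd;

    ruleset_fd = syscall(SYS_landlock_create_ruleset, &ruleset_attr,
                         sizeof(ruleset_attr), 0);
    if (ruleset_fd < 0) {
        perror("landlock_create_ruleset");
        return 1;
    }

    /* Allow read access below /usr (an arbitrary example path). */
    path_beneath.parent_fd = open("/usr", O_PATH | O_CLOEXEC);
    if (path_beneath.parent_fd < 0 ||
        syscall(SYS_landlock_add_rule, ruleset_fd,
                LANDLOCK_RULE_PATH_BENEATH, &path_beneath, 0)) {
        perror("landlock_add_rule");
        return 1;
    }
    close(path_beneath.parent_fd);

    /* Required unless the task has CAP_SYS_ADMIN in its namespace. */
    if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) ||
        syscall(SYS_landlock_restrict_self, ruleset_fd, 0)) {
        perror("landlock_restrict_self");
        return 1;
    }
    close(ruleset_fd);

    /* From here on, handled read accesses outside /usr are denied. */
    return 0;
}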
// SPDX-License-Identifier: GPL-2.0-only
/*
 * vivid-vbi-out.c - vbi output support functions.
 *
 * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/videodev2.h>
#include <media/v4l2-common.h>

#include "vivid-core.h"
#include "vivid-kthread-out.h"
#include "vivid-vbi-out.h"
#include "vivid-vbi-cap.h"

static int vbi_out_queue_setup(struct vb2_queue *vq,
                               unsigned *nbuffers, unsigned *nplanes,
                               unsigned sizes[], struct device *alloc_devs[])
{
    struct vivid_dev *dev = vb2_get_drv_priv(vq);
    bool is_60hz = dev->std_out & V4L2_STD_525_60;
    unsigned size = vq->type == V4L2_BUF_TYPE_SLICED_VBI_OUTPUT ?
        36 * sizeof(struct v4l2_sliced_vbi_data) :
        1440 * 2 * (is_60hz ? 12 : 18);

    if (!vivid_is_svid_out(dev))
        return -EINVAL;

    if (*nplanes)
        return sizes[0] < size ? -EINVAL : 0;
    sizes[0] = size;
    *nplanes = 1;
    return 0;
}

static int vbi_out_buf_prepare(struct vb2_buffer *vb)
{
    struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
    bool is_60hz = dev->std_out & V4L2_STD_525_60;
    unsigned size = vb->vb2_queue->type == V4L2_BUF_TYPE_SLICED_VBI_OUTPUT ?
        36 * sizeof(struct v4l2_sliced_vbi_data) :
        1440 * 2 * (is_60hz ? 12 : 18);

    dprintk(dev, 1, "%s\n", __func__);

    if (dev->buf_prepare_error) {
        /*
         * Error injection: test what happens if buf_prepare() returns
         * an error.
*/ dev->buf_prepare_error = false; return -EINVAL; } if (vb2_plane_size(vb, 0) < size) { dprintk(dev, 1, "%s data will not fit into plane (%lu < %u)\n", __func__, vb2_plane_size(vb, 0), size); return -EINVAL; } vb2_set_plane_payload(vb, 0, size); return 0; } static void vbi_out_buf_queue(struct vb2_buffer *vb) { struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue); struct vivid_buffer *buf = container_of(vbuf, struct vivid_buffer, vb); dprintk(dev, 1, "%s\n", __func__); spin_lock(&dev->slock); list_add_tail(&buf->list, &dev->vbi_out_active); spin_unlock(&dev->slock); } static int vbi_out_start_streaming(struct vb2_queue *vq, unsigned count) { struct vivid_dev *dev = vb2_get_drv_priv(vq); int err; dprintk(dev, 1, "%s\n", __func__); dev->vbi_out_seq_count = 0; if (dev->start_streaming_error) { dev->start_streaming_error = false; err = -EINVAL; } else { err = vivid_start_generating_vid_out(dev, &dev->vbi_out_streaming); } if (err) { struct vivid_buffer *buf, *tmp; list_for_each_entry_safe(buf, tmp, &dev->vbi_out_active, list) { list_del(&buf->list); vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED); } } return err; } /* abort streaming and wait for last buffer */ static void vbi_out_stop_streaming(struct vb2_queue *vq) { struct vivid_dev *dev = vb2_get_drv_priv(vq); dprintk(dev, 1, "%s\n", __func__); vivid_stop_generating_vid_out(dev, &dev->vbi_out_streaming); dev->vbi_out_have_wss = false; dev->vbi_out_have_cc[0] = false; dev->vbi_out_have_cc[1] = false; } static void vbi_out_buf_request_complete(struct vb2_buffer *vb) { struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue); v4l2_ctrl_request_complete(vb->req_obj.req, &dev->ctrl_hdl_vbi_out); } const struct vb2_ops vivid_vbi_out_qops = { .queue_setup = vbi_out_queue_setup, .buf_prepare = vbi_out_buf_prepare, .buf_queue = vbi_out_buf_queue, .start_streaming = vbi_out_start_streaming, .stop_streaming = vbi_out_stop_streaming, .buf_request_complete = vbi_out_buf_request_complete, }; int vidioc_g_fmt_vbi_out(struct file *file, void *priv, struct v4l2_format *f) { struct vivid_dev *dev = video_drvdata(file); struct v4l2_vbi_format *vbi = &f->fmt.vbi; bool is_60hz = dev->std_out & V4L2_STD_525_60; if (!vivid_is_svid_out(dev) || !dev->has_raw_vbi_out) return -EINVAL; vbi->sampling_rate = 25000000; vbi->offset = 24; vbi->samples_per_line = 1440; vbi->sample_format = V4L2_PIX_FMT_GREY; vbi->start[0] = is_60hz ? V4L2_VBI_ITU_525_F1_START + 9 : V4L2_VBI_ITU_625_F1_START + 5; vbi->start[1] = is_60hz ? V4L2_VBI_ITU_525_F2_START + 9 : V4L2_VBI_ITU_625_F2_START + 5; vbi->count[0] = vbi->count[1] = is_60hz ? 12 : 18; vbi->flags = dev->vbi_cap_interlaced ? 
V4L2_VBI_INTERLACED : 0; vbi->reserved[0] = 0; vbi->reserved[1] = 0; return 0; } int vidioc_s_fmt_vbi_out(struct file *file, void *priv, struct v4l2_format *f) { struct vivid_dev *dev = video_drvdata(file); int ret = vidioc_g_fmt_vbi_out(file, priv, f); if (ret) return ret; if (vb2_is_busy(&dev->vb_vbi_out_q)) return -EBUSY; dev->stream_sliced_vbi_out = false; dev->vbi_out_dev.queue->type = V4L2_BUF_TYPE_VBI_OUTPUT; return 0; } int vidioc_g_fmt_sliced_vbi_out(struct file *file, void *fh, struct v4l2_format *fmt) { struct vivid_dev *dev = video_drvdata(file); struct v4l2_sliced_vbi_format *vbi = &fmt->fmt.sliced; if (!vivid_is_svid_out(dev) || !dev->has_sliced_vbi_out) return -EINVAL; vivid_fill_service_lines(vbi, dev->service_set_out); return 0; } int vidioc_try_fmt_sliced_vbi_out(struct file *file, void *fh, struct v4l2_format *fmt) { struct vivid_dev *dev = video_drvdata(file); struct v4l2_sliced_vbi_format *vbi = &fmt->fmt.sliced; bool is_60hz = dev->std_out & V4L2_STD_525_60; u32 service_set = vbi->service_set; if (!vivid_is_svid_out(dev) || !dev->has_sliced_vbi_out) return -EINVAL; service_set &= is_60hz ? V4L2_SLICED_CAPTION_525 : V4L2_SLICED_WSS_625 | V4L2_SLICED_TELETEXT_B; vivid_fill_service_lines(vbi, service_set); return 0; } int vidioc_s_fmt_sliced_vbi_out(struct file *file, void *fh, struct v4l2_format *fmt) { struct vivid_dev *dev = video_drvdata(file); struct v4l2_sliced_vbi_format *vbi = &fmt->fmt.sliced; int ret = vidioc_try_fmt_sliced_vbi_out(file, fh, fmt); if (ret) return ret; if (vb2_is_busy(&dev->vb_vbi_out_q)) return -EBUSY; dev->service_set_out = vbi->service_set; dev->stream_sliced_vbi_out = true; dev->vbi_out_dev.queue->type = V4L2_BUF_TYPE_SLICED_VBI_OUTPUT; return 0; } void vivid_sliced_vbi_out_process(struct vivid_dev *dev, struct vivid_buffer *buf) { struct v4l2_sliced_vbi_data *vbi = vb2_plane_vaddr(&buf->vb.vb2_buf, 0); unsigned elems = vb2_get_plane_payload(&buf->vb.vb2_buf, 0) / sizeof(*vbi); dev->vbi_out_have_cc[0] = false; dev->vbi_out_have_cc[1] = false; dev->vbi_out_have_wss = false; while (elems--) { switch (vbi->id) { case V4L2_SLICED_CAPTION_525: if ((dev->std_out & V4L2_STD_525_60) && vbi->line == 21) { dev->vbi_out_have_cc[!!vbi->field] = true; dev->vbi_out_cc[!!vbi->field][0] = vbi->data[0]; dev->vbi_out_cc[!!vbi->field][1] = vbi->data[1]; } break; case V4L2_SLICED_WSS_625: if ((dev->std_out & V4L2_STD_625_50) && vbi->field == 0 && vbi->line == 23) { dev->vbi_out_have_wss = true; dev->vbi_out_wss[0] = vbi->data[0]; dev->vbi_out_wss[1] = vbi->data[1]; } break; } vbi++; } } |
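vivid_sliced_vbi_out_process() above walks the array of struct v4l2_sliced_vbi_data elements in the queued output buffer and latches closed-caption bytes from line 21 on a 525/60 standard, or WSS bytes from line 23, field 0, on 625/50. The snippet below is a minimal user-space sketch, assuming a 60 Hz output standard, of how an application might fill one closed-caption element before queueing a sliced VBI output buffer; buffer allocation and the actual VIDIOC_QBUF call are omitted.

#include <linux/videodev2.h>
#include <string.h>

/*
 * Fill one sliced VBI element with closed-caption bytes for line 21,
 * field 0, which is what vivid_sliced_vbi_out_process() looks for on
 * a 525/60 output standard. The caller owns the mapped buffer memory.
 */
static void fill_cc_element(struct v4l2_sliced_vbi_data *elem,
                            unsigned char cc0, unsigned char cc1)
{
    memset(elem, 0, sizeof(*elem));
    elem->id = V4L2_SLICED_CAPTION_525;
    elem->field = 0;
    elem->line = 21;
    elem->data[0] = cc0;
    elem->data[1] = cc1;
}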
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Device handling code
 *  Linux ethernet bridge
 *
 *  Authors:
 *  Lennert Buytenhek  <buytenh@gnu.org>
 */

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/list.h>
#include <linux/netfilter_bridge.h>

#include <linux/uaccess.h>
#include "br_private.h"

#define COMMON_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA | \
                         NETIF_F_GSO_MASK | NETIF_F_HW_CSUM)

const struct nf_br_ops __rcu *nf_br_ops __read_mostly;
EXPORT_SYMBOL_GPL(nf_br_ops);

/* net device transmit always called with BH disabled */
netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
    enum skb_drop_reason reason = pskb_may_pull_reason(skb, ETH_HLEN);
    struct net_bridge_mcast_port *pmctx_null = NULL;
    struct net_bridge *br = netdev_priv(dev);
    struct net_bridge_mcast *brmctx = &br->multicast_ctx;
    struct net_bridge_fdb_entry *dst;
    struct net_bridge_mdb_entry *mdst;
    const struct nf_br_ops *nf_ops;
    u8 state = BR_STATE_FORWARDING;
    struct net_bridge_vlan *vlan;
    const unsigned char *dest;
    u16 vid = 0;

    if (unlikely(reason != SKB_NOT_DROPPED_YET)) {
        kfree_skb_reason(skb, reason);
        return NETDEV_TX_OK;
    }

    memset(skb->cb, 0, sizeof(struct br_input_skb_cb));

    br_tc_skb_miss_set(skb,
false); rcu_read_lock(); nf_ops = rcu_dereference(nf_br_ops); if (nf_ops && nf_ops->br_dev_xmit_hook(skb)) { rcu_read_unlock(); return NETDEV_TX_OK; } dev_sw_netstats_tx_add(dev, 1, skb->len); br_switchdev_frame_unmark(skb); BR_INPUT_SKB_CB(skb)->brdev = dev; BR_INPUT_SKB_CB(skb)->frag_max_size = 0; skb_reset_mac_header(skb); skb_pull(skb, ETH_HLEN); if (!br_allowed_ingress(br, br_vlan_group_rcu(br), skb, &vid, &state, &vlan)) goto out; if (IS_ENABLED(CONFIG_INET) && (eth_hdr(skb)->h_proto == htons(ETH_P_ARP) || eth_hdr(skb)->h_proto == htons(ETH_P_RARP)) && br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED)) { br_do_proxy_suppress_arp(skb, br, vid, NULL); } else if (IS_ENABLED(CONFIG_IPV6) && skb->protocol == htons(ETH_P_IPV6) && br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED) && pskb_may_pull(skb, sizeof(struct ipv6hdr) + sizeof(struct nd_msg)) && ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) { struct nd_msg *msg, _msg; msg = br_is_nd_neigh_msg(skb, &_msg); if (msg) br_do_suppress_nd(skb, br, vid, NULL, msg); } dest = eth_hdr(skb)->h_dest; if (is_broadcast_ether_addr(dest)) { br_flood(br, skb, BR_PKT_BROADCAST, false, true, vid); } else if (is_multicast_ether_addr(dest)) { if (unlikely(netpoll_tx_running(dev))) { br_flood(br, skb, BR_PKT_MULTICAST, false, true, vid); goto out; } if (br_multicast_rcv(&brmctx, &pmctx_null, vlan, skb, vid)) { kfree_skb(skb); goto out; } mdst = br_mdb_entry_skb_get(brmctx, skb, vid); if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) && br_multicast_querier_exists(brmctx, eth_hdr(skb), mdst)) br_multicast_flood(mdst, skb, brmctx, false, true); else br_flood(br, skb, BR_PKT_MULTICAST, false, true, vid); } else if ((dst = br_fdb_find_rcu(br, dest, vid)) != NULL) { br_forward(dst->dst, skb, false, true); } else { br_flood(br, skb, BR_PKT_UNICAST, false, true, vid); } out: rcu_read_unlock(); return NETDEV_TX_OK; } static int br_dev_init(struct net_device *dev) { struct net_bridge *br = netdev_priv(dev); int err; err = br_fdb_hash_init(br); if (err) return err; err = br_mdb_hash_init(br); if (err) { br_fdb_hash_fini(br); return err; } err = br_vlan_init(br); if (err) { br_mdb_hash_fini(br); br_fdb_hash_fini(br); return err; } err = br_multicast_init_stats(br); if (err) { br_vlan_flush(br); br_mdb_hash_fini(br); br_fdb_hash_fini(br); return err; } netdev_lockdep_set_classes(dev); return 0; } static void br_dev_uninit(struct net_device *dev) { struct net_bridge *br = netdev_priv(dev); br_multicast_dev_del(br); br_multicast_uninit_stats(br); br_vlan_flush(br); br_mdb_hash_fini(br); br_fdb_hash_fini(br); } static int br_dev_open(struct net_device *dev) { struct net_bridge *br = netdev_priv(dev); netdev_update_features(dev); netif_start_queue(dev); br_stp_enable_bridge(br); br_multicast_open(br); if (br_opt_get(br, BROPT_MULTICAST_ENABLED)) br_multicast_join_snoopers(br); return 0; } static void br_dev_set_multicast_list(struct net_device *dev) { } static void br_dev_change_rx_flags(struct net_device *dev, int change) { if (change & IFF_PROMISC) br_manage_promisc(netdev_priv(dev)); } static int br_dev_stop(struct net_device *dev) { struct net_bridge *br = netdev_priv(dev); br_stp_disable_bridge(br); br_multicast_stop(br); if (br_opt_get(br, BROPT_MULTICAST_ENABLED)) br_multicast_leave_snoopers(br); netif_stop_queue(dev); return 0; } static int br_change_mtu(struct net_device *dev, int new_mtu) { struct net_bridge *br = netdev_priv(dev); WRITE_ONCE(dev->mtu, new_mtu); /* this flag will be cleared if the MTU was automatically adjusted */ br_opt_toggle(br, BROPT_MTU_SET_BY_USER, true); 
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) /* remember the MTU in the rtable for PMTU */ dst_metric_set(&br->fake_rtable.dst, RTAX_MTU, new_mtu); #endif return 0; } /* Allow setting mac address to any valid ethernet address. */ static int br_set_mac_address(struct net_device *dev, void *p) { struct net_bridge *br = netdev_priv(dev); struct sockaddr *addr = p; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; /* dev_set_mac_addr() can be called by a master device on bridge's * NETDEV_UNREGISTER, but since it's being destroyed do nothing */ if (dev->reg_state != NETREG_REGISTERED) return -EBUSY; spin_lock_bh(&br->lock); if (!ether_addr_equal(dev->dev_addr, addr->sa_data)) { /* Mac address will be changed in br_stp_change_bridge_id(). */ br_stp_change_bridge_id(br, addr->sa_data); } spin_unlock_bh(&br->lock); return 0; } static void br_getinfo(struct net_device *dev, struct ethtool_drvinfo *info) { strscpy(info->driver, "bridge", sizeof(info->driver)); strscpy(info->version, BR_VERSION, sizeof(info->version)); strscpy(info->fw_version, "N/A", sizeof(info->fw_version)); strscpy(info->bus_info, "N/A", sizeof(info->bus_info)); } static int br_get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd) { struct net_bridge *br = netdev_priv(dev); struct net_bridge_port *p; cmd->base.duplex = DUPLEX_UNKNOWN; cmd->base.port = PORT_OTHER; cmd->base.speed = SPEED_UNKNOWN; list_for_each_entry(p, &br->port_list, list) { struct ethtool_link_ksettings ecmd; struct net_device *pdev = p->dev; if (!netif_running(pdev) || !netif_oper_up(pdev)) continue; if (__ethtool_get_link_ksettings(pdev, &ecmd)) continue; if (ecmd.base.speed == (__u32)SPEED_UNKNOWN) continue; if (cmd->base.speed == (__u32)SPEED_UNKNOWN || cmd->base.speed < ecmd.base.speed) cmd->base.speed = ecmd.base.speed; } return 0; } static netdev_features_t br_fix_features(struct net_device *dev, netdev_features_t features) { struct net_bridge *br = netdev_priv(dev); return br_features_recompute(br, features); } #ifdef CONFIG_NET_POLL_CONTROLLER static void br_poll_controller(struct net_device *br_dev) { } static void br_netpoll_cleanup(struct net_device *dev) { struct net_bridge *br = netdev_priv(dev); struct net_bridge_port *p; list_for_each_entry(p, &br->port_list, list) br_netpoll_disable(p); } static int __br_netpoll_enable(struct net_bridge_port *p) { struct netpoll *np; int err; np = kzalloc(sizeof(*p->np), GFP_KERNEL); if (!np) return -ENOMEM; err = __netpoll_setup(np, p->dev); if (err) { kfree(np); return err; } p->np = np; return err; } int br_netpoll_enable(struct net_bridge_port *p) { if (!p->br->dev->npinfo) return 0; return __br_netpoll_enable(p); } static int br_netpoll_setup(struct net_device *dev) { struct net_bridge *br = netdev_priv(dev); struct net_bridge_port *p; int err = 0; list_for_each_entry(p, &br->port_list, list) { if (!p->dev) continue; err = __br_netpoll_enable(p); if (err) goto fail; } out: return err; fail: br_netpoll_cleanup(dev); goto out; } void br_netpoll_disable(struct net_bridge_port *p) { struct netpoll *np = p->np; if (!np) return; p->np = NULL; __netpoll_free(np); } #endif static int br_add_slave(struct net_device *dev, struct net_device *slave_dev, struct netlink_ext_ack *extack) { struct net_bridge *br = netdev_priv(dev); return br_add_if(br, slave_dev, extack); } static int br_del_slave(struct net_device *dev, struct net_device *slave_dev) { struct net_bridge *br = netdev_priv(dev); return br_del_if(br, slave_dev); } static int br_fill_forward_path(struct net_device_path_ctx 
*ctx, struct net_device_path *path) { struct net_bridge_fdb_entry *f; struct net_bridge_port *dst; struct net_bridge *br; if (netif_is_bridge_port(ctx->dev)) return -1; br = netdev_priv(ctx->dev); br_vlan_fill_forward_path_pvid(br, ctx, path); f = br_fdb_find_rcu(br, ctx->daddr, path->bridge.vlan_id); if (!f) return -1; dst = READ_ONCE(f->dst); if (!dst) return -1; if (br_vlan_fill_forward_path_mode(br, dst, path)) return -1; path->type = DEV_PATH_BRIDGE; path->dev = dst->br->dev; ctx->dev = dst->dev; switch (path->bridge.vlan_mode) { case DEV_PATH_BR_VLAN_TAG: if (ctx->num_vlans >= ARRAY_SIZE(ctx->vlan)) return -ENOSPC; ctx->vlan[ctx->num_vlans].id = path->bridge.vlan_id; ctx->vlan[ctx->num_vlans].proto = path->bridge.vlan_proto; ctx->num_vlans++; break; case DEV_PATH_BR_VLAN_UNTAG_HW: case DEV_PATH_BR_VLAN_UNTAG: ctx->num_vlans--; break; case DEV_PATH_BR_VLAN_KEEP: break; } return 0; } static const struct ethtool_ops br_ethtool_ops = { .get_drvinfo = br_getinfo, .get_link = ethtool_op_get_link, .get_link_ksettings = br_get_link_ksettings, }; static const struct net_device_ops br_netdev_ops = { .ndo_open = br_dev_open, .ndo_stop = br_dev_stop, .ndo_init = br_dev_init, .ndo_uninit = br_dev_uninit, .ndo_start_xmit = br_dev_xmit, .ndo_get_stats64 = dev_get_tstats64, .ndo_set_mac_address = br_set_mac_address, .ndo_set_rx_mode = br_dev_set_multicast_list, .ndo_change_rx_flags = br_dev_change_rx_flags, .ndo_change_mtu = br_change_mtu, .ndo_siocdevprivate = br_dev_siocdevprivate, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_netpoll_setup = br_netpoll_setup, .ndo_netpoll_cleanup = br_netpoll_cleanup, .ndo_poll_controller = br_poll_controller, #endif .ndo_add_slave = br_add_slave, .ndo_del_slave = br_del_slave, .ndo_fix_features = br_fix_features, .ndo_fdb_add = br_fdb_add, .ndo_fdb_del = br_fdb_delete, .ndo_fdb_del_bulk = br_fdb_delete_bulk, .ndo_fdb_dump = br_fdb_dump, .ndo_fdb_get = br_fdb_get, .ndo_mdb_add = br_mdb_add, .ndo_mdb_del = br_mdb_del, .ndo_mdb_del_bulk = br_mdb_del_bulk, .ndo_mdb_dump = br_mdb_dump, .ndo_mdb_get = br_mdb_get, .ndo_bridge_getlink = br_getlink, .ndo_bridge_setlink = br_setlink, .ndo_bridge_dellink = br_dellink, .ndo_features_check = passthru_features_check, .ndo_fill_forward_path = br_fill_forward_path, }; static const struct device_type br_type = { .name = "bridge", }; void br_dev_setup(struct net_device *dev) { struct net_bridge *br = netdev_priv(dev); eth_hw_addr_random(dev); ether_setup(dev); dev->netdev_ops = &br_netdev_ops; dev->needs_free_netdev = true; dev->ethtool_ops = &br_ethtool_ops; SET_NETDEV_DEVTYPE(dev, &br_type); dev->priv_flags = IFF_EBRIDGE | IFF_NO_QUEUE; dev->lltx = true; dev->netns_local = true; dev->features = COMMON_FEATURES | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX; dev->hw_features = COMMON_FEATURES | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX; dev->vlan_features = COMMON_FEATURES; dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS; br->dev = dev; spin_lock_init(&br->lock); INIT_LIST_HEAD(&br->port_list); INIT_HLIST_HEAD(&br->fdb_list); INIT_HLIST_HEAD(&br->frame_type_list); #if IS_ENABLED(CONFIG_BRIDGE_MRP) INIT_HLIST_HEAD(&br->mrp_list); #endif #if IS_ENABLED(CONFIG_BRIDGE_CFM) INIT_HLIST_HEAD(&br->mep_list); #endif spin_lock_init(&br->hash_lock); br->bridge_id.prio[0] = 0x80; br->bridge_id.prio[1] = 0x00; ether_addr_copy(br->group_addr, eth_stp_addr); br->stp_enabled = BR_NO_STP; br->group_fwd_mask = BR_GROUPFWD_DEFAULT; br->group_fwd_mask_required = BR_GROUPFWD_DEFAULT; br->designated_root = br->bridge_id; br->bridge_max_age = 
br->max_age = 20 * HZ; br->bridge_hello_time = br->hello_time = 2 * HZ; br->bridge_forward_delay = br->forward_delay = 15 * HZ; br->bridge_ageing_time = br->ageing_time = BR_DEFAULT_AGEING_TIME; dev->max_mtu = ETH_MAX_MTU; br_netfilter_rtable_init(br); br_stp_timer_init(br); br_multicast_init(br); INIT_DELAYED_WORK(&br->gc_work, br_fdb_cleanup); } |
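br_dev_setup() above establishes the bridge's defaults: a random MAC address, STP timers of 2/15/20 seconds, and BR_DEFAULT_AGEING_TIME. Ports are attached through br_add_if(), reached from the netlink slave path (br_add_slave) as well as from the legacy bridge ioctls, which are not shown in this file. As a rough user-space counterpart, the sketch below creates a bridge and enslaves an interface with the classic SIOCBRADDBR/SIOCBRADDIF ioctls (the path brctl uses); it assumes CAP_NET_ADMIN, the interface name "eth0" is only an example, and error handling is minimal.

#include <net/if.h>
#include <linux/sockios.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
    struct ifreq ifr;
    int fd = socket(AF_INET, SOCK_STREAM, 0);

    if (fd < 0)
        return 1;

    /* Create the bridge device; the kernel then runs br_dev_setup(). */
    if (ioctl(fd, SIOCBRADDBR, "br0") < 0)
        perror("SIOCBRADDBR");

    /* Enslave an example port; this ends up in br_add_if(). */
    memset(&ifr, 0, sizeof(ifr));
    strncpy(ifr.ifr_name, "br0", IFNAMSIZ - 1);
    ifr.ifr_ifindex = if_nametoindex("eth0");
    if (!ifr.ifr_ifindex || ioctl(fd, SIOCBRADDIF, &ifr) < 0)
        perror("SIOCBRADDIF");

    close(fd);
    return 0;
}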
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Input driver to ExplorerPS/2 device driver module.
 *
 * Copyright (c) 1999-2002 Vojtech Pavlik
 * Copyright (c) 2004 Dmitry Torokhov
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define MOUSEDEV_MINOR_BASE 32
#define MOUSEDEV_MINORS     31
#define MOUSEDEV_MIX        63

#include <linux/bitops.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/input.h>
#include <linux/random.h>
#include <linux/major.h>
#include <linux/device.h>
#include <linux/cdev.h>
#include <linux/kernel.h>

MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
MODULE_DESCRIPTION("Mouse (ExplorerPS/2) device interfaces");
MODULE_LICENSE("GPL");

#ifndef CONFIG_INPUT_MOUSEDEV_SCREEN_X
#define CONFIG_INPUT_MOUSEDEV_SCREEN_X 1024
#endif
#ifndef CONFIG_INPUT_MOUSEDEV_SCREEN_Y
#define CONFIG_INPUT_MOUSEDEV_SCREEN_Y 768
#endif

static int xres = CONFIG_INPUT_MOUSEDEV_SCREEN_X;
module_param(xres, uint, 0644);
MODULE_PARM_DESC(xres, "Horizontal screen resolution");

static int yres = CONFIG_INPUT_MOUSEDEV_SCREEN_Y;
module_param(yres, uint, 0644);
MODULE_PARM_DESC(yres, "Vertical screen resolution");

static unsigned tap_time = 200;
module_param(tap_time, uint, 0644);
MODULE_PARM_DESC(tap_time, "Tap time for touchpads in absolute mode (msecs)");

struct mousedev_hw_data {
    int dx, dy, dz;
    int x, y;
    int abs_event;
    unsigned long buttons;
};

struct mousedev {
    int open;
    struct input_handle handle;
    wait_queue_head_t wait;
    struct list_head client_list;
    spinlock_t client_lock; /* protects client_list */
    struct mutex mutex;
    struct device dev;
    struct cdev cdev;
    bool exist;

    struct list_head mixdev_node;
    bool opened_by_mixdev;

    struct mousedev_hw_data packet;
    unsigned int pkt_count;
    int old_x[4], old_y[4];
    int frac_dx, frac_dy;
    unsigned long touch;

    int (*open_device)(struct mousedev *mousedev);
    void (*close_device)(struct mousedev *mousedev);
};

enum mousedev_emul {
    MOUSEDEV_EMUL_PS2,
    MOUSEDEV_EMUL_IMPS,
    MOUSEDEV_EMUL_EXPS
};

struct mousedev_motion {
    int dx, dy, dz;
    unsigned long buttons;
};

#define PACKET_QUEUE_LEN 16
struct mousedev_client {
    struct fasync_struct *fasync;
    struct mousedev
*mousedev; struct list_head node; struct mousedev_motion packets[PACKET_QUEUE_LEN]; unsigned int head, tail; spinlock_t packet_lock; int pos_x, pos_y; u8 ps2[6]; unsigned char ready, buffer, bufsiz; unsigned char imexseq, impsseq; enum mousedev_emul mode; unsigned long last_buttons; }; #define MOUSEDEV_SEQ_LEN 6 static unsigned char mousedev_imps_seq[] = { 0xf3, 200, 0xf3, 100, 0xf3, 80 }; static unsigned char mousedev_imex_seq[] = { 0xf3, 200, 0xf3, 200, 0xf3, 80 }; static struct mousedev *mousedev_mix; static LIST_HEAD(mousedev_mix_list); #define fx(i) (mousedev->old_x[(mousedev->pkt_count - (i)) & 03]) #define fy(i) (mousedev->old_y[(mousedev->pkt_count - (i)) & 03]) static void mousedev_touchpad_event(struct input_dev *dev, struct mousedev *mousedev, unsigned int code, int value) { int size, tmp; enum { FRACTION_DENOM = 128 }; switch (code) { case ABS_X: fx(0) = value; if (mousedev->touch && mousedev->pkt_count >= 2) { size = input_abs_get_max(dev, ABS_X) - input_abs_get_min(dev, ABS_X); if (size == 0) size = 256 * 2; tmp = ((value - fx(2)) * 256 * FRACTION_DENOM) / size; tmp += mousedev->frac_dx; mousedev->packet.dx = tmp / FRACTION_DENOM; mousedev->frac_dx = tmp - mousedev->packet.dx * FRACTION_DENOM; } break; case ABS_Y: fy(0) = value; if (mousedev->touch && mousedev->pkt_count >= 2) { /* use X size for ABS_Y to keep the same scale */ size = input_abs_get_max(dev, ABS_X) - input_abs_get_min(dev, ABS_X); if (size == 0) size = 256 * 2; tmp = -((value - fy(2)) * 256 * FRACTION_DENOM) / size; tmp += mousedev->frac_dy; mousedev->packet.dy = tmp / FRACTION_DENOM; mousedev->frac_dy = tmp - mousedev->packet.dy * FRACTION_DENOM; } break; } } static void mousedev_abs_event(struct input_dev *dev, struct mousedev *mousedev, unsigned int code, int value) { int min, max, size; switch (code) { case ABS_X: min = input_abs_get_min(dev, ABS_X); max = input_abs_get_max(dev, ABS_X); size = max - min; if (size == 0) size = xres ? : 1; value = clamp(value, min, max); mousedev->packet.x = ((value - min) * xres) / size; mousedev->packet.abs_event = 1; break; case ABS_Y: min = input_abs_get_min(dev, ABS_Y); max = input_abs_get_max(dev, ABS_Y); size = max - min; if (size == 0) size = yres ? 
: 1; value = clamp(value, min, max); mousedev->packet.y = yres - ((value - min) * yres) / size; mousedev->packet.abs_event = 1; break; } } static void mousedev_rel_event(struct mousedev *mousedev, unsigned int code, int value) { switch (code) { case REL_X: mousedev->packet.dx += value; break; case REL_Y: mousedev->packet.dy -= value; break; case REL_WHEEL: mousedev->packet.dz -= value; break; } } static void mousedev_key_event(struct mousedev *mousedev, unsigned int code, int value) { int index; switch (code) { case BTN_TOUCH: case BTN_0: case BTN_LEFT: index = 0; break; case BTN_STYLUS: case BTN_1: case BTN_RIGHT: index = 1; break; case BTN_2: case BTN_FORWARD: case BTN_STYLUS2: case BTN_MIDDLE: index = 2; break; case BTN_3: case BTN_BACK: case BTN_SIDE: index = 3; break; case BTN_4: case BTN_EXTRA: index = 4; break; default: return; } if (value) { set_bit(index, &mousedev->packet.buttons); set_bit(index, &mousedev_mix->packet.buttons); } else { clear_bit(index, &mousedev->packet.buttons); clear_bit(index, &mousedev_mix->packet.buttons); } } static void mousedev_notify_readers(struct mousedev *mousedev, struct mousedev_hw_data *packet) { struct mousedev_client *client; struct mousedev_motion *p; unsigned int new_head; int wake_readers = 0; rcu_read_lock(); list_for_each_entry_rcu(client, &mousedev->client_list, node) { /* Just acquire the lock, interrupts already disabled */ spin_lock(&client->packet_lock); p = &client->packets[client->head]; if (client->ready && p->buttons != mousedev->packet.buttons) { new_head = (client->head + 1) % PACKET_QUEUE_LEN; if (new_head != client->tail) { p = &client->packets[client->head = new_head]; memset(p, 0, sizeof(struct mousedev_motion)); } } if (packet->abs_event) { p->dx += packet->x - client->pos_x; p->dy += packet->y - client->pos_y; client->pos_x = packet->x; client->pos_y = packet->y; } client->pos_x += packet->dx; client->pos_x = clamp_val(client->pos_x, 0, xres); client->pos_y += packet->dy; client->pos_y = clamp_val(client->pos_y, 0, yres); p->dx += packet->dx; p->dy += packet->dy; p->dz += packet->dz; p->buttons = mousedev->packet.buttons; if (p->dx || p->dy || p->dz || p->buttons != client->last_buttons) client->ready = 1; spin_unlock(&client->packet_lock); if (client->ready) { kill_fasync(&client->fasync, SIGIO, POLL_IN); wake_readers = 1; } } rcu_read_unlock(); if (wake_readers) wake_up_interruptible(&mousedev->wait); } static void mousedev_touchpad_touch(struct mousedev *mousedev, int value) { if (!value) { if (mousedev->touch && time_before(jiffies, mousedev->touch + msecs_to_jiffies(tap_time))) { /* * Toggle left button to emulate tap. * We rely on the fact that mousedev_mix always has 0 * motion packet so we won't mess current position. 
*/ set_bit(0, &mousedev->packet.buttons); set_bit(0, &mousedev_mix->packet.buttons); mousedev_notify_readers(mousedev, &mousedev_mix->packet); mousedev_notify_readers(mousedev_mix, &mousedev_mix->packet); clear_bit(0, &mousedev->packet.buttons); clear_bit(0, &mousedev_mix->packet.buttons); } mousedev->touch = mousedev->pkt_count = 0; mousedev->frac_dx = 0; mousedev->frac_dy = 0; } else if (!mousedev->touch) mousedev->touch = jiffies; } static void mousedev_event(struct input_handle *handle, unsigned int type, unsigned int code, int value) { struct mousedev *mousedev = handle->private; switch (type) { case EV_ABS: /* Ignore joysticks */ if (test_bit(BTN_TRIGGER, handle->dev->keybit)) return; if (test_bit(BTN_TOOL_FINGER, handle->dev->keybit)) mousedev_touchpad_event(handle->dev, mousedev, code, value); else mousedev_abs_event(handle->dev, mousedev, code, value); break; case EV_REL: mousedev_rel_event(mousedev, code, value); break; case EV_KEY: if (value != 2) { if (code == BTN_TOUCH && test_bit(BTN_TOOL_FINGER, handle->dev->keybit)) mousedev_touchpad_touch(mousedev, value); else mousedev_key_event(mousedev, code, value); } break; case EV_SYN: if (code == SYN_REPORT) { if (mousedev->touch) { mousedev->pkt_count++; /* * Input system eats duplicate events, * but we need all of them to do correct * averaging so apply present one forward */ fx(0) = fx(1); fy(0) = fy(1); } mousedev_notify_readers(mousedev, &mousedev->packet); mousedev_notify_readers(mousedev_mix, &mousedev->packet); mousedev->packet.dx = mousedev->packet.dy = mousedev->packet.dz = 0; mousedev->packet.abs_event = 0; } break; } } static int mousedev_fasync(int fd, struct file *file, int on) { struct mousedev_client *client = file->private_data; return fasync_helper(fd, file, on, &client->fasync); } static void mousedev_free(struct device *dev) { struct mousedev *mousedev = container_of(dev, struct mousedev, dev); input_put_device(mousedev->handle.dev); kfree(mousedev); } static int mousedev_open_device(struct mousedev *mousedev) { int retval; retval = mutex_lock_interruptible(&mousedev->mutex); if (retval) return retval; if (!mousedev->exist) retval = -ENODEV; else if (!mousedev->open++) { retval = input_open_device(&mousedev->handle); if (retval) mousedev->open--; } mutex_unlock(&mousedev->mutex); return retval; } static void mousedev_close_device(struct mousedev *mousedev) { mutex_lock(&mousedev->mutex); if (mousedev->exist && !--mousedev->open) input_close_device(&mousedev->handle); mutex_unlock(&mousedev->mutex); } /* * Open all available devices so they can all be multiplexed in one. * stream. Note that this function is called with mousedev_mix->mutex * held. */ static int mixdev_open_devices(struct mousedev *mixdev) { int error; error = mutex_lock_interruptible(&mixdev->mutex); if (error) return error; if (!mixdev->open++) { struct mousedev *mousedev; list_for_each_entry(mousedev, &mousedev_mix_list, mixdev_node) { if (!mousedev->opened_by_mixdev) { if (mousedev_open_device(mousedev)) continue; mousedev->opened_by_mixdev = true; } } } mutex_unlock(&mixdev->mutex); return 0; } /* * Close all devices that were opened as part of multiplexed * device. Note that this function is called with mousedev_mix->mutex * held. 
*/ static void mixdev_close_devices(struct mousedev *mixdev) { mutex_lock(&mixdev->mutex); if (!--mixdev->open) { struct mousedev *mousedev; list_for_each_entry(mousedev, &mousedev_mix_list, mixdev_node) { if (mousedev->opened_by_mixdev) { mousedev->opened_by_mixdev = false; mousedev_close_device(mousedev); } } } mutex_unlock(&mixdev->mutex); } static void mousedev_attach_client(struct mousedev *mousedev, struct mousedev_client *client) { spin_lock(&mousedev->client_lock); list_add_tail_rcu(&client->node, &mousedev->client_list); spin_unlock(&mousedev->client_lock); } static void mousedev_detach_client(struct mousedev *mousedev, struct mousedev_client *client) { spin_lock(&mousedev->client_lock); list_del_rcu(&client->node); spin_unlock(&mousedev->client_lock); synchronize_rcu(); } static int mousedev_release(struct inode *inode, struct file *file) { struct mousedev_client *client = file->private_data; struct mousedev *mousedev = client->mousedev; mousedev_detach_client(mousedev, client); kfree(client); mousedev->close_device(mousedev); return 0; } static int mousedev_open(struct inode *inode, struct file *file) { struct mousedev_client *client; struct mousedev *mousedev; int error; #ifdef CONFIG_INPUT_MOUSEDEV_PSAUX if (imajor(inode) == MISC_MAJOR) mousedev = mousedev_mix; else #endif mousedev = container_of(inode->i_cdev, struct mousedev, cdev); client = kzalloc(sizeof(struct mousedev_client), GFP_KERNEL); if (!client) return -ENOMEM; spin_lock_init(&client->packet_lock); client->pos_x = xres / 2; client->pos_y = yres / 2; client->mousedev = mousedev; mousedev_attach_client(mousedev, client); error = mousedev->open_device(mousedev); if (error) goto err_free_client; file->private_data = client; stream_open(inode, file); return 0; err_free_client: mousedev_detach_client(mousedev, client); kfree(client); return error; } static void mousedev_packet(struct mousedev_client *client, u8 *ps2_data) { struct mousedev_motion *p = &client->packets[client->tail]; s8 dx, dy, dz; dx = clamp_val(p->dx, -127, 127); p->dx -= dx; dy = clamp_val(p->dy, -127, 127); p->dy -= dy; ps2_data[0] = BIT(3); ps2_data[0] |= ((dx & BIT(7)) >> 3) | ((dy & BIT(7)) >> 2); ps2_data[0] |= p->buttons & 0x07; ps2_data[1] = dx; ps2_data[2] = dy; switch (client->mode) { case MOUSEDEV_EMUL_EXPS: dz = clamp_val(p->dz, -7, 7); p->dz -= dz; ps2_data[3] = (dz & 0x0f) | ((p->buttons & 0x18) << 1); client->bufsiz = 4; break; case MOUSEDEV_EMUL_IMPS: dz = clamp_val(p->dz, -127, 127); p->dz -= dz; ps2_data[0] |= ((p->buttons & 0x10) >> 3) | ((p->buttons & 0x08) >> 1); ps2_data[3] = dz; client->bufsiz = 4; break; case MOUSEDEV_EMUL_PS2: default: p->dz = 0; ps2_data[0] |= ((p->buttons & 0x10) >> 3) | ((p->buttons & 0x08) >> 1); client->bufsiz = 3; break; } if (!p->dx && !p->dy && !p->dz) { if (client->tail == client->head) { client->ready = 0; client->last_buttons = p->buttons; } else client->tail = (client->tail + 1) % PACKET_QUEUE_LEN; } } static void mousedev_generate_response(struct mousedev_client *client, int command) { client->ps2[0] = 0xfa; /* ACK */ switch (command) { case 0xeb: /* Poll */ mousedev_packet(client, &client->ps2[1]); client->bufsiz++; /* account for leading ACK */ break; case 0xf2: /* Get ID */ switch (client->mode) { case MOUSEDEV_EMUL_PS2: client->ps2[1] = 0; break; case MOUSEDEV_EMUL_IMPS: client->ps2[1] = 3; break; case MOUSEDEV_EMUL_EXPS: client->ps2[1] = 4; break; } client->bufsiz = 2; break; case 0xe9: /* Get info */ client->ps2[1] = 0x60; client->ps2[2] = 3; client->ps2[3] = 200; client->bufsiz = 4; break; 
case 0xff: /* Reset */ client->impsseq = client->imexseq = 0; client->mode = MOUSEDEV_EMUL_PS2; client->ps2[1] = 0xaa; client->ps2[2] = 0x00; client->bufsiz = 3; break; default: client->bufsiz = 1; break; } client->buffer = client->bufsiz; } static ssize_t mousedev_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos) { struct mousedev_client *client = file->private_data; unsigned char c; unsigned int i; for (i = 0; i < count; i++) { if (get_user(c, buffer + i)) return -EFAULT; spin_lock_irq(&client->packet_lock); if (c == mousedev_imex_seq[client->imexseq]) { if (++client->imexseq == MOUSEDEV_SEQ_LEN) { client->imexseq = 0; client->mode = MOUSEDEV_EMUL_EXPS; } } else client->imexseq = 0; if (c == mousedev_imps_seq[client->impsseq]) { if (++client->impsseq == MOUSEDEV_SEQ_LEN) { client->impsseq = 0; client->mode = MOUSEDEV_EMUL_IMPS; } } else client->impsseq = 0; mousedev_generate_response(client, c); spin_unlock_irq(&client->packet_lock); cond_resched(); } kill_fasync(&client->fasync, SIGIO, POLL_IN); wake_up_interruptible(&client->mousedev->wait); return count; } static ssize_t mousedev_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos) { struct mousedev_client *client = file->private_data; struct mousedev *mousedev = client->mousedev; u8 data[sizeof(client->ps2)]; int retval = 0; if (!client->ready && !client->buffer && mousedev->exist && (file->f_flags & O_NONBLOCK)) return -EAGAIN; retval = wait_event_interruptible(mousedev->wait, !mousedev->exist || client->ready || client->buffer); if (retval) return retval; if (!mousedev->exist) return -ENODEV; spin_lock_irq(&client->packet_lock); if (!client->buffer && client->ready) { mousedev_packet(client, client->ps2); client->buffer = client->bufsiz; } if (count > client->buffer) count = client->buffer; memcpy(data, client->ps2 + client->bufsiz - client->buffer, count); client->buffer -= count; spin_unlock_irq(&client->packet_lock); if (copy_to_user(buffer, data, count)) return -EFAULT; return count; } /* No kernel lock - fine */ static __poll_t mousedev_poll(struct file *file, poll_table *wait) { struct mousedev_client *client = file->private_data; struct mousedev *mousedev = client->mousedev; __poll_t mask; poll_wait(file, &mousedev->wait, wait); mask = mousedev->exist ? EPOLLOUT | EPOLLWRNORM : EPOLLHUP | EPOLLERR; if (client->ready || client->buffer) mask |= EPOLLIN | EPOLLRDNORM; return mask; } static const struct file_operations mousedev_fops = { .owner = THIS_MODULE, .read = mousedev_read, .write = mousedev_write, .poll = mousedev_poll, .open = mousedev_open, .release = mousedev_release, .fasync = mousedev_fasync, .llseek = noop_llseek, }; /* * Mark device non-existent. This disables writes, ioctls and * prevents new users from opening the device. Already posted * blocking reads will stay, however new ones will fail. */ static void mousedev_mark_dead(struct mousedev *mousedev) { mutex_lock(&mousedev->mutex); mousedev->exist = false; mutex_unlock(&mousedev->mutex); } /* * Wake up users waiting for IO so they can disconnect from * dead device. 
*/ static void mousedev_hangup(struct mousedev *mousedev) { struct mousedev_client *client; spin_lock(&mousedev->client_lock); list_for_each_entry(client, &mousedev->client_list, node) kill_fasync(&client->fasync, SIGIO, POLL_HUP); spin_unlock(&mousedev->client_lock); wake_up_interruptible(&mousedev->wait); } static void mousedev_cleanup(struct mousedev *mousedev) { struct input_handle *handle = &mousedev->handle; mousedev_mark_dead(mousedev); mousedev_hangup(mousedev); /* mousedev is marked dead so no one else accesses mousedev->open */ if (mousedev->open) input_close_device(handle); } static int mousedev_reserve_minor(bool mixdev) { int minor; if (mixdev) { minor = input_get_new_minor(MOUSEDEV_MIX, 1, false); if (minor < 0) pr_err("failed to reserve mixdev minor: %d\n", minor); } else { minor = input_get_new_minor(MOUSEDEV_MINOR_BASE, MOUSEDEV_MINORS, true); if (minor < 0) pr_err("failed to reserve new minor: %d\n", minor); } return minor; } static struct mousedev *mousedev_create(struct input_dev *dev, struct input_handler *handler, bool mixdev) { struct mousedev *mousedev; int minor; int error; minor = mousedev_reserve_minor(mixdev); if (minor < 0) { error = minor; goto err_out; } mousedev = kzalloc(sizeof(struct mousedev), GFP_KERNEL); if (!mousedev) { error = -ENOMEM; goto err_free_minor; } INIT_LIST_HEAD(&mousedev->client_list); INIT_LIST_HEAD(&mousedev->mixdev_node); spin_lock_init(&mousedev->client_lock); mutex_init(&mousedev->mutex); lockdep_set_subclass(&mousedev->mutex, mixdev ? SINGLE_DEPTH_NESTING : 0); init_waitqueue_head(&mousedev->wait); if (mixdev) { dev_set_name(&mousedev->dev, "mice"); mousedev->open_device = mixdev_open_devices; mousedev->close_device = mixdev_close_devices; } else { int dev_no = minor; /* Normalize device number if it falls into legacy range */ if (dev_no < MOUSEDEV_MINOR_BASE + MOUSEDEV_MINORS) dev_no -= MOUSEDEV_MINOR_BASE; dev_set_name(&mousedev->dev, "mouse%d", dev_no); mousedev->open_device = mousedev_open_device; mousedev->close_device = mousedev_close_device; } mousedev->exist = true; mousedev->handle.dev = input_get_device(dev); mousedev->handle.name = dev_name(&mousedev->dev); mousedev->handle.handler = handler; mousedev->handle.private = mousedev; mousedev->dev.class = &input_class; if (dev) mousedev->dev.parent = &dev->dev; mousedev->dev.devt = MKDEV(INPUT_MAJOR, minor); mousedev->dev.release = mousedev_free; device_initialize(&mousedev->dev); if (!mixdev) { error = input_register_handle(&mousedev->handle); if (error) goto err_free_mousedev; } cdev_init(&mousedev->cdev, &mousedev_fops); error = cdev_device_add(&mousedev->cdev, &mousedev->dev); if (error) goto err_cleanup_mousedev; return mousedev; err_cleanup_mousedev: mousedev_cleanup(mousedev); if (!mixdev) input_unregister_handle(&mousedev->handle); err_free_mousedev: put_device(&mousedev->dev); err_free_minor: input_free_minor(minor); err_out: return ERR_PTR(error); } static void mousedev_destroy(struct mousedev *mousedev) { cdev_device_del(&mousedev->cdev, &mousedev->dev); mousedev_cleanup(mousedev); input_free_minor(MINOR(mousedev->dev.devt)); if (mousedev != mousedev_mix) input_unregister_handle(&mousedev->handle); put_device(&mousedev->dev); } static int mixdev_add_device(struct mousedev *mousedev) { int retval; retval = mutex_lock_interruptible(&mousedev_mix->mutex); if (retval) return retval; if (mousedev_mix->open) { retval = mousedev_open_device(mousedev); if (retval) goto out; mousedev->opened_by_mixdev = true; } get_device(&mousedev->dev); list_add_tail(&mousedev->mixdev_node, 
&mousedev_mix_list); out: mutex_unlock(&mousedev_mix->mutex); return retval; } static void mixdev_remove_device(struct mousedev *mousedev) { mutex_lock(&mousedev_mix->mutex); if (mousedev->opened_by_mixdev) { mousedev->opened_by_mixdev = false; mousedev_close_device(mousedev); } list_del_init(&mousedev->mixdev_node); mutex_unlock(&mousedev_mix->mutex); put_device(&mousedev->dev); } static int mousedev_connect(struct input_handler *handler, struct input_dev *dev, const struct input_device_id *id) { struct mousedev *mousedev; int error; mousedev = mousedev_create(dev, handler, false); if (IS_ERR(mousedev)) return PTR_ERR(mousedev); error = mixdev_add_device(mousedev); if (error) { mousedev_destroy(mousedev); return error; } return 0; } static void mousedev_disconnect(struct input_handle *handle) { struct mousedev *mousedev = handle->private; mixdev_remove_device(mousedev); mousedev_destroy(mousedev); } static const struct input_device_id mousedev_ids[] = { { .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT | INPUT_DEVICE_ID_MATCH_RELBIT, .evbit = { BIT_MASK(EV_KEY) | BIT_MASK(EV_REL) }, .keybit = { [BIT_WORD(BTN_LEFT)] = BIT_MASK(BTN_LEFT) }, .relbit = { BIT_MASK(REL_X) | BIT_MASK(REL_Y) }, }, /* A mouse like device, at least one button, two relative axes */ { .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_RELBIT, .evbit = { BIT_MASK(EV_KEY) | BIT_MASK(EV_REL) }, .relbit = { BIT_MASK(REL_WHEEL) }, }, /* A separate scrollwheel */ { .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT | INPUT_DEVICE_ID_MATCH_ABSBIT, .evbit = { BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS) }, .keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) }, .absbit = { BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) }, }, /* A tablet like device, at least touch detection, two absolute axes */ { .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT | INPUT_DEVICE_ID_MATCH_ABSBIT, .evbit = { BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS) }, .keybit = { [BIT_WORD(BTN_TOOL_FINGER)] = BIT_MASK(BTN_TOOL_FINGER) }, .absbit = { BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) | BIT_MASK(ABS_PRESSURE) | BIT_MASK(ABS_TOOL_WIDTH) }, }, /* A touchpad */ { .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT | INPUT_DEVICE_ID_MATCH_ABSBIT, .evbit = { BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS) }, .keybit = { [BIT_WORD(BTN_LEFT)] = BIT_MASK(BTN_LEFT) }, .absbit = { BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) }, }, /* Mouse-like device with absolute X and Y but ordinary clicks, like hp ILO2 High Performance mouse */ { }, /* Terminating entry */ }; MODULE_DEVICE_TABLE(input, mousedev_ids); static struct input_handler mousedev_handler = { .event = mousedev_event, .connect = mousedev_connect, .disconnect = mousedev_disconnect, .legacy_minors = true, .minor = MOUSEDEV_MINOR_BASE, .name = "mousedev", .id_table = mousedev_ids, }; #ifdef CONFIG_INPUT_MOUSEDEV_PSAUX #include <linux/miscdevice.h> static struct miscdevice psaux_mouse = { .minor = PSMOUSE_MINOR, .name = "psaux", .fops = &mousedev_fops, }; static bool psaux_registered; static void __init mousedev_psaux_register(void) { int error; error = misc_register(&psaux_mouse); if (error) pr_warn("could not register psaux device, error: %d\n", error); else psaux_registered = true; } static void __exit mousedev_psaux_unregister(void) { if (psaux_registered) misc_deregister(&psaux_mouse); } #else static inline void mousedev_psaux_register(void) { } static inline void mousedev_psaux_unregister(void) { } #endif static int __init mousedev_init(void) { int error; mousedev_mix = 
mousedev_create(NULL, &mousedev_handler, true); if (IS_ERR(mousedev_mix)) return PTR_ERR(mousedev_mix); error = input_register_handler(&mousedev_handler); if (error) { mousedev_destroy(mousedev_mix); return error; } mousedev_psaux_register(); pr_info("PS/2 mouse device common for all mice\n"); return 0; } static void __exit mousedev_exit(void) { mousedev_psaux_unregister(); input_unregister_handler(&mousedev_handler); mousedev_destroy(mousedev_mix); } module_init(mousedev_init); module_exit(mousedev_exit); |
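/*
 * Illustrative user-space sketch (not part of the driver): a minimal reader
 * for /dev/input/mice.  It replays the ImPS/2 magic sample-rate sequence
 * (set rate 200, 100, 80 -- the PS/2 convention mousedev_imps_seq encodes),
 * which mousedev_write() recognizes to put this client into
 * MOUSEDEV_EMUL_IMPS, and then decodes the 4-byte packets built by
 * mousedev_packet().  Error handling is minimal; a robust reader would also
 * reassemble short reads.
 */
#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	static const uint8_t imps_seq[] = { 0xf3, 200, 0xf3, 100, 0xf3, 80 };
	uint8_t pkt[4];
	int fd = open("/dev/input/mice", O_RDWR);

	if (fd < 0)
		return 1;

	/* Switch to ImPS/2 emulation; the driver buffers an ACK reply. */
	if (write(fd, imps_seq, sizeof(imps_seq)) != sizeof(imps_seq))
		return 1;
	if (read(fd, pkt, sizeof(pkt)) < 0)	/* drain the pending ACK */
		return 1;

	while (read(fd, pkt, sizeof(pkt)) == sizeof(pkt)) {
		int dx = (int8_t)pkt[1];
		int dy = (int8_t)pkt[2];
		int dz = (int8_t)pkt[3];	/* wheel movement in ImPS/2 mode */

		printf("buttons=%#x dx=%d dy=%d dz=%d\n",
		       pkt[0] & 0x07, dx, dy, dz);
	}

	close(fd);
	return 0;
}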
6 2 5 2 7 17 1 17 1 17 2 1 16 16 13 2 2 7 6 1 7 11 11 11 6 1 1 8 2 5 9 5 6 2 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 | // SPDX-License-Identifier: GPL-2.0 #include <linux/types.h> #include <net/ip.h> #include <net/tcp.h> #include <net/netlink.h> #include <net/netfilter/nf_tables.h> #include <net/netfilter/nf_conntrack.h> #include <net/netfilter/nf_conntrack_synproxy.h> #include <net/netfilter/nf_synproxy.h> #include <linux/netfilter/nf_tables.h> #include <linux/netfilter/nf_synproxy.h> struct nft_synproxy { struct nf_synproxy_info info; }; static const struct nla_policy nft_synproxy_policy[NFTA_SYNPROXY_MAX + 1] = { [NFTA_SYNPROXY_MSS] = { .type = NLA_U16 }, [NFTA_SYNPROXY_WSCALE] = { .type = NLA_U8 }, [NFTA_SYNPROXY_FLAGS] = { .type = NLA_U32 }, }; static void nft_synproxy_tcp_options(struct synproxy_options *opts, const struct tcphdr *tcp, struct synproxy_net *snet, struct nf_synproxy_info *info, const struct nft_synproxy *priv) { this_cpu_inc(snet->stats->syn_received); if (tcp->ece && tcp->cwr) opts->options |= NF_SYNPROXY_OPT_ECN; opts->options &= priv->info.options; opts->mss_encode = opts->mss_option; opts->mss_option = info->mss; if (opts->options & NF_SYNPROXY_OPT_TIMESTAMP) synproxy_init_timestamp_cookie(info, opts); else opts->options &= ~(NF_SYNPROXY_OPT_WSCALE | NF_SYNPROXY_OPT_SACK_PERM | NF_SYNPROXY_OPT_ECN); } static void nft_synproxy_eval_v4(const struct nft_synproxy *priv, struct nft_regs *regs, const struct nft_pktinfo *pkt, const struct tcphdr *tcp, struct tcphdr *_tcph, struct synproxy_options *opts) { struct nf_synproxy_info info = priv->info; struct net *net = nft_net(pkt); struct synproxy_net *snet = synproxy_pernet(net); struct sk_buff *skb = pkt->skb; if (tcp->syn) { /* Initial SYN from client */ nft_synproxy_tcp_options(opts, tcp, snet, &info, priv); synproxy_send_client_synack(net, skb, tcp, opts); consume_skb(skb); regs->verdict.code = NF_STOLEN; } else if (tcp->ack) { /* ACK from client */ if (synproxy_recv_client_ack(net, skb, tcp, opts, ntohl(tcp->seq))) { consume_skb(skb); regs->verdict.code = NF_STOLEN; } else { regs->verdict.code = NF_DROP; 
} } } #if IS_ENABLED(CONFIG_NF_TABLES_IPV6) static void nft_synproxy_eval_v6(const struct nft_synproxy *priv, struct nft_regs *regs, const struct nft_pktinfo *pkt, const struct tcphdr *tcp, struct tcphdr *_tcph, struct synproxy_options *opts) { struct nf_synproxy_info info = priv->info; struct net *net = nft_net(pkt); struct synproxy_net *snet = synproxy_pernet(net); struct sk_buff *skb = pkt->skb; if (tcp->syn) { /* Initial SYN from client */ nft_synproxy_tcp_options(opts, tcp, snet, &info, priv); synproxy_send_client_synack_ipv6(net, skb, tcp, opts); consume_skb(skb); regs->verdict.code = NF_STOLEN; } else if (tcp->ack) { /* ACK from client */ if (synproxy_recv_client_ack_ipv6(net, skb, tcp, opts, ntohl(tcp->seq))) { consume_skb(skb); regs->verdict.code = NF_STOLEN; } else { regs->verdict.code = NF_DROP; } } } #endif /* CONFIG_NF_TABLES_IPV6*/ static void nft_synproxy_do_eval(const struct nft_synproxy *priv, struct nft_regs *regs, const struct nft_pktinfo *pkt) { struct synproxy_options opts = {}; struct sk_buff *skb = pkt->skb; int thoff = nft_thoff(pkt); const struct tcphdr *tcp; struct tcphdr _tcph; if (pkt->tprot != IPPROTO_TCP) { regs->verdict.code = NFT_BREAK; return; } if (nf_ip_checksum(skb, nft_hook(pkt), thoff, IPPROTO_TCP)) { regs->verdict.code = NF_DROP; return; } tcp = skb_header_pointer(skb, thoff, sizeof(struct tcphdr), &_tcph); if (!tcp) { regs->verdict.code = NF_DROP; return; } if (!synproxy_parse_options(skb, thoff, tcp, &opts)) { regs->verdict.code = NF_DROP; return; } switch (skb->protocol) { case htons(ETH_P_IP): nft_synproxy_eval_v4(priv, regs, pkt, tcp, &_tcph, &opts); return; #if IS_ENABLED(CONFIG_NF_TABLES_IPV6) case htons(ETH_P_IPV6): nft_synproxy_eval_v6(priv, regs, pkt, tcp, &_tcph, &opts); return; #endif } regs->verdict.code = NFT_BREAK; } static int nft_synproxy_do_init(const struct nft_ctx *ctx, const struct nlattr * const tb[], struct nft_synproxy *priv) { struct synproxy_net *snet = synproxy_pernet(ctx->net); u32 flags; int err; if (tb[NFTA_SYNPROXY_MSS]) priv->info.mss = ntohs(nla_get_be16(tb[NFTA_SYNPROXY_MSS])); if (tb[NFTA_SYNPROXY_WSCALE]) priv->info.wscale = nla_get_u8(tb[NFTA_SYNPROXY_WSCALE]); if (tb[NFTA_SYNPROXY_FLAGS]) { flags = ntohl(nla_get_be32(tb[NFTA_SYNPROXY_FLAGS])); if (flags & ~NF_SYNPROXY_OPT_MASK) return -EOPNOTSUPP; priv->info.options = flags; } err = nf_ct_netns_get(ctx->net, ctx->family); if (err) return err; switch (ctx->family) { case NFPROTO_IPV4: err = nf_synproxy_ipv4_init(snet, ctx->net); if (err) goto nf_ct_failure; break; #if IS_ENABLED(CONFIG_NF_TABLES_IPV6) case NFPROTO_IPV6: err = nf_synproxy_ipv6_init(snet, ctx->net); if (err) goto nf_ct_failure; break; #endif case NFPROTO_INET: err = nf_synproxy_ipv4_init(snet, ctx->net); if (err) goto nf_ct_failure; err = nf_synproxy_ipv6_init(snet, ctx->net); if (err) { nf_synproxy_ipv4_fini(snet, ctx->net); goto nf_ct_failure; } break; } return 0; nf_ct_failure: nf_ct_netns_put(ctx->net, ctx->family); return err; } static void nft_synproxy_do_destroy(const struct nft_ctx *ctx) { struct synproxy_net *snet = synproxy_pernet(ctx->net); switch (ctx->family) { case NFPROTO_IPV4: nf_synproxy_ipv4_fini(snet, ctx->net); break; #if IS_ENABLED(CONFIG_NF_TABLES_IPV6) case NFPROTO_IPV6: nf_synproxy_ipv6_fini(snet, ctx->net); break; #endif case NFPROTO_INET: nf_synproxy_ipv4_fini(snet, ctx->net); nf_synproxy_ipv6_fini(snet, ctx->net); break; } nf_ct_netns_put(ctx->net, ctx->family); } static int nft_synproxy_do_dump(struct sk_buff *skb, struct nft_synproxy *priv) { if (nla_put_be16(skb, 
NFTA_SYNPROXY_MSS, htons(priv->info.mss)) || nla_put_u8(skb, NFTA_SYNPROXY_WSCALE, priv->info.wscale) || nla_put_be32(skb, NFTA_SYNPROXY_FLAGS, htonl(priv->info.options))) goto nla_put_failure; return 0; nla_put_failure: return -1; } static void nft_synproxy_eval(const struct nft_expr *expr, struct nft_regs *regs, const struct nft_pktinfo *pkt) { const struct nft_synproxy *priv = nft_expr_priv(expr); nft_synproxy_do_eval(priv, regs, pkt); } static int nft_synproxy_validate(const struct nft_ctx *ctx, const struct nft_expr *expr) { if (ctx->family != NFPROTO_IPV4 && ctx->family != NFPROTO_IPV6 && ctx->family != NFPROTO_INET) return -EOPNOTSUPP; return nft_chain_validate_hooks(ctx->chain, (1 << NF_INET_LOCAL_IN) | (1 << NF_INET_FORWARD)); } static int nft_synproxy_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) { struct nft_synproxy *priv = nft_expr_priv(expr); return nft_synproxy_do_init(ctx, tb, priv); } static void nft_synproxy_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr) { nft_synproxy_do_destroy(ctx); } static int nft_synproxy_dump(struct sk_buff *skb, const struct nft_expr *expr, bool reset) { struct nft_synproxy *priv = nft_expr_priv(expr); return nft_synproxy_do_dump(skb, priv); } static struct nft_expr_type nft_synproxy_type; static const struct nft_expr_ops nft_synproxy_ops = { .eval = nft_synproxy_eval, .size = NFT_EXPR_SIZE(sizeof(struct nft_synproxy)), .init = nft_synproxy_init, .destroy = nft_synproxy_destroy, .dump = nft_synproxy_dump, .type = &nft_synproxy_type, .validate = nft_synproxy_validate, .reduce = NFT_REDUCE_READONLY, }; static struct nft_expr_type nft_synproxy_type __read_mostly = { .ops = &nft_synproxy_ops, .name = "synproxy", .owner = THIS_MODULE, .policy = nft_synproxy_policy, .maxattr = NFTA_SYNPROXY_MAX, }; static int nft_synproxy_obj_init(const struct nft_ctx *ctx, const struct nlattr * const tb[], struct nft_object *obj) { struct nft_synproxy *priv = nft_obj_data(obj); return nft_synproxy_do_init(ctx, tb, priv); } static void nft_synproxy_obj_destroy(const struct nft_ctx *ctx, struct nft_object *obj) { nft_synproxy_do_destroy(ctx); } static int nft_synproxy_obj_dump(struct sk_buff *skb, struct nft_object *obj, bool reset) { struct nft_synproxy *priv = nft_obj_data(obj); return nft_synproxy_do_dump(skb, priv); } static void nft_synproxy_obj_eval(struct nft_object *obj, struct nft_regs *regs, const struct nft_pktinfo *pkt) { const struct nft_synproxy *priv = nft_obj_data(obj); nft_synproxy_do_eval(priv, regs, pkt); } static void nft_synproxy_obj_update(struct nft_object *obj, struct nft_object *newobj) { struct nft_synproxy *newpriv = nft_obj_data(newobj); struct nft_synproxy *priv = nft_obj_data(obj); priv->info = newpriv->info; } static struct nft_object_type nft_synproxy_obj_type; static const struct nft_object_ops nft_synproxy_obj_ops = { .type = &nft_synproxy_obj_type, .size = sizeof(struct nft_synproxy), .init = nft_synproxy_obj_init, .destroy = nft_synproxy_obj_destroy, .dump = nft_synproxy_obj_dump, .eval = nft_synproxy_obj_eval, .update = nft_synproxy_obj_update, }; static struct nft_object_type nft_synproxy_obj_type __read_mostly = { .type = NFT_OBJECT_SYNPROXY, .ops = &nft_synproxy_obj_ops, .maxattr = NFTA_SYNPROXY_MAX, .policy = nft_synproxy_policy, .owner = THIS_MODULE, }; static int __init nft_synproxy_module_init(void) { int err; err = nft_register_obj(&nft_synproxy_obj_type); if (err < 0) return err; err = nft_register_expr(&nft_synproxy_type); if (err < 0) goto err; return 0; err: 
nft_unregister_obj(&nft_synproxy_obj_type); return err; } static void __exit nft_synproxy_module_exit(void) { nft_unregister_expr(&nft_synproxy_type); nft_unregister_obj(&nft_synproxy_obj_type); } module_init(nft_synproxy_module_init); module_exit(nft_synproxy_module_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Fernando Fernandez <ffmancera@riseup.net>"); MODULE_ALIAS_NFT_EXPR("synproxy"); MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_SYNPROXY); MODULE_DESCRIPTION("nftables SYNPROXY expression support"); |
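/*
 * Usage sketch (assumed standard nftables syntax, not part of this file):
 * the expression above is typically driven from a ruleset such as
 *
 *	table inet filter {
 *		chain prerouting {
 *			type filter hook prerouting priority raw; policy accept;
 *			tcp flags syn notrack
 *		}
 *		chain forward {
 *			type filter hook forward priority filter; policy accept;
 *			ct state invalid,untracked synproxy mss 1460 wscale 7 timestamp sack-perm
 *			ct state invalid drop
 *		}
 *	}
 *
 * where mss/wscale/timestamp/sack-perm map to the NFTA_SYNPROXY_MSS,
 * NFTA_SYNPROXY_WSCALE and NFTA_SYNPROXY_FLAGS attributes parsed in
 * nft_synproxy_do_init() and evaluated in nft_synproxy_do_eval().
 */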
1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 2 2 2 3 3 3 3 1 1 1 1 1 1 1 1 1 1 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 | // SPDX-License-Identifier: GPL-2.0-or-later /* xfrm6_protocol.c - Generic xfrm protocol multiplexer for ipv6. * * Copyright (C) 2013 secunet Security Networks AG * * Author: * Steffen Klassert <steffen.klassert@secunet.com> * * Based on: * net/ipv4/xfrm4_protocol.c */ #include <linux/init.h> #include <linux/mutex.h> #include <linux/skbuff.h> #include <linux/icmpv6.h> #include <net/ip6_route.h> #include <net/ipv6.h> #include <net/protocol.h> #include <net/xfrm.h> static struct xfrm6_protocol __rcu *esp6_handlers __read_mostly; static struct xfrm6_protocol __rcu *ah6_handlers __read_mostly; static struct xfrm6_protocol __rcu *ipcomp6_handlers __read_mostly; static DEFINE_MUTEX(xfrm6_protocol_mutex); static inline struct xfrm6_protocol __rcu **proto_handlers(u8 protocol) { switch (protocol) { case IPPROTO_ESP: return &esp6_handlers; case IPPROTO_AH: return &ah6_handlers; case IPPROTO_COMP: return &ipcomp6_handlers; } return NULL; } #define for_each_protocol_rcu(head, handler) \ for (handler = rcu_dereference(head); \ handler != NULL; \ handler = rcu_dereference(handler->next)) \ static int xfrm6_rcv_cb(struct sk_buff *skb, u8 protocol, int err) { int ret; struct xfrm6_protocol *handler; struct xfrm6_protocol __rcu **head = proto_handlers(protocol); if (!head) return 0; for_each_protocol_rcu(*proto_handlers(protocol), handler) if ((ret = handler->cb_handler(skb, err)) <= 0) return ret; return 0; } int xfrm6_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type) { int ret; struct xfrm6_protocol *handler; struct xfrm6_protocol __rcu **head = proto_handlers(nexthdr); XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = NULL; XFRM_SPI_SKB_CB(skb)->family = AF_INET6; XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct ipv6hdr, daddr); if (!head) goto out; if (!skb_dst(skb)) { const struct ipv6hdr *ip6h = ipv6_hdr(skb); int flags = RT6_LOOKUP_F_HAS_SADDR; struct dst_entry *dst; struct flowi6 fl6 = { .flowi6_iif = skb->dev->ifindex, .daddr = ip6h->daddr, .saddr = ip6h->saddr, .flowlabel = ip6_flowinfo(ip6h), .flowi6_mark = skb->mark, .flowi6_proto = ip6h->nexthdr, }; dst = ip6_route_input_lookup(dev_net(skb->dev), skb->dev, &fl6, skb, flags); if (dst->error) goto drop; skb_dst_set(skb, dst); } for_each_protocol_rcu(*head, handler) if ((ret = 
handler->input_handler(skb, nexthdr, spi, encap_type)) != -EINVAL) return ret; out: icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0); drop: kfree_skb(skb); return 0; } EXPORT_SYMBOL(xfrm6_rcv_encap); static int xfrm6_esp_rcv(struct sk_buff *skb) { int ret; struct xfrm6_protocol *handler; XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = NULL; for_each_protocol_rcu(esp6_handlers, handler) if ((ret = handler->handler(skb)) != -EINVAL) return ret; icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0); kfree_skb(skb); return 0; } static int xfrm6_esp_err(struct sk_buff *skb, struct inet6_skb_parm *opt, u8 type, u8 code, int offset, __be32 info) { struct xfrm6_protocol *handler; for_each_protocol_rcu(esp6_handlers, handler) if (!handler->err_handler(skb, opt, type, code, offset, info)) return 0; return -ENOENT; } static int xfrm6_ah_rcv(struct sk_buff *skb) { int ret; struct xfrm6_protocol *handler; XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = NULL; for_each_protocol_rcu(ah6_handlers, handler) if ((ret = handler->handler(skb)) != -EINVAL) return ret; icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0); kfree_skb(skb); return 0; } static int xfrm6_ah_err(struct sk_buff *skb, struct inet6_skb_parm *opt, u8 type, u8 code, int offset, __be32 info) { struct xfrm6_protocol *handler; for_each_protocol_rcu(ah6_handlers, handler) if (!handler->err_handler(skb, opt, type, code, offset, info)) return 0; return -ENOENT; } static int xfrm6_ipcomp_rcv(struct sk_buff *skb) { int ret; struct xfrm6_protocol *handler; XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = NULL; for_each_protocol_rcu(ipcomp6_handlers, handler) if ((ret = handler->handler(skb)) != -EINVAL) return ret; icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0); kfree_skb(skb); return 0; } static int xfrm6_ipcomp_err(struct sk_buff *skb, struct inet6_skb_parm *opt, u8 type, u8 code, int offset, __be32 info) { struct xfrm6_protocol *handler; for_each_protocol_rcu(ipcomp6_handlers, handler) if (!handler->err_handler(skb, opt, type, code, offset, info)) return 0; return -ENOENT; } static const struct inet6_protocol esp6_protocol = { .handler = xfrm6_esp_rcv, .err_handler = xfrm6_esp_err, .flags = INET6_PROTO_NOPOLICY, }; static const struct inet6_protocol ah6_protocol = { .handler = xfrm6_ah_rcv, .err_handler = xfrm6_ah_err, .flags = INET6_PROTO_NOPOLICY, }; static const struct inet6_protocol ipcomp6_protocol = { .handler = xfrm6_ipcomp_rcv, .err_handler = xfrm6_ipcomp_err, .flags = INET6_PROTO_NOPOLICY, }; static const struct xfrm_input_afinfo xfrm6_input_afinfo = { .family = AF_INET6, .callback = xfrm6_rcv_cb, }; static inline const struct inet6_protocol *netproto(unsigned char protocol) { switch (protocol) { case IPPROTO_ESP: return &esp6_protocol; case IPPROTO_AH: return &ah6_protocol; case IPPROTO_COMP: return &ipcomp6_protocol; } return NULL; } int xfrm6_protocol_register(struct xfrm6_protocol *handler, unsigned char protocol) { struct xfrm6_protocol __rcu **pprev; struct xfrm6_protocol *t; bool add_netproto = false; int ret = -EEXIST; int priority = handler->priority; if (!proto_handlers(protocol) || !netproto(protocol)) return -EINVAL; mutex_lock(&xfrm6_protocol_mutex); if (!rcu_dereference_protected(*proto_handlers(protocol), lockdep_is_held(&xfrm6_protocol_mutex))) add_netproto = true; for (pprev = proto_handlers(protocol); (t = rcu_dereference_protected(*pprev, lockdep_is_held(&xfrm6_protocol_mutex))) != NULL; pprev = &t->next) { if (t->priority < priority) break; if (t->priority == priority) goto err; } handler->next = 
*pprev; rcu_assign_pointer(*pprev, handler); ret = 0; err: mutex_unlock(&xfrm6_protocol_mutex); if (add_netproto) { if (inet6_add_protocol(netproto(protocol), protocol)) { pr_err("%s: can't add protocol\n", __func__); ret = -EAGAIN; } } return ret; } EXPORT_SYMBOL(xfrm6_protocol_register); int xfrm6_protocol_deregister(struct xfrm6_protocol *handler, unsigned char protocol) { struct xfrm6_protocol __rcu **pprev; struct xfrm6_protocol *t; int ret = -ENOENT; if (!proto_handlers(protocol) || !netproto(protocol)) return -EINVAL; mutex_lock(&xfrm6_protocol_mutex); for (pprev = proto_handlers(protocol); (t = rcu_dereference_protected(*pprev, lockdep_is_held(&xfrm6_protocol_mutex))) != NULL; pprev = &t->next) { if (t == handler) { *pprev = handler->next; ret = 0; break; } } if (!rcu_dereference_protected(*proto_handlers(protocol), lockdep_is_held(&xfrm6_protocol_mutex))) { if (inet6_del_protocol(netproto(protocol), protocol) < 0) { pr_err("%s: can't remove protocol\n", __func__); ret = -EAGAIN; } } mutex_unlock(&xfrm6_protocol_mutex); synchronize_net(); return ret; } EXPORT_SYMBOL(xfrm6_protocol_deregister); int __init xfrm6_protocol_init(void) { return xfrm_input_register_afinfo(&xfrm6_input_afinfo); } void xfrm6_protocol_fini(void) { xfrm_input_unregister_afinfo(&xfrm6_input_afinfo); } |
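/*
 * Registration sketch (illustrative, modelled on what an IPsec protocol such
 * as ESP does; the example_* names are placeholders and the snippet assumes
 * the includes at the top of this file): a protocol implementation hands
 * xfrm6_protocol_register() a struct xfrm6_protocol whose priority orders it
 * in the per-protocol handler list walked by xfrm6_rcv_encap() and
 * xfrm6_rcv_cb() above.
 */
static int example_esp6_rcv_cb(struct sk_buff *skb, int err)
{
	return 0;	/* nothing to clean up in this sketch */
}

static int example_esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
			    u8 type, u8 code, int offset, __be32 info)
{
	return 0;	/* report the error as handled */
}

static struct xfrm6_protocol example_esp6_protocol = {
	.handler	= xfrm6_rcv,	/* generic xfrm receive entry point */
	.input_handler	= xfrm_input,
	.cb_handler	= example_esp6_rcv_cb,
	.err_handler	= example_esp6_err,
	.priority	= 0,
};

static int __init example_esp6_init(void)
{
	/* Fails with -EEXIST if another handler already uses priority 0. */
	return xfrm6_protocol_register(&example_esp6_protocol, IPPROTO_ESP);
}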
804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 | // SPDX-License-Identifier: GPL-2.0-only /* * Generic pidhash and scalable, time-bounded PID allocator * * (C) 2002-2003 Nadia Yvette Chambers, IBM * (C) 2004 Nadia Yvette Chambers, Oracle * (C) 2002-2004 Ingo Molnar, Red Hat * * pid-structures are backing objects for tasks sharing a given ID to chain * against. There is very little to them aside from hashing them and * parking tasks using given ID's on a list. * * The hash is always changed with the tasklist_lock write-acquired, * and the hash is only accessed with the tasklist_lock at least * read-acquired, so there's no additional SMP locking needed here. * * We have a list of bitmap pages, which bitmaps represent the PID space. * Allocating and freeing PIDs is completely lockless. The worst-case * allocation scenario when all but one out of 1 million PIDs possible are * allocated already: the scanning of 32 list entries and at most PAGE_SIZE * bytes. The typical fastpath is a single successful setbit. Freeing is O(1). * * Pid namespaces: * (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc. * (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM * Many thanks to Oleg Nesterov for comments and help * */ #include <linux/mm.h> #include <linux/export.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/rculist.h> #include <linux/memblock.h> #include <linux/pid_namespace.h> #include <linux/init_task.h> #include <linux/syscalls.h> #include <linux/proc_ns.h> #include <linux/refcount.h> #include <linux/anon_inodes.h> #include <linux/sched/signal.h> #include <linux/sched/task.h> #include <linux/idr.h> #include <linux/pidfs.h> #include <linux/seqlock.h> #include <net/sock.h> #include <uapi/linux/pidfd.h> struct pid init_struct_pid = { .count = REFCOUNT_INIT(1), .tasks = { { .first = NULL }, { .first = NULL }, { .first = NULL }, }, .level = 0, .numbers = { { .nr = 0, .ns = &init_pid_ns, }, } }; static int pid_max_min = RESERVED_PIDS + 1; static int pid_max_max = PID_MAX_LIMIT; /* * PID-map pages start out as NULL, they get allocated upon * first use and are never deallocated. This way a low pid_max * value does not cause lots of bitmaps to be allocated, but * the scheme scales to up to 4 million PIDs, runtime. */ struct pid_namespace init_pid_ns = { .ns.count = REFCOUNT_INIT(2), .idr = IDR_INIT(init_pid_ns.idr), .pid_allocated = PIDNS_ADDING, .level = 0, .child_reaper = &init_task, .user_ns = &init_user_ns, .ns.inum = PROC_PID_INIT_INO, #ifdef CONFIG_PID_NS .ns.ops = &pidns_operations, #endif .pid_max = PID_MAX_DEFAULT, #if defined(CONFIG_SYSCTL) && defined(CONFIG_MEMFD_CREATE) .memfd_noexec_scope = MEMFD_NOEXEC_SCOPE_EXEC, #endif }; EXPORT_SYMBOL_GPL(init_pid_ns); /* * Note: disable interrupts while the pidmap_lock is held as an * interrupt might come in and do read_lock(&tasklist_lock). * * If we don't disable interrupts there is a nasty deadlock between * detach_pid()->free_pid() and another cpu that does * spin_lock(&pidmap_lock) followed by an interrupt routine that does * read_lock(&tasklist_lock); * * After we clean up the tasklist_lock and know there are no * irq handlers that take it we can leave the interrupts enabled. * For now it is easier to be safe than to prove it can't happen. 
*/ static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock); seqcount_spinlock_t pidmap_lock_seq = SEQCNT_SPINLOCK_ZERO(pidmap_lock_seq, &pidmap_lock); void put_pid(struct pid *pid) { struct pid_namespace *ns; if (!pid) return; ns = pid->numbers[pid->level].ns; if (refcount_dec_and_test(&pid->count)) { kmem_cache_free(ns->pid_cachep, pid); put_pid_ns(ns); } } EXPORT_SYMBOL_GPL(put_pid); static void delayed_put_pid(struct rcu_head *rhp) { struct pid *pid = container_of(rhp, struct pid, rcu); put_pid(pid); } void free_pid(struct pid *pid) { /* We can be called with write_lock_irq(&tasklist_lock) held */ int i; unsigned long flags; spin_lock_irqsave(&pidmap_lock, flags); for (i = 0; i <= pid->level; i++) { struct upid *upid = pid->numbers + i; struct pid_namespace *ns = upid->ns; switch (--ns->pid_allocated) { case 2: case 1: /* When all that is left in the pid namespace * is the reaper wake up the reaper. The reaper * may be sleeping in zap_pid_ns_processes(). */ wake_up_process(ns->child_reaper); break; case PIDNS_ADDING: /* Handle a fork failure of the first process */ WARN_ON(ns->child_reaper); ns->pid_allocated = 0; break; } idr_remove(&ns->idr, upid->nr); } pidfs_remove_pid(pid); spin_unlock_irqrestore(&pidmap_lock, flags); call_rcu(&pid->rcu, delayed_put_pid); } struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid, size_t set_tid_size) { struct pid *pid; enum pid_type type; int i, nr; struct pid_namespace *tmp; struct upid *upid; int retval = -ENOMEM; /* * set_tid_size contains the size of the set_tid array. Starting at * the most nested currently active PID namespace it tells alloc_pid() * which PID to set for a process in that most nested PID namespace * up to set_tid_size PID namespaces. It does not have to set the PID * for a process in all nested PID namespaces but set_tid_size must * never be greater than the current ns->level + 1. */ if (set_tid_size > ns->level + 1) return ERR_PTR(-EINVAL); pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL); if (!pid) return ERR_PTR(retval); tmp = ns; pid->level = ns->level; for (i = ns->level; i >= 0; i--) { int tid = 0; int pid_max = READ_ONCE(tmp->pid_max); if (set_tid_size) { tid = set_tid[ns->level - i]; retval = -EINVAL; if (tid < 1 || tid >= pid_max) goto out_free; /* * Also fail if a PID != 1 is requested and * no PID 1 exists. */ if (tid != 1 && !tmp->child_reaper) goto out_free; retval = -EPERM; if (!checkpoint_restore_ns_capable(tmp->user_ns)) goto out_free; set_tid_size--; } idr_preload(GFP_KERNEL); spin_lock_irq(&pidmap_lock); if (tid) { nr = idr_alloc(&tmp->idr, NULL, tid, tid + 1, GFP_ATOMIC); /* * If ENOSPC is returned it means that the PID is * alreay in use. Return EEXIST in that case. */ if (nr == -ENOSPC) nr = -EEXIST; } else { int pid_min = 1; /* * init really needs pid 1, but after reaching the * maximum wrap back to RESERVED_PIDS */ if (idr_get_cursor(&tmp->idr) > RESERVED_PIDS) pid_min = RESERVED_PIDS; /* * Store a null pointer so find_pid_ns does not find * a partially initialized PID (see below). */ nr = idr_alloc_cyclic(&tmp->idr, NULL, pid_min, pid_max, GFP_ATOMIC); } spin_unlock_irq(&pidmap_lock); idr_preload_end(); if (nr < 0) { retval = (nr == -ENOSPC) ? -EAGAIN : nr; goto out_free; } pid->numbers[i].nr = nr; pid->numbers[i].ns = tmp; tmp = tmp->parent; } /* * ENOMEM is not the most obvious choice especially for the case * where the child subreaper has already exited and the pid * namespace denies the creation of any new processes. 
But ENOMEM * is what we have exposed to userspace for a long time and it is * documented behavior for pid namespaces. So we can't easily * change it even if there were an error code better suited. */ retval = -ENOMEM; get_pid_ns(ns); refcount_set(&pid->count, 1); spin_lock_init(&pid->lock); for (type = 0; type < PIDTYPE_MAX; ++type) INIT_HLIST_HEAD(&pid->tasks[type]); init_waitqueue_head(&pid->wait_pidfd); INIT_HLIST_HEAD(&pid->inodes); upid = pid->numbers + ns->level; idr_preload(GFP_KERNEL); spin_lock_irq(&pidmap_lock); if (!(ns->pid_allocated & PIDNS_ADDING)) goto out_unlock; pidfs_add_pid(pid); for ( ; upid >= pid->numbers; --upid) { /* Make the PID visible to find_pid_ns. */ idr_replace(&upid->ns->idr, pid, upid->nr); upid->ns->pid_allocated++; } spin_unlock_irq(&pidmap_lock); idr_preload_end(); return pid; out_unlock: spin_unlock_irq(&pidmap_lock); idr_preload_end(); put_pid_ns(ns); out_free: spin_lock_irq(&pidmap_lock); while (++i <= ns->level) { upid = pid->numbers + i; idr_remove(&upid->ns->idr, upid->nr); } /* On failure to allocate the first pid, reset the state */ if (ns->pid_allocated == PIDNS_ADDING) idr_set_cursor(&ns->idr, 0); spin_unlock_irq(&pidmap_lock); kmem_cache_free(ns->pid_cachep, pid); return ERR_PTR(retval); } void disable_pid_allocation(struct pid_namespace *ns) { spin_lock_irq(&pidmap_lock); ns->pid_allocated &= ~PIDNS_ADDING; spin_unlock_irq(&pidmap_lock); } struct pid *find_pid_ns(int nr, struct pid_namespace *ns) { return idr_find(&ns->idr, nr); } EXPORT_SYMBOL_GPL(find_pid_ns); struct pid *find_vpid(int nr) { return find_pid_ns(nr, task_active_pid_ns(current)); } EXPORT_SYMBOL_GPL(find_vpid); static struct pid **task_pid_ptr(struct task_struct *task, enum pid_type type) { return (type == PIDTYPE_PID) ? &task->thread_pid : &task->signal->pids[type]; } /* * attach_pid() must be called with the tasklist_lock write-held. 
*/ void attach_pid(struct task_struct *task, enum pid_type type) { struct pid *pid = *task_pid_ptr(task, type); hlist_add_head_rcu(&task->pid_links[type], &pid->tasks[type]); } static void __change_pid(struct task_struct *task, enum pid_type type, struct pid *new) { struct pid **pid_ptr = task_pid_ptr(task, type); struct pid *pid; int tmp; pid = *pid_ptr; hlist_del_rcu(&task->pid_links[type]); *pid_ptr = new; if (type == PIDTYPE_PID) { WARN_ON_ONCE(pid_has_task(pid, PIDTYPE_PID)); wake_up_all(&pid->wait_pidfd); } for (tmp = PIDTYPE_MAX; --tmp >= 0; ) if (pid_has_task(pid, tmp)) return; free_pid(pid); } void detach_pid(struct task_struct *task, enum pid_type type) { __change_pid(task, type, NULL); } void change_pid(struct task_struct *task, enum pid_type type, struct pid *pid) { __change_pid(task, type, pid); attach_pid(task, type); } void exchange_tids(struct task_struct *left, struct task_struct *right) { struct pid *pid1 = left->thread_pid; struct pid *pid2 = right->thread_pid; struct hlist_head *head1 = &pid1->tasks[PIDTYPE_PID]; struct hlist_head *head2 = &pid2->tasks[PIDTYPE_PID]; /* Swap the single entry tid lists */ hlists_swap_heads_rcu(head1, head2); /* Swap the per task_struct pid */ rcu_assign_pointer(left->thread_pid, pid2); rcu_assign_pointer(right->thread_pid, pid1); /* Swap the cached value */ WRITE_ONCE(left->pid, pid_nr(pid2)); WRITE_ONCE(right->pid, pid_nr(pid1)); } /* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */ void transfer_pid(struct task_struct *old, struct task_struct *new, enum pid_type type) { WARN_ON_ONCE(type == PIDTYPE_PID); hlist_replace_rcu(&old->pid_links[type], &new->pid_links[type]); } struct task_struct *pid_task(struct pid *pid, enum pid_type type) { struct task_struct *result = NULL; if (pid) { struct hlist_node *first; first = rcu_dereference_check(hlist_first_rcu(&pid->tasks[type]), lockdep_tasklist_lock_is_held()); if (first) result = hlist_entry(first, struct task_struct, pid_links[(type)]); } return result; } EXPORT_SYMBOL(pid_task); /* * Must be called under rcu_read_lock(). 
*/ struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns) { RCU_LOCKDEP_WARN(!rcu_read_lock_held(), "find_task_by_pid_ns() needs rcu_read_lock() protection"); return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID); } struct task_struct *find_task_by_vpid(pid_t vnr) { return find_task_by_pid_ns(vnr, task_active_pid_ns(current)); } struct task_struct *find_get_task_by_vpid(pid_t nr) { struct task_struct *task; rcu_read_lock(); task = find_task_by_vpid(nr); if (task) get_task_struct(task); rcu_read_unlock(); return task; } struct pid *get_task_pid(struct task_struct *task, enum pid_type type) { struct pid *pid; rcu_read_lock(); pid = get_pid(rcu_dereference(*task_pid_ptr(task, type))); rcu_read_unlock(); return pid; } EXPORT_SYMBOL_GPL(get_task_pid); struct task_struct *get_pid_task(struct pid *pid, enum pid_type type) { struct task_struct *result; rcu_read_lock(); result = pid_task(pid, type); if (result) get_task_struct(result); rcu_read_unlock(); return result; } EXPORT_SYMBOL_GPL(get_pid_task); struct pid *find_get_pid(pid_t nr) { struct pid *pid; rcu_read_lock(); pid = get_pid(find_vpid(nr)); rcu_read_unlock(); return pid; } EXPORT_SYMBOL_GPL(find_get_pid); pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns) { struct upid *upid; pid_t nr = 0; if (pid && ns->level <= pid->level) { upid = &pid->numbers[ns->level]; if (upid->ns == ns) nr = upid->nr; } return nr; } EXPORT_SYMBOL_GPL(pid_nr_ns); pid_t pid_vnr(struct pid *pid) { return pid_nr_ns(pid, task_active_pid_ns(current)); } EXPORT_SYMBOL_GPL(pid_vnr); pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns) { pid_t nr = 0; rcu_read_lock(); if (!ns) ns = task_active_pid_ns(current); nr = pid_nr_ns(rcu_dereference(*task_pid_ptr(task, type)), ns); rcu_read_unlock(); return nr; } EXPORT_SYMBOL(__task_pid_nr_ns); struct pid_namespace *task_active_pid_ns(struct task_struct *tsk) { return ns_of_pid(task_pid(tsk)); } EXPORT_SYMBOL_GPL(task_active_pid_ns); /* * Used by proc to find the first pid that is greater than or equal to nr. * * If there is a pid at nr this function is exactly the same as find_pid_ns. */ struct pid *find_ge_pid(int nr, struct pid_namespace *ns) { return idr_get_next(&ns->idr, &nr); } EXPORT_SYMBOL_GPL(find_ge_pid); struct pid *pidfd_get_pid(unsigned int fd, unsigned int *flags) { CLASS(fd, f)(fd); struct pid *pid; if (fd_empty(f)) return ERR_PTR(-EBADF); pid = pidfd_pid(fd_file(f)); if (!IS_ERR(pid)) { get_pid(pid); *flags = fd_file(f)->f_flags; } return pid; } /** * pidfd_get_task() - Get the task associated with a pidfd * * @pidfd: pidfd for which to get the task * @flags: flags associated with this pidfd * * Return the task associated with @pidfd. The function takes a reference on * the returned task. The caller is responsible for releasing that reference. * * Return: On success, the task_struct associated with the pidfd. * On error, a negative errno number will be returned. */ struct task_struct *pidfd_get_task(int pidfd, unsigned int *flags) { unsigned int f_flags; struct pid *pid; struct task_struct *task; pid = pidfd_get_pid(pidfd, &f_flags); if (IS_ERR(pid)) return ERR_CAST(pid); task = get_pid_task(pid, PIDTYPE_TGID); put_pid(pid); if (!task) return ERR_PTR(-ESRCH); *flags = f_flags; return task; } /** * pidfd_create() - Create a new pid file descriptor. * * @pid: struct pid that the pidfd will reference * @flags: flags to pass * * This creates a new pid file descriptor with the O_CLOEXEC flag set. 
* * Note, that this function can only be called after the fd table has * been unshared to avoid leaking the pidfd to the new process. * * This symbol should not be explicitly exported to loadable modules. * * Return: On success, a cloexec pidfd is returned. * On error, a negative errno number will be returned. */ static int pidfd_create(struct pid *pid, unsigned int flags) { int pidfd; struct file *pidfd_file; pidfd = pidfd_prepare(pid, flags, &pidfd_file); if (pidfd < 0) return pidfd; fd_install(pidfd, pidfd_file); return pidfd; } /** * sys_pidfd_open() - Open new pid file descriptor. * * @pid: pid for which to retrieve a pidfd * @flags: flags to pass * * This creates a new pid file descriptor with the O_CLOEXEC flag set for * the task identified by @pid. Without PIDFD_THREAD flag the target task * must be a thread-group leader. * * Return: On success, a cloexec pidfd is returned. * On error, a negative errno number will be returned. */ SYSCALL_DEFINE2(pidfd_open, pid_t, pid, unsigned int, flags) { int fd; struct pid *p; if (flags & ~(PIDFD_NONBLOCK | PIDFD_THREAD)) return -EINVAL; if (pid <= 0) return -EINVAL; p = find_get_pid(pid); if (!p) return -ESRCH; fd = pidfd_create(p, flags); put_pid(p); return fd; } #ifdef CONFIG_SYSCTL static struct ctl_table_set *pid_table_root_lookup(struct ctl_table_root *root) { return &task_active_pid_ns(current)->set; } static int set_is_seen(struct ctl_table_set *set) { return &task_active_pid_ns(current)->set == set; } static int pid_table_root_permissions(struct ctl_table_header *head, const struct ctl_table *table) { struct pid_namespace *pidns = container_of(head->set, struct pid_namespace, set); int mode = table->mode; if (ns_capable(pidns->user_ns, CAP_SYS_ADMIN) || uid_eq(current_euid(), make_kuid(pidns->user_ns, 0))) mode = (mode & S_IRWXU) >> 6; else if (in_egroup_p(make_kgid(pidns->user_ns, 0))) mode = (mode & S_IRWXG) >> 3; else mode = mode & S_IROTH; return (mode << 6) | (mode << 3) | mode; } static void pid_table_root_set_ownership(struct ctl_table_header *head, kuid_t *uid, kgid_t *gid) { struct pid_namespace *pidns = container_of(head->set, struct pid_namespace, set); kuid_t ns_root_uid; kgid_t ns_root_gid; ns_root_uid = make_kuid(pidns->user_ns, 0); if (uid_valid(ns_root_uid)) *uid = ns_root_uid; ns_root_gid = make_kgid(pidns->user_ns, 0); if (gid_valid(ns_root_gid)) *gid = ns_root_gid; } static struct ctl_table_root pid_table_root = { .lookup = pid_table_root_lookup, .permissions = pid_table_root_permissions, .set_ownership = pid_table_root_set_ownership, }; static const struct ctl_table pid_table[] = { { .procname = "pid_max", .data = &init_pid_ns.pid_max, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &pid_max_min, .extra2 = &pid_max_max, }, }; #endif int register_pidns_sysctls(struct pid_namespace *pidns) { #ifdef CONFIG_SYSCTL struct ctl_table *tbl; setup_sysctl_set(&pidns->set, &pid_table_root, set_is_seen); tbl = kmemdup(pid_table, sizeof(pid_table), GFP_KERNEL); if (!tbl) return -ENOMEM; tbl->data = &pidns->pid_max; pidns->pid_max = min(pid_max_max, max_t(int, pidns->pid_max, PIDS_PER_CPU_DEFAULT * num_possible_cpus())); pidns->sysctls = __register_sysctl_table(&pidns->set, "kernel", tbl, ARRAY_SIZE(pid_table)); if (!pidns->sysctls) { kfree(tbl); retire_sysctl_set(&pidns->set); return -ENOMEM; } #endif return 0; } void unregister_pidns_sysctls(struct pid_namespace *pidns) { #ifdef CONFIG_SYSCTL const struct ctl_table *tbl; tbl = pidns->sysctls->ctl_table_arg; 
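	/*
	 * The table was kmemdup()ed in register_pidns_sysctls(); save the
	 * pointer before unregistering drops the header, then free it once
	 * the sysctl core no longer references it.
	 */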
unregister_sysctl_table(pidns->sysctls); retire_sysctl_set(&pidns->set); kfree(tbl); #endif } void __init pid_idr_init(void) { /* Verify no one has done anything silly: */ BUILD_BUG_ON(PID_MAX_LIMIT >= PIDNS_ADDING); /* bump default and minimum pid_max based on number of cpus */ init_pid_ns.pid_max = min(pid_max_max, max_t(int, init_pid_ns.pid_max, PIDS_PER_CPU_DEFAULT * num_possible_cpus())); pid_max_min = max_t(int, pid_max_min, PIDS_PER_CPU_MIN * num_possible_cpus()); pr_info("pid_max: default: %u minimum: %u\n", init_pid_ns.pid_max, pid_max_min); idr_init(&init_pid_ns.idr); init_pid_ns.pid_cachep = kmem_cache_create("pid", struct_size_t(struct pid, numbers, 1), __alignof__(struct pid), SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT, NULL); } static __init int pid_namespace_sysctl_init(void) { #ifdef CONFIG_SYSCTL /* "kernel" directory will have already been initialized. */ BUG_ON(register_pidns_sysctls(&init_pid_ns)); #endif return 0; } subsys_initcall(pid_namespace_sysctl_init); static struct file *__pidfd_fget(struct task_struct *task, int fd) { struct file *file; int ret; ret = down_read_killable(&task->signal->exec_update_lock); if (ret) return ERR_PTR(ret); if (ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS)) file = fget_task(task, fd); else file = ERR_PTR(-EPERM); up_read(&task->signal->exec_update_lock); if (!file) { /* * It is possible that the target thread is exiting; it can be * either: * 1. before exit_signals(), which gives a real fd * 2. before exit_files() takes the task_lock() gives a real fd * 3. after exit_files() releases task_lock(), ->files is NULL; * this has PF_EXITING, since it was set in exit_signals(), * __pidfd_fget() returns EBADF. * In case 3 we get EBADF, but that really means ESRCH, since * the task is currently exiting and has freed its files * struct, so we fix it up. */ if (task->flags & PF_EXITING) file = ERR_PTR(-ESRCH); else file = ERR_PTR(-EBADF); } return file; } static int pidfd_getfd(struct pid *pid, int fd) { struct task_struct *task; struct file *file; int ret; task = get_pid_task(pid, PIDTYPE_PID); if (!task) return -ESRCH; file = __pidfd_fget(task, fd); put_task_struct(task); if (IS_ERR(file)) return PTR_ERR(file); ret = receive_fd(file, NULL, O_CLOEXEC); fput(file); return ret; } /** * sys_pidfd_getfd() - Get a file descriptor from another process * * @pidfd: the pidfd file descriptor of the process * @fd: the file descriptor number to get * @flags: flags on how to get the fd (reserved) * * This syscall gets a copy of a file descriptor from another process * based on the pidfd, and file descriptor number. It requires that * the calling process has the ability to ptrace the process represented * by the pidfd. The process which is having its file descriptor copied * is otherwise unaffected. * * Return: On success, a cloexec file descriptor is returned. * On error, a negative errno number will be returned. */ SYSCALL_DEFINE3(pidfd_getfd, int, pidfd, int, fd, unsigned int, flags) { struct pid *pid; /* flags is currently unused - make sure it's unset */ if (flags) return -EINVAL; CLASS(fd, f)(pidfd); if (fd_empty(f)) return -EBADF; pid = pidfd_pid(fd_file(f)); if (IS_ERR(pid)) return PTR_ERR(pid); return pidfd_getfd(pid, fd); } |
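/*
 * User-space sketch (not part of this file): exercising the two syscalls
 * implemented above.  pidfd_open() turns a pid into a pidfd, and
 * pidfd_getfd() copies one of the target's file descriptors into the caller,
 * subject to the ptrace check in __pidfd_fget().  Assumes kernel/libc headers
 * new enough to define SYS_pidfd_open and SYS_pidfd_getfd; target_pid and
 * target_fd are placeholders.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	pid_t target_pid = 1234;	/* placeholder pid */
	int target_fd = 0;		/* fd number inside the target */
	int pidfd, fd;

	pidfd = syscall(SYS_pidfd_open, target_pid, 0);
	if (pidfd < 0) {
		perror("pidfd_open");
		return 1;
	}

	fd = syscall(SYS_pidfd_getfd, pidfd, target_fd, 0);
	if (fd < 0) {
		perror("pidfd_getfd");	/* commonly EPERM without ptrace rights */
		return 1;
	}

	printf("got local fd %d referring to the target's fd %d\n", fd, target_fd);
	close(fd);
	close(pidfd);
	return 0;
}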
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Packet matching code.
 *
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 * Copyright (c) 2006-2010 Patrick McHardy <kaber@trash.net>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/capability.h>
#include <linux/in.h>
#include <linux/skbuff.h>
#include <linux/kmod.h>
#include <linux/vmalloc.h>
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/poison.h>
#include <net/ipv6.h>
#include <net/compat.h>
#include <linux/uaccess.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/err.h>
#include <linux/cpumask.h>

#include <linux/netfilter_ipv6/ip6_tables.h>
#include <linux/netfilter/x_tables.h>
#include <net/netfilter/nf_log.h>
#include "../../netfilter/xt_repldata.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
MODULE_DESCRIPTION("IPv6 packet filter");

void *ip6t_alloc_initial_table(const struct xt_table *info)
{
	return xt_alloc_initial_table(ip6t, IP6T);
}
EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table);

/* Returns whether matches rule or not.
*/ /* Performance critical - called for every packet */ static inline bool ip6_packet_match(const struct sk_buff *skb, const char *indev, const char *outdev, const struct ip6t_ip6 *ip6info, unsigned int *protoff, u16 *fragoff, bool *hotdrop) { unsigned long ret; const struct ipv6hdr *ipv6 = ipv6_hdr(skb); if (NF_INVF(ip6info, IP6T_INV_SRCIP, ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk, &ip6info->src)) || NF_INVF(ip6info, IP6T_INV_DSTIP, ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk, &ip6info->dst))) return false; ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask); if (NF_INVF(ip6info, IP6T_INV_VIA_IN, ret != 0)) return false; ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask); if (NF_INVF(ip6info, IP6T_INV_VIA_OUT, ret != 0)) return false; /* ... might want to do something with class and flowlabel here ... */ /* look for the desired protocol header */ if (ip6info->flags & IP6T_F_PROTO) { int protohdr; unsigned short _frag_off; protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off, NULL); if (protohdr < 0) { if (_frag_off == 0) *hotdrop = true; return false; } *fragoff = _frag_off; if (ip6info->proto == protohdr) { if (ip6info->invflags & IP6T_INV_PROTO) return false; return true; } /* We need match for the '-p all', too! */ if ((ip6info->proto != 0) && !(ip6info->invflags & IP6T_INV_PROTO)) return false; } return true; } /* should be ip6 safe */ static bool ip6_checkentry(const struct ip6t_ip6 *ipv6) { if (ipv6->flags & ~IP6T_F_MASK) return false; if (ipv6->invflags & ~IP6T_INV_MASK) return false; return true; } static unsigned int ip6t_error(struct sk_buff *skb, const struct xt_action_param *par) { net_info_ratelimited("error: `%s'\n", (const char *)par->targinfo); return NF_DROP; } static inline struct ip6t_entry * get_entry(const void *base, unsigned int offset) { return (struct ip6t_entry *)(base + offset); } /* All zeroes == unconditional rule. */ /* Mildly perf critical (only if packet tracing is on) */ static inline bool unconditional(const struct ip6t_entry *e) { static const struct ip6t_ip6 uncond; return e->target_offset == sizeof(struct ip6t_entry) && memcmp(&e->ipv6, &uncond, sizeof(uncond)) == 0; } static inline const struct xt_entry_target * ip6t_get_target_c(const struct ip6t_entry *e) { return ip6t_get_target((struct ip6t_entry *)e); } #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) /* This cries for unification! 
*/ static const char *const hooknames[] = { [NF_INET_PRE_ROUTING] = "PREROUTING", [NF_INET_LOCAL_IN] = "INPUT", [NF_INET_FORWARD] = "FORWARD", [NF_INET_LOCAL_OUT] = "OUTPUT", [NF_INET_POST_ROUTING] = "POSTROUTING", }; enum nf_ip_trace_comments { NF_IP6_TRACE_COMMENT_RULE, NF_IP6_TRACE_COMMENT_RETURN, NF_IP6_TRACE_COMMENT_POLICY, }; static const char *const comments[] = { [NF_IP6_TRACE_COMMENT_RULE] = "rule", [NF_IP6_TRACE_COMMENT_RETURN] = "return", [NF_IP6_TRACE_COMMENT_POLICY] = "policy", }; static const struct nf_loginfo trace_loginfo = { .type = NF_LOG_TYPE_LOG, .u = { .log = { .level = LOGLEVEL_WARNING, .logflags = NF_LOG_DEFAULT_MASK, }, }, }; /* Mildly perf critical (only if packet tracing is on) */ static inline int get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e, const char *hookname, const char **chainname, const char **comment, unsigned int *rulenum) { const struct xt_standard_target *t = (void *)ip6t_get_target_c(s); if (strcmp(t->target.u.kernel.target->name, XT_ERROR_TARGET) == 0) { /* Head of user chain: ERROR target with chainname */ *chainname = t->target.data; (*rulenum) = 0; } else if (s == e) { (*rulenum)++; if (unconditional(s) && strcmp(t->target.u.kernel.target->name, XT_STANDARD_TARGET) == 0 && t->verdict < 0) { /* Tail of chains: STANDARD target (return/policy) */ *comment = *chainname == hookname ? comments[NF_IP6_TRACE_COMMENT_POLICY] : comments[NF_IP6_TRACE_COMMENT_RETURN]; } return 1; } else (*rulenum)++; return 0; } static void trace_packet(struct net *net, const struct sk_buff *skb, unsigned int hook, const struct net_device *in, const struct net_device *out, const char *tablename, const struct xt_table_info *private, const struct ip6t_entry *e) { const struct ip6t_entry *root; const char *hookname, *chainname, *comment; const struct ip6t_entry *iter; unsigned int rulenum = 0; root = get_entry(private->entries, private->hook_entry[hook]); hookname = chainname = hooknames[hook]; comment = comments[NF_IP6_TRACE_COMMENT_RULE]; xt_entry_foreach(iter, root, private->size - private->hook_entry[hook]) if (get_chainname_rulenum(iter, e, hookname, &chainname, &comment, &rulenum) != 0) break; nf_log_trace(net, AF_INET6, hook, skb, in, out, &trace_loginfo, "TRACE: %s:%s:%s:%u ", tablename, chainname, comment, rulenum); } #endif static inline struct ip6t_entry * ip6t_next_entry(const struct ip6t_entry *entry) { return (void *)entry + entry->next_offset; } /* Returns one of the generic firewall policies, like NF_ACCEPT. */ unsigned int ip6t_do_table(void *priv, struct sk_buff *skb, const struct nf_hook_state *state) { const struct xt_table *table = priv; unsigned int hook = state->hook; static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long)))); /* Initializing verdict to NF_DROP keeps gcc happy. */ unsigned int verdict = NF_DROP; const char *indev, *outdev; const void *table_base; struct ip6t_entry *e, **jumpstack; unsigned int stackidx, cpu; const struct xt_table_info *private; struct xt_action_param acpar; unsigned int addend; /* Initialization */ stackidx = 0; indev = state->in ? state->in->name : nulldevname; outdev = state->out ? state->out->name : nulldevname; /* We handle fragments by dealing with the first fragment as * if it was a normal packet. All other fragments are treated * normally, except that they will NEVER match rules that ask * things we don't know, ie. tcp syn flag or ports). If the * rule is also a fragment-specific rule, non-fragments won't * match it. 
*/ acpar.fragoff = 0; acpar.hotdrop = false; acpar.state = state; WARN_ON(!(table->valid_hooks & (1 << hook))); local_bh_disable(); addend = xt_write_recseq_begin(); private = READ_ONCE(table->private); /* Address dependency. */ cpu = smp_processor_id(); table_base = private->entries; jumpstack = (struct ip6t_entry **)private->jumpstack[cpu]; /* Switch to alternate jumpstack if we're being invoked via TEE. * TEE issues XT_CONTINUE verdict on original skb so we must not * clobber the jumpstack. * * For recursion via REJECT or SYNPROXY the stack will be clobbered * but it is no problem since absolute verdict is issued by these. */ if (static_key_false(&xt_tee_enabled)) jumpstack += private->stacksize * __this_cpu_read(nf_skb_duplicated); e = get_entry(table_base, private->hook_entry[hook]); do { const struct xt_entry_target *t; const struct xt_entry_match *ematch; struct xt_counters *counter; WARN_ON(!e); acpar.thoff = 0; if (!ip6_packet_match(skb, indev, outdev, &e->ipv6, &acpar.thoff, &acpar.fragoff, &acpar.hotdrop)) { no_match: e = ip6t_next_entry(e); continue; } xt_ematch_foreach(ematch, e) { acpar.match = ematch->u.kernel.match; acpar.matchinfo = ematch->data; if (!acpar.match->match(skb, &acpar)) goto no_match; } counter = xt_get_this_cpu_counter(&e->counters); ADD_COUNTER(*counter, skb->len, 1); t = ip6t_get_target_c(e); WARN_ON(!t->u.kernel.target); #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) /* The packet is traced: log it */ if (unlikely(skb->nf_trace)) trace_packet(state->net, skb, hook, state->in, state->out, table->name, private, e); #endif /* Standard target? */ if (!t->u.kernel.target->target) { int v; v = ((struct xt_standard_target *)t)->verdict; if (v < 0) { /* Pop from stack? */ if (v != XT_RETURN) { verdict = (unsigned int)(-v) - 1; break; } if (stackidx == 0) e = get_entry(table_base, private->underflow[hook]); else e = ip6t_next_entry(jumpstack[--stackidx]); continue; } if (table_base + v != ip6t_next_entry(e) && !(e->ipv6.flags & IP6T_F_GOTO)) { if (unlikely(stackidx >= private->stacksize)) { verdict = NF_DROP; break; } jumpstack[stackidx++] = e; } e = get_entry(table_base, v); continue; } acpar.target = t->u.kernel.target; acpar.targinfo = t->data; verdict = t->u.kernel.target->target(skb, &acpar); if (verdict == XT_CONTINUE) e = ip6t_next_entry(e); else /* Verdict */ break; } while (!acpar.hotdrop); xt_write_recseq_end(addend); local_bh_enable(); if (acpar.hotdrop) return NF_DROP; else return verdict; } /* Figures out from what hook each rule can be called: returns 0 if there are loops. Puts hook bitmask in comefrom. */ static int mark_source_chains(const struct xt_table_info *newinfo, unsigned int valid_hooks, void *entry0, unsigned int *offsets) { unsigned int hook; /* No recursion; use packet counter to save back ptrs (reset to 0 as we leave), and comefrom to save source hook bitmask */ for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) { unsigned int pos = newinfo->hook_entry[hook]; struct ip6t_entry *e = entry0 + pos; if (!(valid_hooks & (1 << hook))) continue; /* Set initial back pointer. */ e->counters.pcnt = pos; for (;;) { const struct xt_standard_target *t = (void *)ip6t_get_target_c(e); int visited = e->comefrom & (1 << hook); if (e->comefrom & (1 << NF_INET_NUMHOOKS)) return 0; e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS)); /* Unconditional return/END. */ if ((unconditional(e) && (strcmp(t->target.u.user.name, XT_STANDARD_TARGET) == 0) && t->verdict < 0) || visited) { unsigned int oldpos, size; /* Return: backtrack through the last big jump. 
*/ do { e->comefrom ^= (1<<NF_INET_NUMHOOKS); oldpos = pos; pos = e->counters.pcnt; e->counters.pcnt = 0; /* We're at the start. */ if (pos == oldpos) goto next; e = entry0 + pos; } while (oldpos == pos + e->next_offset); /* Move along one */ size = e->next_offset; e = entry0 + pos + size; if (pos + size >= newinfo->size) return 0; e->counters.pcnt = pos; pos += size; } else { int newpos = t->verdict; if (strcmp(t->target.u.user.name, XT_STANDARD_TARGET) == 0 && newpos >= 0) { /* This a jump; chase it. */ if (!xt_find_jump_offset(offsets, newpos, newinfo->number)) return 0; } else { /* ... this is a fallthru */ newpos = pos + e->next_offset; if (newpos >= newinfo->size) return 0; } e = entry0 + newpos; e->counters.pcnt = pos; pos = newpos; } } next: ; } return 1; } static void cleanup_match(struct xt_entry_match *m, struct net *net) { struct xt_mtdtor_param par; par.net = net; par.match = m->u.kernel.match; par.matchinfo = m->data; par.family = NFPROTO_IPV6; if (par.match->destroy != NULL) par.match->destroy(&par); module_put(par.match->me); } static int check_match(struct xt_entry_match *m, struct xt_mtchk_param *par) { const struct ip6t_ip6 *ipv6 = par->entryinfo; par->match = m->u.kernel.match; par->matchinfo = m->data; return xt_check_match(par, m->u.match_size - sizeof(*m), ipv6->proto, ipv6->invflags & IP6T_INV_PROTO); } static int find_check_match(struct xt_entry_match *m, struct xt_mtchk_param *par) { struct xt_match *match; int ret; match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name, m->u.user.revision); if (IS_ERR(match)) return PTR_ERR(match); m->u.kernel.match = match; ret = check_match(m, par); if (ret) goto err; return 0; err: module_put(m->u.kernel.match->me); return ret; } static int check_target(struct ip6t_entry *e, struct net *net, const char *name) { struct xt_entry_target *t = ip6t_get_target(e); struct xt_tgchk_param par = { .net = net, .table = name, .entryinfo = e, .target = t->u.kernel.target, .targinfo = t->data, .hook_mask = e->comefrom, .family = NFPROTO_IPV6, }; return xt_check_target(&par, t->u.target_size - sizeof(*t), e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO); } static int find_check_entry(struct ip6t_entry *e, struct net *net, const char *name, unsigned int size, struct xt_percpu_counter_alloc_state *alloc_state) { struct xt_entry_target *t; struct xt_target *target; int ret; unsigned int j; struct xt_mtchk_param mtpar; struct xt_entry_match *ematch; if (!xt_percpu_counter_alloc(alloc_state, &e->counters)) return -ENOMEM; j = 0; memset(&mtpar, 0, sizeof(mtpar)); mtpar.net = net; mtpar.table = name; mtpar.entryinfo = &e->ipv6; mtpar.hook_mask = e->comefrom; mtpar.family = NFPROTO_IPV6; xt_ematch_foreach(ematch, e) { ret = find_check_match(ematch, &mtpar); if (ret != 0) goto cleanup_matches; ++j; } t = ip6t_get_target(e); target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name, t->u.user.revision); if (IS_ERR(target)) { ret = PTR_ERR(target); goto cleanup_matches; } t->u.kernel.target = target; ret = check_target(e, net, name); if (ret) goto err; return 0; err: module_put(t->u.kernel.target->me); cleanup_matches: xt_ematch_foreach(ematch, e) { if (j-- == 0) break; cleanup_match(ematch, net); } xt_percpu_counter_free(&e->counters); return ret; } static bool check_underflow(const struct ip6t_entry *e) { const struct xt_entry_target *t; unsigned int verdict; if (!unconditional(e)) return false; t = ip6t_get_target_c(e); if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0) return false; verdict = ((struct xt_standard_target *)t)->verdict; 
verdict = -verdict - 1; return verdict == NF_DROP || verdict == NF_ACCEPT; } static int check_entry_size_and_hooks(struct ip6t_entry *e, struct xt_table_info *newinfo, const unsigned char *base, const unsigned char *limit, const unsigned int *hook_entries, const unsigned int *underflows, unsigned int valid_hooks) { unsigned int h; int err; if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 || (unsigned char *)e + sizeof(struct ip6t_entry) >= limit || (unsigned char *)e + e->next_offset > limit) return -EINVAL; if (e->next_offset < sizeof(struct ip6t_entry) + sizeof(struct xt_entry_target)) return -EINVAL; if (!ip6_checkentry(&e->ipv6)) return -EINVAL; err = xt_check_entry_offsets(e, e->elems, e->target_offset, e->next_offset); if (err) return err; /* Check hooks & underflows */ for (h = 0; h < NF_INET_NUMHOOKS; h++) { if (!(valid_hooks & (1 << h))) continue; if ((unsigned char *)e - base == hook_entries[h]) newinfo->hook_entry[h] = hook_entries[h]; if ((unsigned char *)e - base == underflows[h]) { if (!check_underflow(e)) return -EINVAL; newinfo->underflow[h] = underflows[h]; } } /* Clear counters and comefrom */ e->counters = ((struct xt_counters) { 0, 0 }); e->comefrom = 0; return 0; } static void cleanup_entry(struct ip6t_entry *e, struct net *net) { struct xt_tgdtor_param par; struct xt_entry_target *t; struct xt_entry_match *ematch; /* Cleanup all matches */ xt_ematch_foreach(ematch, e) cleanup_match(ematch, net); t = ip6t_get_target(e); par.net = net; par.target = t->u.kernel.target; par.targinfo = t->data; par.family = NFPROTO_IPV6; if (par.target->destroy != NULL) par.target->destroy(&par); module_put(par.target->me); xt_percpu_counter_free(&e->counters); } /* Checks and translates the user-supplied table segment (held in newinfo) */ static int translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0, const struct ip6t_replace *repl) { struct xt_percpu_counter_alloc_state alloc_state = { 0 }; struct ip6t_entry *iter; unsigned int *offsets; unsigned int i; int ret = 0; newinfo->size = repl->size; newinfo->number = repl->num_entries; /* Init all hooks to impossible value. */ for (i = 0; i < NF_INET_NUMHOOKS; i++) { newinfo->hook_entry[i] = 0xFFFFFFFF; newinfo->underflow[i] = 0xFFFFFFFF; } offsets = xt_alloc_entry_offsets(newinfo->number); if (!offsets) return -ENOMEM; i = 0; /* Walk through entries, checking offsets. 
*/ xt_entry_foreach(iter, entry0, newinfo->size) { ret = check_entry_size_and_hooks(iter, newinfo, entry0, entry0 + repl->size, repl->hook_entry, repl->underflow, repl->valid_hooks); if (ret != 0) goto out_free; if (i < repl->num_entries) offsets[i] = (void *)iter - entry0; ++i; if (strcmp(ip6t_get_target(iter)->u.user.name, XT_ERROR_TARGET) == 0) ++newinfo->stacksize; } ret = -EINVAL; if (i != repl->num_entries) goto out_free; ret = xt_check_table_hooks(newinfo, repl->valid_hooks); if (ret) goto out_free; if (!mark_source_chains(newinfo, repl->valid_hooks, entry0, offsets)) { ret = -ELOOP; goto out_free; } kvfree(offsets); /* Finally, each sanity check must pass */ i = 0; xt_entry_foreach(iter, entry0, newinfo->size) { ret = find_check_entry(iter, net, repl->name, repl->size, &alloc_state); if (ret != 0) break; ++i; } if (ret != 0) { xt_entry_foreach(iter, entry0, newinfo->size) { if (i-- == 0) break; cleanup_entry(iter, net); } return ret; } return ret; out_free: kvfree(offsets); return ret; } static void get_counters(const struct xt_table_info *t, struct xt_counters counters[]) { struct ip6t_entry *iter; unsigned int cpu; unsigned int i; for_each_possible_cpu(cpu) { seqcount_t *s = &per_cpu(xt_recseq, cpu); i = 0; xt_entry_foreach(iter, t->entries, t->size) { struct xt_counters *tmp; u64 bcnt, pcnt; unsigned int start; tmp = xt_get_per_cpu_counter(&iter->counters, cpu); do { start = read_seqcount_begin(s); bcnt = tmp->bcnt; pcnt = tmp->pcnt; } while (read_seqcount_retry(s, start)); ADD_COUNTER(counters[i], bcnt, pcnt); ++i; cond_resched(); } } } static void get_old_counters(const struct xt_table_info *t, struct xt_counters counters[]) { struct ip6t_entry *iter; unsigned int cpu, i; for_each_possible_cpu(cpu) { i = 0; xt_entry_foreach(iter, t->entries, t->size) { const struct xt_counters *tmp; tmp = xt_get_per_cpu_counter(&iter->counters, cpu); ADD_COUNTER(counters[i], tmp->bcnt, tmp->pcnt); ++i; } cond_resched(); } } static struct xt_counters *alloc_counters(const struct xt_table *table) { unsigned int countersize; struct xt_counters *counters; const struct xt_table_info *private = table->private; /* We need atomic snapshot of counters: rest doesn't change (other than comefrom, which userspace doesn't care about). */ countersize = sizeof(struct xt_counters) * private->number; counters = vzalloc(countersize); if (counters == NULL) return ERR_PTR(-ENOMEM); get_counters(private, counters); return counters; } static int copy_entries_to_user(unsigned int total_size, const struct xt_table *table, void __user *userptr) { unsigned int off, num; const struct ip6t_entry *e; struct xt_counters *counters; const struct xt_table_info *private = table->private; int ret = 0; const void *loc_cpu_entry; counters = alloc_counters(table); if (IS_ERR(counters)) return PTR_ERR(counters); loc_cpu_entry = private->entries; /* FIXME: use iterator macros --RR */ /* ... 
then go back and fix counters and names */ for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){ unsigned int i; const struct xt_entry_match *m; const struct xt_entry_target *t; e = loc_cpu_entry + off; if (copy_to_user(userptr + off, e, sizeof(*e))) { ret = -EFAULT; goto free_counters; } if (copy_to_user(userptr + off + offsetof(struct ip6t_entry, counters), &counters[num], sizeof(counters[num])) != 0) { ret = -EFAULT; goto free_counters; } for (i = sizeof(struct ip6t_entry); i < e->target_offset; i += m->u.match_size) { m = (void *)e + i; if (xt_match_to_user(m, userptr + off + i)) { ret = -EFAULT; goto free_counters; } } t = ip6t_get_target_c(e); if (xt_target_to_user(t, userptr + off + e->target_offset)) { ret = -EFAULT; goto free_counters; } } free_counters: vfree(counters); return ret; } #ifdef CONFIG_NETFILTER_XTABLES_COMPAT static void compat_standard_from_user(void *dst, const void *src) { int v = *(compat_int_t *)src; if (v > 0) v += xt_compat_calc_jump(AF_INET6, v); memcpy(dst, &v, sizeof(v)); } static int compat_standard_to_user(void __user *dst, const void *src) { compat_int_t cv = *(int *)src; if (cv > 0) cv -= xt_compat_calc_jump(AF_INET6, cv); return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0; } static int compat_calc_entry(const struct ip6t_entry *e, const struct xt_table_info *info, const void *base, struct xt_table_info *newinfo) { const struct xt_entry_match *ematch; const struct xt_entry_target *t; unsigned int entry_offset; int off, i, ret; off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry); entry_offset = (void *)e - base; xt_ematch_foreach(ematch, e) off += xt_compat_match_offset(ematch->u.kernel.match); t = ip6t_get_target_c(e); off += xt_compat_target_offset(t->u.kernel.target); newinfo->size -= off; ret = xt_compat_add_offset(AF_INET6, entry_offset, off); if (ret) return ret; for (i = 0; i < NF_INET_NUMHOOKS; i++) { if (info->hook_entry[i] && (e < (struct ip6t_entry *)(base + info->hook_entry[i]))) newinfo->hook_entry[i] -= off; if (info->underflow[i] && (e < (struct ip6t_entry *)(base + info->underflow[i]))) newinfo->underflow[i] -= off; } return 0; } static int compat_table_info(const struct xt_table_info *info, struct xt_table_info *newinfo) { struct ip6t_entry *iter; const void *loc_cpu_entry; int ret; if (!newinfo || !info) return -EINVAL; /* we dont care about newinfo->entries */ memcpy(newinfo, info, offsetof(struct xt_table_info, entries)); newinfo->initial_entries = 0; loc_cpu_entry = info->entries; ret = xt_compat_init_offsets(AF_INET6, info->number); if (ret) return ret; xt_entry_foreach(iter, loc_cpu_entry, info->size) { ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo); if (ret != 0) return ret; } return 0; } #endif static int get_info(struct net *net, void __user *user, const int *len) { char name[XT_TABLE_MAXNAMELEN]; struct xt_table *t; int ret; if (*len != sizeof(struct ip6t_getinfo)) return -EINVAL; if (copy_from_user(name, user, sizeof(name)) != 0) return -EFAULT; name[XT_TABLE_MAXNAMELEN-1] = '\0'; #ifdef CONFIG_NETFILTER_XTABLES_COMPAT if (in_compat_syscall()) xt_compat_lock(AF_INET6); #endif t = xt_request_find_table_lock(net, AF_INET6, name); if (!IS_ERR(t)) { struct ip6t_getinfo info; const struct xt_table_info *private = t->private; #ifdef CONFIG_NETFILTER_XTABLES_COMPAT struct xt_table_info tmp; if (in_compat_syscall()) { ret = compat_table_info(private, &tmp); xt_compat_flush_offsets(AF_INET6); private = &tmp; } #endif memset(&info, 0, sizeof(info)); info.valid_hooks = t->valid_hooks; 
memcpy(info.hook_entry, private->hook_entry, sizeof(info.hook_entry)); memcpy(info.underflow, private->underflow, sizeof(info.underflow)); info.num_entries = private->number; info.size = private->size; strcpy(info.name, name); if (copy_to_user(user, &info, *len) != 0) ret = -EFAULT; else ret = 0; xt_table_unlock(t); module_put(t->me); } else ret = PTR_ERR(t); #ifdef CONFIG_NETFILTER_XTABLES_COMPAT if (in_compat_syscall()) xt_compat_unlock(AF_INET6); #endif return ret; } static int get_entries(struct net *net, struct ip6t_get_entries __user *uptr, const int *len) { int ret; struct ip6t_get_entries get; struct xt_table *t; if (*len < sizeof(get)) return -EINVAL; if (copy_from_user(&get, uptr, sizeof(get)) != 0) return -EFAULT; if (*len != sizeof(struct ip6t_get_entries) + get.size) return -EINVAL; get.name[sizeof(get.name) - 1] = '\0'; t = xt_find_table_lock(net, AF_INET6, get.name); if (!IS_ERR(t)) { struct xt_table_info *private = t->private; if (get.size == private->size) ret = copy_entries_to_user(private->size, t, uptr->entrytable); else ret = -EAGAIN; module_put(t->me); xt_table_unlock(t); } else ret = PTR_ERR(t); return ret; } static int __do_replace(struct net *net, const char *name, unsigned int valid_hooks, struct xt_table_info *newinfo, unsigned int num_counters, void __user *counters_ptr) { int ret; struct xt_table *t; struct xt_table_info *oldinfo; struct xt_counters *counters; struct ip6t_entry *iter; counters = xt_counters_alloc(num_counters); if (!counters) { ret = -ENOMEM; goto out; } t = xt_request_find_table_lock(net, AF_INET6, name); if (IS_ERR(t)) { ret = PTR_ERR(t); goto free_newinfo_counters_untrans; } /* You lied! */ if (valid_hooks != t->valid_hooks) { ret = -EINVAL; goto put_module; } oldinfo = xt_replace_table(t, num_counters, newinfo, &ret); if (!oldinfo) goto put_module; /* Update module usage count based on number of rules */ if ((oldinfo->number > oldinfo->initial_entries) || (newinfo->number <= oldinfo->initial_entries)) module_put(t->me); if ((oldinfo->number > oldinfo->initial_entries) && (newinfo->number <= oldinfo->initial_entries)) module_put(t->me); xt_table_unlock(t); get_old_counters(oldinfo, counters); /* Decrease module usage counts and free resource */ xt_entry_foreach(iter, oldinfo->entries, oldinfo->size) cleanup_entry(iter, net); xt_free_table_info(oldinfo); if (copy_to_user(counters_ptr, counters, sizeof(struct xt_counters) * num_counters) != 0) { /* Silent error, can't fail, new table is already in place */ net_warn_ratelimited("ip6tables: counters copy to user failed while replacing table\n"); } vfree(counters); return 0; put_module: module_put(t->me); xt_table_unlock(t); free_newinfo_counters_untrans: vfree(counters); out: return ret; } static int do_replace(struct net *net, sockptr_t arg, unsigned int len) { int ret; struct ip6t_replace tmp; struct xt_table_info *newinfo; void *loc_cpu_entry; struct ip6t_entry *iter; if (len < sizeof(tmp)) return -EINVAL; if (copy_from_sockptr(&tmp, arg, sizeof(tmp)) != 0) return -EFAULT; /* overflow check */ if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) return -ENOMEM; if (tmp.num_counters == 0) return -EINVAL; if ((u64)len < (u64)tmp.size + sizeof(tmp)) return -EINVAL; tmp.name[sizeof(tmp.name)-1] = 0; newinfo = xt_alloc_table_info(tmp.size); if (!newinfo) return -ENOMEM; loc_cpu_entry = newinfo->entries; if (copy_from_sockptr_offset(loc_cpu_entry, arg, sizeof(tmp), tmp.size) != 0) { ret = -EFAULT; goto free_newinfo; } ret = translate_table(net, newinfo, loc_cpu_entry, &tmp); if (ret != 0) 
goto free_newinfo; ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo, tmp.num_counters, tmp.counters); if (ret) goto free_newinfo_untrans; return 0; free_newinfo_untrans: xt_entry_foreach(iter, loc_cpu_entry, newinfo->size) cleanup_entry(iter, net); free_newinfo: xt_free_table_info(newinfo); return ret; } static int do_add_counters(struct net *net, sockptr_t arg, unsigned int len) { unsigned int i; struct xt_counters_info tmp; struct xt_counters *paddc; struct xt_table *t; const struct xt_table_info *private; int ret = 0; struct ip6t_entry *iter; unsigned int addend; paddc = xt_copy_counters(arg, len, &tmp); if (IS_ERR(paddc)) return PTR_ERR(paddc); t = xt_find_table_lock(net, AF_INET6, tmp.name); if (IS_ERR(t)) { ret = PTR_ERR(t); goto free; } local_bh_disable(); private = t->private; if (private->number != tmp.num_counters) { ret = -EINVAL; goto unlock_up_free; } i = 0; addend = xt_write_recseq_begin(); xt_entry_foreach(iter, private->entries, private->size) { struct xt_counters *tmp; tmp = xt_get_this_cpu_counter(&iter->counters); ADD_COUNTER(*tmp, paddc[i].bcnt, paddc[i].pcnt); ++i; } xt_write_recseq_end(addend); unlock_up_free: local_bh_enable(); xt_table_unlock(t); module_put(t->me); free: vfree(paddc); return ret; } #ifdef CONFIG_NETFILTER_XTABLES_COMPAT struct compat_ip6t_replace { char name[XT_TABLE_MAXNAMELEN]; u32 valid_hooks; u32 num_entries; u32 size; u32 hook_entry[NF_INET_NUMHOOKS]; u32 underflow[NF_INET_NUMHOOKS]; u32 num_counters; compat_uptr_t counters; /* struct xt_counters * */ struct compat_ip6t_entry entries[]; }; static int compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr, unsigned int *size, struct xt_counters *counters, unsigned int i) { struct xt_entry_target *t; struct compat_ip6t_entry __user *ce; u_int16_t target_offset, next_offset; compat_uint_t origsize; const struct xt_entry_match *ematch; int ret = 0; origsize = *size; ce = *dstptr; if (copy_to_user(ce, e, sizeof(struct ip6t_entry)) != 0 || copy_to_user(&ce->counters, &counters[i], sizeof(counters[i])) != 0) return -EFAULT; *dstptr += sizeof(struct compat_ip6t_entry); *size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry); xt_ematch_foreach(ematch, e) { ret = xt_compat_match_to_user(ematch, dstptr, size); if (ret != 0) return ret; } target_offset = e->target_offset - (origsize - *size); t = ip6t_get_target(e); ret = xt_compat_target_to_user(t, dstptr, size); if (ret) return ret; next_offset = e->next_offset - (origsize - *size); if (put_user(target_offset, &ce->target_offset) != 0 || put_user(next_offset, &ce->next_offset) != 0) return -EFAULT; return 0; } static int compat_find_calc_match(struct xt_entry_match *m, const struct ip6t_ip6 *ipv6, int *size) { struct xt_match *match; match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name, m->u.user.revision); if (IS_ERR(match)) return PTR_ERR(match); m->u.kernel.match = match; *size += xt_compat_match_offset(match); return 0; } static void compat_release_entry(struct compat_ip6t_entry *e) { struct xt_entry_target *t; struct xt_entry_match *ematch; /* Cleanup all matches */ xt_ematch_foreach(ematch, e) module_put(ematch->u.kernel.match->me); t = compat_ip6t_get_target(e); module_put(t->u.kernel.target->me); } static int check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e, struct xt_table_info *newinfo, unsigned int *size, const unsigned char *base, const unsigned char *limit) { struct xt_entry_match *ematch; struct xt_entry_target *t; struct xt_target *target; unsigned int entry_offset; unsigned int j; 
int ret, off; if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 || (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit || (unsigned char *)e + e->next_offset > limit) return -EINVAL; if (e->next_offset < sizeof(struct compat_ip6t_entry) + sizeof(struct compat_xt_entry_target)) return -EINVAL; if (!ip6_checkentry(&e->ipv6)) return -EINVAL; ret = xt_compat_check_entry_offsets(e, e->elems, e->target_offset, e->next_offset); if (ret) return ret; off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry); entry_offset = (void *)e - (void *)base; j = 0; xt_ematch_foreach(ematch, e) { ret = compat_find_calc_match(ematch, &e->ipv6, &off); if (ret != 0) goto release_matches; ++j; } t = compat_ip6t_get_target(e); target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name, t->u.user.revision); if (IS_ERR(target)) { ret = PTR_ERR(target); goto release_matches; } t->u.kernel.target = target; off += xt_compat_target_offset(target); *size += off; ret = xt_compat_add_offset(AF_INET6, entry_offset, off); if (ret) goto out; return 0; out: module_put(t->u.kernel.target->me); release_matches: xt_ematch_foreach(ematch, e) { if (j-- == 0) break; module_put(ematch->u.kernel.match->me); } return ret; } static void compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr, unsigned int *size, struct xt_table_info *newinfo, unsigned char *base) { struct xt_entry_target *t; struct ip6t_entry *de; unsigned int origsize; int h; struct xt_entry_match *ematch; origsize = *size; de = *dstptr; memcpy(de, e, sizeof(struct ip6t_entry)); memcpy(&de->counters, &e->counters, sizeof(e->counters)); *dstptr += sizeof(struct ip6t_entry); *size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry); xt_ematch_foreach(ematch, e) xt_compat_match_from_user(ematch, dstptr, size); de->target_offset = e->target_offset - (origsize - *size); t = compat_ip6t_get_target(e); xt_compat_target_from_user(t, dstptr, size); de->next_offset = e->next_offset - (origsize - *size); for (h = 0; h < NF_INET_NUMHOOKS; h++) { if ((unsigned char *)de - base < newinfo->hook_entry[h]) newinfo->hook_entry[h] -= origsize - *size; if ((unsigned char *)de - base < newinfo->underflow[h]) newinfo->underflow[h] -= origsize - *size; } } static int translate_compat_table(struct net *net, struct xt_table_info **pinfo, void **pentry0, const struct compat_ip6t_replace *compatr) { unsigned int i, j; struct xt_table_info *newinfo, *info; void *pos, *entry0, *entry1; struct compat_ip6t_entry *iter0; struct ip6t_replace repl; unsigned int size; int ret; info = *pinfo; entry0 = *pentry0; size = compatr->size; info->number = compatr->num_entries; j = 0; xt_compat_lock(AF_INET6); ret = xt_compat_init_offsets(AF_INET6, compatr->num_entries); if (ret) goto out_unlock; /* Walk through entries, checking offsets. 
*/ xt_entry_foreach(iter0, entry0, compatr->size) { ret = check_compat_entry_size_and_hooks(iter0, info, &size, entry0, entry0 + compatr->size); if (ret != 0) goto out_unlock; ++j; } ret = -EINVAL; if (j != compatr->num_entries) goto out_unlock; ret = -ENOMEM; newinfo = xt_alloc_table_info(size); if (!newinfo) goto out_unlock; memset(newinfo->entries, 0, size); newinfo->number = compatr->num_entries; for (i = 0; i < NF_INET_NUMHOOKS; i++) { newinfo->hook_entry[i] = compatr->hook_entry[i]; newinfo->underflow[i] = compatr->underflow[i]; } entry1 = newinfo->entries; pos = entry1; size = compatr->size; xt_entry_foreach(iter0, entry0, compatr->size) compat_copy_entry_from_user(iter0, &pos, &size, newinfo, entry1); /* all module references in entry0 are now gone. */ xt_compat_flush_offsets(AF_INET6); xt_compat_unlock(AF_INET6); memcpy(&repl, compatr, sizeof(*compatr)); for (i = 0; i < NF_INET_NUMHOOKS; i++) { repl.hook_entry[i] = newinfo->hook_entry[i]; repl.underflow[i] = newinfo->underflow[i]; } repl.num_counters = 0; repl.counters = NULL; repl.size = newinfo->size; ret = translate_table(net, newinfo, entry1, &repl); if (ret) goto free_newinfo; *pinfo = newinfo; *pentry0 = entry1; xt_free_table_info(info); return 0; free_newinfo: xt_free_table_info(newinfo); return ret; out_unlock: xt_compat_flush_offsets(AF_INET6); xt_compat_unlock(AF_INET6); xt_entry_foreach(iter0, entry0, compatr->size) { if (j-- == 0) break; compat_release_entry(iter0); } return ret; } static int compat_do_replace(struct net *net, sockptr_t arg, unsigned int len) { int ret; struct compat_ip6t_replace tmp; struct xt_table_info *newinfo; void *loc_cpu_entry; struct ip6t_entry *iter; if (len < sizeof(tmp)) return -EINVAL; if (copy_from_sockptr(&tmp, arg, sizeof(tmp)) != 0) return -EFAULT; /* overflow check */ if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) return -ENOMEM; if (tmp.num_counters == 0) return -EINVAL; if ((u64)len < (u64)tmp.size + sizeof(tmp)) return -EINVAL; tmp.name[sizeof(tmp.name)-1] = 0; newinfo = xt_alloc_table_info(tmp.size); if (!newinfo) return -ENOMEM; loc_cpu_entry = newinfo->entries; if (copy_from_sockptr_offset(loc_cpu_entry, arg, sizeof(tmp), tmp.size) != 0) { ret = -EFAULT; goto free_newinfo; } ret = translate_compat_table(net, &newinfo, &loc_cpu_entry, &tmp); if (ret != 0) goto free_newinfo; ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo, tmp.num_counters, compat_ptr(tmp.counters)); if (ret) goto free_newinfo_untrans; return 0; free_newinfo_untrans: xt_entry_foreach(iter, loc_cpu_entry, newinfo->size) cleanup_entry(iter, net); free_newinfo: xt_free_table_info(newinfo); return ret; } struct compat_ip6t_get_entries { char name[XT_TABLE_MAXNAMELEN]; compat_uint_t size; struct compat_ip6t_entry entrytable[]; }; static int compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table, void __user *userptr) { struct xt_counters *counters; const struct xt_table_info *private = table->private; void __user *pos; unsigned int size; int ret = 0; unsigned int i = 0; struct ip6t_entry *iter; counters = alloc_counters(table); if (IS_ERR(counters)) return PTR_ERR(counters); pos = userptr; size = total_size; xt_entry_foreach(iter, private->entries, total_size) { ret = compat_copy_entry_to_user(iter, &pos, &size, counters, i++); if (ret != 0) break; } vfree(counters); return ret; } static int compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr, int *len) { int ret; struct compat_ip6t_get_entries get; struct xt_table *t; if (*len < sizeof(get)) 
return -EINVAL; if (copy_from_user(&get, uptr, sizeof(get)) != 0) return -EFAULT; if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) return -EINVAL; get.name[sizeof(get.name) - 1] = '\0'; xt_compat_lock(AF_INET6); t = xt_find_table_lock(net, AF_INET6, get.name); if (!IS_ERR(t)) { const struct xt_table_info *private = t->private; struct xt_table_info info; ret = compat_table_info(private, &info); if (!ret && get.size == info.size) ret = compat_copy_entries_to_user(private->size, t, uptr->entrytable); else if (!ret) ret = -EAGAIN; xt_compat_flush_offsets(AF_INET6); module_put(t->me); xt_table_unlock(t); } else ret = PTR_ERR(t); xt_compat_unlock(AF_INET6); return ret; } #endif static int do_ip6t_set_ctl(struct sock *sk, int cmd, sockptr_t arg, unsigned int len) { int ret; if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) return -EPERM; switch (cmd) { case IP6T_SO_SET_REPLACE: #ifdef CONFIG_NETFILTER_XTABLES_COMPAT if (in_compat_syscall()) ret = compat_do_replace(sock_net(sk), arg, len); else #endif ret = do_replace(sock_net(sk), arg, len); break; case IP6T_SO_SET_ADD_COUNTERS: ret = do_add_counters(sock_net(sk), arg, len); break; default: ret = -EINVAL; } return ret; } static int do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) { int ret; if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) return -EPERM; switch (cmd) { case IP6T_SO_GET_INFO: ret = get_info(sock_net(sk), user, len); break; case IP6T_SO_GET_ENTRIES: #ifdef CONFIG_NETFILTER_XTABLES_COMPAT if (in_compat_syscall()) ret = compat_get_entries(sock_net(sk), user, len); else #endif ret = get_entries(sock_net(sk), user, len); break; case IP6T_SO_GET_REVISION_MATCH: case IP6T_SO_GET_REVISION_TARGET: { struct xt_get_revision rev; int target; if (*len != sizeof(rev)) { ret = -EINVAL; break; } if (copy_from_user(&rev, user, sizeof(rev)) != 0) { ret = -EFAULT; break; } rev.name[sizeof(rev.name)-1] = 0; if (cmd == IP6T_SO_GET_REVISION_TARGET) target = 1; else target = 0; try_then_request_module(xt_find_revision(AF_INET6, rev.name, rev.revision, target, &ret), "ip6t_%s", rev.name); break; } default: ret = -EINVAL; } return ret; } static void __ip6t_unregister_table(struct net *net, struct xt_table *table) { struct xt_table_info *private; void *loc_cpu_entry; struct module *table_owner = table->me; struct ip6t_entry *iter; private = xt_unregister_table(table); /* Decrease module usage counts and free resources */ loc_cpu_entry = private->entries; xt_entry_foreach(iter, loc_cpu_entry, private->size) cleanup_entry(iter, net); if (private->number > private->initial_entries) module_put(table_owner); xt_free_table_info(private); } int ip6t_register_table(struct net *net, const struct xt_table *table, const struct ip6t_replace *repl, const struct nf_hook_ops *template_ops) { struct nf_hook_ops *ops; unsigned int num_ops; int ret, i; struct xt_table_info *newinfo; struct xt_table_info bootstrap = {0}; void *loc_cpu_entry; struct xt_table *new_table; newinfo = xt_alloc_table_info(repl->size); if (!newinfo) return -ENOMEM; loc_cpu_entry = newinfo->entries; memcpy(loc_cpu_entry, repl->entries, repl->size); ret = translate_table(net, newinfo, loc_cpu_entry, repl); if (ret != 0) { xt_free_table_info(newinfo); return ret; } new_table = xt_register_table(net, table, &bootstrap, newinfo); if (IS_ERR(new_table)) { struct ip6t_entry *iter; xt_entry_foreach(iter, loc_cpu_entry, newinfo->size) cleanup_entry(iter, net); xt_free_table_info(newinfo); return PTR_ERR(new_table); } if (!template_ops) return 0; num_ops = 
hweight32(table->valid_hooks); if (num_ops == 0) { ret = -EINVAL; goto out_free; } ops = kmemdup_array(template_ops, num_ops, sizeof(*ops), GFP_KERNEL); if (!ops) { ret = -ENOMEM; goto out_free; } for (i = 0; i < num_ops; i++) ops[i].priv = new_table; new_table->ops = ops; ret = nf_register_net_hooks(net, ops, num_ops); if (ret != 0) goto out_free; return ret; out_free: __ip6t_unregister_table(net, new_table); return ret; } void ip6t_unregister_table_pre_exit(struct net *net, const char *name) { struct xt_table *table = xt_find_table(net, NFPROTO_IPV6, name); if (table) nf_unregister_net_hooks(net, table->ops, hweight32(table->valid_hooks)); } void ip6t_unregister_table_exit(struct net *net, const char *name) { struct xt_table *table = xt_find_table(net, NFPROTO_IPV6, name); if (table) __ip6t_unregister_table(net, table); } /* The built-in targets: standard (NULL) and error. */ static struct xt_target ip6t_builtin_tg[] __read_mostly = { { .name = XT_STANDARD_TARGET, .targetsize = sizeof(int), .family = NFPROTO_IPV6, #ifdef CONFIG_NETFILTER_XTABLES_COMPAT .compatsize = sizeof(compat_int_t), .compat_from_user = compat_standard_from_user, .compat_to_user = compat_standard_to_user, #endif }, { .name = XT_ERROR_TARGET, .target = ip6t_error, .targetsize = XT_FUNCTION_MAXNAMELEN, .family = NFPROTO_IPV6, }, }; static struct nf_sockopt_ops ip6t_sockopts = { .pf = PF_INET6, .set_optmin = IP6T_BASE_CTL, .set_optmax = IP6T_SO_SET_MAX+1, .set = do_ip6t_set_ctl, .get_optmin = IP6T_BASE_CTL, .get_optmax = IP6T_SO_GET_MAX+1, .get = do_ip6t_get_ctl, .owner = THIS_MODULE, }; static int __net_init ip6_tables_net_init(struct net *net) { return xt_proto_init(net, NFPROTO_IPV6); } static void __net_exit ip6_tables_net_exit(struct net *net) { xt_proto_fini(net, NFPROTO_IPV6); } static struct pernet_operations ip6_tables_net_ops = { .init = ip6_tables_net_init, .exit = ip6_tables_net_exit, }; static int __init ip6_tables_init(void) { int ret; ret = register_pernet_subsys(&ip6_tables_net_ops); if (ret < 0) goto err1; /* No one else will be downing sem now, so we won't sleep */ ret = xt_register_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg)); if (ret < 0) goto err2; /* Register setsockopt */ ret = nf_register_sockopt(&ip6t_sockopts); if (ret < 0) goto err4; return 0; err4: xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg)); err2: unregister_pernet_subsys(&ip6_tables_net_ops); err1: return ret; } static void __exit ip6_tables_fini(void) { nf_unregister_sockopt(&ip6t_sockopts); xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg)); unregister_pernet_subsys(&ip6_tables_net_ops); } EXPORT_SYMBOL(ip6t_register_table); EXPORT_SYMBOL(ip6t_unregister_table_pre_exit); EXPORT_SYMBOL(ip6t_unregister_table_exit); EXPORT_SYMBOL(ip6t_do_table); module_init(ip6_tables_init); module_exit(ip6_tables_fini); |
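/*
 * Illustrative userspace sketch (not part of the kernel source above): a
 * hedged example of how the IP6T_SO_GET_INFO getsockopt handler registered
 * via ip6t_sockopts is typically consumed, roughly mirroring what
 * ip6tables-legacy/libip6tc do. It assumes the uapi header
 * <linux/netfilter_ipv6/ip6_tables.h> is available; the helper name and the
 * "filter" table name are just examples, and CAP_NET_ADMIN is required.
 */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <unistd.h>

static int dump_table_info(const char *table)
{
	struct ip6t_getinfo info;
	socklen_t len = sizeof(info);
	int fd = socket(AF_INET6, SOCK_RAW, IPPROTO_RAW);

	if (fd < 0) {
		perror("socket");
		return -1;
	}

	/* get_info() above expects the table name in the buffer passed in. */
	memset(&info, 0, sizeof(info));
	strncpy(info.name, table, sizeof(info.name) - 1);

	if (getsockopt(fd, IPPROTO_IPV6, IP6T_SO_GET_INFO, &info, &len) < 0) {
		perror("getsockopt(IP6T_SO_GET_INFO)");
		close(fd);
		return -1;
	}

	printf("%s: %u entries, %u bytes, valid_hooks 0x%x\n",
	       info.name, info.num_entries, info.size, info.valid_hooks);
	close(fd);
	return 0;
}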
// SPDX-License-Identifier: GPL-2.0+
/*
 * hwmon driver for Aquacomputer devices (D5 Next, Farbwerk, Farbwerk 360, Octo,
 * Quadro, High Flow Next, Aquaero, Aquastream Ultimate, Leakshield,
 * High Flow USB/MPS Flow family)
 *
 * Aquacomputer devices send HID reports (with ID 0x01) every second to report
 * sensor values, except for devices that communicate through the
 * legacy way (currently, Poweradjust 3 and High Flow USB/MPS Flow family).
* * Copyright 2021 Aleksa Savic <savicaleksa83@gmail.com> * Copyright 2022 Jack Doan <me@jackdoan.com> */ #include <linux/crc16.h> #include <linux/debugfs.h> #include <linux/delay.h> #include <linux/hid.h> #include <linux/hwmon.h> #include <linux/jiffies.h> #include <linux/ktime.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/seq_file.h> #include <linux/unaligned.h> #define USB_VENDOR_ID_AQUACOMPUTER 0x0c70 #define USB_PRODUCT_ID_AQUAERO 0xf001 #define USB_PRODUCT_ID_FARBWERK 0xf00a #define USB_PRODUCT_ID_QUADRO 0xf00d #define USB_PRODUCT_ID_D5NEXT 0xf00e #define USB_PRODUCT_ID_FARBWERK360 0xf010 #define USB_PRODUCT_ID_OCTO 0xf011 #define USB_PRODUCT_ID_HIGHFLOWNEXT 0xf012 #define USB_PRODUCT_ID_LEAKSHIELD 0xf014 #define USB_PRODUCT_ID_AQUASTREAMXT 0xf0b6 #define USB_PRODUCT_ID_AQUASTREAMULT 0xf00b #define USB_PRODUCT_ID_POWERADJUST3 0xf0bd #define USB_PRODUCT_ID_HIGHFLOW 0xf003 enum kinds { d5next, farbwerk, farbwerk360, octo, quadro, highflownext, aquaero, poweradjust3, aquastreamult, aquastreamxt, leakshield, highflow }; static const char *const aqc_device_names[] = { [d5next] = "d5next", [farbwerk] = "farbwerk", [farbwerk360] = "farbwerk360", [octo] = "octo", [quadro] = "quadro", [highflownext] = "highflownext", [leakshield] = "leakshield", [aquastreamxt] = "aquastreamxt", [aquaero] = "aquaero", [aquastreamult] = "aquastreamultimate", [poweradjust3] = "poweradjust3", [highflow] = "highflow" /* Covers MPS Flow devices */ }; #define DRIVER_NAME "aquacomputer_d5next" #define STATUS_REPORT_ID 0x01 #define STATUS_UPDATE_INTERVAL (2 * HZ) /* In seconds */ #define SERIAL_PART_OFFSET 2 #define CTRL_REPORT_ID 0x03 #define AQUAERO_CTRL_REPORT_ID 0x0b #define CTRL_REPORT_DELAY 200 /* ms */ /* The HID report that the official software always sends * after writing values, currently same for all devices */ #define SECONDARY_CTRL_REPORT_ID 0x02 #define SECONDARY_CTRL_REPORT_SIZE 0x0B static u8 secondary_ctrl_report[] = { 0x02, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x34, 0xC6 }; /* Secondary HID report values for Aquaero */ #define AQUAERO_SECONDARY_CTRL_REPORT_ID 0x06 #define AQUAERO_SECONDARY_CTRL_REPORT_SIZE 0x07 static u8 aquaero_secondary_ctrl_report[] = { 0x06, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00 }; /* Report IDs for legacy devices */ #define AQUASTREAMXT_STATUS_REPORT_ID 0x04 #define POWERADJUST3_STATUS_REPORT_ID 0x03 #define HIGHFLOW_STATUS_REPORT_ID 0x02 /* Data types for reading and writing control reports */ #define AQC_8 0 #define AQC_BE16 1 /* Info, sensor sizes and offsets for most Aquacomputer devices */ #define AQC_SERIAL_START 0x3 #define AQC_FIRMWARE_VERSION 0xD #define AQC_SENSOR_SIZE 0x02 #define AQC_SENSOR_NA 0x7FFF #define AQC_FAN_PERCENT_OFFSET 0x00 #define AQC_FAN_VOLTAGE_OFFSET 0x02 #define AQC_FAN_CURRENT_OFFSET 0x04 #define AQC_FAN_POWER_OFFSET 0x06 #define AQC_FAN_SPEED_OFFSET 0x08 /* Specs of the Aquaero fan controllers */ #define AQUAERO_SERIAL_START 0x07 #define AQUAERO_FIRMWARE_VERSION 0x0B #define AQUAERO_NUM_FANS 4 #define AQUAERO_NUM_SENSORS 8 #define AQUAERO_NUM_VIRTUAL_SENSORS 8 #define AQUAERO_NUM_CALC_VIRTUAL_SENSORS 4 #define AQUAERO_NUM_FLOW_SENSORS 2 #define AQUAERO_CTRL_REPORT_SIZE 0xa93 #define AQUAERO_CTRL_PRESET_ID 0x5c #define AQUAERO_CTRL_PRESET_SIZE 0x02 #define AQUAERO_CTRL_PRESET_START 0x55c /* Sensor report offsets for Aquaero fan controllers */ #define AQUAERO_SENSOR_START 0x65 #define AQUAERO_VIRTUAL_SENSOR_START 0x85 #define AQUAERO_CALC_VIRTUAL_SENSOR_START 0x95 #define AQUAERO_FLOW_SENSORS_START 0xF9 #define 
AQUAERO_FAN_VOLTAGE_OFFSET 0x04 #define AQUAERO_FAN_CURRENT_OFFSET 0x06 #define AQUAERO_FAN_POWER_OFFSET 0x08 #define AQUAERO_FAN_SPEED_OFFSET 0x00 static u16 aquaero_sensor_fan_offsets[] = { 0x167, 0x173, 0x17f, 0x18B }; /* Control report offsets for the Aquaero fan controllers */ #define AQUAERO_TEMP_CTRL_OFFSET 0xdb #define AQUAERO_FAN_CTRL_MIN_PWR_OFFSET 0x04 #define AQUAERO_FAN_CTRL_MAX_PWR_OFFSET 0x06 #define AQUAERO_FAN_CTRL_SRC_OFFSET 0x10 static u16 aquaero_ctrl_fan_offsets[] = { 0x20c, 0x220, 0x234, 0x248 }; /* Specs of the D5 Next pump */ #define D5NEXT_NUM_FANS 2 #define D5NEXT_NUM_SENSORS 1 #define D5NEXT_NUM_VIRTUAL_SENSORS 8 #define D5NEXT_CTRL_REPORT_SIZE 0x329 /* Sensor report offsets for the D5 Next pump */ #define D5NEXT_POWER_CYCLES 0x18 #define D5NEXT_COOLANT_TEMP 0x57 #define D5NEXT_PUMP_OFFSET 0x6c #define D5NEXT_FAN_OFFSET 0x5f #define D5NEXT_5V_VOLTAGE 0x39 #define D5NEXT_12V_VOLTAGE 0x37 #define D5NEXT_VIRTUAL_SENSORS_START 0x3f static u16 d5next_sensor_fan_offsets[] = { D5NEXT_PUMP_OFFSET, D5NEXT_FAN_OFFSET }; /* Control report offsets for the D5 Next pump */ #define D5NEXT_TEMP_CTRL_OFFSET 0x2D /* Temperature sensor offsets location */ static u16 d5next_ctrl_fan_offsets[] = { 0x97, 0x42 }; /* Pump and fan speed (from 0-100%) */ /* Specs of the Aquastream Ultimate pump */ /* Pump does not follow the standard structure, so only consider the fan */ #define AQUASTREAMULT_NUM_FANS 1 #define AQUASTREAMULT_NUM_SENSORS 2 /* Sensor report offsets for the Aquastream Ultimate pump */ #define AQUASTREAMULT_SENSOR_START 0x2D #define AQUASTREAMULT_PUMP_OFFSET 0x51 #define AQUASTREAMULT_PUMP_VOLTAGE 0x3D #define AQUASTREAMULT_PUMP_CURRENT 0x53 #define AQUASTREAMULT_PUMP_POWER 0x55 #define AQUASTREAMULT_FAN_OFFSET 0x41 #define AQUASTREAMULT_PRESSURE_OFFSET 0x57 #define AQUASTREAMULT_FLOW_SENSOR_OFFSET 0x37 #define AQUASTREAMULT_FAN_VOLTAGE_OFFSET 0x02 #define AQUASTREAMULT_FAN_CURRENT_OFFSET 0x00 #define AQUASTREAMULT_FAN_POWER_OFFSET 0x04 #define AQUASTREAMULT_FAN_SPEED_OFFSET 0x06 static u16 aquastreamult_sensor_fan_offsets[] = { AQUASTREAMULT_FAN_OFFSET }; /* Spec and sensor report offset for the Farbwerk RGB controller */ #define FARBWERK_NUM_SENSORS 4 #define FARBWERK_SENSOR_START 0x2f /* Specs of the Farbwerk 360 RGB controller */ #define FARBWERK360_NUM_SENSORS 4 #define FARBWERK360_NUM_VIRTUAL_SENSORS 16 #define FARBWERK360_CTRL_REPORT_SIZE 0x682 /* Sensor report offsets for the Farbwerk 360 */ #define FARBWERK360_SENSOR_START 0x32 #define FARBWERK360_VIRTUAL_SENSORS_START 0x3a /* Control report offsets for the Farbwerk 360 */ #define FARBWERK360_TEMP_CTRL_OFFSET 0x8 /* Specs of the Octo fan controller */ #define OCTO_NUM_FANS 8 #define OCTO_NUM_SENSORS 4 #define OCTO_NUM_VIRTUAL_SENSORS 16 #define OCTO_NUM_FLOW_SENSORS 1 #define OCTO_CTRL_REPORT_SIZE 0x65F /* Sensor report offsets for the Octo */ #define OCTO_POWER_CYCLES 0x18 #define OCTO_SENSOR_START 0x3D #define OCTO_VIRTUAL_SENSORS_START 0x45 #define OCTO_FLOW_SENSOR_OFFSET 0x7B static u16 octo_sensor_fan_offsets[] = { 0x7D, 0x8A, 0x97, 0xA4, 0xB1, 0xBE, 0xCB, 0xD8 }; /* Control report offsets for the Octo */ #define OCTO_TEMP_CTRL_OFFSET 0xA #define OCTO_FLOW_PULSES_CTRL_OFFSET 0x6 /* Fan speed offsets (0-100%) */ static u16 octo_ctrl_fan_offsets[] = { 0x5B, 0xB0, 0x105, 0x15A, 0x1AF, 0x204, 0x259, 0x2AE }; /* Specs of Quadro fan controller */ #define QUADRO_NUM_FANS 4 #define QUADRO_NUM_SENSORS 4 #define QUADRO_NUM_VIRTUAL_SENSORS 16 #define QUADRO_NUM_FLOW_SENSORS 1 #define QUADRO_CTRL_REPORT_SIZE 0x3c1 /* 
Sensor report offsets for the Quadro */ #define QUADRO_POWER_CYCLES 0x18 #define QUADRO_SENSOR_START 0x34 #define QUADRO_VIRTUAL_SENSORS_START 0x3c #define QUADRO_FLOW_SENSOR_OFFSET 0x6e static u16 quadro_sensor_fan_offsets[] = { 0x70, 0x7D, 0x8A, 0x97 }; /* Control report offsets for the Quadro */ #define QUADRO_TEMP_CTRL_OFFSET 0xA #define QUADRO_FLOW_PULSES_CTRL_OFFSET 0x6 static u16 quadro_ctrl_fan_offsets[] = { 0x37, 0x8c, 0xe1, 0x136 }; /* Fan speed offsets (0-100%) */ /* Specs of High Flow Next flow sensor */ #define HIGHFLOWNEXT_NUM_SENSORS 2 #define HIGHFLOWNEXT_NUM_FLOW_SENSORS 1 /* Sensor report offsets for the High Flow Next */ #define HIGHFLOWNEXT_SENSOR_START 85 #define HIGHFLOWNEXT_FLOW 81 #define HIGHFLOWNEXT_WATER_QUALITY 89 #define HIGHFLOWNEXT_POWER 91 #define HIGHFLOWNEXT_CONDUCTIVITY 95 #define HIGHFLOWNEXT_5V_VOLTAGE 97 #define HIGHFLOWNEXT_5V_VOLTAGE_USB 99 /* Specs of the Leakshield */ #define LEAKSHIELD_NUM_SENSORS 2 /* Sensor report offsets for Leakshield */ #define LEAKSHIELD_PRESSURE_ADJUSTED 285 #define LEAKSHIELD_TEMPERATURE_1 265 #define LEAKSHIELD_TEMPERATURE_2 287 #define LEAKSHIELD_PRESSURE_MIN 291 #define LEAKSHIELD_PRESSURE_TARGET 293 #define LEAKSHIELD_PRESSURE_MAX 295 #define LEAKSHIELD_PUMP_RPM_IN 101 #define LEAKSHIELD_FLOW_IN 111 #define LEAKSHIELD_RESERVOIR_VOLUME 313 #define LEAKSHIELD_RESERVOIR_FILLED 311 /* Specs of the Aquastream XT pump */ #define AQUASTREAMXT_SERIAL_START 0x3a #define AQUASTREAMXT_FIRMWARE_VERSION 0x32 #define AQUASTREAMXT_NUM_FANS 2 #define AQUASTREAMXT_NUM_SENSORS 3 #define AQUASTREAMXT_FAN_STOPPED 0x4 #define AQUASTREAMXT_PUMP_CONVERSION_CONST 45000000 #define AQUASTREAMXT_FAN_CONVERSION_CONST 5646000 #define AQUASTREAMXT_SENSOR_REPORT_SIZE 0x42 /* Sensor report offsets and info for Aquastream XT */ #define AQUASTREAMXT_SENSOR_START 0xd #define AQUASTREAMXT_FAN_VOLTAGE_OFFSET 0x7 #define AQUASTREAMXT_FAN_STATUS_OFFSET 0x1d #define AQUASTREAMXT_PUMP_VOLTAGE_OFFSET 0x9 #define AQUASTREAMXT_PUMP_CURR_OFFSET 0xb static u16 aquastreamxt_sensor_fan_offsets[] = { 0x13, 0x1b }; /* Specs of the Poweradjust 3 */ #define POWERADJUST3_NUM_SENSORS 1 #define POWERADJUST3_SENSOR_REPORT_SIZE 0x32 /* Sensor report offsets for the Poweradjust 3 */ #define POWERADJUST3_SENSOR_START 0x03 /* Specs of the High Flow USB */ #define HIGHFLOW_NUM_SENSORS 2 #define HIGHFLOW_NUM_FLOW_SENSORS 1 #define HIGHFLOW_SENSOR_REPORT_SIZE 0x76 /* Sensor report offsets for the High Flow USB */ #define HIGHFLOW_FIRMWARE_VERSION 0x3 #define HIGHFLOW_SERIAL_START 0x9 #define HIGHFLOW_FLOW_SENSOR_OFFSET 0x23 #define HIGHFLOW_SENSOR_START 0x2b /* Labels for D5 Next */ static const char *const label_d5next_temp[] = { "Coolant temp" }; static const char *const label_d5next_speeds[] = { "Pump speed", "Fan speed" }; static const char *const label_d5next_power[] = { "Pump power", "Fan power" }; static const char *const label_d5next_voltages[] = { "Pump voltage", "Fan voltage", "+5V voltage", "+12V voltage" }; static const char *const label_d5next_current[] = { "Pump current", "Fan current" }; /* Labels for Aquaero, Farbwerk, Farbwerk 360 and Octo and Quadro temperature sensors */ static const char *const label_temp_sensors[] = { "Sensor 1", "Sensor 2", "Sensor 3", "Sensor 4", "Sensor 5", "Sensor 6", "Sensor 7", "Sensor 8" }; static const char *const label_virtual_temp_sensors[] = { "Virtual sensor 1", "Virtual sensor 2", "Virtual sensor 3", "Virtual sensor 4", "Virtual sensor 5", "Virtual sensor 6", "Virtual sensor 7", "Virtual sensor 8", "Virtual sensor 9", "Virtual 
sensor 10", "Virtual sensor 11", "Virtual sensor 12", "Virtual sensor 13", "Virtual sensor 14", "Virtual sensor 15", "Virtual sensor 16", }; static const char *const label_aquaero_calc_temp_sensors[] = { "Calc. virtual sensor 1", "Calc. virtual sensor 2", "Calc. virtual sensor 3", "Calc. virtual sensor 4" }; static const char *const label_fan_power[] = { "Fan 1 power", "Fan 2 power", "Fan 3 power", "Fan 4 power", "Fan 5 power", "Fan 6 power", "Fan 7 power", "Fan 8 power" }; static const char *const label_fan_voltage[] = { "Fan 1 voltage", "Fan 2 voltage", "Fan 3 voltage", "Fan 4 voltage", "Fan 5 voltage", "Fan 6 voltage", "Fan 7 voltage", "Fan 8 voltage" }; static const char *const label_fan_current[] = { "Fan 1 current", "Fan 2 current", "Fan 3 current", "Fan 4 current", "Fan 5 current", "Fan 6 current", "Fan 7 current", "Fan 8 current" }; /* Labels for Octo fan speeds */ static const char *const label_octo_speeds[] = { "Fan 1 speed", "Fan 2 speed", "Fan 3 speed", "Fan 4 speed", "Fan 5 speed", "Fan 6 speed", "Fan 7 speed", "Fan 8 speed", "Flow speed [dL/h]", }; /* Labels for Quadro fan speeds */ static const char *const label_quadro_speeds[] = { "Fan 1 speed", "Fan 2 speed", "Fan 3 speed", "Fan 4 speed", "Flow speed [dL/h]" }; /* Labels for Aquaero fan speeds */ static const char *const label_aquaero_speeds[] = { "Fan 1 speed", "Fan 2 speed", "Fan 3 speed", "Fan 4 speed", "Flow sensor 1 [dL/h]", "Flow sensor 2 [dL/h]" }; /* Labels for High Flow Next */ static const char *const label_highflownext_temp_sensors[] = { "Coolant temp", "External sensor" }; static const char *const label_highflownext_fan_speed[] = { "Flow [dL/h]", "Water quality [%]", "Conductivity [nS/cm]", }; static const char *const label_highflownext_power[] = { "Dissipated power", }; static const char *const label_highflownext_voltage[] = { "+5V voltage", "+5V USB voltage" }; /* Labels for Leakshield */ static const char *const label_leakshield_temp_sensors[] = { "Temperature 1", "Temperature 2" }; static const char *const label_leakshield_fan_speed[] = { "Pressure [ubar]", "User-Provided Pump Speed", "User-Provided Flow [dL/h]", "Reservoir Volume [ml]", "Reservoir Filled [ml]", }; /* Labels for Aquastream XT */ static const char *const label_aquastreamxt_temp_sensors[] = { "Fan IC temp", "External sensor", "Coolant temp" }; /* Labels for Aquastream Ultimate */ static const char *const label_aquastreamult_temp[] = { "Coolant temp", "External temp" }; static const char *const label_aquastreamult_speeds[] = { "Fan speed", "Pump speed", "Pressure [mbar]", "Flow speed [dL/h]" }; static const char *const label_aquastreamult_power[] = { "Fan power", "Pump power" }; static const char *const label_aquastreamult_voltages[] = { "Fan voltage", "Pump voltage" }; static const char *const label_aquastreamult_current[] = { "Fan current", "Pump current" }; /* Labels for Poweradjust 3 */ static const char *const label_poweradjust3_temp_sensors[] = { "External sensor" }; /* Labels for Highflow */ static const char *const label_highflow_temp[] = { "External temp", "Internal temp" }; static const char *const label_highflow_speeds[] = { "Flow speed [dL/h]" }; struct aqc_fan_structure_offsets { u8 voltage; u8 curr; u8 power; u8 speed; }; /* Fan structure offsets for Aquaero */ static struct aqc_fan_structure_offsets aqc_aquaero_fan_structure = { .voltage = AQUAERO_FAN_VOLTAGE_OFFSET, .curr = AQUAERO_FAN_CURRENT_OFFSET, .power = AQUAERO_FAN_POWER_OFFSET, .speed = AQUAERO_FAN_SPEED_OFFSET }; /* Fan structure offsets for Aquastream Ultimate */ 
static struct aqc_fan_structure_offsets aqc_aquastreamult_fan_structure = { .voltage = AQUASTREAMULT_FAN_VOLTAGE_OFFSET, .curr = AQUASTREAMULT_FAN_CURRENT_OFFSET, .power = AQUASTREAMULT_FAN_POWER_OFFSET, .speed = AQUASTREAMULT_FAN_SPEED_OFFSET }; /* Fan structure offsets for all devices except those above */ static struct aqc_fan_structure_offsets aqc_general_fan_structure = { .voltage = AQC_FAN_VOLTAGE_OFFSET, .curr = AQC_FAN_CURRENT_OFFSET, .power = AQC_FAN_POWER_OFFSET, .speed = AQC_FAN_SPEED_OFFSET }; struct aqc_data { struct hid_device *hdev; struct device *hwmon_dev; struct dentry *debugfs; struct mutex mutex; /* Used for locking access when reading and writing PWM values */ enum kinds kind; const char *name; int status_report_id; /* Used for legacy devices, report is stored in buffer */ int ctrl_report_id; int secondary_ctrl_report_id; int secondary_ctrl_report_size; u8 *secondary_ctrl_report; ktime_t last_ctrl_report_op; int ctrl_report_delay; /* Delay between two ctrl report operations, in ms */ int buffer_size; u8 *buffer; int checksum_start; int checksum_length; int checksum_offset; int num_fans; u16 *fan_sensor_offsets; u16 *fan_ctrl_offsets; int num_temp_sensors; int temp_sensor_start_offset; int num_virtual_temp_sensors; int virtual_temp_sensor_start_offset; int num_calc_virt_temp_sensors; int calc_virt_temp_sensor_start_offset; u16 temp_ctrl_offset; u16 power_cycle_count_offset; int num_flow_sensors; u8 flow_sensors_start_offset; u8 flow_pulses_ctrl_offset; struct aqc_fan_structure_offsets *fan_structure; /* General info, same across all devices */ u8 serial_number_start_offset; u32 serial_number[2]; u8 firmware_version_offset; u16 firmware_version; /* How many times the device was powered on, if available */ u32 power_cycles; /* Sensor values */ s32 temp_input[20]; /* Max 4 physical and 16 virtual or 8 physical and 12 virtual */ s32 speed_input[9]; u32 speed_input_min[1]; u32 speed_input_target[1]; u32 speed_input_max[1]; u32 power_input[8]; u16 voltage_input[8]; u16 current_input[8]; /* Label values */ const char *const *temp_label; const char *const *virtual_temp_label; const char *const *calc_virt_temp_label; /* For Aquaero */ const char *const *speed_label; const char *const *power_label; const char *const *voltage_label; const char *const *current_label; unsigned long updated; }; /* Converts from centi-percent */ static int aqc_percent_to_pwm(u16 val) { return DIV_ROUND_CLOSEST(val * 255, 100 * 100); } /* Converts to centi-percent */ static int aqc_pwm_to_percent(long val) { if (val < 0 || val > 255) return -EINVAL; return DIV_ROUND_CLOSEST(val * 100 * 100, 255); } /* Converts raw value for Aquastream XT pump speed to RPM */ static int aqc_aquastreamxt_convert_pump_rpm(u16 val) { if (val > 0) return DIV_ROUND_CLOSEST(AQUASTREAMXT_PUMP_CONVERSION_CONST, val); return 0; } /* Converts raw value for Aquastream XT fan speed to RPM */ static int aqc_aquastreamxt_convert_fan_rpm(u16 val) { if (val > 0) return DIV_ROUND_CLOSEST(AQUASTREAMXT_FAN_CONVERSION_CONST, val); return 0; } static void aqc_delay_ctrl_report(struct aqc_data *priv) { /* * If previous read or write is too close to this one, delay the current operation * to give the device enough time to process the previous one. 
*/ if (priv->ctrl_report_delay) { s64 delta = ktime_ms_delta(ktime_get(), priv->last_ctrl_report_op); if (delta < priv->ctrl_report_delay) msleep(priv->ctrl_report_delay - delta); } } /* Expects the mutex to be locked */ static int aqc_get_ctrl_data(struct aqc_data *priv) { int ret; aqc_delay_ctrl_report(priv); memset(priv->buffer, 0x00, priv->buffer_size); ret = hid_hw_raw_request(priv->hdev, priv->ctrl_report_id, priv->buffer, priv->buffer_size, HID_FEATURE_REPORT, HID_REQ_GET_REPORT); if (ret < 0) ret = -ENODATA; priv->last_ctrl_report_op = ktime_get(); return ret; } /* Expects the mutex to be locked */ static int aqc_send_ctrl_data(struct aqc_data *priv) { int ret; u16 checksum; aqc_delay_ctrl_report(priv); /* Checksum is not needed for Aquaero */ if (priv->kind != aquaero) { /* Init and xorout value for CRC-16/USB is 0xffff */ checksum = crc16(0xffff, priv->buffer + priv->checksum_start, priv->checksum_length); checksum ^= 0xffff; /* Place the new checksum at the end of the report */ put_unaligned_be16(checksum, priv->buffer + priv->checksum_offset); } /* Send the patched up report back to the device */ ret = hid_hw_raw_request(priv->hdev, priv->ctrl_report_id, priv->buffer, priv->buffer_size, HID_FEATURE_REPORT, HID_REQ_SET_REPORT); if (ret < 0) goto record_access_and_ret; /* The official software sends this report after every change, so do it here as well */ ret = hid_hw_raw_request(priv->hdev, priv->secondary_ctrl_report_id, priv->secondary_ctrl_report, priv->secondary_ctrl_report_size, HID_FEATURE_REPORT, HID_REQ_SET_REPORT); record_access_and_ret: priv->last_ctrl_report_op = ktime_get(); return ret; } /* Refreshes the control buffer and stores value at offset in val */ static int aqc_get_ctrl_val(struct aqc_data *priv, int offset, long *val, int type) { int ret; mutex_lock(&priv->mutex); ret = aqc_get_ctrl_data(priv); if (ret < 0) goto unlock_and_return; switch (type) { case AQC_BE16: *val = (s16)get_unaligned_be16(priv->buffer + offset); break; case AQC_8: *val = priv->buffer[offset]; break; default: ret = -EINVAL; } unlock_and_return: mutex_unlock(&priv->mutex); return ret; } static int aqc_set_ctrl_vals(struct aqc_data *priv, int *offsets, long *vals, int *types, int len) { int ret, i; mutex_lock(&priv->mutex); ret = aqc_get_ctrl_data(priv); if (ret < 0) goto unlock_and_return; for (i = 0; i < len; i++) { switch (types[i]) { case AQC_BE16: put_unaligned_be16((s16)vals[i], priv->buffer + offsets[i]); break; case AQC_8: priv->buffer[offsets[i]] = (u8)vals[i]; break; default: ret = -EINVAL; } } if (ret < 0) goto unlock_and_return; ret = aqc_send_ctrl_data(priv); unlock_and_return: mutex_unlock(&priv->mutex); return ret; } static int aqc_set_ctrl_val(struct aqc_data *priv, int offset, long val, int type) { return aqc_set_ctrl_vals(priv, &offset, &val, &type, 1); } static umode_t aqc_is_visible(const void *data, enum hwmon_sensor_types type, u32 attr, int channel) { const struct aqc_data *priv = data; switch (type) { case hwmon_temp: if (channel < priv->num_temp_sensors) { switch (attr) { case hwmon_temp_label: case hwmon_temp_input: return 0444; case hwmon_temp_offset: if (priv->temp_ctrl_offset != 0) return 0644; break; default: break; } } if (channel < priv->num_temp_sensors + priv->num_virtual_temp_sensors + priv->num_calc_virt_temp_sensors) switch (attr) { case hwmon_temp_label: case hwmon_temp_input: return 0444; default: break; } break; case hwmon_pwm: if (priv->fan_ctrl_offsets && channel < priv->num_fans) { switch (attr) { case hwmon_pwm_input: return 0644; default: break; 
} } break; case hwmon_fan: switch (attr) { case hwmon_fan_input: case hwmon_fan_label: switch (priv->kind) { case aquastreamult: /* * Special case to support pump RPM, fan RPM, * pressure and flow sensor */ if (channel < 4) return 0444; break; case highflownext: /* Special case to support flow sensor, water quality * and conductivity */ if (channel < 3) return 0444; break; case leakshield: /* Special case for Leakshield sensors */ if (channel < 5) return 0444; break; case aquaero: case octo: case quadro: case highflow: /* Special case to support flow sensors */ if (channel < priv->num_fans + priv->num_flow_sensors) return 0444; break; default: if (channel < priv->num_fans) return 0444; break; } break; case hwmon_fan_pulses: /* Special case for Quadro/Octo flow sensor */ if (channel == priv->num_fans) { switch (priv->kind) { case quadro: case octo: return 0644; default: break; } } break; case hwmon_fan_min: case hwmon_fan_max: case hwmon_fan_target: /* Special case for Leakshield pressure sensor */ if (priv->kind == leakshield && channel == 0) return 0444; break; default: break; } break; case hwmon_power: switch (priv->kind) { case aquastreamult: /* Special case to support pump and fan power */ if (channel < 2) return 0444; break; case highflownext: /* Special case to support one power sensor */ if (channel == 0) return 0444; break; case aquastreamxt: break; default: if (channel < priv->num_fans) return 0444; break; } break; case hwmon_curr: switch (priv->kind) { case aquastreamult: /* Special case to support pump and fan current */ if (channel < 2) return 0444; break; case aquastreamxt: /* Special case to support pump current */ if (channel == 0) return 0444; break; default: if (channel < priv->num_fans) return 0444; break; } break; case hwmon_in: switch (priv->kind) { case d5next: /* Special case to support +5V and +12V voltage sensors */ if (channel < priv->num_fans + 2) return 0444; break; case aquastreamult: case highflownext: /* Special case to support two voltage sensors */ if (channel < 2) return 0444; break; default: if (channel < priv->num_fans) return 0444; break; } break; default: break; } return 0; } /* Read device sensors by manually requesting the sensor report (legacy way) */ static int aqc_legacy_read(struct aqc_data *priv) { int ret, i, sensor_value; mutex_lock(&priv->mutex); memset(priv->buffer, 0x00, priv->buffer_size); ret = hid_hw_raw_request(priv->hdev, priv->status_report_id, priv->buffer, priv->buffer_size, HID_FEATURE_REPORT, HID_REQ_GET_REPORT); if (ret < 0) goto unlock_and_return; /* Temperature sensor readings */ for (i = 0; i < priv->num_temp_sensors; i++) { sensor_value = get_unaligned_le16(priv->buffer + priv->temp_sensor_start_offset + i * AQC_SENSOR_SIZE); if (sensor_value == AQC_SENSOR_NA) priv->temp_input[i] = -ENODATA; else priv->temp_input[i] = sensor_value * 10; } /* Special-case sensor readings */ switch (priv->kind) { case aquastreamxt: /* Info provided with every report */ priv->serial_number[0] = get_unaligned_le16(priv->buffer + priv->serial_number_start_offset); priv->firmware_version = get_unaligned_le16(priv->buffer + priv->firmware_version_offset); /* Read pump speed in RPM */ sensor_value = get_unaligned_le16(priv->buffer + priv->fan_sensor_offsets[0]); priv->speed_input[0] = aqc_aquastreamxt_convert_pump_rpm(sensor_value); /* Read fan speed in RPM, if available */ sensor_value = get_unaligned_le16(priv->buffer + AQUASTREAMXT_FAN_STATUS_OFFSET); if (sensor_value == AQUASTREAMXT_FAN_STOPPED) { priv->speed_input[1] = 0; } else { sensor_value = 
get_unaligned_le16(priv->buffer + priv->fan_sensor_offsets[1]); priv->speed_input[1] = aqc_aquastreamxt_convert_fan_rpm(sensor_value); } /* Calculation derived from linear regression */ sensor_value = get_unaligned_le16(priv->buffer + AQUASTREAMXT_PUMP_CURR_OFFSET); priv->current_input[0] = DIV_ROUND_CLOSEST(sensor_value * 176, 100) - 52; sensor_value = get_unaligned_le16(priv->buffer + AQUASTREAMXT_PUMP_VOLTAGE_OFFSET); priv->voltage_input[0] = DIV_ROUND_CLOSEST(sensor_value * 1000, 61); sensor_value = get_unaligned_le16(priv->buffer + AQUASTREAMXT_FAN_VOLTAGE_OFFSET); priv->voltage_input[1] = DIV_ROUND_CLOSEST(sensor_value * 1000, 63); break; case highflow: /* Info provided with every report */ priv->serial_number[0] = get_unaligned_le16(priv->buffer + priv->serial_number_start_offset); priv->firmware_version = get_unaligned_le16(priv->buffer + priv->firmware_version_offset); /* Read flow speed */ priv->speed_input[0] = get_unaligned_le16(priv->buffer + priv->flow_sensors_start_offset); break; default: break; } priv->updated = jiffies; unlock_and_return: mutex_unlock(&priv->mutex); return ret; } static int aqc_read(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel, long *val) { int ret; struct aqc_data *priv = dev_get_drvdata(dev); if (time_after(jiffies, priv->updated + STATUS_UPDATE_INTERVAL)) { if (priv->status_report_id != 0) { /* Legacy devices require manual reads */ ret = aqc_legacy_read(priv); if (ret < 0) return -ENODATA; } else { return -ENODATA; } } switch (type) { case hwmon_temp: switch (attr) { case hwmon_temp_input: if (priv->temp_input[channel] == -ENODATA) return -ENODATA; *val = priv->temp_input[channel]; break; case hwmon_temp_offset: ret = aqc_get_ctrl_val(priv, priv->temp_ctrl_offset + channel * AQC_SENSOR_SIZE, val, AQC_BE16); if (ret < 0) return ret; *val *= 10; break; default: break; } break; case hwmon_fan: switch (attr) { case hwmon_fan_input: if (priv->speed_input[channel] == -ENODATA) return -ENODATA; *val = priv->speed_input[channel]; break; case hwmon_fan_min: *val = priv->speed_input_min[channel]; break; case hwmon_fan_max: *val = priv->speed_input_max[channel]; break; case hwmon_fan_target: *val = priv->speed_input_target[channel]; break; case hwmon_fan_pulses: ret = aqc_get_ctrl_val(priv, priv->flow_pulses_ctrl_offset, val, AQC_BE16); if (ret < 0) return ret; break; default: break; } break; case hwmon_power: *val = priv->power_input[channel]; break; case hwmon_pwm: switch (priv->kind) { case aquaero: ret = aqc_get_ctrl_val(priv, AQUAERO_CTRL_PRESET_START + channel * AQUAERO_CTRL_PRESET_SIZE, val, AQC_BE16); if (ret < 0) return ret; *val = aqc_percent_to_pwm(*val); break; default: ret = aqc_get_ctrl_val(priv, priv->fan_ctrl_offsets[channel], val, AQC_BE16); if (ret < 0) return ret; *val = aqc_percent_to_pwm(*val); break; } break; case hwmon_in: *val = priv->voltage_input[channel]; break; case hwmon_curr: *val = priv->current_input[channel]; break; default: return -EOPNOTSUPP; } return 0; } static int aqc_read_string(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel, const char **str) { struct aqc_data *priv = dev_get_drvdata(dev); /* Number of sensors that are not calculated */ int num_non_calc_sensors = priv->num_temp_sensors + priv->num_virtual_temp_sensors; switch (type) { case hwmon_temp: if (channel < priv->num_temp_sensors) { *str = priv->temp_label[channel]; } else { if (priv->kind == aquaero && channel >= num_non_calc_sensors) *str = priv->calc_virt_temp_label[channel - num_non_calc_sensors]; else *str = 
priv->virtual_temp_label[channel - priv->num_temp_sensors]; } break; case hwmon_fan: *str = priv->speed_label[channel]; break; case hwmon_power: *str = priv->power_label[channel]; break; case hwmon_in: *str = priv->voltage_label[channel]; break; case hwmon_curr: *str = priv->current_label[channel]; break; default: return -EOPNOTSUPP; } return 0; } static int aqc_write(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel, long val) { int ret, pwm_value; /* Arrays for setting multiple values at once in the control report */ int ctrl_values_offsets[4]; long ctrl_values[4]; int ctrl_values_types[4]; struct aqc_data *priv = dev_get_drvdata(dev); switch (type) { case hwmon_temp: switch (attr) { case hwmon_temp_offset: /* Limit temp offset to +/- 15K as in the official software */ val = clamp_val(val, -15000, 15000) / 10; ret = aqc_set_ctrl_val(priv, priv->temp_ctrl_offset + channel * AQC_SENSOR_SIZE, val, AQC_BE16); if (ret < 0) return ret; break; default: return -EOPNOTSUPP; } break; case hwmon_fan: switch (attr) { case hwmon_fan_pulses: val = clamp_val(val, 10, 1000); ret = aqc_set_ctrl_val(priv, priv->flow_pulses_ctrl_offset, val, AQC_BE16); if (ret < 0) return ret; break; default: break; } break; case hwmon_pwm: switch (attr) { case hwmon_pwm_input: pwm_value = aqc_pwm_to_percent(val); if (pwm_value < 0) return pwm_value; switch (priv->kind) { case aquaero: /* Write pwm value to preset corresponding to the channel */ ctrl_values_offsets[0] = AQUAERO_CTRL_PRESET_START + channel * AQUAERO_CTRL_PRESET_SIZE; ctrl_values[0] = pwm_value; ctrl_values_types[0] = AQC_BE16; /* Write preset number in fan control source */ ctrl_values_offsets[1] = priv->fan_ctrl_offsets[channel] + AQUAERO_FAN_CTRL_SRC_OFFSET; ctrl_values[1] = AQUAERO_CTRL_PRESET_ID + channel; ctrl_values_types[1] = AQC_BE16; /* Set minimum power to 0 to allow the fan to turn off */ ctrl_values_offsets[2] = priv->fan_ctrl_offsets[channel] + AQUAERO_FAN_CTRL_MIN_PWR_OFFSET; ctrl_values[2] = 0; ctrl_values_types[2] = AQC_BE16; /* Set maximum power to 255 to allow the fan to reach max speed */ ctrl_values_offsets[3] = priv->fan_ctrl_offsets[channel] + AQUAERO_FAN_CTRL_MAX_PWR_OFFSET; ctrl_values[3] = aqc_pwm_to_percent(255); ctrl_values_types[3] = AQC_BE16; ret = aqc_set_ctrl_vals(priv, ctrl_values_offsets, ctrl_values, ctrl_values_types, 4); if (ret < 0) return ret; break; default: ret = aqc_set_ctrl_val(priv, priv->fan_ctrl_offsets[channel], pwm_value, AQC_BE16); if (ret < 0) return ret; break; } break; default: break; } break; default: return -EOPNOTSUPP; } return 0; } static const struct hwmon_ops aqc_hwmon_ops = { .is_visible = aqc_is_visible, .read = aqc_read, .read_string = aqc_read_string, .write = aqc_write }; static const struct hwmon_channel_info * const aqc_info[] = { HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_OFFSET, HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_OFFSET, HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_OFFSET, HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_OFFSET, HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_OFFSET, HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_OFFSET, HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_OFFSET, HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_OFFSET, HWMON_T_INPUT | HWMON_T_LABEL, HWMON_T_INPUT | HWMON_T_LABEL, HWMON_T_INPUT | HWMON_T_LABEL, HWMON_T_INPUT | HWMON_T_LABEL, HWMON_T_INPUT | HWMON_T_LABEL, HWMON_T_INPUT | HWMON_T_LABEL, HWMON_T_INPUT | HWMON_T_LABEL, HWMON_T_INPUT | HWMON_T_LABEL, HWMON_T_INPUT | HWMON_T_LABEL, HWMON_T_INPUT | HWMON_T_LABEL, HWMON_T_INPUT | HWMON_T_LABEL, 
HWMON_T_INPUT | HWMON_T_LABEL), HWMON_CHANNEL_INFO(fan, HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MIN | HWMON_F_MAX | HWMON_F_TARGET, HWMON_F_INPUT | HWMON_F_LABEL, HWMON_F_INPUT | HWMON_F_LABEL, HWMON_F_INPUT | HWMON_F_LABEL, HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_PULSES, HWMON_F_INPUT | HWMON_F_LABEL, HWMON_F_INPUT | HWMON_F_LABEL, HWMON_F_INPUT | HWMON_F_LABEL, HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_PULSES), HWMON_CHANNEL_INFO(power, HWMON_P_INPUT | HWMON_P_LABEL, HWMON_P_INPUT | HWMON_P_LABEL, HWMON_P_INPUT | HWMON_P_LABEL, HWMON_P_INPUT | HWMON_P_LABEL, HWMON_P_INPUT | HWMON_P_LABEL, HWMON_P_INPUT | HWMON_P_LABEL, HWMON_P_INPUT | HWMON_P_LABEL, HWMON_P_INPUT | HWMON_P_LABEL), HWMON_CHANNEL_INFO(pwm, HWMON_PWM_INPUT, HWMON_PWM_INPUT, HWMON_PWM_INPUT, HWMON_PWM_INPUT, HWMON_PWM_INPUT, HWMON_PWM_INPUT, HWMON_PWM_INPUT, HWMON_PWM_INPUT), HWMON_CHANNEL_INFO(in, HWMON_I_INPUT | HWMON_I_LABEL, HWMON_I_INPUT | HWMON_I_LABEL, HWMON_I_INPUT | HWMON_I_LABEL, HWMON_I_INPUT | HWMON_I_LABEL, HWMON_I_INPUT | HWMON_I_LABEL, HWMON_I_INPUT | HWMON_I_LABEL, HWMON_I_INPUT | HWMON_I_LABEL, HWMON_I_INPUT | HWMON_I_LABEL), HWMON_CHANNEL_INFO(curr, HWMON_C_INPUT | HWMON_C_LABEL, HWMON_C_INPUT | HWMON_C_LABEL, HWMON_C_INPUT | HWMON_C_LABEL, HWMON_C_INPUT | HWMON_C_LABEL, HWMON_C_INPUT | HWMON_C_LABEL, HWMON_C_INPUT | HWMON_C_LABEL, HWMON_C_INPUT | HWMON_C_LABEL, HWMON_C_INPUT | HWMON_C_LABEL), NULL }; static const struct hwmon_chip_info aqc_chip_info = { .ops = &aqc_hwmon_ops, .info = aqc_info, }; static int aqc_raw_event(struct hid_device *hdev, struct hid_report *report, u8 *data, int size) { int i, j, sensor_value; struct aqc_data *priv; if (report->id != STATUS_REPORT_ID) return 0; priv = hid_get_drvdata(hdev); /* Info provided with every report */ priv->serial_number[0] = get_unaligned_be16(data + priv->serial_number_start_offset); priv->serial_number[1] = get_unaligned_be16(data + priv->serial_number_start_offset + SERIAL_PART_OFFSET); priv->firmware_version = get_unaligned_be16(data + priv->firmware_version_offset); /* Physical temperature sensor readings */ for (i = 0; i < priv->num_temp_sensors; i++) { sensor_value = get_unaligned_be16(data + priv->temp_sensor_start_offset + i * AQC_SENSOR_SIZE); if (sensor_value == AQC_SENSOR_NA) priv->temp_input[i] = -ENODATA; else priv->temp_input[i] = sensor_value * 10; } /* Virtual temperature sensor readings */ for (j = 0; j < priv->num_virtual_temp_sensors; j++) { sensor_value = get_unaligned_be16(data + priv->virtual_temp_sensor_start_offset + j * AQC_SENSOR_SIZE); if (sensor_value == AQC_SENSOR_NA) priv->temp_input[i] = -ENODATA; else priv->temp_input[i] = sensor_value * 10; i++; } /* Fan speed and related readings */ for (i = 0; i < priv->num_fans; i++) { priv->speed_input[i] = get_unaligned_be16(data + priv->fan_sensor_offsets[i] + priv->fan_structure->speed); priv->power_input[i] = get_unaligned_be16(data + priv->fan_sensor_offsets[i] + priv->fan_structure->power) * 10000; priv->voltage_input[i] = get_unaligned_be16(data + priv->fan_sensor_offsets[i] + priv->fan_structure->voltage) * 10; priv->current_input[i] = get_unaligned_be16(data + priv->fan_sensor_offsets[i] + priv->fan_structure->curr); } /* Flow sensor readings */ for (j = 0; j < priv->num_flow_sensors; j++) { priv->speed_input[i] = get_unaligned_be16(data + priv->flow_sensors_start_offset + j * AQC_SENSOR_SIZE); i++; } if (priv->power_cycle_count_offset != 0) priv->power_cycles = get_unaligned_be32(data + priv->power_cycle_count_offset); /* Special-case sensor readings */ switch (priv->kind) { 
case aquaero: /* Read calculated virtual temp sensors */ i = priv->num_temp_sensors + priv->num_virtual_temp_sensors; for (j = 0; j < priv->num_calc_virt_temp_sensors; j++) { sensor_value = get_unaligned_be16(data + priv->calc_virt_temp_sensor_start_offset + j * AQC_SENSOR_SIZE); if (sensor_value == AQC_SENSOR_NA) priv->temp_input[i] = -ENODATA; else priv->temp_input[i] = sensor_value * 10; i++; } break; case aquastreamult: priv->speed_input[1] = get_unaligned_be16(data + AQUASTREAMULT_PUMP_OFFSET); priv->speed_input[2] = get_unaligned_be16(data + AQUASTREAMULT_PRESSURE_OFFSET); priv->speed_input[3] = get_unaligned_be16(data + AQUASTREAMULT_FLOW_SENSOR_OFFSET); priv->power_input[1] = get_unaligned_be16(data + AQUASTREAMULT_PUMP_POWER) * 10000; priv->voltage_input[1] = get_unaligned_be16(data + AQUASTREAMULT_PUMP_VOLTAGE) * 10; priv->current_input[1] = get_unaligned_be16(data + AQUASTREAMULT_PUMP_CURRENT); break; case d5next: priv->voltage_input[2] = get_unaligned_be16(data + D5NEXT_5V_VOLTAGE) * 10; priv->voltage_input[3] = get_unaligned_be16(data + D5NEXT_12V_VOLTAGE) * 10; break; case highflownext: /* If external temp sensor is not connected, its power reading is also N/A */ if (priv->temp_input[1] == -ENODATA) priv->power_input[0] = -ENODATA; else priv->power_input[0] = get_unaligned_be16(data + HIGHFLOWNEXT_POWER) * 1000000; priv->voltage_input[0] = get_unaligned_be16(data + HIGHFLOWNEXT_5V_VOLTAGE) * 10; priv->voltage_input[1] = get_unaligned_be16(data + HIGHFLOWNEXT_5V_VOLTAGE_USB) * 10; priv->speed_input[1] = get_unaligned_be16(data + HIGHFLOWNEXT_WATER_QUALITY); priv->speed_input[2] = get_unaligned_be16(data + HIGHFLOWNEXT_CONDUCTIVITY); break; case leakshield: priv->speed_input[0] = ((s16)get_unaligned_be16(data + LEAKSHIELD_PRESSURE_ADJUSTED)) * 100; priv->speed_input_min[0] = get_unaligned_be16(data + LEAKSHIELD_PRESSURE_MIN) * 100; priv->speed_input_target[0] = get_unaligned_be16(data + LEAKSHIELD_PRESSURE_TARGET) * 100; priv->speed_input_max[0] = get_unaligned_be16(data + LEAKSHIELD_PRESSURE_MAX) * 100; priv->speed_input[1] = get_unaligned_be16(data + LEAKSHIELD_PUMP_RPM_IN); if (priv->speed_input[1] == AQC_SENSOR_NA) priv->speed_input[1] = -ENODATA; priv->speed_input[2] = get_unaligned_be16(data + LEAKSHIELD_FLOW_IN); if (priv->speed_input[2] == AQC_SENSOR_NA) priv->speed_input[2] = -ENODATA; priv->speed_input[3] = get_unaligned_be16(data + LEAKSHIELD_RESERVOIR_VOLUME); priv->speed_input[4] = get_unaligned_be16(data + LEAKSHIELD_RESERVOIR_FILLED); /* Second temp sensor is not positioned after the first one, read it here */ priv->temp_input[1] = get_unaligned_be16(data + LEAKSHIELD_TEMPERATURE_2) * 10; break; default: break; } priv->updated = jiffies; return 0; } static int serial_number_show(struct seq_file *seqf, void *unused) { struct aqc_data *priv = seqf->private; seq_printf(seqf, "%05u-%05u\n", priv->serial_number[0], priv->serial_number[1]); return 0; } DEFINE_SHOW_ATTRIBUTE(serial_number); static int firmware_version_show(struct seq_file *seqf, void *unused) { struct aqc_data *priv = seqf->private; seq_printf(seqf, "%u\n", priv->firmware_version); return 0; } DEFINE_SHOW_ATTRIBUTE(firmware_version); static int power_cycles_show(struct seq_file *seqf, void *unused) { struct aqc_data *priv = seqf->private; seq_printf(seqf, "%u\n", priv->power_cycles); return 0; } DEFINE_SHOW_ATTRIBUTE(power_cycles); static void aqc_debugfs_init(struct aqc_data *priv) { char name[64]; scnprintf(name, sizeof(name), "%s_%s-%s", "aquacomputer", priv->name, dev_name(&priv->hdev->dev)); 
priv->debugfs = debugfs_create_dir(name, NULL); if (priv->serial_number_start_offset != 0) debugfs_create_file("serial_number", 0444, priv->debugfs, priv, &serial_number_fops); if (priv->firmware_version_offset != 0) debugfs_create_file("firmware_version", 0444, priv->debugfs, priv, &firmware_version_fops); if (priv->power_cycle_count_offset != 0) debugfs_create_file("power_cycles", 0444, priv->debugfs, priv, &power_cycles_fops); } static int aqc_probe(struct hid_device *hdev, const struct hid_device_id *id) { struct aqc_data *priv; int ret; priv = devm_kzalloc(&hdev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; priv->hdev = hdev; hid_set_drvdata(hdev, priv); priv->updated = jiffies - STATUS_UPDATE_INTERVAL; ret = hid_parse(hdev); if (ret) return ret; ret = hid_hw_start(hdev, HID_CONNECT_HIDRAW); if (ret) return ret; ret = hid_hw_open(hdev); if (ret) goto fail_and_stop; switch (hdev->product) { case USB_PRODUCT_ID_AQUAERO: /* * Aquaero presents itself as three HID devices under the same product ID: * "aquaero keyboard/mouse", "aquaero System Control" and "aquaero Device", * which is the one we want to communicate with. Unlike most other Aquacomputer * devices, Aquaero does not return meaningful data when explicitly requested * using GET_FEATURE_REPORT. * * The difference between "aquaero Device" and the other two is in the collections * they present. The two other devices have the type of the second element in * their respective collections set to 1, while the real device has it set to 0. */ if (hdev->collection[1].type != 0) { ret = -ENODEV; goto fail_and_close; } priv->kind = aquaero; priv->num_fans = AQUAERO_NUM_FANS; priv->fan_sensor_offsets = aquaero_sensor_fan_offsets; priv->fan_ctrl_offsets = aquaero_ctrl_fan_offsets; priv->num_temp_sensors = AQUAERO_NUM_SENSORS; priv->temp_sensor_start_offset = AQUAERO_SENSOR_START; priv->num_virtual_temp_sensors = AQUAERO_NUM_VIRTUAL_SENSORS; priv->virtual_temp_sensor_start_offset = AQUAERO_VIRTUAL_SENSOR_START; priv->num_calc_virt_temp_sensors = AQUAERO_NUM_CALC_VIRTUAL_SENSORS; priv->calc_virt_temp_sensor_start_offset = AQUAERO_CALC_VIRTUAL_SENSOR_START; priv->num_flow_sensors = AQUAERO_NUM_FLOW_SENSORS; priv->flow_sensors_start_offset = AQUAERO_FLOW_SENSORS_START; priv->buffer_size = AQUAERO_CTRL_REPORT_SIZE; priv->temp_ctrl_offset = AQUAERO_TEMP_CTRL_OFFSET; priv->ctrl_report_delay = CTRL_REPORT_DELAY; priv->temp_label = label_temp_sensors; priv->virtual_temp_label = label_virtual_temp_sensors; priv->calc_virt_temp_label = label_aquaero_calc_temp_sensors; priv->speed_label = label_aquaero_speeds; priv->power_label = label_fan_power; priv->voltage_label = label_fan_voltage; priv->current_label = label_fan_current; break; case USB_PRODUCT_ID_D5NEXT: priv->kind = d5next; priv->num_fans = D5NEXT_NUM_FANS; priv->fan_sensor_offsets = d5next_sensor_fan_offsets; priv->fan_ctrl_offsets = d5next_ctrl_fan_offsets; priv->num_temp_sensors = D5NEXT_NUM_SENSORS; priv->temp_sensor_start_offset = D5NEXT_COOLANT_TEMP; priv->num_virtual_temp_sensors = D5NEXT_NUM_VIRTUAL_SENSORS; priv->virtual_temp_sensor_start_offset = D5NEXT_VIRTUAL_SENSORS_START; priv->temp_ctrl_offset = D5NEXT_TEMP_CTRL_OFFSET; priv->buffer_size = D5NEXT_CTRL_REPORT_SIZE; priv->ctrl_report_delay = CTRL_REPORT_DELAY; priv->power_cycle_count_offset = D5NEXT_POWER_CYCLES; priv->temp_label = label_d5next_temp; priv->virtual_temp_label = label_virtual_temp_sensors; priv->speed_label = label_d5next_speeds; priv->power_label = label_d5next_power; priv->voltage_label = 
label_d5next_voltages; priv->current_label = label_d5next_current; break; case USB_PRODUCT_ID_FARBWERK: priv->kind = farbwerk; priv->num_fans = 0; priv->num_temp_sensors = FARBWERK_NUM_SENSORS; priv->temp_sensor_start_offset = FARBWERK_SENSOR_START; priv->temp_label = label_temp_sensors; break; case USB_PRODUCT_ID_FARBWERK360: priv->kind = farbwerk360; priv->num_fans = 0; priv->num_temp_sensors = FARBWERK360_NUM_SENSORS; priv->temp_sensor_start_offset = FARBWERK360_SENSOR_START; priv->num_virtual_temp_sensors = FARBWERK360_NUM_VIRTUAL_SENSORS; priv->virtual_temp_sensor_start_offset = FARBWERK360_VIRTUAL_SENSORS_START; priv->temp_ctrl_offset = FARBWERK360_TEMP_CTRL_OFFSET; priv->buffer_size = FARBWERK360_CTRL_REPORT_SIZE; priv->temp_label = label_temp_sensors; priv->virtual_temp_label = label_virtual_temp_sensors; break; case USB_PRODUCT_ID_OCTO: priv->kind = octo; priv->num_fans = OCTO_NUM_FANS; priv->fan_sensor_offsets = octo_sensor_fan_offsets; priv->fan_ctrl_offsets = octo_ctrl_fan_offsets; priv->num_temp_sensors = OCTO_NUM_SENSORS; priv->temp_sensor_start_offset = OCTO_SENSOR_START; priv->num_virtual_temp_sensors = OCTO_NUM_VIRTUAL_SENSORS; priv->virtual_temp_sensor_start_offset = OCTO_VIRTUAL_SENSORS_START; priv->num_flow_sensors = OCTO_NUM_FLOW_SENSORS; priv->flow_sensors_start_offset = OCTO_FLOW_SENSOR_OFFSET; priv->temp_ctrl_offset = OCTO_TEMP_CTRL_OFFSET; priv->buffer_size = OCTO_CTRL_REPORT_SIZE; priv->ctrl_report_delay = CTRL_REPORT_DELAY; priv->flow_pulses_ctrl_offset = OCTO_FLOW_PULSES_CTRL_OFFSET; priv->power_cycle_count_offset = OCTO_POWER_CYCLES; priv->temp_label = label_temp_sensors; priv->virtual_temp_label = label_virtual_temp_sensors; priv->speed_label = label_octo_speeds; priv->power_label = label_fan_power; priv->voltage_label = label_fan_voltage; priv->current_label = label_fan_current; break; case USB_PRODUCT_ID_QUADRO: priv->kind = quadro; priv->num_fans = QUADRO_NUM_FANS; priv->fan_sensor_offsets = quadro_sensor_fan_offsets; priv->fan_ctrl_offsets = quadro_ctrl_fan_offsets; priv->num_temp_sensors = QUADRO_NUM_SENSORS; priv->temp_sensor_start_offset = QUADRO_SENSOR_START; priv->num_virtual_temp_sensors = QUADRO_NUM_VIRTUAL_SENSORS; priv->virtual_temp_sensor_start_offset = QUADRO_VIRTUAL_SENSORS_START; priv->num_flow_sensors = QUADRO_NUM_FLOW_SENSORS; priv->flow_sensors_start_offset = QUADRO_FLOW_SENSOR_OFFSET; priv->temp_ctrl_offset = QUADRO_TEMP_CTRL_OFFSET; priv->buffer_size = QUADRO_CTRL_REPORT_SIZE; priv->ctrl_report_delay = CTRL_REPORT_DELAY; priv->flow_pulses_ctrl_offset = QUADRO_FLOW_PULSES_CTRL_OFFSET; priv->power_cycle_count_offset = QUADRO_POWER_CYCLES; priv->temp_label = label_temp_sensors; priv->virtual_temp_label = label_virtual_temp_sensors; priv->speed_label = label_quadro_speeds; priv->power_label = label_fan_power; priv->voltage_label = label_fan_voltage; priv->current_label = label_fan_current; break; case USB_PRODUCT_ID_HIGHFLOWNEXT: priv->kind = highflownext; priv->num_fans = 0; priv->num_temp_sensors = HIGHFLOWNEXT_NUM_SENSORS; priv->temp_sensor_start_offset = HIGHFLOWNEXT_SENSOR_START; priv->num_flow_sensors = HIGHFLOWNEXT_NUM_FLOW_SENSORS; priv->flow_sensors_start_offset = HIGHFLOWNEXT_FLOW; priv->power_cycle_count_offset = QUADRO_POWER_CYCLES; priv->temp_label = label_highflownext_temp_sensors; priv->speed_label = label_highflownext_fan_speed; priv->power_label = label_highflownext_power; priv->voltage_label = label_highflownext_voltage; break; case USB_PRODUCT_ID_LEAKSHIELD: /* * Choose the right Leakshield device, because * the other one 
acts as a keyboard */ if (hdev->type != 2) { ret = -ENODEV; goto fail_and_close; } priv->kind = leakshield; priv->num_fans = 0; priv->num_temp_sensors = LEAKSHIELD_NUM_SENSORS; priv->temp_sensor_start_offset = LEAKSHIELD_TEMPERATURE_1; priv->temp_label = label_leakshield_temp_sensors; priv->speed_label = label_leakshield_fan_speed; break; case USB_PRODUCT_ID_AQUASTREAMXT: priv->kind = aquastreamxt; priv->num_fans = AQUASTREAMXT_NUM_FANS; priv->fan_sensor_offsets = aquastreamxt_sensor_fan_offsets; priv->num_temp_sensors = AQUASTREAMXT_NUM_SENSORS; priv->temp_sensor_start_offset = AQUASTREAMXT_SENSOR_START; priv->buffer_size = AQUASTREAMXT_SENSOR_REPORT_SIZE; priv->temp_label = label_aquastreamxt_temp_sensors; priv->speed_label = label_d5next_speeds; priv->voltage_label = label_d5next_voltages; priv->current_label = label_d5next_current; break; case USB_PRODUCT_ID_AQUASTREAMULT: priv->kind = aquastreamult; priv->num_fans = AQUASTREAMULT_NUM_FANS; priv->fan_sensor_offsets = aquastreamult_sensor_fan_offsets; priv->num_temp_sensors = AQUASTREAMULT_NUM_SENSORS; priv->temp_sensor_start_offset = AQUASTREAMULT_SENSOR_START; priv->temp_label = label_aquastreamult_temp; priv->speed_label = label_aquastreamult_speeds; priv->power_label = label_aquastreamult_power; priv->voltage_label = label_aquastreamult_voltages; priv->current_label = label_aquastreamult_current; break; case USB_PRODUCT_ID_POWERADJUST3: priv->kind = poweradjust3; priv->num_fans = 0; priv->num_temp_sensors = POWERADJUST3_NUM_SENSORS; priv->temp_sensor_start_offset = POWERADJUST3_SENSOR_START; priv->buffer_size = POWERADJUST3_SENSOR_REPORT_SIZE; priv->temp_label = label_poweradjust3_temp_sensors; break; case USB_PRODUCT_ID_HIGHFLOW: priv->kind = highflow; priv->num_fans = 0; priv->num_temp_sensors = HIGHFLOW_NUM_SENSORS; priv->temp_sensor_start_offset = HIGHFLOW_SENSOR_START; priv->num_flow_sensors = HIGHFLOW_NUM_FLOW_SENSORS; priv->flow_sensors_start_offset = HIGHFLOW_FLOW_SENSOR_OFFSET; priv->buffer_size = HIGHFLOW_SENSOR_REPORT_SIZE; priv->temp_label = label_highflow_temp; priv->speed_label = label_highflow_speeds; break; default: break; } switch (priv->kind) { case aquaero: priv->serial_number_start_offset = AQUAERO_SERIAL_START; priv->firmware_version_offset = AQUAERO_FIRMWARE_VERSION; priv->fan_structure = &aqc_aquaero_fan_structure; priv->ctrl_report_id = AQUAERO_CTRL_REPORT_ID; priv->secondary_ctrl_report_id = AQUAERO_SECONDARY_CTRL_REPORT_ID; priv->secondary_ctrl_report_size = AQUAERO_SECONDARY_CTRL_REPORT_SIZE; priv->secondary_ctrl_report = aquaero_secondary_ctrl_report; break; case poweradjust3: priv->status_report_id = POWERADJUST3_STATUS_REPORT_ID; break; case aquastreamxt: priv->serial_number_start_offset = AQUASTREAMXT_SERIAL_START; priv->firmware_version_offset = AQUASTREAMXT_FIRMWARE_VERSION; priv->status_report_id = AQUASTREAMXT_STATUS_REPORT_ID; break; case highflow: priv->serial_number_start_offset = HIGHFLOW_SERIAL_START; priv->firmware_version_offset = HIGHFLOW_FIRMWARE_VERSION; priv->status_report_id = HIGHFLOW_STATUS_REPORT_ID; break; default: priv->serial_number_start_offset = AQC_SERIAL_START; priv->firmware_version_offset = AQC_FIRMWARE_VERSION; priv->ctrl_report_id = CTRL_REPORT_ID; priv->secondary_ctrl_report_id = SECONDARY_CTRL_REPORT_ID; priv->secondary_ctrl_report_size = SECONDARY_CTRL_REPORT_SIZE; priv->secondary_ctrl_report = secondary_ctrl_report; if (priv->kind == aquastreamult) priv->fan_structure = &aqc_aquastreamult_fan_structure; else priv->fan_structure = &aqc_general_fan_structure; break; } if 
(priv->buffer_size != 0) { priv->checksum_start = 0x01; priv->checksum_length = priv->buffer_size - 3; priv->checksum_offset = priv->buffer_size - 2; } priv->name = aqc_device_names[priv->kind]; priv->buffer = devm_kzalloc(&hdev->dev, priv->buffer_size, GFP_KERNEL); if (!priv->buffer) { ret = -ENOMEM; goto fail_and_close; } mutex_init(&priv->mutex); priv->hwmon_dev = hwmon_device_register_with_info(&hdev->dev, priv->name, priv, &aqc_chip_info, NULL); if (IS_ERR(priv->hwmon_dev)) { ret = PTR_ERR(priv->hwmon_dev); goto fail_and_close; } aqc_debugfs_init(priv); return 0; fail_and_close: hid_hw_close(hdev); fail_and_stop: hid_hw_stop(hdev); return ret; } static void aqc_remove(struct hid_device *hdev) { struct aqc_data *priv = hid_get_drvdata(hdev); debugfs_remove_recursive(priv->debugfs); hwmon_device_unregister(priv->hwmon_dev); hid_hw_close(hdev); hid_hw_stop(hdev); } static const struct hid_device_id aqc_table[] = { { HID_USB_DEVICE(USB_VENDOR_ID_AQUACOMPUTER, USB_PRODUCT_ID_AQUAERO) }, { HID_USB_DEVICE(USB_VENDOR_ID_AQUACOMPUTER, USB_PRODUCT_ID_D5NEXT) }, { HID_USB_DEVICE(USB_VENDOR_ID_AQUACOMPUTER, USB_PRODUCT_ID_FARBWERK) }, { HID_USB_DEVICE(USB_VENDOR_ID_AQUACOMPUTER, USB_PRODUCT_ID_FARBWERK360) }, { HID_USB_DEVICE(USB_VENDOR_ID_AQUACOMPUTER, USB_PRODUCT_ID_OCTO) }, { HID_USB_DEVICE(USB_VENDOR_ID_AQUACOMPUTER, USB_PRODUCT_ID_QUADRO) }, { HID_USB_DEVICE(USB_VENDOR_ID_AQUACOMPUTER, USB_PRODUCT_ID_HIGHFLOWNEXT) }, { HID_USB_DEVICE(USB_VENDOR_ID_AQUACOMPUTER, USB_PRODUCT_ID_LEAKSHIELD) }, { HID_USB_DEVICE(USB_VENDOR_ID_AQUACOMPUTER, USB_PRODUCT_ID_AQUASTREAMXT) }, { HID_USB_DEVICE(USB_VENDOR_ID_AQUACOMPUTER, USB_PRODUCT_ID_AQUASTREAMULT) }, { HID_USB_DEVICE(USB_VENDOR_ID_AQUACOMPUTER, USB_PRODUCT_ID_POWERADJUST3) }, { HID_USB_DEVICE(USB_VENDOR_ID_AQUACOMPUTER, USB_PRODUCT_ID_HIGHFLOW) }, { } }; MODULE_DEVICE_TABLE(hid, aqc_table); static struct hid_driver aqc_driver = { .name = DRIVER_NAME, .id_table = aqc_table, .probe = aqc_probe, .remove = aqc_remove, .raw_event = aqc_raw_event, }; static int __init aqc_init(void) { return hid_register_driver(&aqc_driver); } static void __exit aqc_exit(void) { hid_unregister_driver(&aqc_driver); } /* Request to initialize after the HID bus to ensure it's not being loaded before */ late_initcall(aqc_init); module_exit(aqc_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Aleksa Savic <savicaleksa83@gmail.com>"); MODULE_AUTHOR("Jack Doan <me@jackdoan.com>"); MODULE_DESCRIPTION("Hwmon driver for Aquacomputer devices"); |
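/*
 * Illustrative sketch only, not part of the driver above: a minimal userspace
 * program showing how the channels registered via hwmon_device_register_with_info()
 * surface through the standard hwmon sysfs ABI. The hwmon index used below is an
 * assumption (it differs per system); in practice the right hwmonN is found by
 * matching its "name" attribute against one of the aqc_device_names[] strings,
 * e.g. "d5next". temp1_input is reported in millidegrees Celsius (the driver
 * stores sensor_value * 10, per the hwmon convention), and pwm1 takes 0-255 as
 * validated by aqc_pwm_to_percent().
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char name[32];
	long temp_mdeg;
	FILE *f;

	/* Assumed index; enumerate /sys/class/hwmon and match "name" instead */
	f = fopen("/sys/class/hwmon/hwmon3/name", "r");
	if (!f || !fgets(name, sizeof(name), f))
		return 1;
	fclose(f);
	name[strcspn(name, "\n")] = '\0';

	f = fopen("/sys/class/hwmon/hwmon3/temp1_input", "r");
	if (!f || fscanf(f, "%ld", &temp_mdeg) != 1)
		return 1;
	fclose(f);

	printf("%s temp1: %ld millidegrees C\n", name, temp_mdeg);
	return 0;
}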
// SPDX-License-Identifier: GPL-2.0+ /* * Copyright (c) 2001-2002 by David Brownell */ #ifndef __USB_CORE_HCD_H #define __USB_CORE_HCD_H #ifdef __KERNEL__ #include <linux/rwsem.h> #include <linux/interrupt.h> #include <linux/idr.h> #define MAX_TOPO_LEVEL 6 /* This file contains declarations of usbcore internals that are mostly * used or exposed by Host Controller Drivers. 
*/ /* * USB Packet IDs (PIDs) */ #define USB_PID_EXT 0xf0 /* USB 2.0 LPM ECN */ #define USB_PID_OUT 0xe1 #define USB_PID_ACK 0xd2 #define USB_PID_DATA0 0xc3 #define USB_PID_PING 0xb4 /* USB 2.0 */ #define USB_PID_SOF 0xa5 #define USB_PID_NYET 0x96 /* USB 2.0 */ #define USB_PID_DATA2 0x87 /* USB 2.0 */ #define USB_PID_SPLIT 0x78 /* USB 2.0 */ #define USB_PID_IN 0x69 #define USB_PID_NAK 0x5a #define USB_PID_DATA1 0x4b #define USB_PID_PREAMBLE 0x3c /* Token mode */ #define USB_PID_ERR 0x3c /* USB 2.0: handshake mode */ #define USB_PID_SETUP 0x2d #define USB_PID_STALL 0x1e #define USB_PID_MDATA 0x0f /* USB 2.0 */ /*-------------------------------------------------------------------------*/ /* * USB Host Controller Driver (usb_hcd) framework * * Since "struct usb_bus" is so thin, you can't share much code in it. * This framework is a layer over that, and should be more shareable. */ /*-------------------------------------------------------------------------*/ struct giveback_urb_bh { bool running; bool high_prio; spinlock_t lock; struct list_head head; struct work_struct bh; struct usb_host_endpoint *completing_ep; }; enum usb_dev_authorize_policy { USB_DEVICE_AUTHORIZE_NONE = 0, USB_DEVICE_AUTHORIZE_ALL = 1, USB_DEVICE_AUTHORIZE_INTERNAL = 2, }; struct usb_hcd { /* * housekeeping */ struct usb_bus self; /* hcd is-a bus */ struct kref kref; /* reference counter */ const char *product_desc; /* product/vendor string */ int speed; /* Speed for this roothub. * May be different from * hcd->driver->flags & HCD_MASK */ char irq_descr[24]; /* driver + bus # */ struct timer_list rh_timer; /* drives root-hub polling */ struct urb *status_urb; /* the current status urb */ #ifdef CONFIG_PM struct work_struct wakeup_work; /* for remote wakeup */ #endif struct work_struct died_work; /* for when the device dies */ /* * hardware info/state */ const struct hc_driver *driver; /* hw-specific hooks */ /* * OTG and some Host controllers need software interaction with phys; * other external phys should be software-transparent */ struct usb_phy *usb_phy; struct usb_phy_roothub *phy_roothub; /* Flags that need to be manipulated atomically because they can * change while the host controller is running. Always use * set_bit() or clear_bit() to change their values. */ unsigned long flags; #define HCD_FLAG_HW_ACCESSIBLE 0 /* at full power */ #define HCD_FLAG_POLL_RH 2 /* poll for rh status? */ #define HCD_FLAG_POLL_PENDING 3 /* status has changed? */ #define HCD_FLAG_WAKEUP_PENDING 4 /* root hub is resuming? */ #define HCD_FLAG_RH_RUNNING 5 /* root hub is running? */ #define HCD_FLAG_DEAD 6 /* controller has died? */ #define HCD_FLAG_INTF_AUTHORIZED 7 /* authorize interfaces? */ #define HCD_FLAG_DEFER_RH_REGISTER 8 /* Defer roothub registration */ /* The flags can be tested using these macros; they are likely to * be slightly faster than test_bit(). 
*/ #define HCD_HW_ACCESSIBLE(hcd) ((hcd)->flags & (1U << HCD_FLAG_HW_ACCESSIBLE)) #define HCD_POLL_RH(hcd) ((hcd)->flags & (1U << HCD_FLAG_POLL_RH)) #define HCD_POLL_PENDING(hcd) ((hcd)->flags & (1U << HCD_FLAG_POLL_PENDING)) #define HCD_WAKEUP_PENDING(hcd) ((hcd)->flags & (1U << HCD_FLAG_WAKEUP_PENDING)) #define HCD_RH_RUNNING(hcd) ((hcd)->flags & (1U << HCD_FLAG_RH_RUNNING)) #define HCD_DEAD(hcd) ((hcd)->flags & (1U << HCD_FLAG_DEAD)) #define HCD_DEFER_RH_REGISTER(hcd) ((hcd)->flags & (1U << HCD_FLAG_DEFER_RH_REGISTER)) /* * Specifies if interfaces are authorized by default * or they require explicit user space authorization; this bit is * settable through /sys/class/usb_host/X/interface_authorized_default */ #define HCD_INTF_AUTHORIZED(hcd) \ ((hcd)->flags & (1U << HCD_FLAG_INTF_AUTHORIZED)) /* * Specifies if devices are authorized by default * or they require explicit user space authorization; this bit is * settable through /sys/class/usb_host/X/authorized_default */ enum usb_dev_authorize_policy dev_policy; /* Flags that get set only during HCD registration or removal. */ unsigned rh_registered:1;/* is root hub registered? */ unsigned rh_pollable:1; /* may we poll the root hub? */ unsigned msix_enabled:1; /* driver has MSI-X enabled? */ unsigned msi_enabled:1; /* driver has MSI enabled? */ /* * do not manage the PHY state in the HCD core, instead let the driver * handle this (for example if the PHY can only be turned on after a * specific event) */ unsigned skip_phy_initialization:1; /* The next flag is a stopgap, to be removed when all the HCDs * support the new root-hub polling mechanism. */ unsigned uses_new_polling:1; unsigned has_tt:1; /* Integrated TT in root hub */ unsigned amd_resume_bug:1; /* AMD remote wakeup quirk */ unsigned can_do_streams:1; /* HC supports streams */ unsigned tpl_support:1; /* OTG & EH TPL support */ unsigned cant_recv_wakeups:1; /* wakeup requests from downstream aren't received */ unsigned int irq; /* irq allocated */ void __iomem *regs; /* device memory/io */ resource_size_t rsrc_start; /* memory/io resource start */ resource_size_t rsrc_len; /* memory/io resource length */ unsigned power_budget; /* in mA, 0 = no limit */ struct giveback_urb_bh high_prio_bh; struct giveback_urb_bh low_prio_bh; /* bandwidth_mutex should be taken before adding or removing * any new bus bandwidth constraints: * 1. Before adding a configuration for a new device. * 2. Before removing the configuration to put the device into * the addressed state. * 3. Before selecting a different configuration. * 4. Before selecting an alternate interface setting. * * bandwidth_mutex should be dropped after a successful control message * to the device, or resetting the bandwidth after a failed attempt. 
*/ struct mutex *address0_mutex; struct mutex *bandwidth_mutex; struct usb_hcd *shared_hcd; struct usb_hcd *primary_hcd; #define HCD_BUFFER_POOLS 4 struct dma_pool *pool[HCD_BUFFER_POOLS]; int state; # define __ACTIVE 0x01 # define __SUSPEND 0x04 # define __TRANSIENT 0x80 # define HC_STATE_HALT 0 # define HC_STATE_RUNNING (__ACTIVE) # define HC_STATE_QUIESCING (__SUSPEND|__TRANSIENT|__ACTIVE) # define HC_STATE_RESUMING (__SUSPEND|__TRANSIENT) # define HC_STATE_SUSPENDED (__SUSPEND) #define HC_IS_RUNNING(state) ((state) & __ACTIVE) #define HC_IS_SUSPENDED(state) ((state) & __SUSPEND) /* memory pool for HCs having local memory, or %NULL */ struct gen_pool *localmem_pool; /* more shared queuing code would be good; it should support * smarter scheduling, handle transaction translators, etc; * input size of periodic table to an interrupt scheduler. * (ohci 32, uhci 1024, ehci 256/512/1024). */ /* The HC driver's private data is stored at the end of * this structure. */ unsigned long hcd_priv[] __attribute__ ((aligned(sizeof(s64)))); }; /* 2.4 does this a bit differently ... */ static inline struct usb_bus *hcd_to_bus(struct usb_hcd *hcd) { return &hcd->self; } static inline struct usb_hcd *bus_to_hcd(struct usb_bus *bus) { return container_of(bus, struct usb_hcd, self); } /*-------------------------------------------------------------------------*/ struct hc_driver { const char *description; /* "ehci-hcd" etc */ const char *product_desc; /* product/vendor string */ size_t hcd_priv_size; /* size of private data */ /* irq handler */ irqreturn_t (*irq) (struct usb_hcd *hcd); int flags; #define HCD_MEMORY 0x0001 /* HC regs use memory (else I/O) */ #define HCD_DMA 0x0002 /* HC uses DMA */ #define HCD_SHARED 0x0004 /* Two (or more) usb_hcds share HW */ #define HCD_USB11 0x0010 /* USB 1.1 */ #define HCD_USB2 0x0020 /* USB 2.0 */ #define HCD_USB3 0x0040 /* USB 3.0 */ #define HCD_USB31 0x0050 /* USB 3.1 */ #define HCD_USB32 0x0060 /* USB 3.2 */ #define HCD_MASK 0x0070 #define HCD_BH 0x0100 /* URB complete in BH context */ /* called to init HCD and root hub */ int (*reset) (struct usb_hcd *hcd); int (*start) (struct usb_hcd *hcd); /* NOTE: these suspend/resume calls relate to the HC as * a whole, not just the root hub; they're for PCI bus glue. */ /* called after suspending the hub, before entering D3 etc */ int (*pci_suspend)(struct usb_hcd *hcd, bool do_wakeup); /* called after entering D0 (etc), before resuming the hub */ int (*pci_resume)(struct usb_hcd *hcd, pm_message_t state); /* called just before hibernate final D3 state, allows host to poweroff parts */ int (*pci_poweroff_late)(struct usb_hcd *hcd, bool do_wakeup); /* cleanly make HCD stop writing memory and doing I/O */ void (*stop) (struct usb_hcd *hcd); /* shutdown HCD */ void (*shutdown) (struct usb_hcd *hcd); /* return current frame number */ int (*get_frame_number) (struct usb_hcd *hcd); /* manage i/o requests, device state */ int (*urb_enqueue)(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags); int (*urb_dequeue)(struct usb_hcd *hcd, struct urb *urb, int status); /* * (optional) these hooks allow an HCD to override the default DMA * mapping and unmapping routines. In general, they shouldn't be * necessary unless the host controller has special DMA requirements, * such as alignment constraints. 
If these are not specified, the * general usb_hcd_(un)?map_urb_for_dma functions will be used instead * (and it may be a good idea to call these functions in your HCD * implementation) */ int (*map_urb_for_dma)(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags); void (*unmap_urb_for_dma)(struct usb_hcd *hcd, struct urb *urb); /* hw synch, freeing endpoint resources that urb_dequeue can't */ void (*endpoint_disable)(struct usb_hcd *hcd, struct usb_host_endpoint *ep); /* (optional) reset any endpoint state such as sequence number and current window */ void (*endpoint_reset)(struct usb_hcd *hcd, struct usb_host_endpoint *ep); /* root hub support */ int (*hub_status_data) (struct usb_hcd *hcd, char *buf); int (*hub_control) (struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex, char *buf, u16 wLength); int (*bus_suspend)(struct usb_hcd *); int (*bus_resume)(struct usb_hcd *); int (*start_port_reset)(struct usb_hcd *, unsigned port_num); unsigned long (*get_resuming_ports)(struct usb_hcd *); /* force handover of high-speed port to full-speed companion */ void (*relinquish_port)(struct usb_hcd *, int); /* has a port been handed over to a companion? */ int (*port_handed_over)(struct usb_hcd *, int); /* CLEAR_TT_BUFFER completion callback */ void (*clear_tt_buffer_complete)(struct usb_hcd *, struct usb_host_endpoint *); /* xHCI specific functions */ /* Called by usb_alloc_dev to alloc HC device structures */ int (*alloc_dev)(struct usb_hcd *, struct usb_device *); /* Called by usb_disconnect to free HC device structures */ void (*free_dev)(struct usb_hcd *, struct usb_device *); /* Change a group of bulk endpoints to support multiple stream IDs */ int (*alloc_streams)(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint **eps, unsigned int num_eps, unsigned int num_streams, gfp_t mem_flags); /* Reverts a group of bulk endpoints back to not using stream IDs. * Can fail if we run out of memory. */ int (*free_streams)(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint **eps, unsigned int num_eps, gfp_t mem_flags); /* Bandwidth computation functions */ /* Note that add_endpoint() can only be called once per endpoint before * check_bandwidth() or reset_bandwidth() must be called. * drop_endpoint() can only be called once per endpoint also. * A call to xhci_drop_endpoint() followed by a call to * xhci_add_endpoint() will add the endpoint to the schedule with * possibly new parameters denoted by a different endpoint descriptor * in usb_host_endpoint. A call to xhci_add_endpoint() followed by a * call to xhci_drop_endpoint() is not allowed. */ /* Allocate endpoint resources and add them to a new schedule */ int (*add_endpoint)(struct usb_hcd *, struct usb_device *, struct usb_host_endpoint *); /* Drop an endpoint from a new schedule */ int (*drop_endpoint)(struct usb_hcd *, struct usb_device *, struct usb_host_endpoint *); /* Check that a new hardware configuration, set using * endpoint_enable and endpoint_disable, does not exceed bus * bandwidth. This must be called before any set configuration * or set interface requests are sent to the device. */ int (*check_bandwidth)(struct usb_hcd *, struct usb_device *); /* Reset the device schedule to the last known good schedule, * which was set from a previous successful call to * check_bandwidth(). This reverts any add_endpoint() and * drop_endpoint() calls since that last successful call. * Used for when a check_bandwidth() call fails due to resource * or bandwidth constraints. 
*/ void (*reset_bandwidth)(struct usb_hcd *, struct usb_device *); /* Set the hardware-chosen device address */ int (*address_device)(struct usb_hcd *, struct usb_device *udev, unsigned int timeout_ms); /* prepares the hardware to send commands to the device */ int (*enable_device)(struct usb_hcd *, struct usb_device *udev); /* Notifies the HCD after a hub descriptor is fetched. * Will block. */ int (*update_hub_device)(struct usb_hcd *, struct usb_device *hdev, struct usb_tt *tt, gfp_t mem_flags); int (*reset_device)(struct usb_hcd *, struct usb_device *); /* Notifies the HCD after a device is connected and its * address is set */ int (*update_device)(struct usb_hcd *, struct usb_device *); int (*set_usb2_hw_lpm)(struct usb_hcd *, struct usb_device *, int); /* USB 3.0 Link Power Management */ /* Returns the USB3 hub-encoded value for the U1/U2 timeout. */ int (*enable_usb3_lpm_timeout)(struct usb_hcd *, struct usb_device *, enum usb3_link_state state); /* The xHCI host controller can still fail the command to * disable the LPM timeouts, so this can return an error code. */ int (*disable_usb3_lpm_timeout)(struct usb_hcd *, struct usb_device *, enum usb3_link_state state); int (*find_raw_port_number)(struct usb_hcd *, int); /* Call for power on/off the port if necessary */ int (*port_power)(struct usb_hcd *hcd, int portnum, bool enable); /* Call for SINGLE_STEP_SET_FEATURE Test for USB2 EH certification */ #define EHSET_TEST_SINGLE_STEP_SET_FEATURE 0x06 int (*submit_single_step_set_feature)(struct usb_hcd *, struct urb *, int); }; static inline int hcd_giveback_urb_in_bh(struct usb_hcd *hcd) { return hcd->driver->flags & HCD_BH; } static inline bool hcd_periodic_completion_in_progress(struct usb_hcd *hcd, struct usb_host_endpoint *ep) { return hcd->high_prio_bh.completing_ep == ep; } static inline bool hcd_uses_dma(struct usb_hcd *hcd) { return IS_ENABLED(CONFIG_HAS_DMA) && (hcd->driver->flags & HCD_DMA); } extern int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb); extern int usb_hcd_check_unlink_urb(struct usb_hcd *hcd, struct urb *urb, int status); extern void usb_hcd_unlink_urb_from_ep(struct usb_hcd *hcd, struct urb *urb); extern int usb_hcd_submit_urb(struct urb *urb, gfp_t mem_flags); extern int usb_hcd_unlink_urb(struct urb *urb, int status); extern void usb_hcd_giveback_urb(struct usb_hcd *hcd, struct urb *urb, int status); extern int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags); extern void usb_hcd_unmap_urb_setup_for_dma(struct usb_hcd *, struct urb *); extern void usb_hcd_unmap_urb_for_dma(struct usb_hcd *, struct urb *); extern void usb_hcd_flush_endpoint(struct usb_device *udev, struct usb_host_endpoint *ep); extern void usb_hcd_disable_endpoint(struct usb_device *udev, struct usb_host_endpoint *ep); extern void usb_hcd_reset_endpoint(struct usb_device *udev, struct usb_host_endpoint *ep); extern void usb_hcd_synchronize_unlinks(struct usb_device *udev); extern int usb_hcd_alloc_bandwidth(struct usb_device *udev, struct usb_host_config *new_config, struct usb_host_interface *old_alt, struct usb_host_interface *new_alt); extern int usb_hcd_get_frame_number(struct usb_device *udev); struct usb_hcd *__usb_create_hcd(const struct hc_driver *driver, struct device *sysdev, struct device *dev, const char *bus_name, struct usb_hcd *primary_hcd); extern struct usb_hcd *usb_create_hcd(const struct hc_driver *driver, struct device *dev, const char *bus_name); extern struct usb_hcd *usb_create_shared_hcd(const struct hc_driver *driver, struct 
device *dev, const char *bus_name, struct usb_hcd *shared_hcd); extern struct usb_hcd *usb_get_hcd(struct usb_hcd *hcd); extern void usb_put_hcd(struct usb_hcd *hcd); extern int usb_hcd_is_primary_hcd(struct usb_hcd *hcd); extern int usb_add_hcd(struct usb_hcd *hcd, unsigned int irqnum, unsigned long irqflags); extern void usb_remove_hcd(struct usb_hcd *hcd); extern int usb_hcd_find_raw_port_number(struct usb_hcd *hcd, int port1); int usb_hcd_setup_local_mem(struct usb_hcd *hcd, phys_addr_t phys_addr, dma_addr_t dma, size_t size); struct platform_device; extern void usb_hcd_platform_shutdown(struct platform_device *dev); #ifdef CONFIG_USB_HCD_TEST_MODE extern int ehset_single_step_set_feature(struct usb_hcd *hcd, int port); #else static inline int ehset_single_step_set_feature(struct usb_hcd *hcd, int port) { return 0; } #endif /* CONFIG_USB_HCD_TEST_MODE */ #ifdef CONFIG_USB_PCI struct pci_dev; struct pci_device_id; extern int usb_hcd_pci_probe(struct pci_dev *dev, const struct hc_driver *driver); extern void usb_hcd_pci_remove(struct pci_dev *dev); extern void usb_hcd_pci_shutdown(struct pci_dev *dev); #ifdef CONFIG_USB_PCI_AMD extern int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *dev); static inline bool usb_hcd_amd_resume_bug(struct pci_dev *dev, const struct hc_driver *driver) { if (!usb_hcd_amd_remote_wakeup_quirk(dev)) return false; if (driver->flags & (HCD_USB11 | HCD_USB3)) return true; return false; } #else /* CONFIG_USB_PCI_AMD */ static inline bool usb_hcd_amd_resume_bug(struct pci_dev *dev, const struct hc_driver *driver) { return false; } #endif extern const struct dev_pm_ops usb_hcd_pci_pm_ops; #endif /* CONFIG_USB_PCI */ /* pci-ish (pdev null is ok) buffer alloc/mapping support */ void usb_init_pool_max(void); int hcd_buffer_create(struct usb_hcd *hcd); void hcd_buffer_destroy(struct usb_hcd *hcd); void *hcd_buffer_alloc(struct usb_bus *bus, size_t size, gfp_t mem_flags, dma_addr_t *dma); void hcd_buffer_free(struct usb_bus *bus, size_t size, void *addr, dma_addr_t dma); void *hcd_buffer_alloc_pages(struct usb_hcd *hcd, size_t size, gfp_t mem_flags, dma_addr_t *dma); void hcd_buffer_free_pages(struct usb_hcd *hcd, size_t size, void *addr, dma_addr_t dma); /* generic bus glue, needed for host controllers that don't use PCI */ extern irqreturn_t usb_hcd_irq(int irq, void *__hcd); extern void usb_hc_died(struct usb_hcd *hcd); extern void usb_hcd_poll_rh_status(struct usb_hcd *hcd); extern void usb_wakeup_notification(struct usb_device *hdev, unsigned int portnum); extern void usb_hcd_start_port_resume(struct usb_bus *bus, int portnum); extern void usb_hcd_end_port_resume(struct usb_bus *bus, int portnum); /* The D0/D1 toggle bits ... 
USE WITH CAUTION (they're almost hcd-internal) */ #define usb_gettoggle(dev, ep, out) (((dev)->toggle[out] >> (ep)) & 1) #define usb_dotoggle(dev, ep, out) ((dev)->toggle[out] ^= (1 << (ep))) #define usb_settoggle(dev, ep, out, bit) \ ((dev)->toggle[out] = ((dev)->toggle[out] & ~(1 << (ep))) | \ ((bit) << (ep))) /* -------------------------------------------------------------------------- */ /* Enumeration is only for the hub driver, or HCD virtual root hubs */ extern struct usb_device *usb_alloc_dev(struct usb_device *parent, struct usb_bus *, unsigned port); extern int usb_new_device(struct usb_device *dev); extern void usb_disconnect(struct usb_device **); extern int usb_get_configuration(struct usb_device *dev); extern void usb_destroy_configuration(struct usb_device *dev); /*-------------------------------------------------------------------------*/ /* * HCD Root Hub support */ #include <linux/usb/ch11.h> /* * As of USB 2.0, full/low speed devices are segregated into trees. * One type grows from USB 1.1 host controllers (OHCI, UHCI etc). * The other type grows from high speed hubs when they connect to * full/low speed devices using "Transaction Translators" (TTs). * * TTs should only be known to the hub driver, and high speed bus * drivers (only EHCI for now). They affect periodic scheduling and * sometimes control/bulk error recovery. */ struct usb_device; struct usb_tt { struct usb_device *hub; /* upstream highspeed hub */ int multi; /* true means one TT per port */ unsigned think_time; /* think time in ns */ void *hcpriv; /* HCD private data */ /* for control/bulk error recovery (CLEAR_TT_BUFFER) */ spinlock_t lock; struct list_head clear_list; /* of usb_tt_clear */ struct work_struct clear_work; }; struct usb_tt_clear { struct list_head clear_list; unsigned tt; u16 devinfo; struct usb_hcd *hcd; struct usb_host_endpoint *ep; }; extern int usb_hub_clear_tt_buffer(struct urb *urb); extern void usb_ep0_reinit(struct usb_device *); /* (shifted) direction/type/recipient from the USB 2.0 spec, table 9.2 */ #define DeviceRequest \ ((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_DEVICE)<<8) #define DeviceOutRequest \ ((USB_DIR_OUT|USB_TYPE_STANDARD|USB_RECIP_DEVICE)<<8) #define InterfaceRequest \ ((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_INTERFACE)<<8) #define EndpointRequest \ ((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_ENDPOINT)<<8) #define EndpointOutRequest \ ((USB_DIR_OUT|USB_TYPE_STANDARD|USB_RECIP_ENDPOINT)<<8) /* class requests from the USB 2.0 hub spec, table 11-15 */ #define HUB_CLASS_REQ(dir, type, request) ((((dir) | (type)) << 8) | (request)) /* GetBusState and SetHubDescriptor are optional, omitted */ #define ClearHubFeature HUB_CLASS_REQ(USB_DIR_OUT, USB_RT_HUB, USB_REQ_CLEAR_FEATURE) #define ClearPortFeature HUB_CLASS_REQ(USB_DIR_OUT, USB_RT_PORT, USB_REQ_CLEAR_FEATURE) #define GetHubDescriptor HUB_CLASS_REQ(USB_DIR_IN, USB_RT_HUB, USB_REQ_GET_DESCRIPTOR) #define GetHubStatus HUB_CLASS_REQ(USB_DIR_IN, USB_RT_HUB, USB_REQ_GET_STATUS) #define GetPortStatus HUB_CLASS_REQ(USB_DIR_IN, USB_RT_PORT, USB_REQ_GET_STATUS) #define SetHubFeature HUB_CLASS_REQ(USB_DIR_OUT, USB_RT_HUB, USB_REQ_SET_FEATURE) #define SetPortFeature HUB_CLASS_REQ(USB_DIR_OUT, USB_RT_PORT, USB_REQ_SET_FEATURE) #define ClearTTBuffer HUB_CLASS_REQ(USB_DIR_OUT, USB_RT_PORT, HUB_CLEAR_TT_BUFFER) #define ResetTT HUB_CLASS_REQ(USB_DIR_OUT, USB_RT_PORT, HUB_RESET_TT) #define GetTTState HUB_CLASS_REQ(USB_DIR_IN, USB_RT_PORT, HUB_GET_TT_STATE) #define StopTT HUB_CLASS_REQ(USB_DIR_OUT, USB_RT_PORT, HUB_STOP_TT) 
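/*
 * Editor's illustration (not part of the original header; the constants
 * quoted below are the usual ch9 values and should be treated as an
 * assumption): HUB_CLASS_REQ() packs bRequestType into the high byte and
 * bRequest into the low byte, which is exactly the typeReq value handed
 * to an hc_driver's hub_control() hook.  With USB_DIR_IN 0x80,
 * USB_TYPE_CLASS 0x20 and USB_RECIP_OTHER 0x03 (so USB_RT_PORT == 0x23),
 * GetPortStatus expands to ((0x80 | 0x23) << 8) | 0x00 == 0xa300 and
 * SetPortFeature to ((0x00 | 0x23) << 8) | 0x03 == 0x2303.  A purely
 * hypothetical helper a root-hub emulation might use to spot
 * port-directed requests:
 */
static inline bool hcd_example_is_port_request(u16 typeReq)
{
	/* illustrative sketch only; this symbol is not used by usbcore */
	return typeReq == GetPortStatus || typeReq == SetPortFeature ||
	       typeReq == ClearPortFeature;
}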
/*-------------------------------------------------------------------------*/ /* class requests from USB 3.1 hub spec, table 10-7 */ #define SetHubDepth HUB_CLASS_REQ(USB_DIR_OUT, USB_RT_HUB, HUB_SET_DEPTH) #define GetPortErrorCount HUB_CLASS_REQ(USB_DIR_IN, USB_RT_PORT, HUB_GET_PORT_ERR_COUNT) /* * Generic bandwidth allocation constants/support */ #define FRAME_TIME_USECS 1000L #define BitTime(bytecount) (7 * 8 * bytecount / 6) /* with integer truncation */ /* Trying not to use worst-case bit-stuffing * of (7/6 * 8 * bytecount) = 9.33 * bytecount */ /* bytecount = data payload byte count */ #define NS_TO_US(ns) DIV_ROUND_UP(ns, 1000L) /* convert nanoseconds to microseconds, rounding up */ /* * Full/low speed bandwidth allocation constants/support. */ #define BW_HOST_DELAY 1000L /* nanoseconds */ #define BW_HUB_LS_SETUP 333L /* nanoseconds */ /* 4 full-speed bit times (est.) */ #define FRAME_TIME_BITS 12000L /* frame = 1 millisecond */ #define FRAME_TIME_MAX_BITS_ALLOC (90L * FRAME_TIME_BITS / 100L) #define FRAME_TIME_MAX_USECS_ALLOC (90L * FRAME_TIME_USECS / 100L) /* * Ceiling [nano/micro]seconds (typical) for that many bytes at high speed * ISO is a bit less, no ACK ... from USB 2.0 spec, 5.11.3 (and needed * to preallocate bandwidth) */ #define USB2_HOST_DELAY 5 /* nsec, guess */ #define HS_NSECS(bytes) (((55 * 8 * 2083) \ + (2083UL * (3 + BitTime(bytes))))/1000 \ + USB2_HOST_DELAY) #define HS_NSECS_ISO(bytes) (((38 * 8 * 2083) \ + (2083UL * (3 + BitTime(bytes))))/1000 \ + USB2_HOST_DELAY) #define HS_USECS(bytes) NS_TO_US(HS_NSECS(bytes)) #define HS_USECS_ISO(bytes) NS_TO_US(HS_NSECS_ISO(bytes)) extern long usb_calc_bus_time(int speed, int is_input, int isoc, int bytecount); /*-------------------------------------------------------------------------*/ extern void usb_set_device_state(struct usb_device *udev, enum usb_device_state new_state); /*-------------------------------------------------------------------------*/ /* exported only within usbcore */ extern struct idr usb_bus_idr; extern struct mutex usb_bus_idr_lock; extern wait_queue_head_t usb_kill_urb_queue; #define usb_endpoint_out(ep_dir) (!((ep_dir) & USB_DIR_IN)) #ifdef CONFIG_PM extern unsigned usb_wakeup_enabled_descendants(struct usb_device *udev); extern void usb_root_hub_lost_power(struct usb_device *rhdev); extern int hcd_bus_suspend(struct usb_device *rhdev, pm_message_t msg); extern int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg); extern void usb_hcd_resume_root_hub(struct usb_hcd *hcd); #else static inline unsigned usb_wakeup_enabled_descendants(struct usb_device *udev) { return 0; } static inline void usb_hcd_resume_root_hub(struct usb_hcd *hcd) { return; } #endif /* CONFIG_PM */ /*-------------------------------------------------------------------------*/ #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE) struct usb_mon_operations { void (*urb_submit)(struct usb_bus *bus, struct urb *urb); void (*urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err); void (*urb_complete)(struct usb_bus *bus, struct urb *urb, int status); /* void (*urb_unlink)(struct usb_bus *bus, struct urb *urb); */ }; extern const struct usb_mon_operations *mon_ops; static inline void usbmon_urb_submit(struct usb_bus *bus, struct urb *urb) { if (bus->monitored) (*mon_ops->urb_submit)(bus, urb); } static inline void usbmon_urb_submit_error(struct usb_bus *bus, struct urb *urb, int error) { if (bus->monitored) (*mon_ops->urb_submit_error)(bus, urb, error); } static inline void usbmon_urb_complete(struct usb_bus 
*bus, struct urb *urb, int status) { if (bus->monitored) (*mon_ops->urb_complete)(bus, urb, status); } int usb_mon_register(const struct usb_mon_operations *ops); void usb_mon_deregister(void); #else static inline void usbmon_urb_submit(struct usb_bus *bus, struct urb *urb) {} static inline void usbmon_urb_submit_error(struct usb_bus *bus, struct urb *urb, int error) {} static inline void usbmon_urb_complete(struct usb_bus *bus, struct urb *urb, int status) {} #endif /* CONFIG_USB_MON || CONFIG_USB_MON_MODULE */ /*-------------------------------------------------------------------------*/ /* random stuff */ /* This rwsem is for use only by the hub driver and ehci-hcd. * Nobody else should touch it. */ extern struct rw_semaphore ehci_cf_port_reset_rwsem; /* Keep track of which host controller drivers are loaded */ #define USB_UHCI_LOADED 0 #define USB_OHCI_LOADED 1 #define USB_EHCI_LOADED 2 extern unsigned long usb_hcds_loaded; #endif /* __KERNEL__ */ #endif /* __USB_CORE_HCD_H */
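/*
 * Editor's worked example (not part of the original header): the
 * high-speed bus-time helpers defined above are plain integer
 * arithmetic.  For a 512-byte transaction:
 *
 *	BitTime(512)  = 7 * 8 * 512 / 6                      = 4778
 *	HS_NSECS(512) = ((55 * 8 * 2083)
 *	                 + (2083 * (3 + 4778))) / 1000 + 5   = 10880 ns
 *	                 (the trailing + 5 is USB2_HOST_DELAY)
 *	HS_USECS(512) = NS_TO_US(10880)                      = 11 us
 *
 * i.e. roughly 11 microseconds of bus time, the kind of per-transaction
 * cost an HCD's periodic scheduler has to budget for.
 */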
// SPDX-License-Identifier: GPL-2.0 /* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * The IP fragmentation functionality. * * Authors: Fred N.
van Kempen <waltje@uWalt.NL.Mugnet.ORG> * Alan Cox <alan@lxorguk.ukuu.org.uk> * * Fixes: * Alan Cox : Split from ip.c , see ip_input.c for history. * David S. Miller : Begin massive cleanup... * Andi Kleen : Add sysctls. * xxxx : Overlapfrag bug. * Ultima : ip_expire() kernel panic. * Bill Hawes : Frag accounting and evictor fixes. * John McDonald : 0 length frag bug. * Alexey Kuznetsov: SMP races, threading, cleanup. * Patrick McHardy : LRU queue of frag heads for evictor. */ #define pr_fmt(fmt) "IPv4: " fmt #include <linux/compiler.h> #include <linux/module.h> #include <linux/types.h> #include <linux/mm.h> #include <linux/jiffies.h> #include <linux/skbuff.h> #include <linux/list.h> #include <linux/ip.h> #include <linux/icmp.h> #include <linux/netdevice.h> #include <linux/jhash.h> #include <linux/random.h> #include <linux/slab.h> #include <net/route.h> #include <net/dst.h> #include <net/sock.h> #include <net/ip.h> #include <net/icmp.h> #include <net/checksum.h> #include <net/inetpeer.h> #include <net/inet_frag.h> #include <linux/tcp.h> #include <linux/udp.h> #include <linux/inet.h> #include <linux/netfilter_ipv4.h> #include <net/inet_ecn.h> #include <net/l3mdev.h> /* NOTE. Logic of IP defragmentation is parallel to corresponding IPv6 * code now. If you change something here, _PLEASE_ update ipv6/reassembly.c * as well. Or notify me, at least. --ANK */ static const char ip_frag_cache_name[] = "ip4-frags"; /* Describe an entry in the "incomplete datagrams" queue. */ struct ipq { struct inet_frag_queue q; u8 ecn; /* RFC3168 support */ u16 max_df_size; /* largest frag with DF set seen */ int iif; unsigned int rid; struct inet_peer *peer; }; static u8 ip4_frag_ecn(u8 tos) { return 1 << (tos & INET_ECN_MASK); } static struct inet_frags ip4_frags; static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb, struct sk_buff *prev_tail, struct net_device *dev); static void ip4_frag_init(struct inet_frag_queue *q, const void *a) { struct ipq *qp = container_of(q, struct ipq, q); const struct frag_v4_compare_key *key = a; struct net *net = q->fqdir->net; struct inet_peer *p = NULL; q->key.v4 = *key; qp->ecn = 0; if (q->fqdir->max_dist) { rcu_read_lock(); p = inet_getpeer_v4(net->ipv4.peers, key->saddr, key->vif); if (p && !refcount_inc_not_zero(&p->refcnt)) p = NULL; rcu_read_unlock(); } qp->peer = p; } static void ip4_frag_free(struct inet_frag_queue *q) { struct ipq *qp; qp = container_of(q, struct ipq, q); if (qp->peer) inet_putpeer(qp->peer); } /* Destruction primitives. */ static void ipq_put(struct ipq *ipq) { inet_frag_put(&ipq->q); } /* Kill ipq entry. It is not destroyed immediately, * because caller (and someone more) holds reference count. */ static void ipq_kill(struct ipq *ipq) { inet_frag_kill(&ipq->q); } static bool frag_expire_skip_icmp(u32 user) { return user == IP_DEFRAG_AF_PACKET || ip_defrag_user_in_between(user, IP_DEFRAG_CONNTRACK_IN, __IP_DEFRAG_CONNTRACK_IN_END) || ip_defrag_user_in_between(user, IP_DEFRAG_CONNTRACK_BRIDGE_IN, __IP_DEFRAG_CONNTRACK_BRIDGE_IN); } /* * Oops, a fragment queue timed out. Kill it and send an ICMP reply. */ static void ip_expire(struct timer_list *t) { enum skb_drop_reason reason = SKB_DROP_REASON_FRAG_REASM_TIMEOUT; struct inet_frag_queue *frag = from_timer(frag, t, timer); const struct iphdr *iph; struct sk_buff *head = NULL; struct net *net; struct ipq *qp; qp = container_of(frag, struct ipq, q); net = qp->q.fqdir->net; rcu_read_lock(); /* Paired with WRITE_ONCE() in fqdir_pre_exit(). 
*/ if (READ_ONCE(qp->q.fqdir->dead)) goto out_rcu_unlock; spin_lock(&qp->q.lock); if (qp->q.flags & INET_FRAG_COMPLETE) goto out; qp->q.flags |= INET_FRAG_DROP; ipq_kill(qp); __IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS); __IP_INC_STATS(net, IPSTATS_MIB_REASMTIMEOUT); if (!(qp->q.flags & INET_FRAG_FIRST_IN)) goto out; /* sk_buff::dev and sk_buff::rbnode are unionized. So we * pull the head out of the tree in order to be able to * deal with head->dev. */ head = inet_frag_pull_head(&qp->q); if (!head) goto out; head->dev = dev_get_by_index_rcu(net, qp->iif); if (!head->dev) goto out; /* skb has no dst, perform route lookup again */ iph = ip_hdr(head); reason = ip_route_input_noref(head, iph->daddr, iph->saddr, ip4h_dscp(iph), head->dev); if (reason) goto out; /* Only an end host needs to send an ICMP * "Fragment Reassembly Timeout" message, per RFC792. */ reason = SKB_DROP_REASON_FRAG_REASM_TIMEOUT; if (frag_expire_skip_icmp(qp->q.key.v4.user) && (skb_rtable(head)->rt_type != RTN_LOCAL)) goto out; spin_unlock(&qp->q.lock); icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0); goto out_rcu_unlock; out: spin_unlock(&qp->q.lock); out_rcu_unlock: rcu_read_unlock(); kfree_skb_reason(head, reason); ipq_put(qp); } /* Find the correct entry in the "incomplete datagrams" queue for * this IP datagram, and create new one, if nothing is found. */ static struct ipq *ip_find(struct net *net, struct iphdr *iph, u32 user, int vif) { struct frag_v4_compare_key key = { .saddr = iph->saddr, .daddr = iph->daddr, .user = user, .vif = vif, .id = iph->id, .protocol = iph->protocol, }; struct inet_frag_queue *q; q = inet_frag_find(net->ipv4.fqdir, &key); if (!q) return NULL; return container_of(q, struct ipq, q); } /* Is the fragment too far ahead to be part of ipq? */ static int ip_frag_too_far(struct ipq *qp) { struct inet_peer *peer = qp->peer; unsigned int max = qp->q.fqdir->max_dist; unsigned int start, end; int rc; if (!peer || !max) return 0; start = qp->rid; end = atomic_inc_return(&peer->rid); qp->rid = end; rc = qp->q.fragments_tail && (end - start) > max; if (rc) __IP_INC_STATS(qp->q.fqdir->net, IPSTATS_MIB_REASMFAILS); return rc; } static int ip_frag_reinit(struct ipq *qp) { unsigned int sum_truesize = 0; if (!mod_timer(&qp->q.timer, jiffies + qp->q.fqdir->timeout)) { refcount_inc(&qp->q.refcnt); return -ETIMEDOUT; } sum_truesize = inet_frag_rbtree_purge(&qp->q.rb_fragments, SKB_DROP_REASON_FRAG_TOO_FAR); sub_frag_mem_limit(qp->q.fqdir, sum_truesize); qp->q.flags = 0; qp->q.len = 0; qp->q.meat = 0; qp->q.rb_fragments = RB_ROOT; qp->q.fragments_tail = NULL; qp->q.last_run_head = NULL; qp->iif = 0; qp->ecn = 0; return 0; } /* Add new segment to existing queue. */ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb) { struct net *net = qp->q.fqdir->net; int ihl, end, flags, offset; struct sk_buff *prev_tail; struct net_device *dev; unsigned int fragsize; int err = -ENOENT; SKB_DR(reason); u8 ecn; /* If reassembly is already done, @skb must be a duplicate frag. */ if (qp->q.flags & INET_FRAG_COMPLETE) { SKB_DR_SET(reason, DUP_FRAG); goto err; } if (!(IPCB(skb)->flags & IPSKB_FRAG_COMPLETE) && unlikely(ip_frag_too_far(qp)) && unlikely(err = ip_frag_reinit(qp))) { ipq_kill(qp); goto err; } ecn = ip4_frag_ecn(ip_hdr(skb)->tos); offset = ntohs(ip_hdr(skb)->frag_off); flags = offset & ~IP_OFFSET; offset &= IP_OFFSET; offset <<= 3; /* offset is in 8-byte chunks */ ihl = ip_hdrlen(skb); /* Determine the position of this fragment. 
*/ end = offset + skb->len - skb_network_offset(skb) - ihl; err = -EINVAL; /* Is this the final fragment? */ if ((flags & IP_MF) == 0) { /* If we already have some bits beyond end * or have different end, the segment is corrupted. */ if (end < qp->q.len || ((qp->q.flags & INET_FRAG_LAST_IN) && end != qp->q.len)) goto discard_qp; qp->q.flags |= INET_FRAG_LAST_IN; qp->q.len = end; } else { if (end&7) { end &= ~7; if (skb->ip_summed != CHECKSUM_UNNECESSARY) skb->ip_summed = CHECKSUM_NONE; } if (end > qp->q.len) { /* Some bits beyond end -> corruption. */ if (qp->q.flags & INET_FRAG_LAST_IN) goto discard_qp; qp->q.len = end; } } if (end == offset) goto discard_qp; err = -ENOMEM; if (!pskb_pull(skb, skb_network_offset(skb) + ihl)) goto discard_qp; err = pskb_trim_rcsum(skb, end - offset); if (err) goto discard_qp; /* Note : skb->rbnode and skb->dev share the same location. */ dev = skb->dev; /* Makes sure compiler wont do silly aliasing games */ barrier(); prev_tail = qp->q.fragments_tail; err = inet_frag_queue_insert(&qp->q, skb, offset, end); if (err) goto insert_error; if (dev) qp->iif = dev->ifindex; qp->q.stamp = skb->tstamp; qp->q.tstamp_type = skb->tstamp_type; qp->q.meat += skb->len; qp->ecn |= ecn; add_frag_mem_limit(qp->q.fqdir, skb->truesize); if (offset == 0) qp->q.flags |= INET_FRAG_FIRST_IN; fragsize = skb->len + ihl; if (fragsize > qp->q.max_size) qp->q.max_size = fragsize; if (ip_hdr(skb)->frag_off & htons(IP_DF) && fragsize > qp->max_df_size) qp->max_df_size = fragsize; if (qp->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) && qp->q.meat == qp->q.len) { unsigned long orefdst = skb->_skb_refdst; skb->_skb_refdst = 0UL; err = ip_frag_reasm(qp, skb, prev_tail, dev); skb->_skb_refdst = orefdst; if (err) inet_frag_kill(&qp->q); return err; } skb_dst_drop(skb); skb_orphan(skb); return -EINPROGRESS; insert_error: if (err == IPFRAG_DUP) { SKB_DR_SET(reason, DUP_FRAG); err = -EINVAL; goto err; } err = -EINVAL; __IP_INC_STATS(net, IPSTATS_MIB_REASM_OVERLAPS); discard_qp: inet_frag_kill(&qp->q); __IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS); err: kfree_skb_reason(skb, reason); return err; } static bool ip_frag_coalesce_ok(const struct ipq *qp) { return qp->q.key.v4.user == IP_DEFRAG_LOCAL_DELIVER; } /* Build a new IP datagram from all its fragments. */ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb, struct sk_buff *prev_tail, struct net_device *dev) { struct net *net = qp->q.fqdir->net; struct iphdr *iph; void *reasm_data; int len, err; u8 ecn; ipq_kill(qp); ecn = ip_frag_ecn_table[qp->ecn]; if (unlikely(ecn == 0xff)) { err = -EINVAL; goto out_fail; } /* Make the one we just received the head. */ reasm_data = inet_frag_reasm_prepare(&qp->q, skb, prev_tail); if (!reasm_data) goto out_nomem; len = ip_hdrlen(skb) + qp->q.len; err = -E2BIG; if (len > 65535) goto out_oversize; inet_frag_reasm_finish(&qp->q, skb, reasm_data, ip_frag_coalesce_ok(qp)); skb->dev = dev; IPCB(skb)->frag_max_size = max(qp->max_df_size, qp->q.max_size); iph = ip_hdr(skb); iph->tot_len = htons(len); iph->tos |= ecn; /* When we set IP_DF on a refragmented skb we must also force a * call to ip_fragment to avoid forwarding a DF-skb of size s while * original sender only sent fragments of size f (where f < s). * * We only set DF/IPSKB_FRAG_PMTU if such DF fragment was the largest * frag seen to avoid sending tiny DF-fragments in case skb was built * from one very small df-fragment and one large non-df frag. 
*/ if (qp->max_df_size == qp->q.max_size) { IPCB(skb)->flags |= IPSKB_FRAG_PMTU; iph->frag_off = htons(IP_DF); } else { iph->frag_off = 0; } ip_send_check(iph); __IP_INC_STATS(net, IPSTATS_MIB_REASMOKS); qp->q.rb_fragments = RB_ROOT; qp->q.fragments_tail = NULL; qp->q.last_run_head = NULL; return 0; out_nomem: net_dbg_ratelimited("queue_glue: no memory for gluing queue %p\n", qp); err = -ENOMEM; goto out_fail; out_oversize: net_info_ratelimited("Oversized IP packet from %pI4\n", &qp->q.key.v4.saddr); out_fail: __IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS); return err; } /* Process an incoming IP datagram fragment. */ int ip_defrag(struct net *net, struct sk_buff *skb, u32 user) { struct net_device *dev = skb->dev ? : skb_dst(skb)->dev; int vif = l3mdev_master_ifindex_rcu(dev); struct ipq *qp; __IP_INC_STATS(net, IPSTATS_MIB_REASMREQDS); /* Lookup (or create) queue header */ qp = ip_find(net, ip_hdr(skb), user, vif); if (qp) { int ret; spin_lock(&qp->q.lock); ret = ip_frag_queue(qp, skb); spin_unlock(&qp->q.lock); ipq_put(qp); return ret; } __IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS); kfree_skb(skb); return -ENOMEM; } EXPORT_SYMBOL(ip_defrag); struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user) { struct iphdr iph; int netoff; u32 len; if (skb->protocol != htons(ETH_P_IP)) return skb; netoff = skb_network_offset(skb); if (skb_copy_bits(skb, netoff, &iph, sizeof(iph)) < 0) return skb; if (iph.ihl < 5 || iph.version != 4) return skb; len = ntohs(iph.tot_len); if (skb->len < netoff + len || len < (iph.ihl * 4)) return skb; if (ip_is_fragment(&iph)) { skb = skb_share_check(skb, GFP_ATOMIC); if (skb) { if (!pskb_may_pull(skb, netoff + iph.ihl * 4)) { kfree_skb(skb); return NULL; } if (pskb_trim_rcsum(skb, netoff + len)) { kfree_skb(skb); return NULL; } memset(IPCB(skb), 0, sizeof(struct inet_skb_parm)); if (ip_defrag(net, skb, user)) return NULL; skb_clear_hash(skb); } } return skb; } EXPORT_SYMBOL(ip_check_defrag); #ifdef CONFIG_SYSCTL static int dist_min; static struct ctl_table ip4_frags_ns_ctl_table[] = { { .procname = "ipfrag_high_thresh", .maxlen = sizeof(unsigned long), .mode = 0644, .proc_handler = proc_doulongvec_minmax, }, { .procname = "ipfrag_low_thresh", .maxlen = sizeof(unsigned long), .mode = 0644, .proc_handler = proc_doulongvec_minmax, }, { .procname = "ipfrag_time", .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { .procname = "ipfrag_max_dist", .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &dist_min, }, }; /* secret interval has been deprecated */ static int ip4_frags_secret_interval_unused; static struct ctl_table ip4_frags_ctl_table[] = { { .procname = "ipfrag_secret_interval", .data = &ip4_frags_secret_interval_unused, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, }; static int __net_init ip4_frags_ns_ctl_register(struct net *net) { struct ctl_table *table; struct ctl_table_header *hdr; table = ip4_frags_ns_ctl_table; if (!net_eq(net, &init_net)) { table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL); if (!table) goto err_alloc; } table[0].data = &net->ipv4.fqdir->high_thresh; table[0].extra1 = &net->ipv4.fqdir->low_thresh; table[1].data = &net->ipv4.fqdir->low_thresh; table[1].extra2 = &net->ipv4.fqdir->high_thresh; table[2].data = &net->ipv4.fqdir->timeout; table[3].data = &net->ipv4.fqdir->max_dist; hdr = register_net_sysctl_sz(net, "net/ipv4", table, ARRAY_SIZE(ip4_frags_ns_ctl_table)); if (!hdr) goto err_reg; net->ipv4.frags_hdr 
= hdr; return 0; err_reg: if (!net_eq(net, &init_net)) kfree(table); err_alloc: return -ENOMEM; } static void __net_exit ip4_frags_ns_ctl_unregister(struct net *net) { const struct ctl_table *table; table = net->ipv4.frags_hdr->ctl_table_arg; unregister_net_sysctl_table(net->ipv4.frags_hdr); kfree(table); } static void __init ip4_frags_ctl_register(void) { register_net_sysctl(&init_net, "net/ipv4", ip4_frags_ctl_table); } #else static int ip4_frags_ns_ctl_register(struct net *net) { return 0; } static void ip4_frags_ns_ctl_unregister(struct net *net) { } static void __init ip4_frags_ctl_register(void) { } #endif static int __net_init ipv4_frags_init_net(struct net *net) { int res; res = fqdir_init(&net->ipv4.fqdir, &ip4_frags, net); if (res < 0) return res; /* Fragment cache limits. * * The fragment memory accounting code, (tries to) account for * the real memory usage, by measuring both the size of frag * queue struct (inet_frag_queue (ipv4:ipq/ipv6:frag_queue)) * and the SKB's truesize. * * A 64K fragment consumes 129736 bytes (44*2944)+200 * (1500 truesize == 2944, sizeof(struct ipq) == 200) * * We will commit 4MB at one time. Should we cross that limit * we will prune down to 3MB, making room for approx 8 big 64K * fragments 8x128k. */ net->ipv4.fqdir->high_thresh = 4 * 1024 * 1024; net->ipv4.fqdir->low_thresh = 3 * 1024 * 1024; /* * Important NOTE! Fragment queue must be destroyed before MSL expires. * RFC791 is wrong proposing to prolongate timer each fragment arrival * by TTL. */ net->ipv4.fqdir->timeout = IP_FRAG_TIME; net->ipv4.fqdir->max_dist = 64; res = ip4_frags_ns_ctl_register(net); if (res < 0) fqdir_exit(net->ipv4.fqdir); return res; } static void __net_exit ipv4_frags_pre_exit_net(struct net *net) { fqdir_pre_exit(net->ipv4.fqdir); } static void __net_exit ipv4_frags_exit_net(struct net *net) { ip4_frags_ns_ctl_unregister(net); fqdir_exit(net->ipv4.fqdir); } static struct pernet_operations ip4_frags_ops = { .init = ipv4_frags_init_net, .pre_exit = ipv4_frags_pre_exit_net, .exit = ipv4_frags_exit_net, }; static u32 ip4_key_hashfn(const void *data, u32 len, u32 seed) { return jhash2(data, sizeof(struct frag_v4_compare_key) / sizeof(u32), seed); } static u32 ip4_obj_hashfn(const void *data, u32 len, u32 seed) { const struct inet_frag_queue *fq = data; return jhash2((const u32 *)&fq->key.v4, sizeof(struct frag_v4_compare_key) / sizeof(u32), seed); } static int ip4_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr) { const struct frag_v4_compare_key *key = arg->key; const struct inet_frag_queue *fq = ptr; return !!memcmp(&fq->key, key, sizeof(*key)); } static const struct rhashtable_params ip4_rhash_params = { .head_offset = offsetof(struct inet_frag_queue, node), .key_offset = offsetof(struct inet_frag_queue, key), .key_len = sizeof(struct frag_v4_compare_key), .hashfn = ip4_key_hashfn, .obj_hashfn = ip4_obj_hashfn, .obj_cmpfn = ip4_obj_cmpfn, .automatic_shrinking = true, }; void __init ipfrag_init(void) { ip4_frags.constructor = ip4_frag_init; ip4_frags.destructor = ip4_frag_free; ip4_frags.qsize = sizeof(struct ipq); ip4_frags.frag_expire = ip_expire; ip4_frags.frags_cache_name = ip_frag_cache_name; ip4_frags.rhash_params = ip4_rhash_params; if (inet_frags_init(&ip4_frags)) panic("IP: failed to allocate ip4_frags cache\n"); ip4_frags_ctl_register(); register_pernet_subsys(&ip4_frags_ops); }
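/*
 * Editor's note (illustrative, not part of the original file): the
 * fragment-offset handling in ip_frag_queue() above depends on the
 * layout of the 16-bit frag_off field: the top three bits carry flags
 * (IP_DF 0x4000, IP_MF 0x2000) and the low 13 bits (IP_OFFSET 0x1fff)
 * give the offset in 8-byte units.  A minimal sketch of the same
 * decoding, using a made-up sample value:
 *
 *	u16 raw    = ntohs(ip_hdr(skb)->frag_off);   // e.g. 0x2005
 *	int flags  = raw & ~IP_OFFSET;               // 0x2000 -> IP_MF set
 *	int offset = (raw & IP_OFFSET) << 3;         // 5 * 8 = 40 bytes
 *
 * so a fragment with raw value 0x2005 starts at byte 40 of the original
 * datagram and more fragments follow it.
 */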
// SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2020 Cloudflare Ltd https://cloudflare.com */ #include <linux/skmsg.h> #include <net/sock.h> #include <net/udp.h> #include <net/inet_common.h> #include "udp_impl.h" static struct proto *udpv6_prot_saved __read_mostly; static int sk_udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags, int *addr_len) { #if IS_ENABLED(CONFIG_IPV6) if (sk->sk_family == AF_INET6) return udpv6_prot_saved->recvmsg(sk, msg, len, flags, addr_len); #endif return udp_prot.recvmsg(sk, msg, len, flags, addr_len); } static bool udp_sk_has_data(struct sock *sk) { return !skb_queue_empty(&udp_sk(sk)->reader_queue) || !skb_queue_empty(&sk->sk_receive_queue); } static bool psock_has_data(struct sk_psock *psock) { return !skb_queue_empty(&psock->ingress_skb) || !sk_psock_queue_empty(psock); } #define udp_msg_has_data(__sk, __psock) \ ({ udp_sk_has_data(__sk) || psock_has_data(__psock); }) static int udp_msg_wait_data(struct sock *sk, struct sk_psock *psock, long timeo) { DEFINE_WAIT_FUNC(wait, woken_wake_function); int ret = 0; if (sk->sk_shutdown & RCV_SHUTDOWN) return 1; if (!timeo) return ret; add_wait_queue(sk_sleep(sk), &wait); sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); ret = udp_msg_has_data(sk, psock); if (!ret) { wait_woken(&wait, TASK_INTERRUPTIBLE, timeo); ret = udp_msg_has_data(sk, psock); } sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); remove_wait_queue(sk_sleep(sk), &wait); return ret; } static int udp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags, int *addr_len) { struct sk_psock *psock; int copied, ret; if (unlikely(flags & MSG_ERRQUEUE)) return inet_recv_error(sk, msg, len, addr_len); if (!len) return 0; psock = sk_psock_get(sk); if (unlikely(!psock)) return sk_udp_recvmsg(sk, msg, len, flags, addr_len); if (!psock_has_data(psock)) { ret = sk_udp_recvmsg(sk, msg, len, flags, addr_len); goto out; } msg_bytes_ready: copied = sk_msg_recvmsg(sk, psock, msg, len, flags); if (!copied) { long timeo; int data; timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); data = udp_msg_wait_data(sk, psock, timeo); if (data) { if (psock_has_data(psock)) goto msg_bytes_ready; ret = sk_udp_recvmsg(sk, msg, len, flags, addr_len); goto out; } copied = -EAGAIN; } ret = copied; out: sk_psock_put(sk, psock); return ret; } enum { UDP_BPF_IPV4, UDP_BPF_IPV6, UDP_BPF_NUM_PROTS, }; static DEFINE_SPINLOCK(udpv6_prot_lock); static struct proto udp_bpf_prots[UDP_BPF_NUM_PROTS]; static void udp_bpf_rebuild_protos(struct proto *prot, const struct proto *base) { *prot = *base; prot->close = sock_map_close; prot->recvmsg = udp_bpf_recvmsg; prot->sock_is_readable = sk_msg_is_readable; } static void udp_bpf_check_v6_needs_rebuild(struct proto *ops) { if (unlikely(ops != smp_load_acquire(&udpv6_prot_saved))) { spin_lock_bh(&udpv6_prot_lock); if (likely(ops != udpv6_prot_saved)) { udp_bpf_rebuild_protos(&udp_bpf_prots[UDP_BPF_IPV6], ops); smp_store_release(&udpv6_prot_saved, ops); }
spin_unlock_bh(&udpv6_prot_lock); } } static int __init udp_bpf_v4_build_proto(void) { udp_bpf_rebuild_protos(&udp_bpf_prots[UDP_BPF_IPV4], &udp_prot); return 0; } late_initcall(udp_bpf_v4_build_proto); int udp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore) { int family = sk->sk_family == AF_INET ? UDP_BPF_IPV4 : UDP_BPF_IPV6; if (restore) { sk->sk_write_space = psock->saved_write_space; sock_replace_proto(sk, psock->sk_proto); return 0; } if (sk->sk_family == AF_INET6) udp_bpf_check_v6_needs_rebuild(psock->sk_proto); sock_replace_proto(sk, &udp_bpf_prots[family]); return 0; } EXPORT_SYMBOL_GPL(udp_bpf_update_proto);
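/*
 * Editor's note (illustrative, not part of the original file): the
 * proto override above only takes effect once a UDP socket is inserted
 * into a BPF sockmap/sockhash, at which point udp_bpf_update_proto()
 * swaps in udp_bpf_recvmsg() and sock_map_close() for the default
 * callbacks.  A hypothetical user-space fragment that would exercise
 * this path, assuming libbpf and descriptors (sockmap_fd, udp_sock_fd)
 * created elsewhere:
 *
 *	int key = 0;             // slot in a BPF_MAP_TYPE_SOCKMAP
 *	int val = udp_sock_fd;   // UDP socket to manage via the map
 *	bpf_map_update_elem(sockmap_fd, &key, &val, BPF_ANY);
 *
 * After a successful update the socket's recvmsg() goes through
 * udp_bpf_recvmsg(), draining psock ingress data before falling back
 * to the regular UDP receive path.
 */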
// SPDX-License-Identifier: GPL-2.0 /* * Shared Memory Communications over RDMA (SMC-R) and RoCE * * Generic netlink support functions to configure an SMC-R PNET table * * Copyright IBM Corp.
2016 * * Author(s): Thomas Richter <tmricht@linux.vnet.ibm.com> */ #include <linux/module.h> #include <linux/list.h> #include <linux/ctype.h> #include <linux/mutex.h> #include <net/netlink.h> #include <net/genetlink.h> #include <uapi/linux/if.h> #include <uapi/linux/smc.h> #include <rdma/ib_verbs.h> #include <net/netns/generic.h> #include "smc_netns.h" #include "smc_pnet.h" #include "smc_ib.h" #include "smc_ism.h" #include "smc_core.h" static struct net_device *__pnet_find_base_ndev(struct net_device *ndev); static struct net_device *pnet_find_base_ndev(struct net_device *ndev); static const struct nla_policy smc_pnet_policy[SMC_PNETID_MAX + 1] = { [SMC_PNETID_NAME] = { .type = NLA_NUL_STRING, .len = SMC_MAX_PNETID_LEN }, [SMC_PNETID_ETHNAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 }, [SMC_PNETID_IBNAME] = { .type = NLA_NUL_STRING, .len = IB_DEVICE_NAME_MAX - 1 }, [SMC_PNETID_IBPORT] = { .type = NLA_U8 } }; static struct genl_family smc_pnet_nl_family; enum smc_pnet_nametype { SMC_PNET_ETH = 1, SMC_PNET_IB = 2, }; /* pnet entry stored in pnet table */ struct smc_pnetentry { struct list_head list; char pnet_name[SMC_MAX_PNETID_LEN + 1]; enum smc_pnet_nametype type; union { struct { char eth_name[IFNAMSIZ + 1]; struct net_device *ndev; netdevice_tracker dev_tracker; }; struct { char ib_name[IB_DEVICE_NAME_MAX + 1]; u8 ib_port; }; }; }; /* Check if the pnetid is set */ bool smc_pnet_is_pnetid_set(u8 *pnetid) { if (pnetid[0] == 0 || pnetid[0] == _S) return false; return true; } /* Check if two given pnetids match */ static bool smc_pnet_match(u8 *pnetid1, u8 *pnetid2) { int i; for (i = 0; i < SMC_MAX_PNETID_LEN; i++) { if ((pnetid1[i] == 0 || pnetid1[i] == _S) && (pnetid2[i] == 0 || pnetid2[i] == _S)) break; if (pnetid1[i] != pnetid2[i]) return false; } return true; } /* Remove a pnetid from the pnet table. 
*/ static int smc_pnet_remove_by_pnetid(struct net *net, char *pnet_name) { struct smc_pnetentry *pnetelem, *tmp_pe; struct smc_pnettable *pnettable; struct smc_ib_device *ibdev; struct smcd_dev *smcd; struct smc_net *sn; int rc = -ENOENT; int ibport; /* get pnettable for namespace */ sn = net_generic(net, smc_net_id); pnettable = &sn->pnettable; /* remove table entry */ mutex_lock(&pnettable->lock); list_for_each_entry_safe(pnetelem, tmp_pe, &pnettable->pnetlist, list) { if (!pnet_name || smc_pnet_match(pnetelem->pnet_name, pnet_name)) { list_del(&pnetelem->list); if (pnetelem->type == SMC_PNET_ETH && pnetelem->ndev) { netdev_put(pnetelem->ndev, &pnetelem->dev_tracker); pr_warn_ratelimited("smc: net device %s " "erased user defined " "pnetid %.16s\n", pnetelem->eth_name, pnetelem->pnet_name); } kfree(pnetelem); rc = 0; } } mutex_unlock(&pnettable->lock); /* if this is not the initial namespace, stop here */ if (net != &init_net) return rc; /* remove ib devices */ mutex_lock(&smc_ib_devices.mutex); list_for_each_entry(ibdev, &smc_ib_devices.list, list) { for (ibport = 0; ibport < SMC_MAX_PORTS; ibport++) { if (ibdev->pnetid_by_user[ibport] && (!pnet_name || smc_pnet_match(pnet_name, ibdev->pnetid[ibport]))) { pr_warn_ratelimited("smc: ib device %s ibport " "%d erased user defined " "pnetid %.16s\n", ibdev->ibdev->name, ibport + 1, ibdev->pnetid[ibport]); memset(ibdev->pnetid[ibport], 0, SMC_MAX_PNETID_LEN); ibdev->pnetid_by_user[ibport] = false; rc = 0; } } } mutex_unlock(&smc_ib_devices.mutex); /* remove smcd devices */ mutex_lock(&smcd_dev_list.mutex); list_for_each_entry(smcd, &smcd_dev_list.list, list) { if (smcd->pnetid_by_user && (!pnet_name || smc_pnet_match(pnet_name, smcd->pnetid))) { pr_warn_ratelimited("smc: smcd device %s " "erased user defined pnetid " "%.16s\n", dev_name(smcd->ops->get_dev(smcd)), smcd->pnetid); memset(smcd->pnetid, 0, SMC_MAX_PNETID_LEN); smcd->pnetid_by_user = false; rc = 0; } } mutex_unlock(&smcd_dev_list.mutex); return rc; } /* Add the reference to a given network device to the pnet table. */ static int smc_pnet_add_by_ndev(struct net_device *ndev) { struct smc_pnetentry *pnetelem, *tmp_pe; struct smc_pnettable *pnettable; struct net *net = dev_net(ndev); struct smc_net *sn; int rc = -ENOENT; /* get pnettable for namespace */ sn = net_generic(net, smc_net_id); pnettable = &sn->pnettable; mutex_lock(&pnettable->lock); list_for_each_entry_safe(pnetelem, tmp_pe, &pnettable->pnetlist, list) { if (pnetelem->type == SMC_PNET_ETH && !pnetelem->ndev && !strncmp(pnetelem->eth_name, ndev->name, IFNAMSIZ)) { netdev_hold(ndev, &pnetelem->dev_tracker, GFP_ATOMIC); pnetelem->ndev = ndev; rc = 0; pr_warn_ratelimited("smc: adding net device %s with " "user defined pnetid %.16s\n", pnetelem->eth_name, pnetelem->pnet_name); break; } } mutex_unlock(&pnettable->lock); return rc; } /* Remove the reference to a given network device from the pnet table. 
*/ static int smc_pnet_remove_by_ndev(struct net_device *ndev) { struct smc_pnetentry *pnetelem, *tmp_pe; struct smc_pnettable *pnettable; struct net *net = dev_net(ndev); struct smc_net *sn; int rc = -ENOENT; /* get pnettable for namespace */ sn = net_generic(net, smc_net_id); pnettable = &sn->pnettable; mutex_lock(&pnettable->lock); list_for_each_entry_safe(pnetelem, tmp_pe, &pnettable->pnetlist, list) { if (pnetelem->type == SMC_PNET_ETH && pnetelem->ndev == ndev) { netdev_put(pnetelem->ndev, &pnetelem->dev_tracker); pnetelem->ndev = NULL; rc = 0; pr_warn_ratelimited("smc: removing net device %s with " "user defined pnetid %.16s\n", pnetelem->eth_name, pnetelem->pnet_name); break; } } mutex_unlock(&pnettable->lock); return rc; } /* Apply pnetid to ib device when no pnetid is set. */ static bool smc_pnet_apply_ib(struct smc_ib_device *ib_dev, u8 ib_port, char *pnet_name) { bool applied = false; mutex_lock(&smc_ib_devices.mutex); if (!smc_pnet_is_pnetid_set(ib_dev->pnetid[ib_port - 1])) { memcpy(ib_dev->pnetid[ib_port - 1], pnet_name, SMC_MAX_PNETID_LEN); ib_dev->pnetid_by_user[ib_port - 1] = true; applied = true; } mutex_unlock(&smc_ib_devices.mutex); return applied; } /* Apply pnetid to smcd device when no pnetid is set. */ static bool smc_pnet_apply_smcd(struct smcd_dev *smcd_dev, char *pnet_name) { bool applied = false; mutex_lock(&smcd_dev_list.mutex); if (!smc_pnet_is_pnetid_set(smcd_dev->pnetid)) { memcpy(smcd_dev->pnetid, pnet_name, SMC_MAX_PNETID_LEN); smcd_dev->pnetid_by_user = true; applied = true; } mutex_unlock(&smcd_dev_list.mutex); return applied; } /* The limit for pnetid is 16 characters. * Valid characters should be (single-byte character set) a-z, A-Z, 0-9. * Lower case letters are converted to upper case. * Interior blanks should not be used. */ static bool smc_pnetid_valid(const char *pnet_name, char *pnetid) { char *bf = skip_spaces(pnet_name); size_t len = strlen(bf); char *end = bf + len; if (!len) return false; while (--end >= bf && isspace(*end)) ; if (end - bf >= SMC_MAX_PNETID_LEN) return false; while (bf <= end) { if (!isalnum(*bf)) return false; *pnetid++ = islower(*bf) ? toupper(*bf) : *bf; bf++; } *pnetid = '\0'; return true; } /* Find an infiniband device by a given name. The device might not exist. */ static struct smc_ib_device *smc_pnet_find_ib(char *ib_name) { struct smc_ib_device *ibdev; mutex_lock(&smc_ib_devices.mutex); list_for_each_entry(ibdev, &smc_ib_devices.list, list) { if (!strncmp(ibdev->ibdev->name, ib_name, sizeof(ibdev->ibdev->name)) || (ibdev->ibdev->dev.parent && !strncmp(dev_name(ibdev->ibdev->dev.parent), ib_name, IB_DEVICE_NAME_MAX - 1))) { goto out; } } ibdev = NULL; out: mutex_unlock(&smc_ib_devices.mutex); return ibdev; } /* Find an smcd device by a given name. The device might not exist. */ static struct smcd_dev *smc_pnet_find_smcd(char *smcd_name) { struct smcd_dev *smcd_dev; mutex_lock(&smcd_dev_list.mutex); list_for_each_entry(smcd_dev, &smcd_dev_list.list, list) { if (!strncmp(dev_name(smcd_dev->ops->get_dev(smcd_dev)), smcd_name, IB_DEVICE_NAME_MAX - 1)) goto out; } smcd_dev = NULL; out: mutex_unlock(&smcd_dev_list.mutex); return smcd_dev; } static int smc_pnet_add_eth(struct smc_pnettable *pnettable, struct net *net, char *eth_name, char *pnet_name) { struct smc_pnetentry *tmp_pe, *new_pe; struct net_device *ndev, *base_ndev; u8 ndev_pnetid[SMC_MAX_PNETID_LEN]; bool new_netdev; int rc; /* check if (base) netdev already has a pnetid. 
If there is one, we do * not want to add a pnet table entry */ rc = -EEXIST; ndev = dev_get_by_name(net, eth_name); /* dev_hold() */ if (ndev) { base_ndev = pnet_find_base_ndev(ndev); if (!smc_pnetid_by_dev_port(base_ndev->dev.parent, base_ndev->dev_port, ndev_pnetid)) goto out_put; } /* add a new netdev entry to the pnet table if there isn't one */ rc = -ENOMEM; new_pe = kzalloc(sizeof(*new_pe), GFP_KERNEL); if (!new_pe) goto out_put; new_pe->type = SMC_PNET_ETH; memcpy(new_pe->pnet_name, pnet_name, SMC_MAX_PNETID_LEN); strncpy(new_pe->eth_name, eth_name, IFNAMSIZ); rc = -EEXIST; new_netdev = true; mutex_lock(&pnettable->lock); list_for_each_entry(tmp_pe, &pnettable->pnetlist, list) { if (tmp_pe->type == SMC_PNET_ETH && !strncmp(tmp_pe->eth_name, eth_name, IFNAMSIZ)) { new_netdev = false; break; } } if (new_netdev) { if (ndev) { new_pe->ndev = ndev; netdev_tracker_alloc(ndev, &new_pe->dev_tracker, GFP_ATOMIC); } list_add_tail(&new_pe->list, &pnettable->pnetlist); mutex_unlock(&pnettable->lock); } else { mutex_unlock(&pnettable->lock); kfree(new_pe); goto out_put; } if (ndev) pr_warn_ratelimited("smc: net device %s " "applied user defined pnetid %.16s\n", new_pe->eth_name, new_pe->pnet_name); return 0; out_put: dev_put(ndev); return rc; } static int smc_pnet_add_ib(struct smc_pnettable *pnettable, char *ib_name, u8 ib_port, char *pnet_name) { struct smc_pnetentry *tmp_pe, *new_pe; struct smc_ib_device *ib_dev; bool smcddev_applied = true; bool ibdev_applied = true; struct smcd_dev *smcd; struct device *dev; bool new_ibdev; /* try to apply the pnetid to active devices */ ib_dev = smc_pnet_find_ib(ib_name); if (ib_dev) { ibdev_applied = smc_pnet_apply_ib(ib_dev, ib_port, pnet_name); if (ibdev_applied) pr_warn_ratelimited("smc: ib device %s ibport %d " "applied user defined pnetid " "%.16s\n", ib_dev->ibdev->name, ib_port, ib_dev->pnetid[ib_port - 1]); } smcd = smc_pnet_find_smcd(ib_name); if (smcd) { smcddev_applied = smc_pnet_apply_smcd(smcd, pnet_name); if (smcddev_applied) { dev = smcd->ops->get_dev(smcd); pr_warn_ratelimited("smc: smcd device %s " "applied user defined pnetid " "%.16s\n", dev_name(dev), smcd->pnetid); } } /* Apply fails when a device has a hardware-defined pnetid set, do not * add a pnet table entry in that case. */ if (!ibdev_applied || !smcddev_applied) return -EEXIST; /* add a new ib entry to the pnet table if there isn't one */ new_pe = kzalloc(sizeof(*new_pe), GFP_KERNEL); if (!new_pe) return -ENOMEM; new_pe->type = SMC_PNET_IB; memcpy(new_pe->pnet_name, pnet_name, SMC_MAX_PNETID_LEN); strncpy(new_pe->ib_name, ib_name, IB_DEVICE_NAME_MAX); new_pe->ib_port = ib_port; new_ibdev = true; mutex_lock(&pnettable->lock); list_for_each_entry(tmp_pe, &pnettable->pnetlist, list) { if (tmp_pe->type == SMC_PNET_IB && !strncmp(tmp_pe->ib_name, ib_name, IB_DEVICE_NAME_MAX)) { new_ibdev = false; break; } } if (new_ibdev) { list_add_tail(&new_pe->list, &pnettable->pnetlist); mutex_unlock(&pnettable->lock); } else { mutex_unlock(&pnettable->lock); kfree(new_pe); } return (new_ibdev) ? 0 : -EEXIST; } /* Append a pnetid to the end of the pnet table if not already on this list. 
*/ static int smc_pnet_enter(struct net *net, struct nlattr *tb[]) { char pnet_name[SMC_MAX_PNETID_LEN + 1]; struct smc_pnettable *pnettable; bool new_netdev = false; bool new_ibdev = false; struct smc_net *sn; u8 ibport = 1; char *string; int rc; /* get pnettable for namespace */ sn = net_generic(net, smc_net_id); pnettable = &sn->pnettable; rc = -EINVAL; if (!tb[SMC_PNETID_NAME]) goto error; string = (char *)nla_data(tb[SMC_PNETID_NAME]); if (!smc_pnetid_valid(string, pnet_name)) goto error; if (tb[SMC_PNETID_ETHNAME]) { string = (char *)nla_data(tb[SMC_PNETID_ETHNAME]); rc = smc_pnet_add_eth(pnettable, net, string, pnet_name); if (!rc) new_netdev = true; else if (rc != -EEXIST) goto error; } /* if this is not the initial namespace, stop here */ if (net != &init_net) return new_netdev ? 0 : -EEXIST; rc = -EINVAL; if (tb[SMC_PNETID_IBNAME]) { string = (char *)nla_data(tb[SMC_PNETID_IBNAME]); string = strim(string); if (tb[SMC_PNETID_IBPORT]) { ibport = nla_get_u8(tb[SMC_PNETID_IBPORT]); if (ibport < 1 || ibport > SMC_MAX_PORTS) goto error; } rc = smc_pnet_add_ib(pnettable, string, ibport, pnet_name); if (!rc) new_ibdev = true; else if (rc != -EEXIST) goto error; } return (new_netdev || new_ibdev) ? 0 : -EEXIST; error: return rc; } /* Convert an smc_pnetentry to a netlink attribute sequence */ static int smc_pnet_set_nla(struct sk_buff *msg, struct smc_pnetentry *pnetelem) { if (nla_put_string(msg, SMC_PNETID_NAME, pnetelem->pnet_name)) return -1; if (pnetelem->type == SMC_PNET_ETH) { if (nla_put_string(msg, SMC_PNETID_ETHNAME, pnetelem->eth_name)) return -1; } else { if (nla_put_string(msg, SMC_PNETID_ETHNAME, "n/a")) return -1; } if (pnetelem->type == SMC_PNET_IB) { if (nla_put_string(msg, SMC_PNETID_IBNAME, pnetelem->ib_name) || nla_put_u8(msg, SMC_PNETID_IBPORT, pnetelem->ib_port)) return -1; } else { if (nla_put_string(msg, SMC_PNETID_IBNAME, "n/a") || nla_put_u8(msg, SMC_PNETID_IBPORT, 0xff)) return -1; } return 0; } static int smc_pnet_add(struct sk_buff *skb, struct genl_info *info) { struct net *net = genl_info_net(info); return smc_pnet_enter(net, info->attrs); } static int smc_pnet_del(struct sk_buff *skb, struct genl_info *info) { struct net *net = genl_info_net(info); if (!info->attrs[SMC_PNETID_NAME]) return -EINVAL; return smc_pnet_remove_by_pnetid(net, (char *)nla_data(info->attrs[SMC_PNETID_NAME])); } static int smc_pnet_dump_start(struct netlink_callback *cb) { cb->args[0] = 0; return 0; } static int smc_pnet_dumpinfo(struct sk_buff *skb, u32 portid, u32 seq, u32 flags, struct smc_pnetentry *pnetelem) { void *hdr; hdr = genlmsg_put(skb, portid, seq, &smc_pnet_nl_family, flags, SMC_PNETID_GET); if (!hdr) return -ENOMEM; if (smc_pnet_set_nla(skb, pnetelem) < 0) { genlmsg_cancel(skb, hdr); return -EMSGSIZE; } genlmsg_end(skb, hdr); return 0; } static int _smc_pnet_dump(struct net *net, struct sk_buff *skb, u32 portid, u32 seq, u8 *pnetid, int start_idx) { struct smc_pnettable *pnettable; struct smc_pnetentry *pnetelem; struct smc_net *sn; int idx = 0; /* get pnettable for namespace */ sn = net_generic(net, smc_net_id); pnettable = &sn->pnettable; /* dump pnettable entries */ mutex_lock(&pnettable->lock); list_for_each_entry(pnetelem, &pnettable->pnetlist, list) { if (pnetid && !smc_pnet_match(pnetelem->pnet_name, pnetid)) continue; if (idx++ < start_idx) continue; /* if this is not the initial namespace, dump only netdev */ if (net != &init_net && pnetelem->type != SMC_PNET_ETH) continue; if (smc_pnet_dumpinfo(skb, portid, seq, NLM_F_MULTI, pnetelem)) { --idx; break; } } 
mutex_unlock(&pnettable->lock); return idx; } static int smc_pnet_dump(struct sk_buff *skb, struct netlink_callback *cb) { struct net *net = sock_net(skb->sk); int idx; idx = _smc_pnet_dump(net, skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NULL, cb->args[0]); cb->args[0] = idx; return skb->len; } /* Retrieve one PNETID entry */ static int smc_pnet_get(struct sk_buff *skb, struct genl_info *info) { struct net *net = genl_info_net(info); struct sk_buff *msg; void *hdr; if (!info->attrs[SMC_PNETID_NAME]) return -EINVAL; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; _smc_pnet_dump(net, msg, info->snd_portid, info->snd_seq, nla_data(info->attrs[SMC_PNETID_NAME]), 0); /* finish multi part message and send it */ hdr = nlmsg_put(msg, info->snd_portid, info->snd_seq, NLMSG_DONE, 0, NLM_F_MULTI); if (!hdr) { nlmsg_free(msg); return -EMSGSIZE; } return genlmsg_reply(msg, info); } /* Remove and delete all pnetids from pnet table. */ static int smc_pnet_flush(struct sk_buff *skb, struct genl_info *info) { struct net *net = genl_info_net(info); smc_pnet_remove_by_pnetid(net, NULL); return 0; } /* SMC_PNETID generic netlink operation definition */ static const struct genl_ops smc_pnet_ops[] = { { .cmd = SMC_PNETID_GET, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, /* can be retrieved by unprivileged users */ .doit = smc_pnet_get, .dumpit = smc_pnet_dump, .start = smc_pnet_dump_start }, { .cmd = SMC_PNETID_ADD, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .flags = GENL_ADMIN_PERM, .doit = smc_pnet_add }, { .cmd = SMC_PNETID_DEL, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .flags = GENL_ADMIN_PERM, .doit = smc_pnet_del }, { .cmd = SMC_PNETID_FLUSH, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .flags = GENL_ADMIN_PERM, .doit = smc_pnet_flush } }; /* SMC_PNETID family definition */ static struct genl_family smc_pnet_nl_family __ro_after_init = { .hdrsize = 0, .name = SMCR_GENL_FAMILY_NAME, .version = SMCR_GENL_FAMILY_VERSION, .maxattr = SMC_PNETID_MAX, .policy = smc_pnet_policy, .netnsok = true, .module = THIS_MODULE, .ops = smc_pnet_ops, .n_ops = ARRAY_SIZE(smc_pnet_ops), .resv_start_op = SMC_PNETID_FLUSH + 1, }; bool smc_pnet_is_ndev_pnetid(struct net *net, u8 *pnetid) { struct smc_net *sn = net_generic(net, smc_net_id); struct smc_pnetids_ndev_entry *pe; bool rc = false; read_lock(&sn->pnetids_ndev.lock); list_for_each_entry(pe, &sn->pnetids_ndev.list, list) { if (smc_pnet_match(pnetid, pe->pnetid)) { rc = true; goto unlock; } } unlock: read_unlock(&sn->pnetids_ndev.lock); return rc; } static int smc_pnet_add_pnetid(struct net *net, u8 *pnetid) { struct smc_net *sn = net_generic(net, smc_net_id); struct smc_pnetids_ndev_entry *pe, *pi; pe = kzalloc(sizeof(*pe), GFP_KERNEL); if (!pe) return -ENOMEM; write_lock(&sn->pnetids_ndev.lock); list_for_each_entry(pi, &sn->pnetids_ndev.list, list) { if (smc_pnet_match(pnetid, pi->pnetid)) { refcount_inc(&pi->refcnt); kfree(pe); goto unlock; } } refcount_set(&pe->refcnt, 1); memcpy(pe->pnetid, pnetid, SMC_MAX_PNETID_LEN); list_add_tail(&pe->list, &sn->pnetids_ndev.list); unlock: write_unlock(&sn->pnetids_ndev.lock); return 0; } static void smc_pnet_remove_pnetid(struct net *net, u8 *pnetid) { struct smc_net *sn = net_generic(net, smc_net_id); struct smc_pnetids_ndev_entry *pe, *pe2; write_lock(&sn->pnetids_ndev.lock); list_for_each_entry_safe(pe, pe2, &sn->pnetids_ndev.list, list) { if (smc_pnet_match(pnetid, pe->pnetid)) { if 
(refcount_dec_and_test(&pe->refcnt)) { list_del(&pe->list); kfree(pe); } break; } } write_unlock(&sn->pnetids_ndev.lock); } static void smc_pnet_add_base_pnetid(struct net *net, struct net_device *dev, u8 *ndev_pnetid) { struct net_device *base_dev; base_dev = __pnet_find_base_ndev(dev); if (base_dev->flags & IFF_UP && !smc_pnetid_by_dev_port(base_dev->dev.parent, base_dev->dev_port, ndev_pnetid)) { /* add to PNETIDs list */ smc_pnet_add_pnetid(net, ndev_pnetid); } } /* create initial list of netdevice pnetids */ static void smc_pnet_create_pnetids_list(struct net *net) { u8 ndev_pnetid[SMC_MAX_PNETID_LEN]; struct net_device *dev; /* Newly created netns do not have devices. * Do not even acquire rtnl. */ if (list_empty(&net->dev_base_head)) return; /* Note: This might not be needed, because smc_pnet_netdev_event() * is also calling smc_pnet_add_base_pnetid() when handling * NETDEV_UP event. */ rtnl_lock(); for_each_netdev(net, dev) smc_pnet_add_base_pnetid(net, dev, ndev_pnetid); rtnl_unlock(); } /* clean up list of netdevice pnetids */ static void smc_pnet_destroy_pnetids_list(struct net *net) { struct smc_net *sn = net_generic(net, smc_net_id); struct smc_pnetids_ndev_entry *pe, *temp_pe; write_lock(&sn->pnetids_ndev.lock); list_for_each_entry_safe(pe, temp_pe, &sn->pnetids_ndev.list, list) { list_del(&pe->list); kfree(pe); } write_unlock(&sn->pnetids_ndev.lock); } static int smc_pnet_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *event_dev = netdev_notifier_info_to_dev(ptr); struct net *net = dev_net(event_dev); u8 ndev_pnetid[SMC_MAX_PNETID_LEN]; switch (event) { case NETDEV_REBOOT: case NETDEV_UNREGISTER: smc_pnet_remove_by_ndev(event_dev); smc_ib_ndev_change(event_dev, event); return NOTIFY_OK; case NETDEV_REGISTER: smc_pnet_add_by_ndev(event_dev); smc_ib_ndev_change(event_dev, event); return NOTIFY_OK; case NETDEV_UP: smc_pnet_add_base_pnetid(net, event_dev, ndev_pnetid); return NOTIFY_OK; case NETDEV_DOWN: event_dev = __pnet_find_base_ndev(event_dev); if (!smc_pnetid_by_dev_port(event_dev->dev.parent, event_dev->dev_port, ndev_pnetid)) { /* remove from PNETIDs list */ smc_pnet_remove_pnetid(net, ndev_pnetid); } return NOTIFY_OK; default: return NOTIFY_DONE; } } static struct notifier_block smc_netdev_notifier = { .notifier_call = smc_pnet_netdev_event }; /* init network namespace */ int smc_pnet_net_init(struct net *net) { struct smc_net *sn = net_generic(net, smc_net_id); struct smc_pnettable *pnettable = &sn->pnettable; struct smc_pnetids_ndev *pnetids_ndev = &sn->pnetids_ndev; INIT_LIST_HEAD(&pnettable->pnetlist); mutex_init(&pnettable->lock); INIT_LIST_HEAD(&pnetids_ndev->list); rwlock_init(&pnetids_ndev->lock); smc_pnet_create_pnetids_list(net); return 0; } int __init smc_pnet_init(void) { int rc; rc = genl_register_family(&smc_pnet_nl_family); if (rc) return rc; rc = register_netdevice_notifier(&smc_netdev_notifier); if (rc) genl_unregister_family(&smc_pnet_nl_family); return rc; } /* exit network namespace */ void smc_pnet_net_exit(struct net *net) { /* flush pnet table */ smc_pnet_remove_by_pnetid(net, NULL); smc_pnet_destroy_pnetids_list(net); } void smc_pnet_exit(void) { unregister_netdevice_notifier(&smc_netdev_notifier); genl_unregister_family(&smc_pnet_nl_family); } static struct net_device *__pnet_find_base_ndev(struct net_device *ndev) { int i, nest_lvl; ASSERT_RTNL(); nest_lvl = ndev->lower_level; for (i = 0; i < nest_lvl; i++) { struct list_head *lower = &ndev->adj_list.lower; if (list_empty(lower)) break; lower = 
lower->next; ndev = netdev_lower_get_next(ndev, &lower); } return ndev; } /* Determine one base device for stacked net devices. * If the lower device level contains more than one devices * (for instance with bonding slaves), just the first device * is used to reach a base device. */ static struct net_device *pnet_find_base_ndev(struct net_device *ndev) { rtnl_lock(); ndev = __pnet_find_base_ndev(ndev); rtnl_unlock(); return ndev; } static int smc_pnet_find_ndev_pnetid_by_table(struct net_device *ndev, u8 *pnetid) { struct smc_pnettable *pnettable; struct net *net = dev_net(ndev); struct smc_pnetentry *pnetelem; struct smc_net *sn; int rc = -ENOENT; /* get pnettable for namespace */ sn = net_generic(net, smc_net_id); pnettable = &sn->pnettable; mutex_lock(&pnettable->lock); list_for_each_entry(pnetelem, &pnettable->pnetlist, list) { if (pnetelem->type == SMC_PNET_ETH && ndev == pnetelem->ndev) { /* get pnetid of netdev device */ memcpy(pnetid, pnetelem->pnet_name, SMC_MAX_PNETID_LEN); rc = 0; break; } } mutex_unlock(&pnettable->lock); return rc; } static int smc_pnet_determine_gid(struct smc_ib_device *ibdev, int i, struct smc_init_info *ini) { if (!ini->check_smcrv2 && !smc_ib_determine_gid(ibdev, i, ini->vlan_id, ini->ib_gid, NULL, NULL)) { ini->ib_dev = ibdev; ini->ib_port = i; return 0; } if (ini->check_smcrv2 && !smc_ib_determine_gid(ibdev, i, ini->vlan_id, ini->smcrv2.ib_gid_v2, NULL, &ini->smcrv2)) { ini->smcrv2.ib_dev_v2 = ibdev; ini->smcrv2.ib_port_v2 = i; return 0; } return -ENODEV; } /* find a roce device for the given pnetid */ static void _smc_pnet_find_roce_by_pnetid(u8 *pnet_id, struct smc_init_info *ini, struct smc_ib_device *known_dev, struct net *net) { struct smc_ib_device *ibdev; int i; mutex_lock(&smc_ib_devices.mutex); list_for_each_entry(ibdev, &smc_ib_devices.list, list) { if (ibdev == known_dev || !rdma_dev_access_netns(ibdev->ibdev, net)) continue; for (i = 1; i <= SMC_MAX_PORTS; i++) { if (!rdma_is_port_valid(ibdev->ibdev, i)) continue; if (smc_pnet_match(ibdev->pnetid[i - 1], pnet_id) && smc_ib_port_active(ibdev, i) && !test_bit(i - 1, ibdev->ports_going_away)) { if (!smc_pnet_determine_gid(ibdev, i, ini)) goto out; } } } out: mutex_unlock(&smc_ib_devices.mutex); } /* find alternate roce device with same pnet_id, vlan_id and net namespace */ void smc_pnet_find_alt_roce(struct smc_link_group *lgr, struct smc_init_info *ini, struct smc_ib_device *known_dev) { struct net *net = lgr->net; _smc_pnet_find_roce_by_pnetid(lgr->pnet_id, ini, known_dev, net); } /* if handshake network device belongs to a roce device, return its * IB device and port */ static void smc_pnet_find_rdma_dev(struct net_device *netdev, struct smc_init_info *ini) { struct net *net = dev_net(netdev); struct smc_ib_device *ibdev; mutex_lock(&smc_ib_devices.mutex); list_for_each_entry(ibdev, &smc_ib_devices.list, list) { struct net_device *ndev; int i; /* check rdma net namespace */ if (!rdma_dev_access_netns(ibdev->ibdev, net)) continue; for (i = 1; i <= SMC_MAX_PORTS; i++) { if (!rdma_is_port_valid(ibdev->ibdev, i)) continue; ndev = ib_device_get_netdev(ibdev->ibdev, i); if (!ndev) continue; dev_put(ndev); if (netdev == ndev && smc_ib_port_active(ibdev, i) && !test_bit(i - 1, ibdev->ports_going_away)) { if (!smc_pnet_determine_gid(ibdev, i, ini)) break; } } } mutex_unlock(&smc_ib_devices.mutex); } /* Determine the corresponding IB device port based on the hardware PNETID. * Searching stops at the first matching active IB device port with vlan_id * configured. * If nothing found, check pnetid table. 
* If nothing found, try to use handshake device */ static void smc_pnet_find_roce_by_pnetid(struct net_device *ndev, struct smc_init_info *ini) { u8 ndev_pnetid[SMC_MAX_PNETID_LEN]; struct net *net; ndev = pnet_find_base_ndev(ndev); net = dev_net(ndev); if (smc_pnetid_by_dev_port(ndev->dev.parent, ndev->dev_port, ndev_pnetid) && smc_pnet_find_ndev_pnetid_by_table(ndev, ndev_pnetid)) { smc_pnet_find_rdma_dev(ndev, ini); return; /* pnetid could not be determined */ } _smc_pnet_find_roce_by_pnetid(ndev_pnetid, ini, NULL, net); } static void smc_pnet_find_ism_by_pnetid(struct net_device *ndev, struct smc_init_info *ini) { u8 ndev_pnetid[SMC_MAX_PNETID_LEN]; struct smcd_dev *ismdev; ndev = pnet_find_base_ndev(ndev); if (smc_pnetid_by_dev_port(ndev->dev.parent, ndev->dev_port, ndev_pnetid) && smc_pnet_find_ndev_pnetid_by_table(ndev, ndev_pnetid)) return; /* pnetid could not be determined */ mutex_lock(&smcd_dev_list.mutex); list_for_each_entry(ismdev, &smcd_dev_list.list, list) { if (smc_pnet_match(ismdev->pnetid, ndev_pnetid) && !ismdev->going_away && (!ini->ism_peer_gid[0].gid || !smc_ism_cantalk(&ini->ism_peer_gid[0], ini->vlan_id, ismdev))) { ini->ism_dev[0] = ismdev; break; } } mutex_unlock(&smcd_dev_list.mutex); } /* PNET table analysis for a given sock: * determine ib_device and port belonging to used internal TCP socket * ethernet interface. */ void smc_pnet_find_roce_resource(struct sock *sk, struct smc_init_info *ini) { struct dst_entry *dst = sk_dst_get(sk); if (!dst) goto out; if (!dst->dev) goto out_rel; smc_pnet_find_roce_by_pnetid(dst->dev, ini); out_rel: dst_release(dst); out: return; } void smc_pnet_find_ism_resource(struct sock *sk, struct smc_init_info *ini) { struct dst_entry *dst = sk_dst_get(sk); ini->ism_dev[0] = NULL; if (!dst) goto out; if (!dst->dev) goto out_rel; smc_pnet_find_ism_by_pnetid(dst->dev, ini); out_rel: dst_release(dst); out: return; } /* Lookup and apply a pnet table entry to the given ib device. */ int smc_pnetid_by_table_ib(struct smc_ib_device *smcibdev, u8 ib_port) { char *ib_name = smcibdev->ibdev->name; struct smc_pnettable *pnettable; struct smc_pnetentry *tmp_pe; struct smc_net *sn; int rc = -ENOENT; /* get pnettable for init namespace */ sn = net_generic(&init_net, smc_net_id); pnettable = &sn->pnettable; mutex_lock(&pnettable->lock); list_for_each_entry(tmp_pe, &pnettable->pnetlist, list) { if (tmp_pe->type == SMC_PNET_IB && !strncmp(tmp_pe->ib_name, ib_name, IB_DEVICE_NAME_MAX) && tmp_pe->ib_port == ib_port) { smc_pnet_apply_ib(smcibdev, ib_port, tmp_pe->pnet_name); rc = 0; break; } } mutex_unlock(&pnettable->lock); return rc; } /* Lookup and apply a pnet table entry to the given smcd device. */ int smc_pnetid_by_table_smcd(struct smcd_dev *smcddev) { const char *ib_name = dev_name(smcddev->ops->get_dev(smcddev)); struct smc_pnettable *pnettable; struct smc_pnetentry *tmp_pe; struct smc_net *sn; int rc = -ENOENT; /* get pnettable for init namespace */ sn = net_generic(&init_net, smc_net_id); pnettable = &sn->pnettable; mutex_lock(&pnettable->lock); list_for_each_entry(tmp_pe, &pnettable->pnetlist, list) { if (tmp_pe->type == SMC_PNET_IB && !strncmp(tmp_pe->ib_name, ib_name, IB_DEVICE_NAME_MAX)) { smc_pnet_apply_smcd(smcddev, tmp_pe->pnet_name); rc = 0; break; } } mutex_unlock(&pnettable->lock); return rc; } |
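/*
 * Illustration only -- not part of the PNET table code above. This is a
 * minimal user-space sketch of the PNETID rules that smc_pnetid_valid()
 * and smc_pnet_match() implement: at most 16 alphanumeric characters,
 * upper-cased on entry, and compared as if padded with blanks or NUL
 * bytes. The demo_* names and the main() driver are hypothetical and
 * exist only for this sketch; the kernel works on fixed-size buffers
 * inside the pnet table instead.
 */
#include <ctype.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define DEMO_MAX_PNETID_LEN 16

/* Mirror of the validation rules: trim blanks, reject anything that is
 * not alphanumeric, convert lower case to upper case.
 */
static bool demo_pnetid_valid(const char *in, char *out)
{
	size_t len, i;

	while (*in && isspace((unsigned char)*in))
		in++;				/* skip leading blanks */
	len = strlen(in);
	while (len && isspace((unsigned char)in[len - 1]))
		len--;				/* drop trailing blanks */
	if (!len || len > DEMO_MAX_PNETID_LEN)
		return false;
	for (i = 0; i < len; i++) {
		if (!isalnum((unsigned char)in[i]))
			return false;
		out[i] = toupper((unsigned char)in[i]);
	}
	out[i] = '\0';
	return true;
}

/* Mirror of the match rule: stop at the first position where both ids
 * are blank/NUL padded, otherwise require byte equality.
 */
static bool demo_pnet_match(const char *p1, const char *p2)
{
	int i;

	for (i = 0; i < DEMO_MAX_PNETID_LEN; i++) {
		if ((p1[i] == 0 || p1[i] == ' ') &&
		    (p2[i] == 0 || p2[i] == ' '))
			break;
		if (p1[i] != p2[i])
			return false;
	}
	return true;
}

int main(void)
{
	char a[DEMO_MAX_PNETID_LEN + 1], b[DEMO_MAX_PNETID_LEN + 1];

	if (demo_pnetid_valid("  net25 ", a) && demo_pnetid_valid("NET25", b))
		printf("match: %d\n", demo_pnet_match(a, b));	/* prints 1 */
	return 0;
}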
/* SPDX-License-Identifier: GPL-2.0-only */ #ifndef __NET_SCHED_PIE_H #define __NET_SCHED_PIE_H #include <linux/ktime.h> #include <linux/skbuff.h> #include <linux/types.h> #include <net/inet_ecn.h> #include <net/pkt_sched.h> #define MAX_PROB (U64_MAX >> BITS_PER_BYTE) #define DTIME_INVALID U64_MAX #define QUEUE_THRESHOLD 16384 #define DQCOUNT_INVALID -1 #define PIE_SCALE 8 /** * struct pie_params - contains pie parameters * @target: target delay in pschedtime * @tupdate: interval at which drop probability is calculated * @limit: total number of packets that can be in the queue * @alpha: parameter to control drop probability * @beta: parameter to control drop probability * @ecn: is ECN marking of packets enabled * @bytemode: is drop probability scaled based on pkt size * @dq_rate_estimator: is Little's law used for qdelay calculation */ struct pie_params { psched_time_t target; u32 tupdate; u32 limit; u32 alpha; u32 beta; u8 ecn; u8 bytemode; u8 dq_rate_estimator; }; /** * struct pie_vars - contains pie variables * @qdelay: current queue delay * @qdelay_old: queue delay in previous qdelay calculation * @burst_time: burst time allowance * @dq_tstamp: timestamp at which dq rate was last calculated * @prob: drop probability * @accu_prob: accumulated drop probability * @dq_count: number of bytes dequeued in a measurement cycle * @avg_dq_rate: calculated average dq rate * @backlog_old: queue backlog during previous qdelay calculation */ struct pie_vars { psched_time_t qdelay; psched_time_t qdelay_old; psched_time_t burst_time; psched_time_t dq_tstamp; u64 prob; u64 accu_prob; u64 dq_count; u32 avg_dq_rate; u32 backlog_old; }; /** * struct pie_stats - contains pie stats * @packets_in: total number of packets enqueued * @dropped: packets dropped due to pie action * @overlimit: packets dropped due to lack of space in queue * @ecn_mark: packets marked with ECN * @maxq: maximum queue size */ struct pie_stats { u32 packets_in; u32 dropped; u32 overlimit; u32 ecn_mark; u32 maxq; }; /** * struct pie_skb_cb - contains private skb vars * @enqueue_time: timestamp when the packet is enqueued * @mem_usage: size of the skb during enqueue */ struct pie_skb_cb { psched_time_t enqueue_time; u32 mem_usage; }; static inline void pie_params_init(struct pie_params *params) { params->target = PSCHED_NS2TICKS(15 * NSEC_PER_MSEC); /* 15 ms */ params->tupdate = usecs_to_jiffies(15 * USEC_PER_MSEC); /* 15 ms */ params->limit = 1000; params->alpha = 2; params->beta = 20; params->ecn = false; params->bytemode = false; params->dq_rate_estimator = false; } static inline void pie_vars_init(struct pie_vars *vars) { vars->burst_time = PSCHED_NS2TICKS(150 * NSEC_PER_MSEC); /* 150 ms */ vars->dq_tstamp = DTIME_INVALID; vars->accu_prob = 0; vars->dq_count = DQCOUNT_INVALID; vars->avg_dq_rate = 0; } static inline struct pie_skb_cb *get_pie_cb(const struct sk_buff *skb) { qdisc_cb_private_validate(skb, sizeof(struct pie_skb_cb)); return (struct pie_skb_cb *)qdisc_skb_cb(skb)->data; } static inline psched_time_t pie_get_enqueue_time(const struct sk_buff *skb) { return
get_pie_cb(skb)->enqueue_time; } static inline void pie_set_enqueue_time(struct sk_buff *skb) { get_pie_cb(skb)->enqueue_time = psched_get_time(); } bool pie_drop_early(struct Qdisc *sch, struct pie_params *params, struct pie_vars *vars, u32 backlog, u32 packet_size); void pie_process_dequeue(struct sk_buff *skb, struct pie_params *params, struct pie_vars *vars, u32 backlog); void pie_calculate_probability(struct pie_params *params, struct pie_vars *vars, u32 backlog); #endif |
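/*
 * Illustration only -- not part of the PIE header above. A minimal
 * user-space sketch of the proportional-integral update rule that the
 * alpha and beta fields of struct pie_params feed (see RFC 8033):
 * prob += alpha * (qdelay - target) + beta * (qdelay - qdelay_old).
 * The in-kernel pie_calculate_probability() does this in fixed point
 * (PIE_SCALE, MAX_PROB) with additional auto-scaling at low
 * probabilities; the demo_* names here are hypothetical, and the
 * floating-point math only shows the basic controller behaviour.
 */
#include <stdio.h>

struct demo_pie {
	double prob;		/* current drop probability, 0.0 .. 1.0 */
	double qdelay_old;	/* queue delay at the previous update */
};

/* One tupdate interval: adjust prob from the measured queue delay (s). */
static void demo_pie_update(struct demo_pie *p, double qdelay,
			    double target, double alpha, double beta)
{
	p->prob += alpha * (qdelay - target) + beta * (qdelay - p->qdelay_old);
	if (p->prob < 0.0)
		p->prob = 0.0;
	if (p->prob > 1.0)
		p->prob = 1.0;
	p->qdelay_old = qdelay;
}

int main(void)
{
	/* 15 ms target; 0.125/1.25 correspond to the defaults alpha = 2,
	 * beta = 20 if those are read as sixteenths (an assumption here).
	 */
	struct demo_pie p = { 0.0, 0.0 };
	double qdelays[] = { 0.005, 0.030, 0.060, 0.040, 0.015 };
	int i;

	for (i = 0; i < 5; i++) {
		demo_pie_update(&p, qdelays[i], 0.015, 0.125, 1.25);
		printf("qdelay=%.3fs prob=%.4f\n", qdelays[i], p.prob);
	}
	return 0;
}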
// SPDX-License-Identifier: GPL-2.0-only /* * linux/fs/nfs/fs_context.c * * Copyright (C) 1992 Rick Sladkey * Conversion to new mount api Copyright (C) David Howells * * NFS mount handling. * * Split from fs/nfs/super.c by David Howells <dhowells@redhat.com> */ #include <linux/compat.h> #include <linux/module.h> #include <linux/fs.h> #include <linux/fs_context.h> #include <linux/fs_parser.h> #include <linux/nfs_fs.h> #include <linux/nfs_mount.h> #include <linux/nfs4_mount.h> #include <net/handshake.h> #include "nfs.h" #include "internal.h" #include "nfstrace.h" #define NFSDBG_FACILITY NFSDBG_MOUNT #if IS_ENABLED(CONFIG_NFS_V3) #define NFS_DEFAULT_VERSION 3 #else #define NFS_DEFAULT_VERSION 2 #endif #define NFS_MAX_CONNECTIONS 16 enum nfs_param { Opt_ac, Opt_acdirmax, Opt_acdirmin, Opt_acl, Opt_acregmax, Opt_acregmin, Opt_actimeo, Opt_addr, Opt_bg, Opt_bsize, Opt_clientaddr, Opt_cto, Opt_alignwrite, Opt_fg, Opt_fscache, Opt_fscache_flag, Opt_hard, Opt_intr, Opt_local_lock, Opt_lock, Opt_lookupcache, Opt_migration, Opt_minorversion, Opt_mountaddr, Opt_mounthost, Opt_mountport, Opt_mountproto, Opt_mountvers, Opt_namelen, Opt_nconnect, Opt_max_connect, Opt_port, Opt_posix, Opt_proto, Opt_rdirplus, Opt_rdma, Opt_resvport, Opt_retrans, Opt_retry, Opt_rsize, Opt_sec, Opt_sharecache, Opt_sloppy, Opt_soft, Opt_softerr, Opt_softreval, Opt_source, Opt_tcp, Opt_timeo, Opt_trunkdiscovery, Opt_udp, Opt_v, Opt_vers, Opt_wsize, Opt_write, Opt_xprtsec, }; enum { Opt_local_lock_all, Opt_local_lock_flock, Opt_local_lock_none, Opt_local_lock_posix, }; static const struct constant_table nfs_param_enums_local_lock[] = { { "all", Opt_local_lock_all }, { "flock", Opt_local_lock_flock }, { "posix", Opt_local_lock_posix }, { "none", Opt_local_lock_none }, {} }; enum { Opt_lookupcache_all, Opt_lookupcache_none, Opt_lookupcache_positive, }; static const struct constant_table nfs_param_enums_lookupcache[] = { { "all", Opt_lookupcache_all }, { "none", Opt_lookupcache_none }, { "pos", Opt_lookupcache_positive }, { "positive", Opt_lookupcache_positive }, {} }; enum { Opt_write_lazy, Opt_write_eager, Opt_write_wait, }; static const struct constant_table nfs_param_enums_write[] = { { "lazy", Opt_write_lazy }, { "eager", Opt_write_eager }, { "wait", Opt_write_wait }, {} }; static const struct fs_parameter_spec nfs_fs_parameters[] = { fsparam_flag_no("ac", Opt_ac), fsparam_u32 ("acdirmax", Opt_acdirmax), fsparam_u32 ("acdirmin", Opt_acdirmin), fsparam_flag_no("acl", Opt_acl), fsparam_u32 ("acregmax", Opt_acregmax), fsparam_u32 ("acregmin", Opt_acregmin), fsparam_u32 ("actimeo", Opt_actimeo), fsparam_string("addr", Opt_addr), fsparam_flag ("bg", Opt_bg), fsparam_u32 ("bsize", Opt_bsize), fsparam_string("clientaddr", Opt_clientaddr), fsparam_flag_no("cto", Opt_cto), fsparam_flag_no("alignwrite", Opt_alignwrite), fsparam_flag ("fg", Opt_fg), fsparam_flag_no("fsc", Opt_fscache_flag), fsparam_string("fsc", Opt_fscache), fsparam_flag ("hard", Opt_hard), __fsparam(NULL, "intr", Opt_intr, fs_param_neg_with_no|fs_param_deprecated, NULL), fsparam_enum ("local_lock",
Opt_local_lock, nfs_param_enums_local_lock), fsparam_flag_no("lock", Opt_lock), fsparam_enum ("lookupcache", Opt_lookupcache, nfs_param_enums_lookupcache), fsparam_flag_no("migration", Opt_migration), fsparam_u32 ("minorversion", Opt_minorversion), fsparam_string("mountaddr", Opt_mountaddr), fsparam_string("mounthost", Opt_mounthost), fsparam_u32 ("mountport", Opt_mountport), fsparam_string("mountproto", Opt_mountproto), fsparam_u32 ("mountvers", Opt_mountvers), fsparam_u32 ("namlen", Opt_namelen), fsparam_u32 ("nconnect", Opt_nconnect), fsparam_u32 ("max_connect", Opt_max_connect), fsparam_string("nfsvers", Opt_vers), fsparam_u32 ("port", Opt_port), fsparam_flag_no("posix", Opt_posix), fsparam_string("proto", Opt_proto), fsparam_flag_no("rdirplus", Opt_rdirplus), fsparam_flag ("rdma", Opt_rdma), fsparam_flag_no("resvport", Opt_resvport), fsparam_u32 ("retrans", Opt_retrans), fsparam_string("retry", Opt_retry), fsparam_u32 ("rsize", Opt_rsize), fsparam_string("sec", Opt_sec), fsparam_flag_no("sharecache", Opt_sharecache), fsparam_flag ("sloppy", Opt_sloppy), fsparam_flag ("soft", Opt_soft), fsparam_flag ("softerr", Opt_softerr), fsparam_flag ("softreval", Opt_softreval), fsparam_string("source", Opt_source), fsparam_flag ("tcp", Opt_tcp), fsparam_u32 ("timeo", Opt_timeo), fsparam_flag_no("trunkdiscovery", Opt_trunkdiscovery), fsparam_flag ("udp", Opt_udp), fsparam_flag ("v2", Opt_v), fsparam_flag ("v3", Opt_v), fsparam_flag ("v4", Opt_v), fsparam_flag ("v4.0", Opt_v), fsparam_flag ("v4.1", Opt_v), fsparam_flag ("v4.2", Opt_v), fsparam_string("vers", Opt_vers), fsparam_enum ("write", Opt_write, nfs_param_enums_write), fsparam_u32 ("wsize", Opt_wsize), fsparam_string("xprtsec", Opt_xprtsec), {} }; enum { Opt_vers_2, Opt_vers_3, Opt_vers_4, Opt_vers_4_0, Opt_vers_4_1, Opt_vers_4_2, }; static const struct constant_table nfs_vers_tokens[] = { { "2", Opt_vers_2 }, { "3", Opt_vers_3 }, { "4", Opt_vers_4 }, { "4.0", Opt_vers_4_0 }, { "4.1", Opt_vers_4_1 }, { "4.2", Opt_vers_4_2 }, {} }; enum { Opt_xprt_rdma, Opt_xprt_rdma6, Opt_xprt_tcp, Opt_xprt_tcp6, Opt_xprt_udp, Opt_xprt_udp6, nr__Opt_xprt }; static const struct constant_table nfs_xprt_protocol_tokens[] = { { "rdma", Opt_xprt_rdma }, { "rdma6", Opt_xprt_rdma6 }, { "tcp", Opt_xprt_tcp }, { "tcp6", Opt_xprt_tcp6 }, { "udp", Opt_xprt_udp }, { "udp6", Opt_xprt_udp6 }, {} }; enum { Opt_sec_krb5, Opt_sec_krb5i, Opt_sec_krb5p, Opt_sec_lkey, Opt_sec_lkeyi, Opt_sec_lkeyp, Opt_sec_none, Opt_sec_spkm, Opt_sec_spkmi, Opt_sec_spkmp, Opt_sec_sys, nr__Opt_sec }; static const struct constant_table nfs_secflavor_tokens[] = { { "krb5", Opt_sec_krb5 }, { "krb5i", Opt_sec_krb5i }, { "krb5p", Opt_sec_krb5p }, { "lkey", Opt_sec_lkey }, { "lkeyi", Opt_sec_lkeyi }, { "lkeyp", Opt_sec_lkeyp }, { "none", Opt_sec_none }, { "null", Opt_sec_none }, { "spkm3", Opt_sec_spkm }, { "spkm3i", Opt_sec_spkmi }, { "spkm3p", Opt_sec_spkmp }, { "sys", Opt_sec_sys }, {} }; enum { Opt_xprtsec_none, Opt_xprtsec_tls, Opt_xprtsec_mtls, nr__Opt_xprtsec }; static const struct constant_table nfs_xprtsec_policies[] = { { "none", Opt_xprtsec_none }, { "tls", Opt_xprtsec_tls }, { "mtls", Opt_xprtsec_mtls }, {} }; /* * Sanity-check a server address provided by the mount command. * * Address family must be initialized, and address must not be * the ANY address for that family. 
*/ static int nfs_verify_server_address(struct sockaddr_storage *addr) { switch (addr->ss_family) { case AF_INET: { struct sockaddr_in *sa = (struct sockaddr_in *)addr; return sa->sin_addr.s_addr != htonl(INADDR_ANY); } case AF_INET6: { struct in6_addr *sa = &((struct sockaddr_in6 *)addr)->sin6_addr; return !ipv6_addr_any(sa); } } return 0; } #ifdef CONFIG_NFS_DISABLE_UDP_SUPPORT static bool nfs_server_transport_udp_invalid(const struct nfs_fs_context *ctx) { return true; } #else static bool nfs_server_transport_udp_invalid(const struct nfs_fs_context *ctx) { if (ctx->version == 4) return true; return false; } #endif /* * Sanity check the NFS transport protocol. */ static int nfs_validate_transport_protocol(struct fs_context *fc, struct nfs_fs_context *ctx) { switch (ctx->nfs_server.protocol) { case XPRT_TRANSPORT_UDP: if (nfs_server_transport_udp_invalid(ctx)) goto out_invalid_transport_udp; break; case XPRT_TRANSPORT_TCP: case XPRT_TRANSPORT_RDMA: break; default: ctx->nfs_server.protocol = XPRT_TRANSPORT_TCP; } if (ctx->xprtsec.policy != RPC_XPRTSEC_NONE) switch (ctx->nfs_server.protocol) { case XPRT_TRANSPORT_TCP: ctx->nfs_server.protocol = XPRT_TRANSPORT_TCP_TLS; break; default: goto out_invalid_xprtsec_policy; } return 0; out_invalid_transport_udp: return nfs_invalf(fc, "NFS: Unsupported transport protocol udp"); out_invalid_xprtsec_policy: return nfs_invalf(fc, "NFS: Transport does not support xprtsec"); } /* * For text based NFSv2/v3 mounts, the mount protocol transport default * settings should depend upon the specified NFS transport. */ static void nfs_set_mount_transport_protocol(struct nfs_fs_context *ctx) { if (ctx->mount_server.protocol == XPRT_TRANSPORT_UDP || ctx->mount_server.protocol == XPRT_TRANSPORT_TCP) return; switch (ctx->nfs_server.protocol) { case XPRT_TRANSPORT_UDP: ctx->mount_server.protocol = XPRT_TRANSPORT_UDP; break; case XPRT_TRANSPORT_TCP: case XPRT_TRANSPORT_RDMA: ctx->mount_server.protocol = XPRT_TRANSPORT_TCP; } } /* * Add 'flavor' to 'auth_info' if not already present. * Returns true if 'flavor' ends up in the list, false otherwise */ static int nfs_auth_info_add(struct fs_context *fc, struct nfs_auth_info *auth_info, rpc_authflavor_t flavor) { unsigned int i; unsigned int max_flavor_len = ARRAY_SIZE(auth_info->flavors); /* make sure this flavor isn't already in the list */ for (i = 0; i < auth_info->flavor_len; i++) { if (flavor == auth_info->flavors[i]) return 0; } if (auth_info->flavor_len + 1 >= max_flavor_len) return nfs_invalf(fc, "NFS: too many sec= flavors"); auth_info->flavors[auth_info->flavor_len++] = flavor; return 0; } /* * Parse the value of the 'sec=' option. 
*/ static int nfs_parse_security_flavors(struct fs_context *fc, struct fs_parameter *param) { struct nfs_fs_context *ctx = nfs_fc2context(fc); rpc_authflavor_t pseudoflavor; char *string = param->string, *p; int ret; trace_nfs_mount_assign(param->key, string); while ((p = strsep(&string, ":")) != NULL) { if (!*p) continue; switch (lookup_constant(nfs_secflavor_tokens, p, -1)) { case Opt_sec_none: pseudoflavor = RPC_AUTH_NULL; break; case Opt_sec_sys: pseudoflavor = RPC_AUTH_UNIX; break; case Opt_sec_krb5: pseudoflavor = RPC_AUTH_GSS_KRB5; break; case Opt_sec_krb5i: pseudoflavor = RPC_AUTH_GSS_KRB5I; break; case Opt_sec_krb5p: pseudoflavor = RPC_AUTH_GSS_KRB5P; break; case Opt_sec_lkey: pseudoflavor = RPC_AUTH_GSS_LKEY; break; case Opt_sec_lkeyi: pseudoflavor = RPC_AUTH_GSS_LKEYI; break; case Opt_sec_lkeyp: pseudoflavor = RPC_AUTH_GSS_LKEYP; break; case Opt_sec_spkm: pseudoflavor = RPC_AUTH_GSS_SPKM; break; case Opt_sec_spkmi: pseudoflavor = RPC_AUTH_GSS_SPKMI; break; case Opt_sec_spkmp: pseudoflavor = RPC_AUTH_GSS_SPKMP; break; default: return nfs_invalf(fc, "NFS: sec=%s option not recognized", p); } ret = nfs_auth_info_add(fc, &ctx->auth_info, pseudoflavor); if (ret < 0) return ret; } return 0; } static int nfs_parse_xprtsec_policy(struct fs_context *fc, struct fs_parameter *param) { struct nfs_fs_context *ctx = nfs_fc2context(fc); trace_nfs_mount_assign(param->key, param->string); switch (lookup_constant(nfs_xprtsec_policies, param->string, -1)) { case Opt_xprtsec_none: ctx->xprtsec.policy = RPC_XPRTSEC_NONE; break; case Opt_xprtsec_tls: ctx->xprtsec.policy = RPC_XPRTSEC_TLS_ANON; break; case Opt_xprtsec_mtls: ctx->xprtsec.policy = RPC_XPRTSEC_TLS_X509; break; default: return nfs_invalf(fc, "NFS: Unrecognized transport security policy"); } return 0; } static int nfs_parse_version_string(struct fs_context *fc, const char *string) { struct nfs_fs_context *ctx = nfs_fc2context(fc); ctx->flags &= ~NFS_MOUNT_VER3; switch (lookup_constant(nfs_vers_tokens, string, -1)) { case Opt_vers_2: ctx->version = 2; break; case Opt_vers_3: ctx->flags |= NFS_MOUNT_VER3; ctx->version = 3; break; case Opt_vers_4: /* Backward compatibility option. In future, * the mount program should always supply * a NFSv4 minor version number. */ ctx->version = 4; break; case Opt_vers_4_0: ctx->version = 4; ctx->minorversion = 0; break; case Opt_vers_4_1: ctx->version = 4; ctx->minorversion = 1; break; case Opt_vers_4_2: ctx->version = 4; ctx->minorversion = 2; break; default: return nfs_invalf(fc, "NFS: Unsupported NFS version"); } return 0; } /* * Parse a single mount parameter. */ static int nfs_fs_context_parse_param(struct fs_context *fc, struct fs_parameter *param) { struct fs_parse_result result; struct nfs_fs_context *ctx = nfs_fc2context(fc); unsigned short protofamily, mountfamily; unsigned int len; int ret, opt; trace_nfs_mount_option(param); opt = fs_parse(fc, nfs_fs_parameters, param, &result); if (opt < 0) return (opt == -ENOPARAM && ctx->sloppy) ? 
1 : opt; if (fc->security) ctx->has_sec_mnt_opts = 1; switch (opt) { case Opt_source: if (fc->source) return nfs_invalf(fc, "NFS: Multiple sources not supported"); fc->source = param->string; param->string = NULL; break; /* * boolean options: foo/nofoo */ case Opt_soft: ctx->flags |= NFS_MOUNT_SOFT; ctx->flags &= ~NFS_MOUNT_SOFTERR; break; case Opt_softerr: ctx->flags |= NFS_MOUNT_SOFTERR | NFS_MOUNT_SOFTREVAL; ctx->flags &= ~NFS_MOUNT_SOFT; break; case Opt_hard: ctx->flags &= ~(NFS_MOUNT_SOFT | NFS_MOUNT_SOFTERR | NFS_MOUNT_SOFTREVAL); break; case Opt_softreval: if (result.negated) ctx->flags &= ~NFS_MOUNT_SOFTREVAL; else ctx->flags |= NFS_MOUNT_SOFTREVAL; break; case Opt_posix: if (result.negated) ctx->flags &= ~NFS_MOUNT_POSIX; else ctx->flags |= NFS_MOUNT_POSIX; break; case Opt_cto: if (result.negated) ctx->flags |= NFS_MOUNT_NOCTO; else ctx->flags &= ~NFS_MOUNT_NOCTO; break; case Opt_trunkdiscovery: if (result.negated) ctx->flags &= ~NFS_MOUNT_TRUNK_DISCOVERY; else ctx->flags |= NFS_MOUNT_TRUNK_DISCOVERY; break; case Opt_alignwrite: if (result.negated) ctx->flags |= NFS_MOUNT_NO_ALIGNWRITE; else ctx->flags &= ~NFS_MOUNT_NO_ALIGNWRITE; break; case Opt_ac: if (result.negated) ctx->flags |= NFS_MOUNT_NOAC; else ctx->flags &= ~NFS_MOUNT_NOAC; break; case Opt_lock: if (result.negated) { ctx->lock_status = NFS_LOCK_NOLOCK; ctx->flags |= NFS_MOUNT_NONLM; ctx->flags |= (NFS_MOUNT_LOCAL_FLOCK | NFS_MOUNT_LOCAL_FCNTL); } else { ctx->lock_status = NFS_LOCK_LOCK; ctx->flags &= ~NFS_MOUNT_NONLM; ctx->flags &= ~(NFS_MOUNT_LOCAL_FLOCK | NFS_MOUNT_LOCAL_FCNTL); } break; case Opt_udp: ctx->flags &= ~NFS_MOUNT_TCP; ctx->nfs_server.protocol = XPRT_TRANSPORT_UDP; break; case Opt_tcp: case Opt_rdma: ctx->flags |= NFS_MOUNT_TCP; /* for side protocols */ ret = xprt_find_transport_ident(param->key); if (ret < 0) goto out_bad_transport; ctx->nfs_server.protocol = ret; break; case Opt_acl: if (result.negated) ctx->flags |= NFS_MOUNT_NOACL; else ctx->flags &= ~NFS_MOUNT_NOACL; break; case Opt_rdirplus: if (result.negated) ctx->flags |= NFS_MOUNT_NORDIRPLUS; else ctx->flags &= ~NFS_MOUNT_NORDIRPLUS; break; case Opt_sharecache: if (result.negated) ctx->flags |= NFS_MOUNT_UNSHARED; else ctx->flags &= ~NFS_MOUNT_UNSHARED; break; case Opt_resvport: if (result.negated) ctx->flags |= NFS_MOUNT_NORESVPORT; else ctx->flags &= ~NFS_MOUNT_NORESVPORT; break; case Opt_fscache_flag: if (result.negated) ctx->options &= ~NFS_OPTION_FSCACHE; else ctx->options |= NFS_OPTION_FSCACHE; kfree(ctx->fscache_uniq); ctx->fscache_uniq = NULL; break; case Opt_fscache: trace_nfs_mount_assign(param->key, param->string); ctx->options |= NFS_OPTION_FSCACHE; kfree(ctx->fscache_uniq); ctx->fscache_uniq = param->string; param->string = NULL; break; case Opt_migration: if (result.negated) ctx->options &= ~NFS_OPTION_MIGRATION; else ctx->options |= NFS_OPTION_MIGRATION; break; /* * options that take numeric values */ case Opt_port: if (result.uint_32 > USHRT_MAX) goto out_of_bounds; ctx->nfs_server.port = result.uint_32; break; case Opt_rsize: ctx->rsize = result.uint_32; break; case Opt_wsize: ctx->wsize = result.uint_32; break; case Opt_bsize: ctx->bsize = result.uint_32; break; case Opt_timeo: if (result.uint_32 < 1 || result.uint_32 > INT_MAX) goto out_of_bounds; ctx->timeo = result.uint_32; break; case Opt_retrans: if (result.uint_32 > INT_MAX) goto out_of_bounds; ctx->retrans = result.uint_32; break; case Opt_acregmin: ctx->acregmin = result.uint_32; break; case Opt_acregmax: ctx->acregmax = result.uint_32; break; case Opt_acdirmin: 
ctx->acdirmin = result.uint_32; break; case Opt_acdirmax: ctx->acdirmax = result.uint_32; break; case Opt_actimeo: ctx->acregmin = result.uint_32; ctx->acregmax = result.uint_32; ctx->acdirmin = result.uint_32; ctx->acdirmax = result.uint_32; break; case Opt_namelen: ctx->namlen = result.uint_32; break; case Opt_mountport: if (result.uint_32 > USHRT_MAX) goto out_of_bounds; ctx->mount_server.port = result.uint_32; break; case Opt_mountvers: if (result.uint_32 < NFS_MNT_VERSION || result.uint_32 > NFS_MNT3_VERSION) goto out_of_bounds; ctx->mount_server.version = result.uint_32; break; case Opt_minorversion: if (result.uint_32 > NFS4_MAX_MINOR_VERSION) goto out_of_bounds; ctx->minorversion = result.uint_32; break; /* * options that take text values */ case Opt_v: ret = nfs_parse_version_string(fc, param->key + 1); if (ret < 0) return ret; break; case Opt_vers: if (!param->string) goto out_invalid_value; trace_nfs_mount_assign(param->key, param->string); ret = nfs_parse_version_string(fc, param->string); if (ret < 0) return ret; break; case Opt_sec: ret = nfs_parse_security_flavors(fc, param); if (ret < 0) return ret; break; case Opt_xprtsec: ret = nfs_parse_xprtsec_policy(fc, param); if (ret < 0) return ret; break; case Opt_proto: if (!param->string) goto out_invalid_value; trace_nfs_mount_assign(param->key, param->string); protofamily = AF_INET; switch (lookup_constant(nfs_xprt_protocol_tokens, param->string, -1)) { case Opt_xprt_udp6: protofamily = AF_INET6; fallthrough; case Opt_xprt_udp: ctx->flags &= ~NFS_MOUNT_TCP; ctx->nfs_server.protocol = XPRT_TRANSPORT_UDP; break; case Opt_xprt_tcp6: protofamily = AF_INET6; fallthrough; case Opt_xprt_tcp: ctx->flags |= NFS_MOUNT_TCP; ctx->nfs_server.protocol = XPRT_TRANSPORT_TCP; break; case Opt_xprt_rdma6: protofamily = AF_INET6; fallthrough; case Opt_xprt_rdma: /* vector side protocols to TCP */ ctx->flags |= NFS_MOUNT_TCP; ret = xprt_find_transport_ident(param->string); if (ret < 0) goto out_bad_transport; ctx->nfs_server.protocol = ret; break; default: goto out_bad_transport; } ctx->protofamily = protofamily; break; case Opt_mountproto: if (!param->string) goto out_invalid_value; trace_nfs_mount_assign(param->key, param->string); mountfamily = AF_INET; switch (lookup_constant(nfs_xprt_protocol_tokens, param->string, -1)) { case Opt_xprt_udp6: mountfamily = AF_INET6; fallthrough; case Opt_xprt_udp: ctx->mount_server.protocol = XPRT_TRANSPORT_UDP; break; case Opt_xprt_tcp6: mountfamily = AF_INET6; fallthrough; case Opt_xprt_tcp: ctx->mount_server.protocol = XPRT_TRANSPORT_TCP; break; case Opt_xprt_rdma: /* not used for side protocols */ default: goto out_bad_transport; } ctx->mountfamily = mountfamily; break; case Opt_addr: trace_nfs_mount_assign(param->key, param->string); len = rpc_pton(fc->net_ns, param->string, param->size, &ctx->nfs_server.address, sizeof(ctx->nfs_server._address)); if (len == 0) goto out_invalid_address; ctx->nfs_server.addrlen = len; break; case Opt_clientaddr: trace_nfs_mount_assign(param->key, param->string); kfree(ctx->client_address); ctx->client_address = param->string; param->string = NULL; break; case Opt_mounthost: trace_nfs_mount_assign(param->key, param->string); kfree(ctx->mount_server.hostname); ctx->mount_server.hostname = param->string; param->string = NULL; break; case Opt_mountaddr: trace_nfs_mount_assign(param->key, param->string); len = rpc_pton(fc->net_ns, param->string, param->size, &ctx->mount_server.address, sizeof(ctx->mount_server._address)); if (len == 0) goto out_invalid_address; 
ctx->mount_server.addrlen = len; break; case Opt_nconnect: trace_nfs_mount_assign(param->key, param->string); if (result.uint_32 < 1 || result.uint_32 > NFS_MAX_CONNECTIONS) goto out_of_bounds; ctx->nfs_server.nconnect = result.uint_32; break; case Opt_max_connect: trace_nfs_mount_assign(param->key, param->string); if (result.uint_32 < 1 || result.uint_32 > NFS_MAX_TRANSPORTS) goto out_of_bounds; ctx->nfs_server.max_connect = result.uint_32; break; case Opt_lookupcache: trace_nfs_mount_assign(param->key, param->string); switch (result.uint_32) { case Opt_lookupcache_all: ctx->flags &= ~(NFS_MOUNT_LOOKUP_CACHE_NONEG|NFS_MOUNT_LOOKUP_CACHE_NONE); break; case Opt_lookupcache_positive: ctx->flags &= ~NFS_MOUNT_LOOKUP_CACHE_NONE; ctx->flags |= NFS_MOUNT_LOOKUP_CACHE_NONEG; break; case Opt_lookupcache_none: ctx->flags |= NFS_MOUNT_LOOKUP_CACHE_NONEG|NFS_MOUNT_LOOKUP_CACHE_NONE; break; default: goto out_invalid_value; } break; case Opt_local_lock: trace_nfs_mount_assign(param->key, param->string); switch (result.uint_32) { case Opt_local_lock_all: ctx->flags |= (NFS_MOUNT_LOCAL_FLOCK | NFS_MOUNT_LOCAL_FCNTL); break; case Opt_local_lock_flock: ctx->flags |= NFS_MOUNT_LOCAL_FLOCK; break; case Opt_local_lock_posix: ctx->flags |= NFS_MOUNT_LOCAL_FCNTL; break; case Opt_local_lock_none: ctx->flags &= ~(NFS_MOUNT_LOCAL_FLOCK | NFS_MOUNT_LOCAL_FCNTL); break; default: goto out_invalid_value; } break; case Opt_write: trace_nfs_mount_assign(param->key, param->string); switch (result.uint_32) { case Opt_write_lazy: ctx->flags &= ~(NFS_MOUNT_WRITE_EAGER | NFS_MOUNT_WRITE_WAIT); break; case Opt_write_eager: ctx->flags |= NFS_MOUNT_WRITE_EAGER; ctx->flags &= ~NFS_MOUNT_WRITE_WAIT; break; case Opt_write_wait: ctx->flags |= NFS_MOUNT_WRITE_EAGER | NFS_MOUNT_WRITE_WAIT; break; default: goto out_invalid_value; } break; /* * Special options */ case Opt_sloppy: ctx->sloppy = true; break; } return 0; out_invalid_value: return nfs_invalf(fc, "NFS: Bad mount option value specified"); out_invalid_address: return nfs_invalf(fc, "NFS: Bad IP address specified"); out_of_bounds: return nfs_invalf(fc, "NFS: Value for '%s' out of range", param->key); out_bad_transport: return nfs_invalf(fc, "NFS: Unrecognized transport protocol"); } /* * Split fc->source into "hostname:export_path". * * The leftmost colon demarks the split between the server's hostname * and the export path. If the hostname starts with a left square * bracket, then it may contain colons. * * Note: caller frees hostname and export path, even on error. */ static int nfs_parse_source(struct fs_context *fc, size_t maxnamlen, size_t maxpathlen) { struct nfs_fs_context *ctx = nfs_fc2context(fc); const char *dev_name = fc->source; size_t len; const char *end; if (unlikely(!dev_name || !*dev_name)) return -EINVAL; /* Is the host name protected with square brakcets? */ if (*dev_name == '[') { end = strchr(++dev_name, ']'); if (end == NULL || end[1] != ':') goto out_bad_devname; len = end - dev_name; end++; } else { const char *comma; end = strchr(dev_name, ':'); if (end == NULL) goto out_bad_devname; len = end - dev_name; /* kill possible hostname list: not supported */ comma = memchr(dev_name, ',', len); if (comma) len = comma - dev_name; } if (len > maxnamlen) goto out_hostname; kfree(ctx->nfs_server.hostname); /* N.B. 
caller will free nfs_server.hostname in all cases */ ctx->nfs_server.hostname = kmemdup_nul(dev_name, len, GFP_KERNEL); if (!ctx->nfs_server.hostname) goto out_nomem; len = strlen(++end); if (len > maxpathlen) goto out_path; ctx->nfs_server.export_path = kmemdup_nul(end, len, GFP_KERNEL); if (!ctx->nfs_server.export_path) goto out_nomem; trace_nfs_mount_path(ctx->nfs_server.export_path); return 0; out_bad_devname: return nfs_invalf(fc, "NFS: device name not in host:path format"); out_nomem: nfs_errorf(fc, "NFS: not enough memory to parse device name"); return -ENOMEM; out_hostname: nfs_errorf(fc, "NFS: server hostname too long"); return -ENAMETOOLONG; out_path: nfs_errorf(fc, "NFS: export pathname too long"); return -ENAMETOOLONG; } static inline bool is_remount_fc(struct fs_context *fc) { return fc->root != NULL; } /* * Parse monolithic NFS2/NFS3 mount data * - fills in the mount root filehandle * * For option strings, user space handles the following behaviors: * * + DNS: mapping server host name to IP address ("addr=" option) * * + failure mode: how to behave if a mount request can't be handled * immediately ("fg/bg" option) * * + retry: how often to retry a mount request ("retry=" option) * * + breaking back: trying proto=udp after proto=tcp, v2 after v3, * mountproto=tcp after mountproto=udp, and so on */ static int nfs23_parse_monolithic(struct fs_context *fc, struct nfs_mount_data *data) { struct nfs_fs_context *ctx = nfs_fc2context(fc); struct nfs_fh *mntfh = ctx->mntfh; struct sockaddr_storage *sap = &ctx->nfs_server._address; int extra_flags = NFS_MOUNT_LEGACY_INTERFACE; int ret; if (data == NULL) goto out_no_data; ctx->version = NFS_DEFAULT_VERSION; switch (data->version) { case 1: data->namlen = 0; fallthrough; case 2: data->bsize = 0; fallthrough; case 3: if (data->flags & NFS_MOUNT_VER3) goto out_no_v3; data->root.size = NFS2_FHSIZE; memcpy(data->root.data, data->old_root.data, NFS2_FHSIZE); /* Turn off security negotiation */ extra_flags |= NFS_MOUNT_SECFLAVOUR; fallthrough; case 4: if (data->flags & NFS_MOUNT_SECFLAVOUR) goto out_no_sec; fallthrough; case 5: memset(data->context, 0, sizeof(data->context)); fallthrough; case 6: if (data->flags & NFS_MOUNT_VER3) { if (data->root.size > NFS3_FHSIZE || data->root.size == 0) goto out_invalid_fh; mntfh->size = data->root.size; ctx->version = 3; } else { mntfh->size = NFS2_FHSIZE; ctx->version = 2; } memcpy(mntfh->data, data->root.data, mntfh->size); if (mntfh->size < sizeof(mntfh->data)) memset(mntfh->data + mntfh->size, 0, sizeof(mntfh->data) - mntfh->size); /* * for proto == XPRT_TRANSPORT_UDP, which is what uses * to_exponential, implying shift: limit the shift value * to BITS_PER_LONG (majortimeo is unsigned long) */ if (!(data->flags & NFS_MOUNT_TCP)) /* this will be UDP */ if (data->retrans >= 64) /* shift value is too large */ goto out_invalid_data; /* * Translate to nfs_fs_context, which nfs_fill_super * can deal with. 
*/ ctx->flags = data->flags & NFS_MOUNT_FLAGMASK; ctx->flags |= extra_flags; ctx->rsize = data->rsize; ctx->wsize = data->wsize; ctx->timeo = data->timeo; ctx->retrans = data->retrans; ctx->acregmin = data->acregmin; ctx->acregmax = data->acregmax; ctx->acdirmin = data->acdirmin; ctx->acdirmax = data->acdirmax; ctx->need_mount = false; if (!is_remount_fc(fc)) { memcpy(sap, &data->addr, sizeof(data->addr)); ctx->nfs_server.addrlen = sizeof(data->addr); ctx->nfs_server.port = ntohs(data->addr.sin_port); } if (sap->ss_family != AF_INET || !nfs_verify_server_address(sap)) goto out_no_address; if (!(data->flags & NFS_MOUNT_TCP)) ctx->nfs_server.protocol = XPRT_TRANSPORT_UDP; /* N.B. caller will free nfs_server.hostname in all cases */ ctx->nfs_server.hostname = kstrdup(data->hostname, GFP_KERNEL); if (!ctx->nfs_server.hostname) goto out_nomem; ctx->namlen = data->namlen; ctx->bsize = data->bsize; if (data->flags & NFS_MOUNT_SECFLAVOUR) ctx->selected_flavor = data->pseudoflavor; else ctx->selected_flavor = RPC_AUTH_UNIX; if (!(data->flags & NFS_MOUNT_NONLM)) ctx->flags &= ~(NFS_MOUNT_LOCAL_FLOCK| NFS_MOUNT_LOCAL_FCNTL); else ctx->flags |= (NFS_MOUNT_LOCAL_FLOCK| NFS_MOUNT_LOCAL_FCNTL); /* * The legacy version 6 binary mount data from userspace has a * field used only to transport selinux information into the * kernel. To continue to support that functionality we * have a touch of selinux knowledge here in the NFS code. The * userspace code converted context=blah to just blah so we are * converting back to the full string selinux understands. */ if (data->context[0]){ #ifdef CONFIG_SECURITY_SELINUX int ret; data->context[NFS_MAX_CONTEXT_LEN] = '\0'; ret = vfs_parse_fs_string(fc, "context", data->context, strlen(data->context)); if (ret < 0) return ret; #else return -EINVAL; #endif } break; default: goto generic; } ret = nfs_validate_transport_protocol(fc, ctx); if (ret) return ret; ctx->skip_reconfig_option_check = true; return 0; generic: return generic_parse_monolithic(fc, data); out_no_data: if (is_remount_fc(fc)) { ctx->skip_reconfig_option_check = true; return 0; } return nfs_invalf(fc, "NFS: mount program didn't pass any mount data"); out_no_v3: return nfs_invalf(fc, "NFS: nfs_mount_data version does not support v3"); out_no_sec: return nfs_invalf(fc, "NFS: nfs_mount_data version supports only AUTH_SYS"); out_nomem: return -ENOMEM; out_no_address: return nfs_invalf(fc, "NFS: mount program didn't pass remote address"); out_invalid_fh: return nfs_invalf(fc, "NFS: invalid root filehandle"); out_invalid_data: return nfs_invalf(fc, "NFS: invalid binary mount data"); } #if IS_ENABLED(CONFIG_NFS_V4) struct compat_nfs_string { compat_uint_t len; compat_uptr_t data; }; static inline void compat_nfs_string(struct nfs_string *dst, struct compat_nfs_string *src) { dst->data = compat_ptr(src->data); dst->len = src->len; } struct compat_nfs4_mount_data_v1 { compat_int_t version; compat_int_t flags; compat_int_t rsize; compat_int_t wsize; compat_int_t timeo; compat_int_t retrans; compat_int_t acregmin; compat_int_t acregmax; compat_int_t acdirmin; compat_int_t acdirmax; struct compat_nfs_string client_addr; struct compat_nfs_string mnt_path; struct compat_nfs_string hostname; compat_uint_t host_addrlen; compat_uptr_t host_addr; compat_int_t proto; compat_int_t auth_flavourlen; compat_uptr_t auth_flavours; }; static void nfs4_compat_mount_data_conv(struct nfs4_mount_data *data) { struct compat_nfs4_mount_data_v1 *compat = (struct compat_nfs4_mount_data_v1 *)data; /* copy the fields backwards */ 
data->auth_flavours = compat_ptr(compat->auth_flavours); data->auth_flavourlen = compat->auth_flavourlen; data->proto = compat->proto; data->host_addr = compat_ptr(compat->host_addr); data->host_addrlen = compat->host_addrlen; compat_nfs_string(&data->hostname, &compat->hostname); compat_nfs_string(&data->mnt_path, &compat->mnt_path); compat_nfs_string(&data->client_addr, &compat->client_addr); data->acdirmax = compat->acdirmax; data->acdirmin = compat->acdirmin; data->acregmax = compat->acregmax; data->acregmin = compat->acregmin; data->retrans = compat->retrans; data->timeo = compat->timeo; data->wsize = compat->wsize; data->rsize = compat->rsize; data->flags = compat->flags; data->version = compat->version; } /* * Validate NFSv4 mount options */ static int nfs4_parse_monolithic(struct fs_context *fc, struct nfs4_mount_data *data) { struct nfs_fs_context *ctx = nfs_fc2context(fc); struct sockaddr_storage *sap = &ctx->nfs_server._address; int ret; char *c; if (!data) { if (is_remount_fc(fc)) goto done; return nfs_invalf(fc, "NFS4: mount program didn't pass any mount data"); } ctx->version = 4; if (data->version != 1) return generic_parse_monolithic(fc, data); if (in_compat_syscall()) nfs4_compat_mount_data_conv(data); if (data->host_addrlen > sizeof(ctx->nfs_server.address)) goto out_no_address; if (data->host_addrlen == 0) goto out_no_address; ctx->nfs_server.addrlen = data->host_addrlen; if (copy_from_user(sap, data->host_addr, data->host_addrlen)) return -EFAULT; if (!nfs_verify_server_address(sap)) goto out_no_address; ctx->nfs_server.port = ntohs(((struct sockaddr_in *)sap)->sin_port); if (data->auth_flavourlen) { rpc_authflavor_t pseudoflavor; if (data->auth_flavourlen > 1) goto out_inval_auth; if (copy_from_user(&pseudoflavor, data->auth_flavours, sizeof(pseudoflavor))) return -EFAULT; ctx->selected_flavor = pseudoflavor; } else { ctx->selected_flavor = RPC_AUTH_UNIX; } c = strndup_user(data->hostname.data, NFS4_MAXNAMLEN); if (IS_ERR(c)) return PTR_ERR(c); ctx->nfs_server.hostname = c; c = strndup_user(data->mnt_path.data, NFS4_MAXPATHLEN); if (IS_ERR(c)) return PTR_ERR(c); ctx->nfs_server.export_path = c; trace_nfs_mount_path(c); c = strndup_user(data->client_addr.data, 16); if (IS_ERR(c)) return PTR_ERR(c); ctx->client_address = c; /* * Translate to nfs_fs_context, which nfs_fill_super * can deal with. */ ctx->flags = data->flags & NFS4_MOUNT_FLAGMASK; ctx->rsize = data->rsize; ctx->wsize = data->wsize; ctx->timeo = data->timeo; ctx->retrans = data->retrans; ctx->acregmin = data->acregmin; ctx->acregmax = data->acregmax; ctx->acdirmin = data->acdirmin; ctx->acdirmax = data->acdirmax; ctx->nfs_server.protocol = data->proto; ret = nfs_validate_transport_protocol(fc, ctx); if (ret) return ret; done: ctx->skip_reconfig_option_check = true; return 0; out_inval_auth: return nfs_invalf(fc, "NFS4: Invalid number of RPC auth flavours %d", data->auth_flavourlen); out_no_address: return nfs_invalf(fc, "NFS4: mount program didn't pass remote address"); } #endif /* * Parse a monolithic block of data from sys_mount(). */ static int nfs_fs_context_parse_monolithic(struct fs_context *fc, void *data) { if (fc->fs_type == &nfs_fs_type) return nfs23_parse_monolithic(fc, data); #if IS_ENABLED(CONFIG_NFS_V4) if (fc->fs_type == &nfs4_fs_type) return nfs4_parse_monolithic(fc, data); #endif return nfs_invalf(fc, "NFS: Unsupported monolithic data version"); } /* * Validate the preparsed information in the config. 
*/ static int nfs_fs_context_validate(struct fs_context *fc) { struct nfs_fs_context *ctx = nfs_fc2context(fc); struct nfs_subversion *nfs_mod; struct sockaddr_storage *sap = &ctx->nfs_server._address; int max_namelen = PAGE_SIZE; int max_pathlen = NFS_MAXPATHLEN; int port = 0; int ret; if (!fc->source) goto out_no_device_name; /* Check for sanity first. */ if (ctx->minorversion && ctx->version != 4) goto out_minorversion_mismatch; if (ctx->options & NFS_OPTION_MIGRATION && (ctx->version != 4 || ctx->minorversion != 0)) goto out_migration_misuse; /* Verify that any proto=/mountproto= options match the address * families in the addr=/mountaddr= options. */ if (ctx->protofamily != AF_UNSPEC && ctx->protofamily != ctx->nfs_server.address.sa_family) goto out_proto_mismatch; if (ctx->mountfamily != AF_UNSPEC) { if (ctx->mount_server.addrlen) { if (ctx->mountfamily != ctx->mount_server.address.sa_family) goto out_mountproto_mismatch; } else { if (ctx->mountfamily != ctx->nfs_server.address.sa_family) goto out_mountproto_mismatch; } } if (!nfs_verify_server_address(sap)) goto out_no_address; ret = nfs_validate_transport_protocol(fc, ctx); if (ret) return ret; if (ctx->version == 4) { if (IS_ENABLED(CONFIG_NFS_V4)) { if (ctx->nfs_server.protocol == XPRT_TRANSPORT_RDMA) port = NFS_RDMA_PORT; else port = NFS_PORT; max_namelen = NFS4_MAXNAMLEN; max_pathlen = NFS4_MAXPATHLEN; ctx->flags &= ~(NFS_MOUNT_NONLM | NFS_MOUNT_NOACL | NFS_MOUNT_VER3 | NFS_MOUNT_LOCAL_FLOCK | NFS_MOUNT_LOCAL_FCNTL); } else { goto out_v4_not_compiled; } } else { nfs_set_mount_transport_protocol(ctx); if (ctx->nfs_server.protocol == XPRT_TRANSPORT_RDMA) port = NFS_RDMA_PORT; } nfs_set_port(sap, &ctx->nfs_server.port, port); ret = nfs_parse_source(fc, max_namelen, max_pathlen); if (ret < 0) return ret; /* Load the NFS protocol module if we haven't done so yet */ if (!ctx->nfs_mod) { nfs_mod = find_nfs_version(ctx->version); if (IS_ERR(nfs_mod)) { ret = PTR_ERR(nfs_mod); goto out_version_unavailable; } ctx->nfs_mod = nfs_mod; } /* Ensure the filesystem context has the correct fs_type */ if (fc->fs_type != ctx->nfs_mod->nfs_fs) { module_put(fc->fs_type->owner); __module_get(ctx->nfs_mod->nfs_fs->owner); fc->fs_type = ctx->nfs_mod->nfs_fs; } return 0; out_no_device_name: return nfs_invalf(fc, "NFS: Device name not specified"); out_v4_not_compiled: nfs_errorf(fc, "NFS: NFSv4 is not compiled into kernel"); return -EPROTONOSUPPORT; out_no_address: return nfs_invalf(fc, "NFS: mount program didn't pass remote address"); out_mountproto_mismatch: return nfs_invalf(fc, "NFS: Mount server address does not match mountproto= option"); out_proto_mismatch: return nfs_invalf(fc, "NFS: Server address does not match proto= option"); out_minorversion_mismatch: return nfs_invalf(fc, "NFS: Mount option vers=%u does not support minorversion=%u", ctx->version, ctx->minorversion); out_migration_misuse: return nfs_invalf(fc, "NFS: 'Migration' not supported for this NFS version"); out_version_unavailable: nfs_errorf(fc, "NFS: Version unavailable"); return ret; } /* * Create an NFS superblock by the appropriate method. */ static int nfs_get_tree(struct fs_context *fc) { struct nfs_fs_context *ctx = nfs_fc2context(fc); int err = nfs_fs_context_validate(fc); if (err) return err; if (!ctx->internal) return ctx->nfs_mod->rpc_ops->try_get_tree(fc); else return nfs_get_tree_common(fc); } /* * Handle duplication of a configuration. The caller copied *src into *sc, but * it can't deal with resource pointers in the filesystem context, so we have * to do that. 
We need to clear pointers, copy data or get extra refs as * appropriate. */ static int nfs_fs_context_dup(struct fs_context *fc, struct fs_context *src_fc) { struct nfs_fs_context *src = nfs_fc2context(src_fc), *ctx; ctx = kmemdup(src, sizeof(struct nfs_fs_context), GFP_KERNEL); if (!ctx) return -ENOMEM; ctx->mntfh = nfs_alloc_fhandle(); if (!ctx->mntfh) { kfree(ctx); return -ENOMEM; } nfs_copy_fh(ctx->mntfh, src->mntfh); get_nfs_version(ctx->nfs_mod); ctx->client_address = NULL; ctx->mount_server.hostname = NULL; ctx->nfs_server.export_path = NULL; ctx->nfs_server.hostname = NULL; ctx->fscache_uniq = NULL; ctx->clone_data.fattr = NULL; fc->fs_private = ctx; return 0; } static void nfs_fs_context_free(struct fs_context *fc) { struct nfs_fs_context *ctx = nfs_fc2context(fc); if (ctx) { if (ctx->server) nfs_free_server(ctx->server); if (ctx->nfs_mod) put_nfs_version(ctx->nfs_mod); kfree(ctx->client_address); kfree(ctx->mount_server.hostname); kfree(ctx->nfs_server.export_path); kfree(ctx->nfs_server.hostname); kfree(ctx->fscache_uniq); nfs_free_fhandle(ctx->mntfh); nfs_free_fattr(ctx->clone_data.fattr); kfree(ctx); } } static const struct fs_context_operations nfs_fs_context_ops = { .free = nfs_fs_context_free, .dup = nfs_fs_context_dup, .parse_param = nfs_fs_context_parse_param, .parse_monolithic = nfs_fs_context_parse_monolithic, .get_tree = nfs_get_tree, .reconfigure = nfs_reconfigure, }; /* * Prepare superblock configuration. We use the namespaces attached to the * context. This may be the current process's namespaces, or it may be a * container's namespaces. */ static int nfs_init_fs_context(struct fs_context *fc) { struct nfs_fs_context *ctx; ctx = kzalloc(sizeof(struct nfs_fs_context), GFP_KERNEL); if (unlikely(!ctx)) return -ENOMEM; ctx->mntfh = nfs_alloc_fhandle(); if (unlikely(!ctx->mntfh)) { kfree(ctx); return -ENOMEM; } ctx->protofamily = AF_UNSPEC; ctx->mountfamily = AF_UNSPEC; ctx->mount_server.port = NFS_UNSPEC_PORT; if (fc->root) { /* reconfigure, start with the current config */ struct nfs_server *nfss = fc->root->d_sb->s_fs_info; struct net *net = nfss->nfs_client->cl_net; ctx->flags = nfss->flags; ctx->rsize = nfss->rsize; ctx->wsize = nfss->wsize; ctx->retrans = nfss->client->cl_timeout->to_retries; ctx->selected_flavor = nfss->client->cl_auth->au_flavor; ctx->acregmin = nfss->acregmin / HZ; ctx->acregmax = nfss->acregmax / HZ; ctx->acdirmin = nfss->acdirmin / HZ; ctx->acdirmax = nfss->acdirmax / HZ; ctx->timeo = 10U * nfss->client->cl_timeout->to_initval / HZ; ctx->nfs_server.port = nfss->port; ctx->nfs_server.addrlen = nfss->nfs_client->cl_addrlen; ctx->version = nfss->nfs_client->rpc_ops->version; ctx->minorversion = nfss->nfs_client->cl_minorversion; memcpy(&ctx->nfs_server._address, &nfss->nfs_client->cl_addr, ctx->nfs_server.addrlen); if (fc->net_ns != net) { put_net(fc->net_ns); fc->net_ns = get_net(net); } ctx->nfs_mod = nfss->nfs_client->cl_nfs_mod; get_nfs_version(ctx->nfs_mod); } else { /* defaults */ ctx->timeo = NFS_UNSPEC_TIMEO; ctx->retrans = NFS_UNSPEC_RETRANS; ctx->acregmin = NFS_DEF_ACREGMIN; ctx->acregmax = NFS_DEF_ACREGMAX; ctx->acdirmin = NFS_DEF_ACDIRMIN; ctx->acdirmax = NFS_DEF_ACDIRMAX; ctx->nfs_server.port = NFS_UNSPEC_PORT; ctx->nfs_server.protocol = XPRT_TRANSPORT_TCP; ctx->selected_flavor = RPC_AUTH_MAXFLAVOR; ctx->minorversion = 0; ctx->need_mount = true; ctx->xprtsec.policy = RPC_XPRTSEC_NONE; ctx->xprtsec.cert_serial = TLS_NO_CERT; ctx->xprtsec.privkey_serial = TLS_NO_PRIVKEY; fc->s_iflags |= SB_I_STABLE_WRITES; } fc->fs_private = ctx; 
fc->ops = &nfs_fs_context_ops; return 0; } struct file_system_type nfs_fs_type = { .owner = THIS_MODULE, .name = "nfs", .init_fs_context = nfs_init_fs_context, .parameters = nfs_fs_parameters, .kill_sb = nfs_kill_super, .fs_flags = FS_RENAME_DOES_D_MOVE|FS_BINARY_MOUNTDATA, }; MODULE_ALIAS_FS("nfs"); EXPORT_SYMBOL_GPL(nfs_fs_type); #if IS_ENABLED(CONFIG_NFS_V4) struct file_system_type nfs4_fs_type = { .owner = THIS_MODULE, .name = "nfs4", .init_fs_context = nfs_init_fs_context, .parameters = nfs_fs_parameters, .kill_sb = nfs_kill_super, .fs_flags = FS_RENAME_DOES_D_MOVE|FS_BINARY_MOUNTDATA, }; MODULE_ALIAS_FS("nfs4"); MODULE_ALIAS("nfs4"); EXPORT_SYMBOL_GPL(nfs4_fs_type); #endif /* CONFIG_NFS_V4 */
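/*
 * Hedged illustration (not part of the NFS sources above): a minimal
 * user-space sketch of how the new mount API drives the fs_context
 * operations defined above.  Each fsconfig(FSCONFIG_SET_STRING, ...) call
 * lands in nfs_fs_context_parse_param(); FSCONFIG_CMD_CREATE runs
 * nfs_fs_context_validate() and nfs_get_tree().  The server address,
 * export path and mount point are made-up examples, error handling is
 * reduced to perror(), and the sketch assumes a kernel/libc new enough to
 * expose the fsopen()/fsconfig()/fsmount()/move_mount() syscall numbers
 * and the FSOPEN_*/FSCONFIG_*/FSMOUNT_*/MOVE_MOUNT_* constants in
 * <linux/mount.h>.
 */
#include <fcntl.h>
#include <linux/mount.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

static long sys_fsopen(const char *fstype, unsigned int flags)
{
	return syscall(SYS_fsopen, fstype, flags);
}

static long sys_fsconfig(int fd, unsigned int cmd, const char *key,
			 const void *value, int aux)
{
	return syscall(SYS_fsconfig, fd, cmd, key, value, aux);
}

static long sys_fsmount(int fd, unsigned int flags, unsigned int mount_attrs)
{
	return syscall(SYS_fsmount, fd, flags, mount_attrs);
}

static long sys_move_mount(int from_dfd, const char *from_path,
			   int to_dfd, const char *to_path, unsigned int flags)
{
	return syscall(SYS_move_mount, from_dfd, from_path, to_dfd, to_path, flags);
}

int main(void)
{
	int fsfd, mntfd;

	fsfd = sys_fsopen("nfs4", FSOPEN_CLOEXEC);
	if (fsfd < 0) {
		perror("fsopen");
		return 1;
	}
	/* fc->source; later split by nfs_parse_source() into host and export path */
	sys_fsconfig(fsfd, FSCONFIG_SET_STRING, "source", "192.0.2.1:/export", 0);
	/* each of these is handled by nfs_fs_context_parse_param() */
	sys_fsconfig(fsfd, FSCONFIG_SET_STRING, "vers", "4.2", 0);
	sys_fsconfig(fsfd, FSCONFIG_SET_STRING, "proto", "tcp", 0);
	/* validation and superblock creation */
	if (sys_fsconfig(fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0) < 0) {
		perror("fsconfig(create)");
		return 1;
	}
	mntfd = sys_fsmount(fsfd, FSMOUNT_CLOEXEC, 0);
	if (mntfd < 0) {
		perror("fsmount");
		return 1;
	}
	/* attach the new mount at a hypothetical mount point */
	if (sys_move_mount(mntfd, "", AT_FDCWD, "/mnt", MOVE_MOUNT_F_EMPTY_PATH) < 0) {
		perror("move_mount");
		return 1;
	}
	return 0;
}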
// SPDX-License-Identifier: GPL-2.0-only /* Kernel module to match connection tracking information. */ /* (C) 1999-2001 Paul `Rusty' Russell * (C) 2002-2005 Netfilter Core Team <coreteam@netfilter.org> */ #include <linux/module.h> #include <linux/skbuff.h> #include <net/netfilter/nf_conntrack.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter/xt_state.h> MODULE_LICENSE("GPL"); MODULE_AUTHOR("Rusty Russell <rusty@rustcorp.com.au>"); MODULE_DESCRIPTION("ip[6]_tables connection tracking state match module"); MODULE_ALIAS("ipt_state"); MODULE_ALIAS("ip6t_state"); static bool state_mt(const struct sk_buff *skb, struct xt_action_param *par) { const struct xt_state_info *sinfo = par->matchinfo; enum ip_conntrack_info ctinfo; unsigned int statebit; struct nf_conn *ct = nf_ct_get(skb, &ctinfo); if (ct) statebit = XT_STATE_BIT(ctinfo); else if (ctinfo == IP_CT_UNTRACKED) statebit = XT_STATE_UNTRACKED; else statebit = XT_STATE_INVALID; return (sinfo->statemask & statebit); } static int state_mt_check(const struct xt_mtchk_param *par) { int ret; ret = nf_ct_netns_get(par->net, par->family); if (ret < 0) pr_info_ratelimited("cannot load conntrack support for proto=%u\n", par->family); return ret; } static void state_mt_destroy(const struct xt_mtdtor_param *par) { nf_ct_netns_put(par->net, par->family); } static struct xt_match state_mt_reg __read_mostly = { .name = "state", .family = NFPROTO_UNSPEC, .checkentry = state_mt_check, .match = state_mt, .destroy = state_mt_destroy, .matchsize = sizeof(struct xt_state_info), .me = THIS_MODULE, }; static int __init state_mt_init(void) { return xt_register_match(&state_mt_reg); } static void __exit state_mt_exit(void) { xt_unregister_match(&state_mt_reg); } module_init(state_mt_init); module_exit(state_mt_exit);
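/*
 * Hedged illustration (not part of xt_state.c above): a tiny user-space
 * sketch of the statemask/statebit test performed by state_mt().  A rule
 * such as "iptables -A INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT"
 * is compiled into a bitmask (sinfo->statemask); each packet contributes a
 * single state bit derived from its conntrack info, and the rule matches
 * when the two intersect.  The bit values below are purely illustrative
 * and do not claim to reproduce the kernel's exact XT_STATE_BIT() encoding.
 */
#include <stdbool.h>
#include <stdio.h>

/* illustrative state bits (hypothetical values, not the uapi encoding) */
#define EX_STATE_INVALID     (1 << 0)
#define EX_STATE_NEW         (1 << 1)
#define EX_STATE_ESTABLISHED (1 << 2)
#define EX_STATE_RELATED     (1 << 3)
#define EX_STATE_UNTRACKED   (1 << 4)

/* mirrors the core of state_mt(): sinfo->statemask & statebit */
static bool example_state_match(unsigned int statemask, unsigned int statebit)
{
	return statemask & statebit;
}

int main(void)
{
	/* rule compiled from: --state ESTABLISHED,RELATED */
	unsigned int statemask = EX_STATE_ESTABLISHED | EX_STATE_RELATED;

	printf("ESTABLISHED packet matches: %d\n",
	       example_state_match(statemask, EX_STATE_ESTABLISHED));
	printf("NEW packet matches:         %d\n",
	       example_state_match(statemask, EX_STATE_NEW));
	return 0;
}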
// SPDX-License-Identifier: GPL-2.0-only /* * Scanning implementation * * Copyright 2003, Jouni Malinen <jkmaline@cc.hut.fi> * Copyright 2004, Instant802 Networks, Inc. * Copyright 2005, Devicescape Software, Inc.
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> * Copyright 2007, Michael Wu <flamingice@sourmilk.net> * Copyright 2013-2015 Intel Mobile Communications GmbH * Copyright 2016-2017 Intel Deutschland GmbH * Copyright (C) 2018-2024 Intel Corporation */ #include <linux/if_arp.h> #include <linux/etherdevice.h> #include <linux/rtnetlink.h> #include <net/sch_generic.h> #include <linux/slab.h> #include <linux/export.h> #include <linux/random.h> #include <net/mac80211.h> #include "ieee80211_i.h" #include "driver-ops.h" #include "mesh.h" #define IEEE80211_PROBE_DELAY (HZ / 33) #define IEEE80211_CHANNEL_TIME (HZ / 33) #define IEEE80211_PASSIVE_CHANNEL_TIME (HZ / 9) void ieee80211_rx_bss_put(struct ieee80211_local *local, struct ieee80211_bss *bss) { if (!bss) return; cfg80211_put_bss(local->hw.wiphy, container_of((void *)bss, struct cfg80211_bss, priv)); } static bool is_uapsd_supported(struct ieee802_11_elems *elems) { u8 qos_info; if (elems->wmm_info && elems->wmm_info_len == 7 && elems->wmm_info[5] == 1) qos_info = elems->wmm_info[6]; else if (elems->wmm_param && elems->wmm_param_len == 24 && elems->wmm_param[5] == 1) qos_info = elems->wmm_param[6]; else /* no valid wmm information or parameter element found */ return false; return qos_info & IEEE80211_WMM_IE_AP_QOSINFO_UAPSD; } struct inform_bss_update_data { struct ieee80211_rx_status *rx_status; bool beacon; }; void ieee80211_inform_bss(struct wiphy *wiphy, struct cfg80211_bss *cbss, const struct cfg80211_bss_ies *ies, void *data) { struct ieee80211_local *local = wiphy_priv(wiphy); struct inform_bss_update_data *update_data = data; struct ieee80211_bss *bss = (void *)cbss->priv; struct ieee80211_rx_status *rx_status; struct ieee802_11_elems *elems; int clen, srlen; /* This happens while joining an IBSS */ if (!update_data) return; elems = ieee802_11_parse_elems(ies->data, ies->len, false, NULL); if (!elems) return; rx_status = update_data->rx_status; if (update_data->beacon) bss->device_ts_beacon = rx_status->device_timestamp; else bss->device_ts_presp = rx_status->device_timestamp; if (elems->parse_error) { if (update_data->beacon) bss->corrupt_data |= IEEE80211_BSS_CORRUPT_BEACON; else bss->corrupt_data |= IEEE80211_BSS_CORRUPT_PROBE_RESP; } else { if (update_data->beacon) bss->corrupt_data &= ~IEEE80211_BSS_CORRUPT_BEACON; else bss->corrupt_data &= ~IEEE80211_BSS_CORRUPT_PROBE_RESP; } /* save the ERP value so that it is available at association time */ if (elems->erp_info && (!elems->parse_error || !(bss->valid_data & IEEE80211_BSS_VALID_ERP))) { bss->erp_value = elems->erp_info[0]; bss->has_erp_value = true; if (!elems->parse_error) bss->valid_data |= IEEE80211_BSS_VALID_ERP; } /* replace old supported rates if we get new values */ if (!elems->parse_error || !(bss->valid_data & IEEE80211_BSS_VALID_RATES)) { srlen = 0; if (elems->supp_rates) { clen = IEEE80211_MAX_SUPP_RATES; if (clen > elems->supp_rates_len) clen = elems->supp_rates_len; memcpy(bss->supp_rates, elems->supp_rates, clen); srlen += clen; } if (elems->ext_supp_rates) { clen = IEEE80211_MAX_SUPP_RATES - srlen; if (clen > elems->ext_supp_rates_len) clen = elems->ext_supp_rates_len; memcpy(bss->supp_rates + srlen, elems->ext_supp_rates, clen); srlen += clen; } if (srlen) { bss->supp_rates_len = srlen; if (!elems->parse_error) bss->valid_data |= IEEE80211_BSS_VALID_RATES; } } if (!elems->parse_error || !(bss->valid_data & IEEE80211_BSS_VALID_WMM)) { bss->wmm_used = elems->wmm_param || elems->wmm_info; bss->uapsd_supported = is_uapsd_supported(elems); if (!elems->parse_error) 
bss->valid_data |= IEEE80211_BSS_VALID_WMM; } if (update_data->beacon) { struct ieee80211_supported_band *sband = local->hw.wiphy->bands[rx_status->band]; if (!(rx_status->encoding == RX_ENC_HT) && !(rx_status->encoding == RX_ENC_VHT)) bss->beacon_rate = &sband->bitrates[rx_status->rate_idx]; } if (elems->vht_cap_elem) bss->vht_cap_info = le32_to_cpu(elems->vht_cap_elem->vht_cap_info); else bss->vht_cap_info = 0; kfree(elems); } struct ieee80211_bss * ieee80211_bss_info_update(struct ieee80211_local *local, struct ieee80211_rx_status *rx_status, struct ieee80211_mgmt *mgmt, size_t len, struct ieee80211_channel *channel) { bool beacon = ieee80211_is_beacon(mgmt->frame_control) || ieee80211_is_s1g_beacon(mgmt->frame_control); struct cfg80211_bss *cbss; struct inform_bss_update_data update_data = { .rx_status = rx_status, .beacon = beacon, }; struct cfg80211_inform_bss bss_meta = { .boottime_ns = rx_status->boottime_ns, .drv_data = (void *)&update_data, }; bool signal_valid; struct ieee80211_sub_if_data *scan_sdata; if (rx_status->flag & RX_FLAG_NO_SIGNAL_VAL) bss_meta.signal = 0; /* invalid signal indication */ else if (ieee80211_hw_check(&local->hw, SIGNAL_DBM)) bss_meta.signal = rx_status->signal * 100; else if (ieee80211_hw_check(&local->hw, SIGNAL_UNSPEC)) bss_meta.signal = (rx_status->signal * 100) / local->hw.max_signal; bss_meta.chan = channel; rcu_read_lock(); scan_sdata = rcu_dereference(local->scan_sdata); if (scan_sdata && scan_sdata->vif.type == NL80211_IFTYPE_STATION && scan_sdata->vif.cfg.assoc && ieee80211_have_rx_timestamp(rx_status)) { struct ieee80211_bss_conf *link_conf = NULL; /* for an MLO connection, set the TSF data only in case we have * an indication on which of the links the frame was received */ if (ieee80211_vif_is_mld(&scan_sdata->vif)) { if (rx_status->link_valid) { s8 link_id = rx_status->link_id; link_conf = rcu_dereference(scan_sdata->vif.link_conf[link_id]); } } else { link_conf = &scan_sdata->vif.bss_conf; } if (link_conf) { bss_meta.parent_tsf = ieee80211_calculate_rx_timestamp(local, rx_status, len + FCS_LEN, 24); ether_addr_copy(bss_meta.parent_bssid, link_conf->bssid); } } rcu_read_unlock(); cbss = cfg80211_inform_bss_frame_data(local->hw.wiphy, &bss_meta, mgmt, len, GFP_ATOMIC); if (!cbss) return NULL; /* In case the signal is invalid update the status */ signal_valid = channel == cbss->channel; if (!signal_valid) rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL; return (void *)cbss->priv; } static bool ieee80211_scan_accept_presp(struct ieee80211_sub_if_data *sdata, struct ieee80211_channel *channel, u32 scan_flags, const u8 *da) { if (!sdata) return false; /* accept broadcast on 6 GHz and for OCE */ if (is_broadcast_ether_addr(da) && (channel->band == NL80211_BAND_6GHZ || scan_flags & NL80211_SCAN_FLAG_ACCEPT_BCAST_PROBE_RESP)) return true; if (scan_flags & NL80211_SCAN_FLAG_RANDOM_ADDR) return true; return ether_addr_equal(da, sdata->vif.addr); } void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb) { struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); struct ieee80211_mgmt *mgmt = (void *)skb->data; struct ieee80211_bss *bss; struct ieee80211_channel *channel; size_t min_hdr_len = offsetof(struct ieee80211_mgmt, u.probe_resp.variable); if (!ieee80211_is_probe_resp(mgmt->frame_control) && !ieee80211_is_beacon(mgmt->frame_control) && !ieee80211_is_s1g_beacon(mgmt->frame_control)) return; if (ieee80211_is_s1g_beacon(mgmt->frame_control)) { if (ieee80211_is_s1g_short_beacon(mgmt->frame_control)) min_hdr_len = offsetof(struct 
ieee80211_ext, u.s1g_short_beacon.variable); else min_hdr_len = offsetof(struct ieee80211_ext, u.s1g_beacon); } if (skb->len < min_hdr_len) return; if (test_and_clear_bit(SCAN_BEACON_WAIT, &local->scanning)) { /* * we were passive scanning because of radar/no-IR, but * the beacon/proberesp rx gives us an opportunity to upgrade * to active scan */ set_bit(SCAN_BEACON_DONE, &local->scanning); wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work, 0); } channel = ieee80211_get_channel_khz(local->hw.wiphy, ieee80211_rx_status_to_khz(rx_status)); if (!channel || channel->flags & IEEE80211_CHAN_DISABLED) return; if (ieee80211_is_probe_resp(mgmt->frame_control)) { struct ieee80211_sub_if_data *sdata1, *sdata2; struct cfg80211_scan_request *scan_req; struct cfg80211_sched_scan_request *sched_scan_req; u32 scan_req_flags = 0, sched_scan_req_flags = 0; sdata1 = rcu_dereference(local->scan_sdata); sdata2 = rcu_dereference(local->sched_scan_sdata); if (likely(!sdata1 && !sdata2)) return; scan_req = rcu_dereference(local->scan_req); sched_scan_req = rcu_dereference(local->sched_scan_req); if (scan_req) scan_req_flags = scan_req->flags; if (sched_scan_req) sched_scan_req_flags = sched_scan_req->flags; /* ignore ProbeResp to foreign address or non-bcast (OCE) * unless scanning with randomised address */ if (!ieee80211_scan_accept_presp(sdata1, channel, scan_req_flags, mgmt->da) && !ieee80211_scan_accept_presp(sdata2, channel, sched_scan_req_flags, mgmt->da)) return; } else { /* Beacons are expected only with broadcast address */ if (!is_broadcast_ether_addr(mgmt->da)) return; } /* Do not update the BSS table in case of only monitor interfaces */ if (local->open_count == local->monitors) return; bss = ieee80211_bss_info_update(local, rx_status, mgmt, skb->len, channel); if (bss) ieee80211_rx_bss_put(local, bss); } static void ieee80211_prepare_scan_chandef(struct cfg80211_chan_def *chandef) { memset(chandef, 0, sizeof(*chandef)); chandef->width = NL80211_CHAN_WIDTH_20_NOHT; } /* return false if no more work */ static bool ieee80211_prep_hw_scan(struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; struct cfg80211_scan_request *req; struct cfg80211_chan_def chandef; u8 bands_used = 0; int i, ielen; u32 *n_chans; u32 flags = 0; req = rcu_dereference_protected(local->scan_req, lockdep_is_held(&local->hw.wiphy->mtx)); if (test_bit(SCAN_HW_CANCELLED, &local->scanning)) return false; if (ieee80211_hw_check(&local->hw, SINGLE_SCAN_ON_ALL_BANDS)) { local->hw_scan_req->req.n_channels = req->n_channels; for (i = 0; i < req->n_channels; i++) { local->hw_scan_req->req.channels[i] = req->channels[i]; bands_used |= BIT(req->channels[i]->band); } } else { do { if (local->hw_scan_band == NUM_NL80211_BANDS) return false; n_chans = &local->hw_scan_req->req.n_channels; *n_chans = 0; for (i = 0; i < req->n_channels; i++) { if (req->channels[i]->band != local->hw_scan_band) continue; local->hw_scan_req->req.channels[(*n_chans)++] = req->channels[i]; bands_used |= BIT(req->channels[i]->band); } local->hw_scan_band++; } while (!*n_chans); } ieee80211_prepare_scan_chandef(&chandef); if (req->flags & NL80211_SCAN_FLAG_MIN_PREQ_CONTENT) flags |= IEEE80211_PROBE_FLAG_MIN_CONTENT; ielen = ieee80211_build_preq_ies(sdata, (u8 *)local->hw_scan_req->req.ie, local->hw_scan_ies_bufsize, &local->hw_scan_req->ies, req->ie, req->ie_len, bands_used, req->rates, &chandef, flags); if (ielen < 0) return false; local->hw_scan_req->req.ie_len = ielen; local->hw_scan_req->req.no_cck = req->no_cck; 
ether_addr_copy(local->hw_scan_req->req.mac_addr, req->mac_addr); ether_addr_copy(local->hw_scan_req->req.mac_addr_mask, req->mac_addr_mask); ether_addr_copy(local->hw_scan_req->req.bssid, req->bssid); return true; } static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted) { struct ieee80211_local *local = hw_to_local(hw); bool hw_scan = test_bit(SCAN_HW_SCANNING, &local->scanning); bool was_scanning = local->scanning; struct cfg80211_scan_request *scan_req; struct ieee80211_sub_if_data *scan_sdata; struct ieee80211_sub_if_data *sdata; lockdep_assert_wiphy(local->hw.wiphy); /* * It's ok to abort a not-yet-running scan (that * we have one at all will be verified by checking * local->scan_req next), but not to complete it * successfully. */ if (WARN_ON(!local->scanning && !aborted)) aborted = true; if (WARN_ON(!local->scan_req)) return; scan_sdata = rcu_dereference_protected(local->scan_sdata, lockdep_is_held(&local->hw.wiphy->mtx)); if (hw_scan && !aborted && !ieee80211_hw_check(&local->hw, SINGLE_SCAN_ON_ALL_BANDS) && ieee80211_prep_hw_scan(scan_sdata)) { int rc; rc = drv_hw_scan(local, rcu_dereference_protected(local->scan_sdata, lockdep_is_held(&local->hw.wiphy->mtx)), local->hw_scan_req); if (rc == 0) return; /* HW scan failed and is going to be reported as aborted, * so clear old scan info. */ memset(&local->scan_info, 0, sizeof(local->scan_info)); aborted = true; } kfree(local->hw_scan_req); local->hw_scan_req = NULL; scan_req = rcu_dereference_protected(local->scan_req, lockdep_is_held(&local->hw.wiphy->mtx)); RCU_INIT_POINTER(local->scan_req, NULL); RCU_INIT_POINTER(local->scan_sdata, NULL); local->scanning = 0; local->scan_chandef.chan = NULL; synchronize_rcu(); if (scan_req != local->int_scan_req) { local->scan_info.aborted = aborted; cfg80211_scan_done(scan_req, &local->scan_info); } /* Set power back to normal operating levels. */ ieee80211_hw_conf_chan(local); if (!hw_scan && was_scanning) { ieee80211_configure_filter(local); drv_sw_scan_complete(local, scan_sdata); ieee80211_offchannel_return(local); } ieee80211_recalc_idle(local); ieee80211_mlme_notify_scan_completed(local); ieee80211_ibss_notify_scan_completed(local); /* Requeue all the work that might have been ignored while * the scan was in progress; if there was none this will * just be a no-op for the particular interface. */ list_for_each_entry(sdata, &local->interfaces, list) { if (ieee80211_sdata_running(sdata)) wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work); } if (was_scanning) ieee80211_start_next_roc(local); } void ieee80211_scan_completed(struct ieee80211_hw *hw, struct cfg80211_scan_info *info) { struct ieee80211_local *local = hw_to_local(hw); trace_api_scan_completed(local, info->aborted); set_bit(SCAN_COMPLETED, &local->scanning); if (info->aborted) set_bit(SCAN_ABORTED, &local->scanning); memcpy(&local->scan_info, info, sizeof(*info)); wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work, 0); } EXPORT_SYMBOL(ieee80211_scan_completed); static int ieee80211_start_sw_scan(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata) { /* Software scan is not supported in multi-channel cases */ if (!local->emulate_chanctx) return -EOPNOTSUPP; /* * Hardware/driver doesn't support hw_scan, so use software * scanning instead. First send a nullfunc frame with power save * bit on so that AP will buffer the frames for us while we are not * listening, then send probe requests to each channel and wait for * the responses. 
After all channels are scanned, tune back to the * original channel and send a nullfunc frame with power save bit * off to trigger the AP to send us all the buffered frames. * * Note that while local->sw_scanning is true everything else but * nullfunc frames and probe requests will be dropped in * ieee80211_tx_h_check_assoc(). */ drv_sw_scan_start(local, sdata, local->scan_addr); local->leave_oper_channel_time = jiffies; local->next_scan_state = SCAN_DECISION; local->scan_channel_idx = 0; ieee80211_offchannel_stop_vifs(local); /* ensure nullfunc is transmitted before leaving operating channel */ ieee80211_flush_queues(local, NULL, false); ieee80211_configure_filter(local); /* We need to set power level at maximum rate for scanning. */ ieee80211_hw_conf_chan(local); wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work, 0); return 0; } static bool __ieee80211_can_leave_ch(struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; struct ieee80211_sub_if_data *sdata_iter; unsigned int link_id; lockdep_assert_wiphy(local->hw.wiphy); if (!ieee80211_is_radar_required(local)) return true; if (!regulatory_pre_cac_allowed(local->hw.wiphy)) return false; list_for_each_entry(sdata_iter, &local->interfaces, list) { for_each_valid_link(&sdata_iter->wdev, link_id) if (sdata_iter->wdev.links[link_id].cac_started) return false; } return true; } static bool ieee80211_can_scan(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata) { if (!__ieee80211_can_leave_ch(sdata)) return false; if (!list_empty(&local->roc_list)) return false; if (sdata->vif.type == NL80211_IFTYPE_STATION && sdata->u.mgd.flags & IEEE80211_STA_CONNECTION_POLL) return false; return true; } void ieee80211_run_deferred_scan(struct ieee80211_local *local) { lockdep_assert_wiphy(local->hw.wiphy); if (!local->scan_req || local->scanning) return; if (!ieee80211_can_scan(local, rcu_dereference_protected( local->scan_sdata, lockdep_is_held(&local->hw.wiphy->mtx)))) return; wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work, round_jiffies_relative(0)); } static void ieee80211_send_scan_probe_req(struct ieee80211_sub_if_data *sdata, const u8 *src, const u8 *dst, const u8 *ssid, size_t ssid_len, const u8 *ie, size_t ie_len, u32 ratemask, u32 flags, u32 tx_flags, struct ieee80211_channel *channel) { struct sk_buff *skb; skb = ieee80211_build_probe_req(sdata, src, dst, ratemask, channel, ssid, ssid_len, ie, ie_len, flags); if (skb) { if (flags & IEEE80211_PROBE_FLAG_RANDOM_SN) { struct ieee80211_hdr *hdr = (void *)skb->data; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); u16 sn = get_random_u16(); info->control.flags |= IEEE80211_TX_CTRL_NO_SEQNO; hdr->seq_ctrl = cpu_to_le16(IEEE80211_SN_TO_SEQ(sn)); } IEEE80211_SKB_CB(skb)->flags |= tx_flags; IEEE80211_SKB_CB(skb)->control.flags |= IEEE80211_TX_CTRL_DONT_USE_RATE_MASK; ieee80211_tx_skb_tid_band(sdata, skb, 7, channel->band); } } static void ieee80211_scan_state_send_probe(struct ieee80211_local *local, unsigned long *next_delay) { int i; struct ieee80211_sub_if_data *sdata; struct cfg80211_scan_request *scan_req; enum nl80211_band band = local->hw.conf.chandef.chan->band; u32 flags = 0, tx_flags; scan_req = rcu_dereference_protected(local->scan_req, lockdep_is_held(&local->hw.wiphy->mtx)); tx_flags = IEEE80211_TX_INTFL_OFFCHAN_TX_OK; if (scan_req->no_cck) tx_flags |= IEEE80211_TX_CTL_NO_CCK_RATE; if (scan_req->flags & NL80211_SCAN_FLAG_MIN_PREQ_CONTENT) flags |= IEEE80211_PROBE_FLAG_MIN_CONTENT; if (scan_req->flags & 
NL80211_SCAN_FLAG_RANDOM_SN) flags |= IEEE80211_PROBE_FLAG_RANDOM_SN; sdata = rcu_dereference_protected(local->scan_sdata, lockdep_is_held(&local->hw.wiphy->mtx)); for (i = 0; i < scan_req->n_ssids; i++) ieee80211_send_scan_probe_req( sdata, local->scan_addr, scan_req->bssid, scan_req->ssids[i].ssid, scan_req->ssids[i].ssid_len, scan_req->ie, scan_req->ie_len, scan_req->rates[band], flags, tx_flags, local->hw.conf.chandef.chan); /* * After sending probe requests, wait for probe responses * on the channel. */ *next_delay = msecs_to_jiffies(scan_req->duration) > IEEE80211_PROBE_DELAY + IEEE80211_CHANNEL_TIME ? msecs_to_jiffies(scan_req->duration) - IEEE80211_PROBE_DELAY : IEEE80211_CHANNEL_TIME; local->next_scan_state = SCAN_DECISION; } static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata, struct cfg80211_scan_request *req) { struct ieee80211_local *local = sdata->local; bool hw_scan = local->ops->hw_scan; int rc; lockdep_assert_wiphy(local->hw.wiphy); if (local->scan_req) return -EBUSY; /* For an MLO connection, if a link ID was specified, validate that it * is indeed active. */ if (ieee80211_vif_is_mld(&sdata->vif) && req->tsf_report_link_id >= 0 && !(sdata->vif.active_links & BIT(req->tsf_report_link_id))) return -EINVAL; if (!__ieee80211_can_leave_ch(sdata)) return -EBUSY; if (!ieee80211_can_scan(local, sdata)) { /* wait for the work to finish/time out */ rcu_assign_pointer(local->scan_req, req); rcu_assign_pointer(local->scan_sdata, sdata); return 0; } again: if (hw_scan) { u8 *ies; local->hw_scan_ies_bufsize = local->scan_ies_len + req->ie_len; if (ieee80211_hw_check(&local->hw, SINGLE_SCAN_ON_ALL_BANDS)) { int i, n_bands = 0; u8 bands_counted = 0; for (i = 0; i < req->n_channels; i++) { if (bands_counted & BIT(req->channels[i]->band)) continue; bands_counted |= BIT(req->channels[i]->band); n_bands++; } local->hw_scan_ies_bufsize *= n_bands; } local->hw_scan_req = kmalloc(struct_size(local->hw_scan_req, req.channels, req->n_channels) + local->hw_scan_ies_bufsize, GFP_KERNEL); if (!local->hw_scan_req) return -ENOMEM; local->hw_scan_req->req.ssids = req->ssids; local->hw_scan_req->req.n_ssids = req->n_ssids; /* None of the channels are actually set * up but let UBSAN know the boundaries. */ local->hw_scan_req->req.n_channels = req->n_channels; ies = (u8 *)local->hw_scan_req + sizeof(*local->hw_scan_req) + req->n_channels * sizeof(req->channels[0]); local->hw_scan_req->req.ie = ies; local->hw_scan_req->req.flags = req->flags; eth_broadcast_addr(local->hw_scan_req->req.bssid); local->hw_scan_req->req.duration = req->duration; local->hw_scan_req->req.duration_mandatory = req->duration_mandatory; local->hw_scan_req->req.tsf_report_link_id = req->tsf_report_link_id; local->hw_scan_band = 0; local->hw_scan_req->req.n_6ghz_params = req->n_6ghz_params; local->hw_scan_req->req.scan_6ghz_params = req->scan_6ghz_params; local->hw_scan_req->req.scan_6ghz = req->scan_6ghz; /* * After allocating local->hw_scan_req, we must * go through until ieee80211_prep_hw_scan(), so * anything that might be changed here and leave * this function early must not go after this * allocation. 
*/ } rcu_assign_pointer(local->scan_req, req); rcu_assign_pointer(local->scan_sdata, sdata); if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) get_random_mask_addr(local->scan_addr, req->mac_addr, req->mac_addr_mask); else memcpy(local->scan_addr, sdata->vif.addr, ETH_ALEN); if (hw_scan) { __set_bit(SCAN_HW_SCANNING, &local->scanning); } else if ((req->n_channels == 1) && (req->channels[0] == local->hw.conf.chandef.chan)) { /* * If we are scanning only on the operating channel * then we do not need to stop normal activities */ unsigned long next_delay; __set_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning); ieee80211_recalc_idle(local); /* Notify driver scan is starting, keep order of operations * same as normal software scan, in case that matters. */ drv_sw_scan_start(local, sdata, local->scan_addr); ieee80211_configure_filter(local); /* accept probe-responses */ /* We need to ensure power level is at max for scanning. */ ieee80211_hw_conf_chan(local); if ((req->channels[0]->flags & (IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_RADAR)) || !req->n_ssids) { next_delay = IEEE80211_PASSIVE_CHANNEL_TIME; if (req->n_ssids) set_bit(SCAN_BEACON_WAIT, &local->scanning); } else { ieee80211_scan_state_send_probe(local, &next_delay); next_delay = IEEE80211_CHANNEL_TIME; } /* Now, just wait a bit and we are all done! */ wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work, next_delay); return 0; } else { /* Do normal software scan */ __set_bit(SCAN_SW_SCANNING, &local->scanning); } ieee80211_recalc_idle(local); if (hw_scan) { WARN_ON(!ieee80211_prep_hw_scan(sdata)); rc = drv_hw_scan(local, sdata, local->hw_scan_req); } else { rc = ieee80211_start_sw_scan(local, sdata); } if (rc) { kfree(local->hw_scan_req); local->hw_scan_req = NULL; local->scanning = 0; ieee80211_recalc_idle(local); local->scan_req = NULL; RCU_INIT_POINTER(local->scan_sdata, NULL); } if (hw_scan && rc == 1) { /* * we can't fall back to software for P2P-GO * as it must update NoA etc. 
*/ if (ieee80211_vif_type_p2p(&sdata->vif) == NL80211_IFTYPE_P2P_GO) return -EOPNOTSUPP; hw_scan = false; goto again; } return rc; } static unsigned long ieee80211_scan_get_channel_time(struct ieee80211_channel *chan) { /* * TODO: channel switching also consumes quite some time, * add that delay as well to get a better estimation */ if (chan->flags & (IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_RADAR)) return IEEE80211_PASSIVE_CHANNEL_TIME; return IEEE80211_PROBE_DELAY + IEEE80211_CHANNEL_TIME; } static void ieee80211_scan_state_decision(struct ieee80211_local *local, unsigned long *next_delay) { bool associated = false; bool tx_empty = true; bool bad_latency; struct ieee80211_sub_if_data *sdata; struct ieee80211_channel *next_chan; enum mac80211_scan_state next_scan_state; struct cfg80211_scan_request *scan_req; lockdep_assert_wiphy(local->hw.wiphy); /* * check if at least one STA interface is associated, * check if at least one STA interface has pending tx frames * and grab the lowest used beacon interval */ list_for_each_entry(sdata, &local->interfaces, list) { if (!ieee80211_sdata_running(sdata)) continue; if (sdata->vif.type == NL80211_IFTYPE_STATION) { if (sdata->u.mgd.associated) { associated = true; if (!qdisc_all_tx_empty(sdata->dev)) { tx_empty = false; break; } } } } scan_req = rcu_dereference_protected(local->scan_req, lockdep_is_held(&local->hw.wiphy->mtx)); next_chan = scan_req->channels[local->scan_channel_idx]; /* * we're currently scanning a different channel, let's * see if we can scan another channel without interfering * with the current traffic situation. * * Keep good latency, do not stay off-channel more than 125 ms. */ bad_latency = time_after(jiffies + ieee80211_scan_get_channel_time(next_chan), local->leave_oper_channel_time + HZ / 8); if (associated && !tx_empty) { if (scan_req->flags & NL80211_SCAN_FLAG_LOW_PRIORITY) next_scan_state = SCAN_ABORT; else next_scan_state = SCAN_SUSPEND; } else if (associated && bad_latency) { next_scan_state = SCAN_SUSPEND; } else { next_scan_state = SCAN_SET_CHANNEL; } local->next_scan_state = next_scan_state; *next_delay = 0; } static void ieee80211_scan_state_set_channel(struct ieee80211_local *local, unsigned long *next_delay) { int skip; struct ieee80211_channel *chan; struct cfg80211_scan_request *scan_req; scan_req = rcu_dereference_protected(local->scan_req, lockdep_is_held(&local->hw.wiphy->mtx)); skip = 0; chan = scan_req->channels[local->scan_channel_idx]; local->scan_chandef.chan = chan; local->scan_chandef.center_freq1 = chan->center_freq; local->scan_chandef.freq1_offset = chan->freq_offset; local->scan_chandef.center_freq2 = 0; /* For scanning on the S1G band, detect the channel width according to * the channel being scanned. */ if (chan->band == NL80211_BAND_S1GHZ) { local->scan_chandef.width = ieee80211_s1g_channel_width(chan); goto set_channel; } /* If scanning on oper channel, use whatever channel-type * is currently in use. */ if (chan == local->hw.conf.chandef.chan) local->scan_chandef = local->hw.conf.chandef; else local->scan_chandef.width = NL80211_CHAN_WIDTH_20_NOHT; set_channel: if (ieee80211_hw_conf_chan(local)) skip = 1; /* advance state machine to next channel/band */ local->scan_channel_idx++; if (skip) { /* if we skip this channel return to the decision state */ local->next_scan_state = SCAN_DECISION; return; } /* * Probe delay is used to update the NAV, cf. 
11.1.3.2.2 * (which unfortunately doesn't say _why_ step a) is done, * but it waits for the probe delay or until a frame is * received - and the received frame would update the NAV). * For now, we do not support waiting until a frame is * received. * * In any case, it is not necessary for a passive scan. */ if ((chan->flags & (IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_RADAR)) || !scan_req->n_ssids) { *next_delay = max(msecs_to_jiffies(scan_req->duration), IEEE80211_PASSIVE_CHANNEL_TIME); local->next_scan_state = SCAN_DECISION; if (scan_req->n_ssids) set_bit(SCAN_BEACON_WAIT, &local->scanning); return; } /* active scan, send probes */ *next_delay = IEEE80211_PROBE_DELAY; local->next_scan_state = SCAN_SEND_PROBE; } static void ieee80211_scan_state_suspend(struct ieee80211_local *local, unsigned long *next_delay) { /* switch back to the operating channel */ local->scan_chandef.chan = NULL; ieee80211_hw_conf_chan(local); /* disable PS */ ieee80211_offchannel_return(local); *next_delay = HZ / 5; /* afterwards, resume scan & go to next channel */ local->next_scan_state = SCAN_RESUME; } static void ieee80211_scan_state_resume(struct ieee80211_local *local, unsigned long *next_delay) { ieee80211_offchannel_stop_vifs(local); if (local->ops->flush) { ieee80211_flush_queues(local, NULL, false); *next_delay = 0; } else *next_delay = HZ / 10; /* remember when we left the operating channel */ local->leave_oper_channel_time = jiffies; /* advance to the next channel to be scanned */ local->next_scan_state = SCAN_SET_CHANNEL; } void ieee80211_scan_work(struct wiphy *wiphy, struct wiphy_work *work) { struct ieee80211_local *local = container_of(work, struct ieee80211_local, scan_work.work); struct ieee80211_sub_if_data *sdata; struct cfg80211_scan_request *scan_req; unsigned long next_delay = 0; bool aborted; lockdep_assert_wiphy(local->hw.wiphy); if (!ieee80211_can_run_worker(local)) { aborted = true; goto out_complete; } sdata = rcu_dereference_protected(local->scan_sdata, lockdep_is_held(&local->hw.wiphy->mtx)); scan_req = rcu_dereference_protected(local->scan_req, lockdep_is_held(&local->hw.wiphy->mtx)); /* When scanning on-channel, the first-callback means completed. 
*/ if (test_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning)) { aborted = test_and_clear_bit(SCAN_ABORTED, &local->scanning); goto out_complete; } if (test_and_clear_bit(SCAN_COMPLETED, &local->scanning)) { aborted = test_and_clear_bit(SCAN_ABORTED, &local->scanning); goto out_complete; } if (!sdata || !scan_req) return; if (!local->scanning) { int rc; RCU_INIT_POINTER(local->scan_req, NULL); RCU_INIT_POINTER(local->scan_sdata, NULL); rc = __ieee80211_start_scan(sdata, scan_req); if (!rc) return; /* need to complete scan in cfg80211 */ rcu_assign_pointer(local->scan_req, scan_req); aborted = true; goto out_complete; } clear_bit(SCAN_BEACON_WAIT, &local->scanning); /* * as long as no delay is required advance immediately * without scheduling a new work */ do { if (!ieee80211_sdata_running(sdata)) { aborted = true; goto out_complete; } if (test_and_clear_bit(SCAN_BEACON_DONE, &local->scanning) && local->next_scan_state == SCAN_DECISION) local->next_scan_state = SCAN_SEND_PROBE; switch (local->next_scan_state) { case SCAN_DECISION: /* if no more bands/channels left, complete scan */ if (local->scan_channel_idx >= scan_req->n_channels) { aborted = false; goto out_complete; } ieee80211_scan_state_decision(local, &next_delay); break; case SCAN_SET_CHANNEL: ieee80211_scan_state_set_channel(local, &next_delay); break; case SCAN_SEND_PROBE: ieee80211_scan_state_send_probe(local, &next_delay); break; case SCAN_SUSPEND: ieee80211_scan_state_suspend(local, &next_delay); break; case SCAN_RESUME: ieee80211_scan_state_resume(local, &next_delay); break; case SCAN_ABORT: aborted = true; goto out_complete; } } while (next_delay == 0); wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work, next_delay); return; out_complete: __ieee80211_scan_completed(&local->hw, aborted); } int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata, struct cfg80211_scan_request *req) { lockdep_assert_wiphy(sdata->local->hw.wiphy); return __ieee80211_start_scan(sdata, req); } int ieee80211_request_ibss_scan(struct ieee80211_sub_if_data *sdata, const u8 *ssid, u8 ssid_len, struct ieee80211_channel **channels, unsigned int n_channels) { struct ieee80211_local *local = sdata->local; int i, n_ch = 0; enum nl80211_band band; lockdep_assert_wiphy(local->hw.wiphy); /* busy scanning */ if (local->scan_req) return -EBUSY; /* fill internal scan request */ if (!channels) { int max_n; for (band = 0; band < NUM_NL80211_BANDS; band++) { if (!local->hw.wiphy->bands[band] || band == NL80211_BAND_6GHZ) continue; max_n = local->hw.wiphy->bands[band]->n_channels; for (i = 0; i < max_n; i++) { struct ieee80211_channel *tmp_ch = &local->hw.wiphy->bands[band]->channels[i]; if (tmp_ch->flags & (IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_DISABLED) || !cfg80211_wdev_channel_allowed(&sdata->wdev, tmp_ch)) continue; local->int_scan_req->channels[n_ch] = tmp_ch; n_ch++; } } if (WARN_ON_ONCE(n_ch == 0)) return -EINVAL; local->int_scan_req->n_channels = n_ch; } else { for (i = 0; i < n_channels; i++) { if (channels[i]->flags & (IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_DISABLED) || !cfg80211_wdev_channel_allowed(&sdata->wdev, channels[i])) continue; local->int_scan_req->channels[n_ch] = channels[i]; n_ch++; } if (n_ch == 0) return -EINVAL; local->int_scan_req->n_channels = n_ch; } local->int_scan_req->ssids = &local->scan_ssid; local->int_scan_req->n_ssids = 1; memcpy(local->int_scan_req->ssids[0].ssid, ssid, IEEE80211_MAX_SSID_LEN); local->int_scan_req->ssids[0].ssid_len = ssid_len; return __ieee80211_start_scan(sdata, sdata->local->int_scan_req); } void 
ieee80211_scan_cancel(struct ieee80211_local *local) { /* ensure a new scan cannot be queued */ lockdep_assert_wiphy(local->hw.wiphy); /* * We are canceling software scan, or deferred scan that was not * yet really started (see __ieee80211_start_scan ). * * Regarding hardware scan: * - we can not call __ieee80211_scan_completed() as when * SCAN_HW_SCANNING bit is set this function change * local->hw_scan_req to operate on 5G band, what race with * driver which can use local->hw_scan_req * * - we can not cancel scan_work since driver can schedule it * by ieee80211_scan_completed(..., true) to finish scan * * Hence we only call the cancel_hw_scan() callback, but the low-level * driver is still responsible for calling ieee80211_scan_completed() * after the scan was completed/aborted. */ if (!local->scan_req) return; /* * We have a scan running and the driver already reported completion, * but the worker hasn't run yet or is stuck on the mutex - mark it as * cancelled. */ if (test_bit(SCAN_HW_SCANNING, &local->scanning) && test_bit(SCAN_COMPLETED, &local->scanning)) { set_bit(SCAN_HW_CANCELLED, &local->scanning); return; } if (test_bit(SCAN_HW_SCANNING, &local->scanning)) { /* * Make sure that __ieee80211_scan_completed doesn't trigger a * scan on another band. */ set_bit(SCAN_HW_CANCELLED, &local->scanning); if (local->ops->cancel_hw_scan) drv_cancel_hw_scan(local, rcu_dereference_protected(local->scan_sdata, lockdep_is_held(&local->hw.wiphy->mtx))); return; } wiphy_delayed_work_cancel(local->hw.wiphy, &local->scan_work); /* and clean up */ memset(&local->scan_info, 0, sizeof(local->scan_info)); __ieee80211_scan_completed(&local->hw, true); } int __ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata, struct cfg80211_sched_scan_request *req) { struct ieee80211_local *local = sdata->local; struct ieee80211_scan_ies sched_scan_ies = {}; struct cfg80211_chan_def chandef; int ret, i, iebufsz, num_bands = 0; u32 rate_masks[NUM_NL80211_BANDS] = {}; u8 bands_used = 0; u8 *ie; u32 flags = 0; lockdep_assert_wiphy(local->hw.wiphy); iebufsz = local->scan_ies_len + req->ie_len; if (!local->ops->sched_scan_start) return -EOPNOTSUPP; for (i = 0; i < NUM_NL80211_BANDS; i++) { if (local->hw.wiphy->bands[i]) { bands_used |= BIT(i); rate_masks[i] = (u32) -1; num_bands++; } } if (req->flags & NL80211_SCAN_FLAG_MIN_PREQ_CONTENT) flags |= IEEE80211_PROBE_FLAG_MIN_CONTENT; ie = kcalloc(iebufsz, num_bands, GFP_KERNEL); if (!ie) { ret = -ENOMEM; goto out; } ieee80211_prepare_scan_chandef(&chandef); ret = ieee80211_build_preq_ies(sdata, ie, num_bands * iebufsz, &sched_scan_ies, req->ie, req->ie_len, bands_used, rate_masks, &chandef, flags); if (ret < 0) goto error; ret = drv_sched_scan_start(local, sdata, req, &sched_scan_ies); if (ret == 0) { rcu_assign_pointer(local->sched_scan_sdata, sdata); rcu_assign_pointer(local->sched_scan_req, req); } error: kfree(ie); out: if (ret) { /* Clean in case of failure after HW restart or upon resume. 
*/ RCU_INIT_POINTER(local->sched_scan_sdata, NULL); RCU_INIT_POINTER(local->sched_scan_req, NULL); } return ret; } int ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata, struct cfg80211_sched_scan_request *req) { struct ieee80211_local *local = sdata->local; lockdep_assert_wiphy(local->hw.wiphy); if (rcu_access_pointer(local->sched_scan_sdata)) return -EBUSY; return __ieee80211_request_sched_scan_start(sdata, req); } int ieee80211_request_sched_scan_stop(struct ieee80211_local *local) { struct ieee80211_sub_if_data *sched_scan_sdata; int ret = -ENOENT; lockdep_assert_wiphy(local->hw.wiphy); if (!local->ops->sched_scan_stop) return -EOPNOTSUPP; /* We don't want to restart sched scan anymore. */ RCU_INIT_POINTER(local->sched_scan_req, NULL); sched_scan_sdata = rcu_dereference_protected(local->sched_scan_sdata, lockdep_is_held(&local->hw.wiphy->mtx)); if (sched_scan_sdata) { ret = drv_sched_scan_stop(local, sched_scan_sdata); if (!ret) RCU_INIT_POINTER(local->sched_scan_sdata, NULL); } return ret; } void ieee80211_sched_scan_results(struct ieee80211_hw *hw) { struct ieee80211_local *local = hw_to_local(hw); trace_api_sched_scan_results(local); cfg80211_sched_scan_results(hw->wiphy, 0); } EXPORT_SYMBOL(ieee80211_sched_scan_results); void ieee80211_sched_scan_end(struct ieee80211_local *local) { lockdep_assert_wiphy(local->hw.wiphy); if (!rcu_access_pointer(local->sched_scan_sdata)) return; RCU_INIT_POINTER(local->sched_scan_sdata, NULL); /* If sched scan was aborted by the driver. */ RCU_INIT_POINTER(local->sched_scan_req, NULL); cfg80211_sched_scan_stopped_locked(local->hw.wiphy, 0); } void ieee80211_sched_scan_stopped_work(struct wiphy *wiphy, struct wiphy_work *work) { struct ieee80211_local *local = container_of(work, struct ieee80211_local, sched_scan_stopped_work); ieee80211_sched_scan_end(local); } void ieee80211_sched_scan_stopped(struct ieee80211_hw *hw) { struct ieee80211_local *local = hw_to_local(hw); trace_api_sched_scan_stopped(local); /* * this shouldn't really happen, so for simplicity * simply ignore it, and let mac80211 reconfigure * the sched scan later on. */ if (local->in_reconfig) return; wiphy_work_queue(hw->wiphy, &local->sched_scan_stopped_work); } EXPORT_SYMBOL(ieee80211_sched_scan_stopped); |
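The scan worker above is a small state machine: each handler (decision, set-channel, send-probe, suspend, resume) updates local->next_scan_state and next_delay, and the do/while loop keeps running handlers back to back until one of them asks for a non-zero delay, at which point the work is re-queued. The standalone sketch below illustrates only that scheduling pattern; the enum values, handler bodies, function names and timings are invented for the example and are not mac80211 code.

#include <stdio.h>

enum scan_state { DECISION, SEND_PROBE, SUSPEND, DONE };

struct scan_ctx {
    enum scan_state next;
    int channel, n_channels;
};

/* Each handler picks the next state and returns how long to wait before it runs. */
static unsigned int run_one_state(struct scan_ctx *sc)
{
    switch (sc->next) {
    case DECISION:
        if (sc->channel >= sc->n_channels) {
            sc->next = DONE;
            return 0;
        }
        sc->next = SEND_PROBE;
        return 0;                       /* advance immediately */
    case SEND_PROBE:
        printf("probe on channel %d\n", sc->channel++);
        sc->next = SUSPEND;
        return 0;                       /* advance immediately */
    case SUSPEND:
        sc->next = DECISION;
        return 30;                      /* illustrative: wait 30 ms on the operating channel */
    default:
        return 0;
    }
}

static void scan_work(struct scan_ctx *sc)
{
    unsigned int delay;

    do {
        if (sc->next == DONE) {
            printf("scan completed\n");
            return;
        }
        delay = run_one_state(sc);
    } while (delay == 0);               /* keep going while no delay is required */

    printf("re-queue scan work in %u ms\n", delay);
}

int main(void)
{
    struct scan_ctx sc = { .next = DECISION, .n_channels = 3 };

    scan_work(&sc);     /* probes channel 0, then asks for a delay */
    scan_work(&sc);     /* in the driver this re-run comes from the work queue */
    return 0;
}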
// SPDX-License-Identifier: GPL-2.0-only /* DVB USB compliant linux driver for MSI Mega Sky 580 DVB-T USB2.0 receiver * * Copyright (C) 2006 Aapo Tahkola (aet@rasterburn.org) * * see Documentation/driver-api/media/drivers/dvb-usb.rst for more information */ #include "m920x.h" #include "mt352.h" #include "mt352_priv.h" #include "qt1010.h" #include "tda1004x.h" #include "tda827x.h" #include "mt2060.h" #include <media/tuner.h> #include "tuner-simple.h" #include <linux/unaligned.h> /* debug */ static int dvb_usb_m920x_debug; module_param_named(debug,dvb_usb_m920x_debug, int, 0644); MODULE_PARM_DESC(debug, "set debugging level (1=rc (or-able))."
DVB_USB_DEBUG_STATUS); DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); static int m920x_set_filter(struct dvb_usb_device *d, int type, int idx, int pid); static inline int m920x_read(struct usb_device *udev, u8 request, u16 value, u16 index, void *data, int size) { int ret; ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), request, USB_TYPE_VENDOR | USB_DIR_IN, value, index, data, size, 2000); if (ret < 0) { printk(KERN_INFO "m920x_read = error: %d\n", ret); return ret; } if (ret != size) { deb("m920x_read = no data\n"); return -EIO; } return 0; } static inline int m920x_write(struct usb_device *udev, u8 request, u16 value, u16 index) { return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), request, USB_TYPE_VENDOR | USB_DIR_OUT, value, index, NULL, 0, 2000); } static inline int m920x_write_seq(struct usb_device *udev, u8 request, struct m920x_inits *seq) { int ret; do { ret = m920x_write(udev, request, seq->data, seq->address); if (ret != 0) return ret; seq++; } while (seq->address); return 0; } static int m920x_init(struct dvb_usb_device *d, struct m920x_inits *rc_seq) { int ret, i, epi, flags = 0; int adap_enabled[M9206_MAX_ADAPTERS] = { 0 }; /* Remote controller init. */ if (d->props.rc.legacy.rc_query || d->props.rc.core.rc_query) { deb("Initialising remote control\n"); ret = m920x_write_seq(d->udev, M9206_CORE, rc_seq); if (ret != 0) { deb("Initialising remote control failed\n"); return ret; } deb("Initialising remote control success\n"); } for (i = 0; i < d->props.num_adapters; i++) flags |= d->adapter[i].props.fe[0].caps; /* Some devices(Dposh) might crash if we attempt touch at all. */ if (flags & DVB_USB_ADAP_HAS_PID_FILTER) { for (i = 0; i < d->props.num_adapters; i++) { epi = d->adapter[i].props.fe[0].stream.endpoint - 0x81; if (epi < 0 || epi >= M9206_MAX_ADAPTERS) { printk(KERN_INFO "m920x: Unexpected adapter endpoint!\n"); return -EINVAL; } adap_enabled[epi] = 1; } for (i = 0; i < M9206_MAX_ADAPTERS; i++) { if (adap_enabled[i]) continue; if ((ret = m920x_set_filter(d, 0x81 + i, 0, 0x0)) != 0) return ret; if ((ret = m920x_set_filter(d, 0x81 + i, 0, 0x02f5)) != 0) return ret; } } return 0; } static int m920x_init_ep(struct usb_interface *intf) { struct usb_device *udev = interface_to_usbdev(intf); struct usb_host_interface *alt; if ((alt = usb_altnum_to_altsetting(intf, 1)) == NULL) { deb("No alt found!\n"); return -ENODEV; } return usb_set_interface(udev, alt->desc.bInterfaceNumber, alt->desc.bAlternateSetting); } static inline void m920x_parse_rc_state(struct dvb_usb_device *d, u8 rc_state, int *state) { struct m920x_state *m = d->priv; switch (rc_state) { case 0x80: *state = REMOTE_NO_KEY_PRESSED; break; case 0x88: /* framing error or "invalid code" */ case 0x99: case 0xc0: case 0xd8: *state = REMOTE_NO_KEY_PRESSED; m->rep_count = 0; break; case 0x93: case 0x92: case 0x83: /* pinnacle PCTV310e */ case 0x82: m->rep_count = 0; *state = REMOTE_KEY_PRESSED; break; case 0x91: case 0x81: /* pinnacle PCTV310e */ /* prevent immediate auto-repeat */ if (++m->rep_count > 2) *state = REMOTE_KEY_REPEAT; else *state = REMOTE_NO_KEY_PRESSED; break; default: deb("Unexpected rc state %02x\n", rc_state); *state = REMOTE_NO_KEY_PRESSED; break; } } static int m920x_rc_query(struct dvb_usb_device *d, u32 *event, int *state) { int i, ret = 0; u8 *rc_state; rc_state = kmalloc(2, GFP_KERNEL); if (!rc_state) return -ENOMEM; ret = m920x_read(d->udev, M9206_CORE, 0x0, M9206_RC_STATE, rc_state, 1); if (ret != 0) goto out; ret = m920x_read(d->udev, M9206_CORE, 0x0, M9206_RC_KEY, rc_state + 1, 1); if 
(ret != 0) goto out; m920x_parse_rc_state(d, rc_state[0], state); for (i = 0; i < d->props.rc.legacy.rc_map_size; i++) if (rc5_data(&d->props.rc.legacy.rc_map_table[i]) == rc_state[1]) { *event = d->props.rc.legacy.rc_map_table[i].keycode; goto out; } if (rc_state[1] != 0) deb("Unknown rc key %02x\n", rc_state[1]); *state = REMOTE_NO_KEY_PRESSED; out: kfree(rc_state); return ret; } static int m920x_rc_core_query(struct dvb_usb_device *d) { int ret = 0; u8 *rc_state; int state; rc_state = kmalloc(2, GFP_KERNEL); if (!rc_state) return -ENOMEM; if ((ret = m920x_read(d->udev, M9206_CORE, 0x0, M9206_RC_STATE, &rc_state[0], 1)) != 0) goto out; if ((ret = m920x_read(d->udev, M9206_CORE, 0x0, M9206_RC_KEY, &rc_state[1], 1)) != 0) goto out; deb("state=0x%02x keycode=0x%02x\n", rc_state[0], rc_state[1]); m920x_parse_rc_state(d, rc_state[0], &state); if (state == REMOTE_NO_KEY_PRESSED) rc_keyup(d->rc_dev); else if (state == REMOTE_KEY_REPEAT) rc_repeat(d->rc_dev); else rc_keydown(d->rc_dev, RC_PROTO_UNKNOWN, rc_state[1], 0); out: kfree(rc_state); return ret; } /* I2C */ static int m920x_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num) { struct dvb_usb_device *d = i2c_get_adapdata(adap); int i, j; int ret = 0; if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EAGAIN; for (i = 0; i < num; i++) { if (msg[i].flags & (I2C_M_NO_RD_ACK | I2C_M_IGNORE_NAK | I2C_M_TEN) || msg[i].len == 0) { /* For a 0 byte message, I think sending the address * to index 0x80|0x40 would be the correct thing to * do. However, zero byte messages are only used for * probing, and since we don't know how to get the * slave's ack, we can't probe. */ ret = -ENOTSUPP; goto unlock; } /* Send START & address/RW bit */ if (!(msg[i].flags & I2C_M_NOSTART)) { if ((ret = m920x_write(d->udev, M9206_I2C, (msg[i].addr << 1) | (msg[i].flags & I2C_M_RD ? 0x01 : 0), 0x80)) != 0) goto unlock; /* Should check for ack here, if we knew how. */ } if (msg[i].flags & I2C_M_RD) { char *read = kmalloc(1, GFP_KERNEL); if (!read) { ret = -ENOMEM; goto unlock; } for (j = 0; j < msg[i].len; j++) { /* Last byte of transaction? * Send STOP, otherwise send ACK. */ int stop = (i+1 == num && j+1 == msg[i].len) ? 0x40 : 0x01; if ((ret = m920x_read(d->udev, M9206_I2C, 0x0, 0x20 | stop, read, 1)) != 0) { kfree(read); goto unlock; } msg[i].buf[j] = read[0]; } kfree(read); } else { for (j = 0; j < msg[i].len; j++) { /* Last byte of transaction? Then send STOP. */ int stop = (i+1 == num && j+1 == msg[i].len) ? 0x40 : 0x00; if ((ret = m920x_write(d->udev, M9206_I2C, msg[i].buf[j], stop)) != 0) goto unlock; /* Should check for ack here too. 
*/ } } } ret = num; unlock: mutex_unlock(&d->i2c_mutex); return ret; } static u32 m920x_i2c_func(struct i2c_adapter *adapter) { return I2C_FUNC_I2C; } static struct i2c_algorithm m920x_i2c_algo = { .master_xfer = m920x_i2c_xfer, .functionality = m920x_i2c_func, }; /* pid filter */ static int m920x_set_filter(struct dvb_usb_device *d, int type, int idx, int pid) { int ret = 0; if (pid >= 0x8000) return -EINVAL; pid |= 0x8000; if ((ret = m920x_write(d->udev, M9206_FILTER, pid, (type << 8) | (idx * 4) )) != 0) return ret; if ((ret = m920x_write(d->udev, M9206_FILTER, 0, (type << 8) | (idx * 4) )) != 0) return ret; return ret; } static int m920x_update_filters(struct dvb_usb_adapter *adap) { struct m920x_state *m = adap->dev->priv; int enabled = m->filtering_enabled[adap->id]; int i, ret = 0, filter = 0; int ep = adap->props.fe[0].stream.endpoint; for (i = 0; i < M9206_MAX_FILTERS; i++) if (m->filters[adap->id][i] == 8192) enabled = 0; /* Disable all filters */ if ((ret = m920x_set_filter(adap->dev, ep, 1, enabled)) != 0) return ret; for (i = 0; i < M9206_MAX_FILTERS; i++) if ((ret = m920x_set_filter(adap->dev, ep, i + 2, 0)) != 0) return ret; /* Set */ if (enabled) { for (i = 0; i < M9206_MAX_FILTERS; i++) { if (m->filters[adap->id][i] == 0) continue; if ((ret = m920x_set_filter(adap->dev, ep, filter + 2, m->filters[adap->id][i])) != 0) return ret; filter++; } } return ret; } static int m920x_pid_filter_ctrl(struct dvb_usb_adapter *adap, int onoff) { struct m920x_state *m = adap->dev->priv; m->filtering_enabled[adap->id] = onoff ? 1 : 0; return m920x_update_filters(adap); } static int m920x_pid_filter(struct dvb_usb_adapter *adap, int index, u16 pid, int onoff) { struct m920x_state *m = adap->dev->priv; m->filters[adap->id][index] = onoff ? pid : 0; return m920x_update_filters(adap); } static int m920x_firmware_download(struct usb_device *udev, const struct firmware *fw) { u16 value, index, size; u8 *read, *buff; int i, pass, ret = 0; buff = kmalloc(65536, GFP_KERNEL); if (buff == NULL) return -ENOMEM; read = kmalloc(4, GFP_KERNEL); if (!read) { kfree(buff); return -ENOMEM; } if ((ret = m920x_read(udev, M9206_FILTER, 0x0, 0x8000, read, 4)) != 0) goto done; deb("%*ph\n", 4, read); if ((ret = m920x_read(udev, M9206_FW, 0x0, 0x0, read, 1)) != 0) goto done; deb("%x\n", read[0]); for (pass = 0; pass < 2; pass++) { for (i = 0; i + (sizeof(u16) * 3) < fw->size;) { value = get_unaligned_le16(fw->data + i); i += sizeof(u16); index = get_unaligned_le16(fw->data + i); i += sizeof(u16); size = get_unaligned_le16(fw->data + i); i += sizeof(u16); if (pass == 1) { /* Will stall if using fw->data ... */ memcpy(buff, fw->data + i, size); ret = usb_control_msg(udev, usb_sndctrlpipe(udev,0), M9206_FW, USB_TYPE_VENDOR | USB_DIR_OUT, value, index, buff, size, 20); if (ret != size) { deb("error while uploading fw!\n"); ret = -EIO; goto done; } msleep(3); } i += size; } if (i != fw->size) { deb("bad firmware file!\n"); ret = -EINVAL; goto done; } } msleep(36); /* m920x will disconnect itself from the bus after this. */ (void) m920x_write(udev, M9206_CORE, 0x01, M9206_FW_GO); deb("firmware uploaded!\n"); done: kfree(read); kfree(buff); return ret; } /* Callbacks for DVB USB */ static int m920x_identify_state(struct usb_device *udev, const struct dvb_usb_device_properties *props, const struct dvb_usb_device_description **desc, int *cold) { struct usb_host_interface *alt; alt = usb_altnum_to_altsetting(usb_ifnum_to_if(udev, 0), 1); *cold = (alt == NULL) ? 
1 : 0; return 0; } /* demod configurations */ static int m920x_mt352_demod_init(struct dvb_frontend *fe) { int ret; static const u8 config[] = { CONFIG, 0x3d }; static const u8 clock[] = { CLOCK_CTL, 0x30 }; static const u8 reset[] = { RESET, 0x80 }; static const u8 adc_ctl[] = { ADC_CTL_1, 0x40 }; static const u8 agc[] = { AGC_TARGET, 0x1c, 0x20 }; static const u8 sec_agc[] = { 0x69, 0x00, 0xff, 0xff, 0x40, 0xff, 0x00, 0x40, 0x40 }; static const u8 unk1[] = { 0x93, 0x1a }; static const u8 unk2[] = { 0xb5, 0x7a }; deb("Demod init!\n"); if ((ret = mt352_write(fe, config, ARRAY_SIZE(config))) != 0) return ret; if ((ret = mt352_write(fe, clock, ARRAY_SIZE(clock))) != 0) return ret; if ((ret = mt352_write(fe, reset, ARRAY_SIZE(reset))) != 0) return ret; if ((ret = mt352_write(fe, adc_ctl, ARRAY_SIZE(adc_ctl))) != 0) return ret; if ((ret = mt352_write(fe, agc, ARRAY_SIZE(agc))) != 0) return ret; if ((ret = mt352_write(fe, sec_agc, ARRAY_SIZE(sec_agc))) != 0) return ret; if ((ret = mt352_write(fe, unk1, ARRAY_SIZE(unk1))) != 0) return ret; if ((ret = mt352_write(fe, unk2, ARRAY_SIZE(unk2))) != 0) return ret; return 0; } static struct mt352_config m920x_mt352_config = { .demod_address = 0x0f, .no_tuner = 1, .demod_init = m920x_mt352_demod_init, }; static struct tda1004x_config m920x_tda10046_08_config = { .demod_address = 0x08, .invert = 0, .invert_oclk = 0, .ts_mode = TDA10046_TS_SERIAL, .xtal_freq = TDA10046_XTAL_16M, .if_freq = TDA10046_FREQ_045, .agc_config = TDA10046_AGC_TDA827X, .gpio_config = TDA10046_GPTRI, .request_firmware = NULL, }; static struct tda1004x_config m920x_tda10046_0b_config = { .demod_address = 0x0b, .invert = 0, .invert_oclk = 0, .ts_mode = TDA10046_TS_SERIAL, .xtal_freq = TDA10046_XTAL_16M, .if_freq = TDA10046_FREQ_045, .agc_config = TDA10046_AGC_TDA827X, .gpio_config = TDA10046_GPTRI, .request_firmware = NULL, /* uses firmware EEPROM */ }; /* tuner configurations */ static struct qt1010_config m920x_qt1010_config = { .i2c_address = 0x62 }; static struct mt2060_config m920x_mt2060_config = { .i2c_address = 0x60, /* 0xc0 */ .clock_out = 0, }; /* Callbacks for DVB USB */ static int m920x_mt352_frontend_attach(struct dvb_usb_adapter *adap) { deb("%s\n",__func__); adap->fe_adap[0].fe = dvb_attach(mt352_attach, &m920x_mt352_config, &adap->dev->i2c_adap); if ((adap->fe_adap[0].fe) == NULL) return -EIO; return 0; } static int m920x_mt352_frontend_attach_vp7049(struct dvb_usb_adapter *adap) { struct m920x_inits vp7049_fe_init_seq[] = { /* XXX without these commands the frontend cannot be detected, * they must be sent BEFORE the frontend is attached */ { 0xff28, 0x00 }, { 0xff23, 0x00 }, { 0xff28, 0x00 }, { 0xff23, 0x00 }, { 0xff21, 0x20 }, { 0xff21, 0x60 }, { 0xff28, 0x00 }, { 0xff22, 0x00 }, { 0xff20, 0x30 }, { 0xff20, 0x20 }, { 0xff20, 0x30 }, { } /* terminating entry */ }; int ret; deb("%s\n", __func__); ret = m920x_write_seq(adap->dev->udev, M9206_CORE, vp7049_fe_init_seq); if (ret != 0) { deb("Initialization of vp7049 frontend failed."); return ret; } return m920x_mt352_frontend_attach(adap); } static int m920x_tda10046_08_frontend_attach(struct dvb_usb_adapter *adap) { deb("%s\n",__func__); adap->fe_adap[0].fe = dvb_attach(tda10046_attach, &m920x_tda10046_08_config, &adap->dev->i2c_adap); if ((adap->fe_adap[0].fe) == NULL) return -EIO; return 0; } static int m920x_tda10046_0b_frontend_attach(struct dvb_usb_adapter *adap) { deb("%s\n",__func__); adap->fe_adap[0].fe = dvb_attach(tda10046_attach, &m920x_tda10046_0b_config, &adap->dev->i2c_adap); if ((adap->fe_adap[0].fe) == 
NULL) return -EIO; return 0; } static int m920x_qt1010_tuner_attach(struct dvb_usb_adapter *adap) { deb("%s\n",__func__); if (dvb_attach(qt1010_attach, adap->fe_adap[0].fe, &adap->dev->i2c_adap, &m920x_qt1010_config) == NULL) return -ENODEV; return 0; } static int m920x_tda8275_60_tuner_attach(struct dvb_usb_adapter *adap) { deb("%s\n",__func__); if (dvb_attach(tda827x_attach, adap->fe_adap[0].fe, 0x60, &adap->dev->i2c_adap, NULL) == NULL) return -ENODEV; return 0; } static int m920x_tda8275_61_tuner_attach(struct dvb_usb_adapter *adap) { deb("%s\n",__func__); if (dvb_attach(tda827x_attach, adap->fe_adap[0].fe, 0x61, &adap->dev->i2c_adap, NULL) == NULL) return -ENODEV; return 0; } static int m920x_fmd1216me_tuner_attach(struct dvb_usb_adapter *adap) { dvb_attach(simple_tuner_attach, adap->fe_adap[0].fe, &adap->dev->i2c_adap, 0x61, TUNER_PHILIPS_FMD1216ME_MK3); return 0; } static int m920x_mt2060_tuner_attach(struct dvb_usb_adapter *adap) { deb("%s\n", __func__); if (dvb_attach(mt2060_attach, adap->fe_adap[0].fe, &adap->dev->i2c_adap, &m920x_mt2060_config, 1220) == NULL) return -ENODEV; return 0; } /* device-specific initialization */ static struct m920x_inits megasky_rc_init [] = { { M9206_RC_INIT2, 0xa8 }, { M9206_RC_INIT1, 0x51 }, { } /* terminating entry */ }; static struct m920x_inits tvwalkertwin_rc_init [] = { { M9206_RC_INIT2, 0x00 }, { M9206_RC_INIT1, 0xef }, { 0xff28, 0x00 }, { 0xff23, 0x00 }, { 0xff21, 0x30 }, { } /* terminating entry */ }; static struct m920x_inits pinnacle310e_init[] = { /* without these the tuner doesn't work */ { 0xff20, 0x9b }, { 0xff22, 0x70 }, /* rc settings */ { 0xff50, 0x80 }, { M9206_RC_INIT1, 0x00 }, { M9206_RC_INIT2, 0xff }, { } /* terminating entry */ }; static struct m920x_inits vp7049_rc_init[] = { { 0xff28, 0x00 }, { 0xff23, 0x00 }, { 0xff21, 0x70 }, { M9206_RC_INIT2, 0x00 }, { M9206_RC_INIT1, 0xff }, { } /* terminating entry */ }; /* ir keymaps */ static struct rc_map_table rc_map_megasky_table[] = { { 0x0012, KEY_POWER }, { 0x001e, KEY_CYCLEWINDOWS }, /* min/max */ { 0x0002, KEY_CHANNELUP }, { 0x0005, KEY_CHANNELDOWN }, { 0x0003, KEY_VOLUMEUP }, { 0x0006, KEY_VOLUMEDOWN }, { 0x0004, KEY_MUTE }, { 0x0007, KEY_OK }, /* TS */ { 0x0008, KEY_STOP }, { 0x0009, KEY_MENU }, /* swap */ { 0x000a, KEY_REWIND }, { 0x001b, KEY_PAUSE }, { 0x001f, KEY_FASTFORWARD }, { 0x000c, KEY_RECORD }, { 0x000d, KEY_CAMERA }, /* screenshot */ { 0x000e, KEY_COFFEE }, /* "MTS" */ }; static struct rc_map_table rc_map_tvwalkertwin_table[] = { { 0x0001, KEY_ZOOM }, /* Full Screen */ { 0x0002, KEY_CAMERA }, /* snapshot */ { 0x0003, KEY_MUTE }, { 0x0004, KEY_REWIND }, { 0x0005, KEY_PLAYPAUSE }, /* Play/Pause */ { 0x0006, KEY_FASTFORWARD }, { 0x0007, KEY_RECORD }, { 0x0008, KEY_STOP }, { 0x0009, KEY_TIME }, /* Timeshift */ { 0x000c, KEY_COFFEE }, /* Recall */ { 0x000e, KEY_CHANNELUP }, { 0x0012, KEY_POWER }, { 0x0015, KEY_MENU }, /* source */ { 0x0018, KEY_CYCLEWINDOWS }, /* TWIN PIP */ { 0x001a, KEY_CHANNELDOWN }, { 0x001b, KEY_VOLUMEDOWN }, { 0x001e, KEY_VOLUMEUP }, }; static struct rc_map_table rc_map_pinnacle310e_table[] = { { 0x16, KEY_POWER }, { 0x17, KEY_FAVORITES }, { 0x0f, KEY_TEXT }, { 0x48, KEY_PROGRAM }, /* preview */ { 0x1c, KEY_EPG }, { 0x04, KEY_LIST }, /* record list */ { 0x03, KEY_1 }, { 0x01, KEY_2 }, { 0x06, KEY_3 }, { 0x09, KEY_4 }, { 0x1d, KEY_5 }, { 0x1f, KEY_6 }, { 0x0d, KEY_7 }, { 0x19, KEY_8 }, { 0x1b, KEY_9 }, { 0x15, KEY_0 }, { 0x0c, KEY_CANCEL }, { 0x4a, KEY_CLEAR }, { 0x13, KEY_BACK }, { 0x00, KEY_TAB }, { 0x4b, KEY_UP }, { 0x4e, KEY_LEFT }, { 0x52, 
KEY_RIGHT }, { 0x51, KEY_DOWN }, { 0x4f, KEY_ENTER }, /* could also be KEY_OK */ { 0x1e, KEY_VOLUMEUP }, { 0x0a, KEY_VOLUMEDOWN }, { 0x05, KEY_CHANNELUP }, { 0x02, KEY_CHANNELDOWN }, { 0x11, KEY_RECORD }, { 0x14, KEY_PLAY }, { 0x4c, KEY_PAUSE }, { 0x1a, KEY_STOP }, { 0x40, KEY_REWIND }, { 0x12, KEY_FASTFORWARD }, { 0x41, KEY_PREVIOUSSONG }, /* Replay */ { 0x42, KEY_NEXTSONG }, /* Skip */ { 0x54, KEY_CAMERA }, /* Capture */ /* { 0x50, KEY_SAP }, */ /* Sap */ { 0x47, KEY_CYCLEWINDOWS }, /* Pip */ { 0x4d, KEY_SCREEN }, /* FullScreen */ { 0x08, KEY_SUBTITLE }, { 0x0e, KEY_MUTE }, /* { 0x49, KEY_LR }, */ /* L/R */ { 0x07, KEY_SLEEP }, /* Hibernate */ { 0x08, KEY_VIDEO }, /* A/V */ { 0x0e, KEY_MENU }, /* Recall */ { 0x45, KEY_ZOOMIN }, { 0x46, KEY_ZOOMOUT }, { 0x18, KEY_RED }, /* Red */ { 0x53, KEY_GREEN }, /* Green */ { 0x5e, KEY_YELLOW }, /* Yellow */ { 0x5f, KEY_BLUE }, /* Blue */ }; /* DVB USB Driver stuff */ static struct dvb_usb_device_properties megasky_properties; static struct dvb_usb_device_properties digivox_mini_ii_properties; static struct dvb_usb_device_properties tvwalkertwin_properties; static struct dvb_usb_device_properties dposh_properties; static struct dvb_usb_device_properties pinnacle_pctv310e_properties; static struct dvb_usb_device_properties vp7049_properties; static int m920x_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct dvb_usb_device *d = NULL; int ret; struct m920x_inits *rc_init_seq = NULL; int bInterfaceNumber = intf->cur_altsetting->desc.bInterfaceNumber; deb("Probing for m920x device at interface %d\n", bInterfaceNumber); if (bInterfaceNumber == 0) { /* Single-tuner device, or first interface on * multi-tuner device */ ret = dvb_usb_device_init(intf, &megasky_properties, THIS_MODULE, &d, adapter_nr); if (ret == 0) { rc_init_seq = megasky_rc_init; goto found; } ret = dvb_usb_device_init(intf, &digivox_mini_ii_properties, THIS_MODULE, &d, adapter_nr); if (ret == 0) { /* No remote control, so no rc_init_seq */ goto found; } /* This configures both tuners on the TV Walker Twin */ ret = dvb_usb_device_init(intf, &tvwalkertwin_properties, THIS_MODULE, &d, adapter_nr); if (ret == 0) { rc_init_seq = tvwalkertwin_rc_init; goto found; } ret = dvb_usb_device_init(intf, &dposh_properties, THIS_MODULE, &d, adapter_nr); if (ret == 0) { /* Remote controller not supported yet. 
*/ goto found; } ret = dvb_usb_device_init(intf, &pinnacle_pctv310e_properties, THIS_MODULE, &d, adapter_nr); if (ret == 0) { rc_init_seq = pinnacle310e_init; goto found; } ret = dvb_usb_device_init(intf, &vp7049_properties, THIS_MODULE, &d, adapter_nr); if (ret == 0) { rc_init_seq = vp7049_rc_init; goto found; } return ret; } else { /* Another interface on a multi-tuner device */ /* The LifeView TV Walker Twin gets here, but struct * tvwalkertwin_properties already configured both * tuners, so there is nothing for us to do here */ } found: if ((ret = m920x_init_ep(intf)) < 0) return ret; if (d && (ret = m920x_init(d, rc_init_seq)) != 0) return ret; return ret; } enum { MSI_MEGASKY580, ANUBIS_MSI_DIGI_VOX_MINI_II, ANUBIS_LIFEVIEW_TV_WALKER_TWIN_COLD, ANUBIS_LIFEVIEW_TV_WALKER_TWIN_WARM, DPOSH_M9206_COLD, DPOSH_M9206_WARM, VISIONPLUS_PINNACLE_PCTV310E, AZUREWAVE_TWINHAN_VP7049, }; static struct usb_device_id m920x_table[] = { DVB_USB_DEV(MSI, MSI_MEGASKY580), DVB_USB_DEV(ANUBIS_ELECTRONIC, ANUBIS_MSI_DIGI_VOX_MINI_II), DVB_USB_DEV(ANUBIS_ELECTRONIC, ANUBIS_LIFEVIEW_TV_WALKER_TWIN_COLD), DVB_USB_DEV(ANUBIS_ELECTRONIC, ANUBIS_LIFEVIEW_TV_WALKER_TWIN_WARM), DVB_USB_DEV(DPOSH, DPOSH_M9206_COLD), DVB_USB_DEV(DPOSH, DPOSH_M9206_WARM), DVB_USB_DEV(VISIONPLUS, VISIONPLUS_PINNACLE_PCTV310E), DVB_USB_DEV(AZUREWAVE, AZUREWAVE_TWINHAN_VP7049), { } }; MODULE_DEVICE_TABLE (usb, m920x_table); static struct dvb_usb_device_properties megasky_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = DEVICE_SPECIFIC, .firmware = "dvb-usb-megasky-02.fw", .download_firmware = m920x_firmware_download, .rc.legacy = { .rc_interval = 100, .rc_map_table = rc_map_megasky_table, .rc_map_size = ARRAY_SIZE(rc_map_megasky_table), .rc_query = m920x_rc_query, }, .size_of_priv = sizeof(struct m920x_state), .identify_state = m920x_identify_state, .num_adapters = 1, .adapter = {{ .num_frontends = 1, .fe = {{ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 8, .pid_filter = m920x_pid_filter, .pid_filter_ctrl = m920x_pid_filter_ctrl, .frontend_attach = m920x_mt352_frontend_attach, .tuner_attach = m920x_qt1010_tuner_attach, .stream = { .type = USB_BULK, .count = 8, .endpoint = 0x81, .u = { .bulk = { .buffersize = 512, } } }, }}, }}, .i2c_algo = &m920x_i2c_algo, .num_device_descs = 1, .devices = { { "MSI Mega Sky 580 DVB-T USB2.0", { &m920x_table[MSI_MEGASKY580], NULL }, { NULL }, } } }; static struct dvb_usb_device_properties digivox_mini_ii_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = DEVICE_SPECIFIC, .firmware = "dvb-usb-digivox-02.fw", .download_firmware = m920x_firmware_download, .size_of_priv = sizeof(struct m920x_state), .identify_state = m920x_identify_state, .num_adapters = 1, .adapter = {{ .num_frontends = 1, .fe = {{ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 8, .pid_filter = m920x_pid_filter, .pid_filter_ctrl = m920x_pid_filter_ctrl, .frontend_attach = m920x_tda10046_08_frontend_attach, .tuner_attach = m920x_tda8275_60_tuner_attach, .stream = { .type = USB_BULK, .count = 8, .endpoint = 0x81, .u = { .bulk = { .buffersize = 0x4000, } } }, }}, }}, .i2c_algo = &m920x_i2c_algo, .num_device_descs = 1, .devices = { { "MSI DIGI VOX mini II DVB-T USB2.0", { &m920x_table[ANUBIS_MSI_DIGI_VOX_MINI_II], NULL }, { NULL }, }, } }; /* LifeView TV Walker Twin support by Nick Andrew <nick@nick-andrew.net> * * LifeView TV Walker Twin has 1 x M9206, 2 x TDA10046, 2 x TDA8275A * TDA10046 #0 is located at i2c 
address 0x08 * TDA10046 #1 is located at i2c address 0x0b * TDA8275A #0 is located at i2c address 0x60 * TDA8275A #1 is located at i2c address 0x61 */ static struct dvb_usb_device_properties tvwalkertwin_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = DEVICE_SPECIFIC, .firmware = "dvb-usb-tvwalkert.fw", .download_firmware = m920x_firmware_download, .rc.legacy = { .rc_interval = 100, .rc_map_table = rc_map_tvwalkertwin_table, .rc_map_size = ARRAY_SIZE(rc_map_tvwalkertwin_table), .rc_query = m920x_rc_query, }, .size_of_priv = sizeof(struct m920x_state), .identify_state = m920x_identify_state, .num_adapters = 2, .adapter = {{ .num_frontends = 1, .fe = {{ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 8, .pid_filter = m920x_pid_filter, .pid_filter_ctrl = m920x_pid_filter_ctrl, .frontend_attach = m920x_tda10046_08_frontend_attach, .tuner_attach = m920x_tda8275_60_tuner_attach, .stream = { .type = USB_BULK, .count = 8, .endpoint = 0x81, .u = { .bulk = { .buffersize = 512, } } }}, }},{ .num_frontends = 1, .fe = {{ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 8, .pid_filter = m920x_pid_filter, .pid_filter_ctrl = m920x_pid_filter_ctrl, .frontend_attach = m920x_tda10046_0b_frontend_attach, .tuner_attach = m920x_tda8275_61_tuner_attach, .stream = { .type = USB_BULK, .count = 8, .endpoint = 0x82, .u = { .bulk = { .buffersize = 512, } } }}, }, }}, .i2c_algo = &m920x_i2c_algo, .num_device_descs = 1, .devices = { { .name = "LifeView TV Walker Twin DVB-T USB2.0", .cold_ids = { &m920x_table[ANUBIS_LIFEVIEW_TV_WALKER_TWIN_COLD], NULL }, .warm_ids = { &m920x_table[ANUBIS_LIFEVIEW_TV_WALKER_TWIN_WARM], NULL }, }, } }; static struct dvb_usb_device_properties dposh_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = DEVICE_SPECIFIC, .firmware = "dvb-usb-dposh-01.fw", .download_firmware = m920x_firmware_download, .size_of_priv = sizeof(struct m920x_state), .identify_state = m920x_identify_state, .num_adapters = 1, .adapter = {{ .num_frontends = 1, .fe = {{ /* Hardware pid filters don't work with this device/firmware */ .frontend_attach = m920x_mt352_frontend_attach, .tuner_attach = m920x_qt1010_tuner_attach, .stream = { .type = USB_BULK, .count = 8, .endpoint = 0x81, .u = { .bulk = { .buffersize = 512, } } }, }}, }}, .i2c_algo = &m920x_i2c_algo, .num_device_descs = 1, .devices = { { .name = "Dposh DVB-T USB2.0", .cold_ids = { &m920x_table[DPOSH_M9206_COLD], NULL }, .warm_ids = { &m920x_table[DPOSH_M9206_WARM], NULL }, }, } }; static struct dvb_usb_device_properties pinnacle_pctv310e_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = DEVICE_SPECIFIC, .download_firmware = NULL, .rc.legacy = { .rc_interval = 100, .rc_map_table = rc_map_pinnacle310e_table, .rc_map_size = ARRAY_SIZE(rc_map_pinnacle310e_table), .rc_query = m920x_rc_query, }, .size_of_priv = sizeof(struct m920x_state), .identify_state = m920x_identify_state, .num_adapters = 1, .adapter = {{ .num_frontends = 1, .fe = {{ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 8, .pid_filter = m920x_pid_filter, .pid_filter_ctrl = m920x_pid_filter_ctrl, .frontend_attach = m920x_mt352_frontend_attach, .tuner_attach = m920x_fmd1216me_tuner_attach, .stream = { .type = USB_ISOC, .count = 5, .endpoint = 0x84, .u = { .isoc = { .framesperurb = 128, .framesize = 564, .interval = 1, } } }, }}, } }, .i2c_algo = &m920x_i2c_algo, .num_device_descs = 1, .devices = { { "Pinnacle PCTV 
310e", { &m920x_table[VISIONPLUS_PINNACLE_PCTV310E], NULL }, { NULL }, } } }; static struct dvb_usb_device_properties vp7049_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = DEVICE_SPECIFIC, .firmware = "dvb-usb-vp7049-0.95.fw", .download_firmware = m920x_firmware_download, .rc.core = { .rc_interval = 150, .rc_codes = RC_MAP_TWINHAN_VP1027_DVBS, .rc_query = m920x_rc_core_query, .allowed_protos = RC_PROTO_BIT_UNKNOWN, }, .size_of_priv = sizeof(struct m920x_state), .identify_state = m920x_identify_state, .num_adapters = 1, .adapter = {{ .num_frontends = 1, .fe = {{ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 8, .pid_filter = m920x_pid_filter, .pid_filter_ctrl = m920x_pid_filter_ctrl, .frontend_attach = m920x_mt352_frontend_attach_vp7049, .tuner_attach = m920x_mt2060_tuner_attach, .stream = { .type = USB_BULK, .count = 8, .endpoint = 0x81, .u = { .bulk = { .buffersize = 512, } } }, } }, } }, .i2c_algo = &m920x_i2c_algo, .num_device_descs = 1, .devices = { { "DTV-DVB UDTT7049", { &m920x_table[AZUREWAVE_TWINHAN_VP7049], NULL }, { NULL }, } } }; static struct usb_driver m920x_driver = { .name = "dvb_usb_m920x", .probe = m920x_probe, .disconnect = dvb_usb_device_exit, .id_table = m920x_table, }; module_usb_driver(m920x_driver); MODULE_AUTHOR("Aapo Tahkola <aet@rasterburn.org>"); MODULE_DESCRIPTION("DVB Driver for ULI M920x"); MODULE_VERSION("0.1"); MODULE_LICENSE("GPL"); |
// SPDX-License-Identifier: GPL-2.0-only /* Xtables module to match packets using a BPF filter. * Copyright 2013 Google Inc. * Written by Willem de Bruijn <willemb@google.com> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/syscalls.h> #include <linux/skbuff.h> #include <linux/filter.h> #include <linux/bpf.h> #include <linux/netfilter/xt_bpf.h> #include <linux/netfilter/x_tables.h> MODULE_AUTHOR("Willem de Bruijn <willemb@google.com>"); MODULE_DESCRIPTION("Xtables: BPF filter match"); MODULE_LICENSE("GPL"); MODULE_ALIAS("ipt_bpf"); MODULE_ALIAS("ip6t_bpf"); static int __bpf_mt_check_bytecode(struct sock_filter *insns, __u16 len, struct bpf_prog **ret) { struct sock_fprog_kern program; if (len > XT_BPF_MAX_NUM_INSTR) return -EINVAL; program.len = len; program.filter = insns; if (bpf_prog_create(ret, &program)) { pr_info_ratelimited("check failed: parse error\n"); return -EINVAL; } return 0; } static int __bpf_mt_check_fd(int fd, struct bpf_prog **ret) { struct bpf_prog *prog; prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER); if (IS_ERR(prog)) return PTR_ERR(prog); *ret = prog; return 0; } static int __bpf_mt_check_path(const char *path, struct bpf_prog **ret) { if (strnlen(path, XT_BPF_PATH_MAX) == XT_BPF_PATH_MAX) return -EINVAL; *ret = bpf_prog_get_type_path(path, BPF_PROG_TYPE_SOCKET_FILTER); return PTR_ERR_OR_ZERO(*ret); } static int bpf_mt_check(const struct xt_mtchk_param *par) { struct xt_bpf_info *info = par->matchinfo; return __bpf_mt_check_bytecode(info->bpf_program, info->bpf_program_num_elem, &info->filter); } static int bpf_mt_check_v1(const struct xt_mtchk_param *par) { struct xt_bpf_info_v1 *info = par->matchinfo; if (info->mode == XT_BPF_MODE_BYTECODE) return __bpf_mt_check_bytecode(info->bpf_program, info->bpf_program_num_elem, &info->filter); else if (info->mode == XT_BPF_MODE_FD_ELF) return __bpf_mt_check_fd(info->fd, &info->filter); else if (info->mode == XT_BPF_MODE_PATH_PINNED) return __bpf_mt_check_path(info->path, &info->filter); else return -EINVAL; } static bool bpf_mt(const struct sk_buff *skb, struct xt_action_param *par) { const struct xt_bpf_info *info = par->matchinfo; return bpf_prog_run(info->filter, skb); } static bool bpf_mt_v1(const struct sk_buff *skb, struct xt_action_param *par) { const struct xt_bpf_info_v1 *info = par->matchinfo; return !!bpf_prog_run_save_cb(info->filter, (struct sk_buff *) skb); } static void bpf_mt_destroy(const struct xt_mtdtor_param *par) { const struct xt_bpf_info *info = par->matchinfo; bpf_prog_destroy(info->filter); } static void bpf_mt_destroy_v1(const struct xt_mtdtor_param *par) { const struct xt_bpf_info_v1 *info = par->matchinfo; bpf_prog_destroy(info->filter); } static struct xt_match bpf_mt_reg[] __read_mostly = { { .name = "bpf", .revision = 0, .family = NFPROTO_UNSPEC, .checkentry = bpf_mt_check, .match = bpf_mt, .destroy = bpf_mt_destroy, .matchsize = sizeof(struct xt_bpf_info), .usersize = offsetof(struct xt_bpf_info,
filter), .me = THIS_MODULE, }, { .name = "bpf", .revision = 1, .family = NFPROTO_UNSPEC, .checkentry = bpf_mt_check_v1, .match = bpf_mt_v1, .destroy = bpf_mt_destroy_v1, .matchsize = sizeof(struct xt_bpf_info_v1), .usersize = offsetof(struct xt_bpf_info_v1, filter), .me = THIS_MODULE, }, }; static int __init bpf_mt_init(void) { return xt_register_matches(bpf_mt_reg, ARRAY_SIZE(bpf_mt_reg)); } static void __exit bpf_mt_exit(void) { xt_unregister_matches(bpf_mt_reg, ARRAY_SIZE(bpf_mt_reg)); } module_init(bpf_mt_init); module_exit(bpf_mt_exit);
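The revision-0 match (and revision 1 in XT_BPF_MODE_BYTECODE mode) consumes classic BPF bytecode: an array of struct sock_filter, at most XT_BPF_MAX_NUM_INSTR entries, carried in xt_bpf_info.bpf_program and compiled with bpf_prog_create(). As a rough userspace illustration, assuming the filter sees the packet starting at the network header (as with RAW-mode nfbpf_compile output), the sketch below builds bytecode that accepts IPv4 packets whose protocol field is UDP; the array name and the program itself are examples, not something shipped with this module.

#include <stdio.h>
#include <linux/filter.h>       /* struct sock_filter, BPF_STMT(), BPF_JUMP() */
#include <netinet/in.h>         /* IPPROTO_UDP */

/*
 * Accept (return non-zero) when the byte at offset 9 -- the IPv4 protocol
 * field when the program starts at the network header -- equals IPPROTO_UDP.
 */
static const struct sock_filter match_ipv4_udp[] = {
    BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 9),                      /* A = ip->protocol */
    BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, IPPROTO_UDP, 0, 1),     /* UDP? fall through : skip 1 */
    BPF_STMT(BPF_RET | BPF_K, 0xffffffff),                      /* match */
    BPF_STMT(BPF_RET | BPF_K, 0),                               /* no match */
};

int main(void)
{
    printf("%zu classic BPF instructions\n",
           sizeof(match_ipv4_udp) / sizeof(match_ipv4_udp[0]));
    return 0;
}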
// SPDX-License-Identifier: GPL-2.0-only /* * NFC hardware simulation driver * Copyright (c) 2013, Intel Corporation. */ #include <linux/device.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/ctype.h> #include <linux/debugfs.h> #include <linux/nfc.h> #include <net/nfc/nfc.h> #include <net/nfc/digital.h> #define NFCSIM_ERR(d, fmt, args...) nfc_err(&d->nfc_digital_dev->nfc_dev->dev, \ "%s: " fmt, __func__, ## args) #define NFCSIM_DBG(d, fmt, args...)
dev_dbg(&d->nfc_digital_dev->nfc_dev->dev, \ "%s: " fmt, __func__, ## args) #define NFCSIM_VERSION "0.2" #define NFCSIM_MODE_NONE 0 #define NFCSIM_MODE_INITIATOR 1 #define NFCSIM_MODE_TARGET 2 #define NFCSIM_CAPABILITIES (NFC_DIGITAL_DRV_CAPS_IN_CRC | \ NFC_DIGITAL_DRV_CAPS_TG_CRC) struct nfcsim { struct nfc_digital_dev *nfc_digital_dev; struct work_struct recv_work; struct delayed_work send_work; struct nfcsim_link *link_in; struct nfcsim_link *link_out; bool up; u8 mode; u8 rf_tech; u16 recv_timeout; nfc_digital_cmd_complete_t cb; void *arg; u8 dropframe; }; struct nfcsim_link { struct mutex lock; u8 rf_tech; u8 mode; u8 shutdown; struct sk_buff *skb; wait_queue_head_t recv_wait; u8 cond; }; static struct nfcsim_link *nfcsim_link_new(void) { struct nfcsim_link *link; link = kzalloc(sizeof(struct nfcsim_link), GFP_KERNEL); if (!link) return NULL; mutex_init(&link->lock); init_waitqueue_head(&link->recv_wait); return link; } static void nfcsim_link_free(struct nfcsim_link *link) { dev_kfree_skb(link->skb); kfree(link); } static void nfcsim_link_recv_wake(struct nfcsim_link *link) { link->cond = 1; wake_up_interruptible(&link->recv_wait); } static void nfcsim_link_set_skb(struct nfcsim_link *link, struct sk_buff *skb, u8 rf_tech, u8 mode) { mutex_lock(&link->lock); dev_kfree_skb(link->skb); link->skb = skb; link->rf_tech = rf_tech; link->mode = mode; mutex_unlock(&link->lock); } static void nfcsim_link_recv_cancel(struct nfcsim_link *link) { mutex_lock(&link->lock); link->mode = NFCSIM_MODE_NONE; mutex_unlock(&link->lock); nfcsim_link_recv_wake(link); } static void nfcsim_link_shutdown(struct nfcsim_link *link) { mutex_lock(&link->lock); link->shutdown = 1; link->mode = NFCSIM_MODE_NONE; mutex_unlock(&link->lock); nfcsim_link_recv_wake(link); } static struct sk_buff *nfcsim_link_recv_skb(struct nfcsim_link *link, int timeout, u8 rf_tech, u8 mode) { int rc; struct sk_buff *skb; rc = wait_event_interruptible_timeout(link->recv_wait, link->cond, msecs_to_jiffies(timeout)); mutex_lock(&link->lock); skb = link->skb; link->skb = NULL; if (!rc) { rc = -ETIMEDOUT; goto done; } if (!skb || link->rf_tech != rf_tech || link->mode == mode) { rc = -EINVAL; goto done; } if (link->shutdown) { rc = -ENODEV; goto done; } done: mutex_unlock(&link->lock); if (rc < 0) { dev_kfree_skb(skb); skb = ERR_PTR(rc); } link->cond = 0; return skb; } static void nfcsim_send_wq(struct work_struct *work) { struct nfcsim *dev = container_of(work, struct nfcsim, send_work.work); /* * To effectively send data, the device just wake up its link_out which * is the link_in of the peer device. The exchanged skb has already been * stored in the dev->link_out through nfcsim_link_set_skb(). 
*/ nfcsim_link_recv_wake(dev->link_out); } static void nfcsim_recv_wq(struct work_struct *work) { struct nfcsim *dev = container_of(work, struct nfcsim, recv_work); struct sk_buff *skb; skb = nfcsim_link_recv_skb(dev->link_in, dev->recv_timeout, dev->rf_tech, dev->mode); if (!dev->up) { NFCSIM_ERR(dev, "Device is down\n"); if (!IS_ERR(skb)) dev_kfree_skb(skb); return; } dev->cb(dev->nfc_digital_dev, dev->arg, skb); } static int nfcsim_send(struct nfc_digital_dev *ddev, struct sk_buff *skb, u16 timeout, nfc_digital_cmd_complete_t cb, void *arg) { struct nfcsim *dev = nfc_digital_get_drvdata(ddev); u8 delay; if (!dev->up) { NFCSIM_ERR(dev, "Device is down\n"); return -ENODEV; } dev->recv_timeout = timeout; dev->cb = cb; dev->arg = arg; schedule_work(&dev->recv_work); if (dev->dropframe) { NFCSIM_DBG(dev, "dropping frame (out of %d)\n", dev->dropframe); dev_kfree_skb(skb); dev->dropframe--; return 0; } if (skb) { nfcsim_link_set_skb(dev->link_out, skb, dev->rf_tech, dev->mode); /* Add random delay (between 3 and 10 ms) before sending data */ get_random_bytes(&delay, 1); delay = 3 + (delay & 0x07); schedule_delayed_work(&dev->send_work, msecs_to_jiffies(delay)); } return 0; } static void nfcsim_abort_cmd(struct nfc_digital_dev *ddev) { const struct nfcsim *dev = nfc_digital_get_drvdata(ddev); nfcsim_link_recv_cancel(dev->link_in); } static int nfcsim_switch_rf(struct nfc_digital_dev *ddev, bool on) { struct nfcsim *dev = nfc_digital_get_drvdata(ddev); dev->up = on; return 0; } static int nfcsim_in_configure_hw(struct nfc_digital_dev *ddev, int type, int param) { struct nfcsim *dev = nfc_digital_get_drvdata(ddev); switch (type) { case NFC_DIGITAL_CONFIG_RF_TECH: dev->up = true; dev->mode = NFCSIM_MODE_INITIATOR; dev->rf_tech = param; break; case NFC_DIGITAL_CONFIG_FRAMING: break; default: NFCSIM_ERR(dev, "Invalid configuration type: %d\n", type); return -EINVAL; } return 0; } static int nfcsim_in_send_cmd(struct nfc_digital_dev *ddev, struct sk_buff *skb, u16 timeout, nfc_digital_cmd_complete_t cb, void *arg) { return nfcsim_send(ddev, skb, timeout, cb, arg); } static int nfcsim_tg_configure_hw(struct nfc_digital_dev *ddev, int type, int param) { struct nfcsim *dev = nfc_digital_get_drvdata(ddev); switch (type) { case NFC_DIGITAL_CONFIG_RF_TECH: dev->up = true; dev->mode = NFCSIM_MODE_TARGET; dev->rf_tech = param; break; case NFC_DIGITAL_CONFIG_FRAMING: break; default: NFCSIM_ERR(dev, "Invalid configuration type: %d\n", type); return -EINVAL; } return 0; } static int nfcsim_tg_send_cmd(struct nfc_digital_dev *ddev, struct sk_buff *skb, u16 timeout, nfc_digital_cmd_complete_t cb, void *arg) { return nfcsim_send(ddev, skb, timeout, cb, arg); } static int nfcsim_tg_listen(struct nfc_digital_dev *ddev, u16 timeout, nfc_digital_cmd_complete_t cb, void *arg) { return nfcsim_send(ddev, NULL, timeout, cb, arg); } static const struct nfc_digital_ops nfcsim_digital_ops = { .in_configure_hw = nfcsim_in_configure_hw, .in_send_cmd = nfcsim_in_send_cmd, .tg_listen = nfcsim_tg_listen, .tg_configure_hw = nfcsim_tg_configure_hw, .tg_send_cmd = nfcsim_tg_send_cmd, .abort_cmd = nfcsim_abort_cmd, .switch_rf = nfcsim_switch_rf, }; static struct dentry *nfcsim_debugfs_root; static void nfcsim_debugfs_init(void) { nfcsim_debugfs_root = debugfs_create_dir("nfcsim", NULL); } static void nfcsim_debugfs_remove(void) { debugfs_remove_recursive(nfcsim_debugfs_root); } static void nfcsim_debugfs_init_dev(struct nfcsim *dev) { struct dentry *dev_dir; char devname[5]; /* nfcX\0 */ u32 idx; int n; if (!nfcsim_debugfs_root) { 
NFCSIM_ERR(dev, "nfcsim debugfs not initialized\n"); return; } idx = dev->nfc_digital_dev->nfc_dev->idx; n = snprintf(devname, sizeof(devname), "nfc%d", idx); if (n >= sizeof(devname)) { NFCSIM_ERR(dev, "Could not compute dev name for dev %d\n", idx); return; } dev_dir = debugfs_create_dir(devname, nfcsim_debugfs_root); debugfs_create_u8("dropframe", 0664, dev_dir, &dev->dropframe); } static struct nfcsim *nfcsim_device_new(struct nfcsim_link *link_in, struct nfcsim_link *link_out) { struct nfcsim *dev; int rc; dev = kzalloc(sizeof(struct nfcsim), GFP_KERNEL); if (!dev) return ERR_PTR(-ENOMEM); INIT_DELAYED_WORK(&dev->send_work, nfcsim_send_wq); INIT_WORK(&dev->recv_work, nfcsim_recv_wq); dev->nfc_digital_dev = nfc_digital_allocate_device(&nfcsim_digital_ops, NFC_PROTO_NFC_DEP_MASK, NFCSIM_CAPABILITIES, 0, 0); if (!dev->nfc_digital_dev) { kfree(dev); return ERR_PTR(-ENOMEM); } nfc_digital_set_drvdata(dev->nfc_digital_dev, dev); dev->link_in = link_in; dev->link_out = link_out; rc = nfc_digital_register_device(dev->nfc_digital_dev); if (rc) { pr_err("Could not register digital device (%d)\n", rc); nfc_digital_free_device(dev->nfc_digital_dev); kfree(dev); return ERR_PTR(rc); } nfcsim_debugfs_init_dev(dev); return dev; } static void nfcsim_device_free(struct nfcsim *dev) { nfc_digital_unregister_device(dev->nfc_digital_dev); dev->up = false; nfcsim_link_shutdown(dev->link_in); cancel_delayed_work_sync(&dev->send_work); cancel_work_sync(&dev->recv_work); nfc_digital_free_device(dev->nfc_digital_dev); kfree(dev); } static struct nfcsim *dev0; static struct nfcsim *dev1; static int __init nfcsim_init(void) { struct nfcsim_link *link0, *link1; int rc; link0 = nfcsim_link_new(); link1 = nfcsim_link_new(); if (!link0 || !link1) { rc = -ENOMEM; goto exit_err; } nfcsim_debugfs_init(); dev0 = nfcsim_device_new(link0, link1); if (IS_ERR(dev0)) { rc = PTR_ERR(dev0); goto exit_err; } dev1 = nfcsim_device_new(link1, link0); if (IS_ERR(dev1)) { nfcsim_device_free(dev0); rc = PTR_ERR(dev1); goto exit_err; } pr_info("nfcsim " NFCSIM_VERSION " initialized\n"); return 0; exit_err: pr_err("Failed to initialize nfcsim driver (%d)\n", rc); if (link0) nfcsim_link_free(link0); if (link1) nfcsim_link_free(link1); return rc; } static void __exit nfcsim_exit(void) { struct nfcsim_link *link0, *link1; link0 = dev0->link_in; link1 = dev0->link_out; nfcsim_device_free(dev0); nfcsim_device_free(dev1); nfcsim_link_free(link0); nfcsim_link_free(link1); nfcsim_debugfs_remove(); } module_init(nfcsim_init); module_exit(nfcsim_exit); MODULE_DESCRIPTION("NFCSim driver ver " NFCSIM_VERSION); MODULE_VERSION(NFCSIM_VERSION); MODULE_LICENSE("GPL"); |
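The simulator above works by cross-linking two digital devices: nfcsim_init() gives dev0 the pair (link0 in, link1 out) and dev1 the reverse, so an skb stored in the sender's link_out via nfcsim_link_set_skb() is exactly what the peer's nfcsim_recv_wq() picks up on its link_in. The minimal sketch below shows only that wiring in plain C; struct sim_link, sim_send(), sim_recv() and the "ATR_REQ" string are illustrative stand-ins, not driver code.

#include <stdio.h>

struct sim_link {               /* stands in for struct nfcsim_link */
    const char *pending;        /* stands in for link->skb */
};

struct sim_dev {                /* stands in for struct nfcsim */
    struct sim_link *link_in;
    struct sim_link *link_out;
};

static void sim_send(struct sim_dev *dev, const char *msg)
{
    dev->link_out->pending = msg;   /* the peer sees this on its link_in */
}

static const char *sim_recv(struct sim_dev *dev)
{
    const char *msg = dev->link_in->pending;

    dev->link_in->pending = NULL;
    return msg;
}

int main(void)
{
    struct sim_link link0 = { 0 }, link1 = { 0 };
    /* mirror nfcsim_init(): dev0 uses (link0 in, link1 out), dev1 the reverse */
    struct sim_dev dev0 = { .link_in = &link0, .link_out = &link1 };
    struct sim_dev dev1 = { .link_in = &link1, .link_out = &link0 };

    sim_send(&dev0, "ATR_REQ");
    printf("dev1 received: %s\n", sim_recv(&dev1));
    return 0;
}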
331 330 325 330 328 330 330 330 184 185 3 331 330 330 3 285 285 278 284 281 280 281 283 69 69 34 285 285 283 34 5 5 5 4 4 4 5 5 3 6 6 6 2 2 2 6 6 5 11 6 5 11 5 5 6 1 11 8 8 8 8 8 8 8 104 101 101 101 101 104 325 2 324 324 6 6 6 321 6 323 318 248 322 435 363 361 363 360 361 414 420 409 403 394 20 17 16 16 395 421 6 418 418 418 417 18 440 439 440 440 436 438 418 419 436 18 436 464 462 3 15 15 15 15 9 14 277 1 278 141 4 275 2 2 2 276 3 275 251 178 276 17 17 17 15 15 15 15 14 14 1 466 463 466 441 442 4 4 4 4 57 43 57 32 32 14 13 14 32 32 18 18 36 36 9 1 9 39 14 14 13 14 14 14 14 30 31 25 6 14 14 14 14 14 14 14 14 14 14 1 1 14 14 14 14 14 3 3 3 3 3 3 11 11 11 11 10 11 11 10 10 10 10 11 11 8 8 5 11 5 10 6 6 6 6 6 6 6 5 5 5 5 5 5 5 80 80 80 80 80 80 78 80 80 80 80 78 80 80 80 80 78 80 389 388 387 387 388 366 367 366 80 80 80 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 
// SPDX-License-Identifier: GPL-2.0-only
/*
 * x_tables core - Backend for {ip,ip6,arp}_tables
 *
 * Copyright (C) 2006-2006 Harald Welte <laforge@netfilter.org>
 * Copyright (C) 2006-2012 Patrick McHardy <kaber@trash.net>
 *
 * Based on existing ip_tables code which is
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J.
Neuling * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/module.h> #include <linux/socket.h> #include <linux/net.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/string.h> #include <linux/vmalloc.h> #include <linux/mutex.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/audit.h> #include <linux/user_namespace.h> #include <net/net_namespace.h> #include <net/netns/generic.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter_arp.h> #include <linux/netfilter_ipv4/ip_tables.h> #include <linux/netfilter_ipv6/ip6_tables.h> #include <linux/netfilter_arp/arp_tables.h> MODULE_LICENSE("GPL"); MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>"); MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module"); #define XT_PCPU_BLOCK_SIZE 4096 #define XT_MAX_TABLE_SIZE (512 * 1024 * 1024) struct xt_template { struct list_head list; /* called when table is needed in the given netns */ int (*table_init)(struct net *net); struct module *me; /* A unique name... */ char name[XT_TABLE_MAXNAMELEN]; }; static struct list_head xt_templates[NFPROTO_NUMPROTO]; struct xt_pernet { struct list_head tables[NFPROTO_NUMPROTO]; }; struct compat_delta { unsigned int offset; /* offset in kernel */ int delta; /* delta in 32bit user land */ }; struct xt_af { struct mutex mutex; struct list_head match; struct list_head target; #ifdef CONFIG_NETFILTER_XTABLES_COMPAT struct mutex compat_mutex; struct compat_delta *compat_tab; unsigned int number; /* number of slots in compat_tab[] */ unsigned int cur; /* number of used slots in compat_tab[] */ #endif }; static unsigned int xt_pernet_id __read_mostly; static struct xt_af *xt __read_mostly; static const char *const xt_prefix[NFPROTO_NUMPROTO] = { [NFPROTO_UNSPEC] = "x", [NFPROTO_IPV4] = "ip", [NFPROTO_ARP] = "arp", [NFPROTO_BRIDGE] = "eb", [NFPROTO_IPV6] = "ip6", }; /* Registration hooks for targets. 
*/ int xt_register_target(struct xt_target *target) { u_int8_t af = target->family; mutex_lock(&xt[af].mutex); list_add(&target->list, &xt[af].target); mutex_unlock(&xt[af].mutex); return 0; } EXPORT_SYMBOL(xt_register_target); void xt_unregister_target(struct xt_target *target) { u_int8_t af = target->family; mutex_lock(&xt[af].mutex); list_del(&target->list); mutex_unlock(&xt[af].mutex); } EXPORT_SYMBOL(xt_unregister_target); int xt_register_targets(struct xt_target *target, unsigned int n) { unsigned int i; int err = 0; for (i = 0; i < n; i++) { err = xt_register_target(&target[i]); if (err) goto err; } return err; err: if (i > 0) xt_unregister_targets(target, i); return err; } EXPORT_SYMBOL(xt_register_targets); void xt_unregister_targets(struct xt_target *target, unsigned int n) { while (n-- > 0) xt_unregister_target(&target[n]); } EXPORT_SYMBOL(xt_unregister_targets); int xt_register_match(struct xt_match *match) { u_int8_t af = match->family; mutex_lock(&xt[af].mutex); list_add(&match->list, &xt[af].match); mutex_unlock(&xt[af].mutex); return 0; } EXPORT_SYMBOL(xt_register_match); void xt_unregister_match(struct xt_match *match) { u_int8_t af = match->family; mutex_lock(&xt[af].mutex); list_del(&match->list); mutex_unlock(&xt[af].mutex); } EXPORT_SYMBOL(xt_unregister_match); int xt_register_matches(struct xt_match *match, unsigned int n) { unsigned int i; int err = 0; for (i = 0; i < n; i++) { err = xt_register_match(&match[i]); if (err) goto err; } return err; err: if (i > 0) xt_unregister_matches(match, i); return err; } EXPORT_SYMBOL(xt_register_matches); void xt_unregister_matches(struct xt_match *match, unsigned int n) { while (n-- > 0) xt_unregister_match(&match[n]); } EXPORT_SYMBOL(xt_unregister_matches); /* * These are weird, but module loading must not be done with mutex * held (since they will register), and we have to have a single * function to use. */ /* Find match, grabs ref. Returns ERR_PTR() on error. */ struct xt_match *xt_find_match(u8 af, const char *name, u8 revision) { struct xt_match *m; int err = -ENOENT; if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN) return ERR_PTR(-EINVAL); mutex_lock(&xt[af].mutex); list_for_each_entry(m, &xt[af].match, list) { if (strcmp(m->name, name) == 0) { if (m->revision == revision) { if (try_module_get(m->me)) { mutex_unlock(&xt[af].mutex); return m; } } else err = -EPROTOTYPE; /* Found something. */ } } mutex_unlock(&xt[af].mutex); if (af != NFPROTO_UNSPEC) /* Try searching again in the family-independent list */ return xt_find_match(NFPROTO_UNSPEC, name, revision); return ERR_PTR(err); } EXPORT_SYMBOL(xt_find_match); struct xt_match * xt_request_find_match(uint8_t nfproto, const char *name, uint8_t revision) { struct xt_match *match; if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN) return ERR_PTR(-EINVAL); match = xt_find_match(nfproto, name, revision); if (IS_ERR(match)) { request_module("%st_%s", xt_prefix[nfproto], name); match = xt_find_match(nfproto, name, revision); } return match; } EXPORT_SYMBOL_GPL(xt_request_find_match); /* Find target, grabs ref. Returns ERR_PTR() on error. 
*/ static struct xt_target *xt_find_target(u8 af, const char *name, u8 revision) { struct xt_target *t; int err = -ENOENT; if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN) return ERR_PTR(-EINVAL); mutex_lock(&xt[af].mutex); list_for_each_entry(t, &xt[af].target, list) { if (strcmp(t->name, name) == 0) { if (t->revision == revision) { if (try_module_get(t->me)) { mutex_unlock(&xt[af].mutex); return t; } } else err = -EPROTOTYPE; /* Found something. */ } } mutex_unlock(&xt[af].mutex); if (af != NFPROTO_UNSPEC) /* Try searching again in the family-independent list */ return xt_find_target(NFPROTO_UNSPEC, name, revision); return ERR_PTR(err); } struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision) { struct xt_target *target; if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN) return ERR_PTR(-EINVAL); target = xt_find_target(af, name, revision); if (IS_ERR(target)) { request_module("%st_%s", xt_prefix[af], name); target = xt_find_target(af, name, revision); } return target; } EXPORT_SYMBOL_GPL(xt_request_find_target); static int xt_obj_to_user(u16 __user *psize, u16 size, void __user *pname, const char *name, u8 __user *prev, u8 rev) { if (put_user(size, psize)) return -EFAULT; if (copy_to_user(pname, name, strlen(name) + 1)) return -EFAULT; if (put_user(rev, prev)) return -EFAULT; return 0; } #define XT_OBJ_TO_USER(U, K, TYPE, C_SIZE) \ xt_obj_to_user(&U->u.TYPE##_size, C_SIZE ? : K->u.TYPE##_size, \ U->u.user.name, K->u.kernel.TYPE->name, \ &U->u.user.revision, K->u.kernel.TYPE->revision) int xt_data_to_user(void __user *dst, const void *src, int usersize, int size, int aligned_size) { usersize = usersize ? : size; if (copy_to_user(dst, src, usersize)) return -EFAULT; if (usersize != aligned_size && clear_user(dst + usersize, aligned_size - usersize)) return -EFAULT; return 0; } EXPORT_SYMBOL_GPL(xt_data_to_user); #define XT_DATA_TO_USER(U, K, TYPE) \ xt_data_to_user(U->data, K->data, \ K->u.kernel.TYPE->usersize, \ K->u.kernel.TYPE->TYPE##size, \ XT_ALIGN(K->u.kernel.TYPE->TYPE##size)) int xt_match_to_user(const struct xt_entry_match *m, struct xt_entry_match __user *u) { return XT_OBJ_TO_USER(u, m, match, 0) || XT_DATA_TO_USER(u, m, match); } EXPORT_SYMBOL_GPL(xt_match_to_user); int xt_target_to_user(const struct xt_entry_target *t, struct xt_entry_target __user *u) { return XT_OBJ_TO_USER(u, t, target, 0) || XT_DATA_TO_USER(u, t, target); } EXPORT_SYMBOL_GPL(xt_target_to_user); static int match_revfn(u8 af, const char *name, u8 revision, int *bestp) { const struct xt_match *m; int have_rev = 0; mutex_lock(&xt[af].mutex); list_for_each_entry(m, &xt[af].match, list) { if (strcmp(m->name, name) == 0) { if (m->revision > *bestp) *bestp = m->revision; if (m->revision == revision) have_rev = 1; } } mutex_unlock(&xt[af].mutex); if (af != NFPROTO_UNSPEC && !have_rev) return match_revfn(NFPROTO_UNSPEC, name, revision, bestp); return have_rev; } static int target_revfn(u8 af, const char *name, u8 revision, int *bestp) { const struct xt_target *t; int have_rev = 0; mutex_lock(&xt[af].mutex); list_for_each_entry(t, &xt[af].target, list) { if (strcmp(t->name, name) == 0) { if (t->revision > *bestp) *bestp = t->revision; if (t->revision == revision) have_rev = 1; } } mutex_unlock(&xt[af].mutex); if (af != NFPROTO_UNSPEC && !have_rev) return target_revfn(NFPROTO_UNSPEC, name, revision, bestp); return have_rev; } /* Returns true or false (if no such extension at all) */ int xt_find_revision(u8 af, const char *name, u8 revision, int target, 
int *err) { int have_rev, best = -1; if (target == 1) have_rev = target_revfn(af, name, revision, &best); else have_rev = match_revfn(af, name, revision, &best); /* Nothing at all? Return 0 to try loading module. */ if (best == -1) { *err = -ENOENT; return 0; } *err = best; if (!have_rev) *err = -EPROTONOSUPPORT; return 1; } EXPORT_SYMBOL_GPL(xt_find_revision); static char * textify_hooks(char *buf, size_t size, unsigned int mask, uint8_t nfproto) { static const char *const inetbr_names[] = { "PREROUTING", "INPUT", "FORWARD", "OUTPUT", "POSTROUTING", "BROUTING", }; static const char *const arp_names[] = { "INPUT", "FORWARD", "OUTPUT", }; const char *const *names; unsigned int i, max; char *p = buf; bool np = false; int res; names = (nfproto == NFPROTO_ARP) ? arp_names : inetbr_names; max = (nfproto == NFPROTO_ARP) ? ARRAY_SIZE(arp_names) : ARRAY_SIZE(inetbr_names); *p = '\0'; for (i = 0; i < max; ++i) { if (!(mask & (1 << i))) continue; res = snprintf(p, size, "%s%s", np ? "/" : "", names[i]); if (res > 0) { size -= res; p += res; } np = true; } return buf; } /** * xt_check_proc_name - check that name is suitable for /proc file creation * * @name: file name candidate * @size: length of buffer * * some x_tables modules wish to create a file in /proc. * This function makes sure that the name is suitable for this * purpose, it checks that name is NUL terminated and isn't a 'special' * name, like "..". * * returns negative number on error or 0 if name is useable. */ int xt_check_proc_name(const char *name, unsigned int size) { if (name[0] == '\0') return -EINVAL; if (strnlen(name, size) == size) return -ENAMETOOLONG; if (strcmp(name, ".") == 0 || strcmp(name, "..") == 0 || strchr(name, '/')) return -EINVAL; return 0; } EXPORT_SYMBOL(xt_check_proc_name); int xt_check_match(struct xt_mtchk_param *par, unsigned int size, u16 proto, bool inv_proto) { int ret; if (XT_ALIGN(par->match->matchsize) != size && par->match->matchsize != -1) { /* * ebt_among is exempt from centralized matchsize checking * because it uses a dynamic-size data set. */ pr_err_ratelimited("%s_tables: %s.%u match: invalid size %u (kernel) != (user) %u\n", xt_prefix[par->family], par->match->name, par->match->revision, XT_ALIGN(par->match->matchsize), size); return -EINVAL; } if (par->match->table != NULL && strcmp(par->match->table, par->table) != 0) { pr_info_ratelimited("%s_tables: %s match: only valid in %s table, not %s\n", xt_prefix[par->family], par->match->name, par->match->table, par->table); return -EINVAL; } if (par->match->hooks && (par->hook_mask & ~par->match->hooks) != 0) { char used[64], allow[64]; pr_info_ratelimited("%s_tables: %s match: used from hooks %s, but only valid from %s\n", xt_prefix[par->family], par->match->name, textify_hooks(used, sizeof(used), par->hook_mask, par->family), textify_hooks(allow, sizeof(allow), par->match->hooks, par->family)); return -EINVAL; } if (par->match->proto && (par->match->proto != proto || inv_proto)) { pr_info_ratelimited("%s_tables: %s match: only valid for protocol %u\n", xt_prefix[par->family], par->match->name, par->match->proto); return -EINVAL; } if (par->match->checkentry != NULL) { ret = par->match->checkentry(par); if (ret < 0) return ret; else if (ret > 0) /* Flag up potential errors. 
*/ return -EIO; } return 0; } EXPORT_SYMBOL_GPL(xt_check_match); /** xt_check_entry_match - check that matches end before start of target * * @match: beginning of xt_entry_match * @target: beginning of this rules target (alleged end of matches) * @alignment: alignment requirement of match structures * * Validates that all matches add up to the beginning of the target, * and that each match covers at least the base structure size. * * Return: 0 on success, negative errno on failure. */ static int xt_check_entry_match(const char *match, const char *target, const size_t alignment) { const struct xt_entry_match *pos; int length = target - match; if (length == 0) /* no matches */ return 0; pos = (struct xt_entry_match *)match; do { if ((unsigned long)pos % alignment) return -EINVAL; if (length < (int)sizeof(struct xt_entry_match)) return -EINVAL; if (pos->u.match_size < sizeof(struct xt_entry_match)) return -EINVAL; if (pos->u.match_size > length) return -EINVAL; length -= pos->u.match_size; pos = ((void *)((char *)(pos) + (pos)->u.match_size)); } while (length > 0); return 0; } /** xt_check_table_hooks - check hook entry points are sane * * @info xt_table_info to check * @valid_hooks - hook entry points that we can enter from * * Validates that the hook entry and underflows points are set up. * * Return: 0 on success, negative errno on failure. */ int xt_check_table_hooks(const struct xt_table_info *info, unsigned int valid_hooks) { const char *err = "unsorted underflow"; unsigned int i, max_uflow, max_entry; bool check_hooks = false; BUILD_BUG_ON(ARRAY_SIZE(info->hook_entry) != ARRAY_SIZE(info->underflow)); max_entry = 0; max_uflow = 0; for (i = 0; i < ARRAY_SIZE(info->hook_entry); i++) { if (!(valid_hooks & (1 << i))) continue; if (info->hook_entry[i] == 0xFFFFFFFF) return -EINVAL; if (info->underflow[i] == 0xFFFFFFFF) return -EINVAL; if (check_hooks) { if (max_uflow > info->underflow[i]) goto error; if (max_uflow == info->underflow[i]) { err = "duplicate underflow"; goto error; } if (max_entry > info->hook_entry[i]) { err = "unsorted entry"; goto error; } if (max_entry == info->hook_entry[i]) { err = "duplicate entry"; goto error; } } max_entry = info->hook_entry[i]; max_uflow = info->underflow[i]; check_hooks = true; } return 0; error: pr_err_ratelimited("%s at hook %d\n", err, i); return -EINVAL; } EXPORT_SYMBOL(xt_check_table_hooks); static bool verdict_ok(int verdict) { if (verdict > 0) return true; if (verdict < 0) { int v = -verdict - 1; if (verdict == XT_RETURN) return true; switch (v) { case NF_ACCEPT: return true; case NF_DROP: return true; case NF_QUEUE: return true; default: break; } return false; } return false; } static bool error_tg_ok(unsigned int usersize, unsigned int kernsize, const char *msg, unsigned int msglen) { return usersize == kernsize && strnlen(msg, msglen) < msglen; } #ifdef CONFIG_NETFILTER_XTABLES_COMPAT int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta) { struct xt_af *xp = &xt[af]; WARN_ON(!mutex_is_locked(&xt[af].compat_mutex)); if (WARN_ON(!xp->compat_tab)) return -ENOMEM; if (xp->cur >= xp->number) return -EINVAL; if (xp->cur) delta += xp->compat_tab[xp->cur - 1].delta; xp->compat_tab[xp->cur].offset = offset; xp->compat_tab[xp->cur].delta = delta; xp->cur++; return 0; } EXPORT_SYMBOL_GPL(xt_compat_add_offset); void xt_compat_flush_offsets(u_int8_t af) { WARN_ON(!mutex_is_locked(&xt[af].compat_mutex)); if (xt[af].compat_tab) { vfree(xt[af].compat_tab); xt[af].compat_tab = NULL; xt[af].number = 0; xt[af].cur = 0; } } 
EXPORT_SYMBOL_GPL(xt_compat_flush_offsets); int xt_compat_calc_jump(u_int8_t af, unsigned int offset) { struct compat_delta *tmp = xt[af].compat_tab; int mid, left = 0, right = xt[af].cur - 1; while (left <= right) { mid = (left + right) >> 1; if (offset > tmp[mid].offset) left = mid + 1; else if (offset < tmp[mid].offset) right = mid - 1; else return mid ? tmp[mid - 1].delta : 0; } return left ? tmp[left - 1].delta : 0; } EXPORT_SYMBOL_GPL(xt_compat_calc_jump); int xt_compat_init_offsets(u8 af, unsigned int number) { size_t mem; WARN_ON(!mutex_is_locked(&xt[af].compat_mutex)); if (!number || number > (INT_MAX / sizeof(struct compat_delta))) return -EINVAL; if (WARN_ON(xt[af].compat_tab)) return -EINVAL; mem = sizeof(struct compat_delta) * number; if (mem > XT_MAX_TABLE_SIZE) return -ENOMEM; xt[af].compat_tab = vmalloc(mem); if (!xt[af].compat_tab) return -ENOMEM; xt[af].number = number; xt[af].cur = 0; return 0; } EXPORT_SYMBOL(xt_compat_init_offsets); int xt_compat_match_offset(const struct xt_match *match) { u_int16_t csize = match->compatsize ? : match->matchsize; return XT_ALIGN(match->matchsize) - COMPAT_XT_ALIGN(csize); } EXPORT_SYMBOL_GPL(xt_compat_match_offset); void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr, unsigned int *size) { const struct xt_match *match = m->u.kernel.match; struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m; int off = xt_compat_match_offset(match); u_int16_t msize = cm->u.user.match_size; char name[sizeof(m->u.user.name)]; m = *dstptr; memcpy(m, cm, sizeof(*cm)); if (match->compat_from_user) match->compat_from_user(m->data, cm->data); else memcpy(m->data, cm->data, msize - sizeof(*cm)); msize += off; m->u.user.match_size = msize; strscpy(name, match->name, sizeof(name)); module_put(match->me); strscpy_pad(m->u.user.name, name, sizeof(m->u.user.name)); *size += off; *dstptr += msize; } EXPORT_SYMBOL_GPL(xt_compat_match_from_user); #define COMPAT_XT_DATA_TO_USER(U, K, TYPE, C_SIZE) \ xt_data_to_user(U->data, K->data, \ K->u.kernel.TYPE->usersize, \ C_SIZE, \ COMPAT_XT_ALIGN(C_SIZE)) int xt_compat_match_to_user(const struct xt_entry_match *m, void __user **dstptr, unsigned int *size) { const struct xt_match *match = m->u.kernel.match; struct compat_xt_entry_match __user *cm = *dstptr; int off = xt_compat_match_offset(match); u_int16_t msize = m->u.user.match_size - off; if (XT_OBJ_TO_USER(cm, m, match, msize)) return -EFAULT; if (match->compat_to_user) { if (match->compat_to_user((void __user *)cm->data, m->data)) return -EFAULT; } else { if (COMPAT_XT_DATA_TO_USER(cm, m, match, msize - sizeof(*cm))) return -EFAULT; } *size -= off; *dstptr += msize; return 0; } EXPORT_SYMBOL_GPL(xt_compat_match_to_user); /* non-compat version may have padding after verdict */ struct compat_xt_standard_target { struct compat_xt_entry_target t; compat_uint_t verdict; }; struct compat_xt_error_target { struct compat_xt_entry_target t; char errorname[XT_FUNCTION_MAXNAMELEN]; }; int xt_compat_check_entry_offsets(const void *base, const char *elems, unsigned int target_offset, unsigned int next_offset) { long size_of_base_struct = elems - (const char *)base; const struct compat_xt_entry_target *t; const char *e = base; if (target_offset < size_of_base_struct) return -EINVAL; if (target_offset + sizeof(*t) > next_offset) return -EINVAL; t = (void *)(e + target_offset); if (t->u.target_size < sizeof(*t)) return -EINVAL; if (target_offset + t->u.target_size > next_offset) return -EINVAL; if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0) { 
const struct compat_xt_standard_target *st = (const void *)t; if (COMPAT_XT_ALIGN(target_offset + sizeof(*st)) != next_offset) return -EINVAL; if (!verdict_ok(st->verdict)) return -EINVAL; } else if (strcmp(t->u.user.name, XT_ERROR_TARGET) == 0) { const struct compat_xt_error_target *et = (const void *)t; if (!error_tg_ok(t->u.target_size, sizeof(*et), et->errorname, sizeof(et->errorname))) return -EINVAL; } /* compat_xt_entry match has less strict alignment requirements, * otherwise they are identical. In case of padding differences * we need to add compat version of xt_check_entry_match. */ BUILD_BUG_ON(sizeof(struct compat_xt_entry_match) != sizeof(struct xt_entry_match)); return xt_check_entry_match(elems, base + target_offset, __alignof__(struct compat_xt_entry_match)); } EXPORT_SYMBOL(xt_compat_check_entry_offsets); #endif /* CONFIG_NETFILTER_XTABLES_COMPAT */ /** * xt_check_entry_offsets - validate arp/ip/ip6t_entry * * @base: pointer to arp/ip/ip6t_entry * @elems: pointer to first xt_entry_match, i.e. ip(6)t_entry->elems * @target_offset: the arp/ip/ip6_t->target_offset * @next_offset: the arp/ip/ip6_t->next_offset * * validates that target_offset and next_offset are sane and that all * match sizes (if any) align with the target offset. * * This function does not validate the targets or matches themselves, it * only tests that all the offsets and sizes are correct, that all * match structures are aligned, and that the last structure ends where * the target structure begins. * * Also see xt_compat_check_entry_offsets for CONFIG_NETFILTER_XTABLES_COMPAT version. * * The arp/ip/ip6t_entry structure @base must have passed following tests: * - it must point to a valid memory location * - base to base + next_offset must be accessible, i.e. not exceed allocated * length. * * A well-formed entry looks like this: * * ip(6)t_entry match [mtdata] match [mtdata] target [tgdata] ip(6)t_entry * e->elems[]-----' | | * matchsize | | * matchsize | | * | | * target_offset---------------------------------' | * next_offset---------------------------------------------------' * * elems[]: flexible array member at end of ip(6)/arpt_entry struct. * This is where matches (if any) and the target reside. * target_offset: beginning of target. * next_offset: start of the next rule; also: size of this rule. * Since targets have a minimum size, target_offset + minlen <= next_offset. * * Every match stores its size, sum of sizes must not exceed target_offset. * * Return: 0 on success, negative errno on failure. 
*/ int xt_check_entry_offsets(const void *base, const char *elems, unsigned int target_offset, unsigned int next_offset) { long size_of_base_struct = elems - (const char *)base; const struct xt_entry_target *t; const char *e = base; /* target start is within the ip/ip6/arpt_entry struct */ if (target_offset < size_of_base_struct) return -EINVAL; if (target_offset + sizeof(*t) > next_offset) return -EINVAL; t = (void *)(e + target_offset); if (t->u.target_size < sizeof(*t)) return -EINVAL; if (target_offset + t->u.target_size > next_offset) return -EINVAL; if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0) { const struct xt_standard_target *st = (const void *)t; if (XT_ALIGN(target_offset + sizeof(*st)) != next_offset) return -EINVAL; if (!verdict_ok(st->verdict)) return -EINVAL; } else if (strcmp(t->u.user.name, XT_ERROR_TARGET) == 0) { const struct xt_error_target *et = (const void *)t; if (!error_tg_ok(t->u.target_size, sizeof(*et), et->errorname, sizeof(et->errorname))) return -EINVAL; } return xt_check_entry_match(elems, base + target_offset, __alignof__(struct xt_entry_match)); } EXPORT_SYMBOL(xt_check_entry_offsets); /** * xt_alloc_entry_offsets - allocate array to store rule head offsets * * @size: number of entries * * Return: NULL or zeroed kmalloc'd or vmalloc'd array */ unsigned int *xt_alloc_entry_offsets(unsigned int size) { if (size > XT_MAX_TABLE_SIZE / sizeof(unsigned int)) return NULL; return kvcalloc(size, sizeof(unsigned int), GFP_KERNEL); } EXPORT_SYMBOL(xt_alloc_entry_offsets); /** * xt_find_jump_offset - check if target is a valid jump offset * * @offsets: array containing all valid rule start offsets of a rule blob * @target: the jump target to search for * @size: entries in @offset */ bool xt_find_jump_offset(const unsigned int *offsets, unsigned int target, unsigned int size) { int m, low = 0, hi = size; while (hi > low) { m = (low + hi) / 2u; if (offsets[m] > target) hi = m; else if (offsets[m] < target) low = m + 1; else return true; } return false; } EXPORT_SYMBOL(xt_find_jump_offset); int xt_check_target(struct xt_tgchk_param *par, unsigned int size, u16 proto, bool inv_proto) { int ret; if (XT_ALIGN(par->target->targetsize) != size) { pr_err_ratelimited("%s_tables: %s.%u target: invalid size %u (kernel) != (user) %u\n", xt_prefix[par->family], par->target->name, par->target->revision, XT_ALIGN(par->target->targetsize), size); return -EINVAL; } if (par->target->table != NULL && strcmp(par->target->table, par->table) != 0) { pr_info_ratelimited("%s_tables: %s target: only valid in %s table, not %s\n", xt_prefix[par->family], par->target->name, par->target->table, par->table); return -EINVAL; } if (par->target->hooks && (par->hook_mask & ~par->target->hooks) != 0) { char used[64], allow[64]; pr_info_ratelimited("%s_tables: %s target: used from hooks %s, but only usable from %s\n", xt_prefix[par->family], par->target->name, textify_hooks(used, sizeof(used), par->hook_mask, par->family), textify_hooks(allow, sizeof(allow), par->target->hooks, par->family)); return -EINVAL; } if (par->target->proto && (par->target->proto != proto || inv_proto)) { pr_info_ratelimited("%s_tables: %s target: only valid for protocol %u\n", xt_prefix[par->family], par->target->name, par->target->proto); return -EINVAL; } if (par->target->checkentry != NULL) { ret = par->target->checkentry(par); if (ret < 0) return ret; else if (ret > 0) /* Flag up potential errors. 
*/ return -EIO; } return 0; } EXPORT_SYMBOL_GPL(xt_check_target); /** * xt_copy_counters - copy counters and metadata from a sockptr_t * * @arg: src sockptr * @len: alleged size of userspace memory * @info: where to store the xt_counters_info metadata * * Copies counter meta data from @user and stores it in @info. * * vmallocs memory to hold the counters, then copies the counter data * from @user to the new memory and returns a pointer to it. * * If called from a compat syscall, @info gets converted automatically to the * 64bit representation. * * The metadata associated with the counters is stored in @info. * * Return: returns pointer that caller has to test via IS_ERR(). * If IS_ERR is false, caller has to vfree the pointer. */ void *xt_copy_counters(sockptr_t arg, unsigned int len, struct xt_counters_info *info) { size_t offset; void *mem; u64 size; #ifdef CONFIG_NETFILTER_XTABLES_COMPAT if (in_compat_syscall()) { /* structures only differ in size due to alignment */ struct compat_xt_counters_info compat_tmp; if (len <= sizeof(compat_tmp)) return ERR_PTR(-EINVAL); len -= sizeof(compat_tmp); if (copy_from_sockptr(&compat_tmp, arg, sizeof(compat_tmp)) != 0) return ERR_PTR(-EFAULT); memcpy(info->name, compat_tmp.name, sizeof(info->name) - 1); info->num_counters = compat_tmp.num_counters; offset = sizeof(compat_tmp); } else #endif { if (len <= sizeof(*info)) return ERR_PTR(-EINVAL); len -= sizeof(*info); if (copy_from_sockptr(info, arg, sizeof(*info)) != 0) return ERR_PTR(-EFAULT); offset = sizeof(*info); } info->name[sizeof(info->name) - 1] = '\0'; size = sizeof(struct xt_counters); size *= info->num_counters; if (size != (u64)len) return ERR_PTR(-EINVAL); mem = vmalloc(len); if (!mem) return ERR_PTR(-ENOMEM); if (copy_from_sockptr_offset(mem, arg, offset, len) == 0) return mem; vfree(mem); return ERR_PTR(-EFAULT); } EXPORT_SYMBOL_GPL(xt_copy_counters); #ifdef CONFIG_NETFILTER_XTABLES_COMPAT int xt_compat_target_offset(const struct xt_target *target) { u_int16_t csize = target->compatsize ? 
: target->targetsize; return XT_ALIGN(target->targetsize) - COMPAT_XT_ALIGN(csize); } EXPORT_SYMBOL_GPL(xt_compat_target_offset); void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr, unsigned int *size) { const struct xt_target *target = t->u.kernel.target; struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t; int off = xt_compat_target_offset(target); u_int16_t tsize = ct->u.user.target_size; char name[sizeof(t->u.user.name)]; t = *dstptr; memcpy(t, ct, sizeof(*ct)); if (target->compat_from_user) target->compat_from_user(t->data, ct->data); else unsafe_memcpy(t->data, ct->data, tsize - sizeof(*ct), /* UAPI 0-sized destination */); tsize += off; t->u.user.target_size = tsize; strscpy(name, target->name, sizeof(name)); module_put(target->me); strscpy_pad(t->u.user.name, name, sizeof(t->u.user.name)); *size += off; *dstptr += tsize; } EXPORT_SYMBOL_GPL(xt_compat_target_from_user); int xt_compat_target_to_user(const struct xt_entry_target *t, void __user **dstptr, unsigned int *size) { const struct xt_target *target = t->u.kernel.target; struct compat_xt_entry_target __user *ct = *dstptr; int off = xt_compat_target_offset(target); u_int16_t tsize = t->u.user.target_size - off; if (XT_OBJ_TO_USER(ct, t, target, tsize)) return -EFAULT; if (target->compat_to_user) { if (target->compat_to_user((void __user *)ct->data, t->data)) return -EFAULT; } else { if (COMPAT_XT_DATA_TO_USER(ct, t, target, tsize - sizeof(*ct))) return -EFAULT; } *size -= off; *dstptr += tsize; return 0; } EXPORT_SYMBOL_GPL(xt_compat_target_to_user); #endif struct xt_table_info *xt_alloc_table_info(unsigned int size) { struct xt_table_info *info = NULL; size_t sz = sizeof(*info) + size; if (sz < sizeof(*info) || sz >= XT_MAX_TABLE_SIZE) return NULL; info = kvmalloc(sz, GFP_KERNEL_ACCOUNT); if (!info) return NULL; memset(info, 0, sizeof(*info)); info->size = size; return info; } EXPORT_SYMBOL(xt_alloc_table_info); void xt_free_table_info(struct xt_table_info *info) { int cpu; if (info->jumpstack != NULL) { for_each_possible_cpu(cpu) kvfree(info->jumpstack[cpu]); kvfree(info->jumpstack); } kvfree(info); } EXPORT_SYMBOL(xt_free_table_info); struct xt_table *xt_find_table(struct net *net, u8 af, const char *name) { struct xt_pernet *xt_net = net_generic(net, xt_pernet_id); struct xt_table *t; mutex_lock(&xt[af].mutex); list_for_each_entry(t, &xt_net->tables[af], list) { if (strcmp(t->name, name) == 0) { mutex_unlock(&xt[af].mutex); return t; } } mutex_unlock(&xt[af].mutex); return NULL; } EXPORT_SYMBOL(xt_find_table); /* Find table by name, grabs mutex & ref. Returns ERR_PTR on error. 
*/ struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af, const char *name) { struct xt_pernet *xt_net = net_generic(net, xt_pernet_id); struct module *owner = NULL; struct xt_template *tmpl; struct xt_table *t; mutex_lock(&xt[af].mutex); list_for_each_entry(t, &xt_net->tables[af], list) if (strcmp(t->name, name) == 0 && try_module_get(t->me)) return t; /* Table doesn't exist in this netns, check larval list */ list_for_each_entry(tmpl, &xt_templates[af], list) { int err; if (strcmp(tmpl->name, name)) continue; if (!try_module_get(tmpl->me)) goto out; owner = tmpl->me; mutex_unlock(&xt[af].mutex); err = tmpl->table_init(net); if (err < 0) { module_put(owner); return ERR_PTR(err); } mutex_lock(&xt[af].mutex); break; } /* and once again: */ list_for_each_entry(t, &xt_net->tables[af], list) if (strcmp(t->name, name) == 0 && owner == t->me) return t; module_put(owner); out: mutex_unlock(&xt[af].mutex); return ERR_PTR(-ENOENT); } EXPORT_SYMBOL_GPL(xt_find_table_lock); struct xt_table *xt_request_find_table_lock(struct net *net, u_int8_t af, const char *name) { struct xt_table *t = xt_find_table_lock(net, af, name); #ifdef CONFIG_MODULES if (IS_ERR(t)) { int err = request_module("%stable_%s", xt_prefix[af], name); if (err < 0) return ERR_PTR(err); t = xt_find_table_lock(net, af, name); } #endif return t; } EXPORT_SYMBOL_GPL(xt_request_find_table_lock); void xt_table_unlock(struct xt_table *table) { mutex_unlock(&xt[table->af].mutex); } EXPORT_SYMBOL_GPL(xt_table_unlock); #ifdef CONFIG_NETFILTER_XTABLES_COMPAT void xt_compat_lock(u_int8_t af) { mutex_lock(&xt[af].compat_mutex); } EXPORT_SYMBOL_GPL(xt_compat_lock); void xt_compat_unlock(u_int8_t af) { mutex_unlock(&xt[af].compat_mutex); } EXPORT_SYMBOL_GPL(xt_compat_unlock); #endif DEFINE_PER_CPU(seqcount_t, xt_recseq); EXPORT_PER_CPU_SYMBOL_GPL(xt_recseq); struct static_key xt_tee_enabled __read_mostly; EXPORT_SYMBOL_GPL(xt_tee_enabled); static int xt_jumpstack_alloc(struct xt_table_info *i) { unsigned int size; int cpu; size = sizeof(void **) * nr_cpu_ids; if (size > PAGE_SIZE) i->jumpstack = kvzalloc(size, GFP_KERNEL); else i->jumpstack = kzalloc(size, GFP_KERNEL); if (i->jumpstack == NULL) return -ENOMEM; /* ruleset without jumps -- no stack needed */ if (i->stacksize == 0) return 0; /* Jumpstack needs to be able to record two full callchains, one * from the first rule set traversal, plus one table reentrancy * via -j TEE without clobbering the callchain that brought us to * TEE target. * * This is done by allocating two jumpstacks per cpu, on reentry * the upper half of the stack is used. * * see the jumpstack setup in ipt_do_table() for more details. */ size = sizeof(void *) * i->stacksize * 2u; for_each_possible_cpu(cpu) { i->jumpstack[cpu] = kvmalloc_node(size, GFP_KERNEL, cpu_to_node(cpu)); if (i->jumpstack[cpu] == NULL) /* * Freeing will be done later on by the callers. The * chain is: xt_replace_table -> __do_replace -> * do_replace -> xt_free_table_info. 
*/ return -ENOMEM; } return 0; } struct xt_counters *xt_counters_alloc(unsigned int counters) { struct xt_counters *mem; if (counters == 0 || counters > INT_MAX / sizeof(*mem)) return NULL; counters *= sizeof(*mem); if (counters > XT_MAX_TABLE_SIZE) return NULL; return vzalloc(counters); } EXPORT_SYMBOL(xt_counters_alloc); struct xt_table_info * xt_replace_table(struct xt_table *table, unsigned int num_counters, struct xt_table_info *newinfo, int *error) { struct xt_table_info *private; unsigned int cpu; int ret; ret = xt_jumpstack_alloc(newinfo); if (ret < 0) { *error = ret; return NULL; } /* Do the substitution. */ local_bh_disable(); private = table->private; /* Check inside lock: is the old number correct? */ if (num_counters != private->number) { pr_debug("num_counters != table->private->number (%u/%u)\n", num_counters, private->number); local_bh_enable(); *error = -EAGAIN; return NULL; } newinfo->initial_entries = private->initial_entries; /* * Ensure contents of newinfo are visible before assigning to * private. */ smp_wmb(); table->private = newinfo; /* make sure all cpus see new ->private value */ smp_mb(); /* * Even though table entries have now been swapped, other CPU's * may still be using the old entries... */ local_bh_enable(); /* ... so wait for even xt_recseq on all cpus */ for_each_possible_cpu(cpu) { seqcount_t *s = &per_cpu(xt_recseq, cpu); u32 seq = raw_read_seqcount(s); if (seq & 1) { do { cond_resched(); cpu_relax(); } while (seq == raw_read_seqcount(s)); } } audit_log_nfcfg(table->name, table->af, private->number, !private->number ? AUDIT_XT_OP_REGISTER : AUDIT_XT_OP_REPLACE, GFP_KERNEL); return private; } EXPORT_SYMBOL_GPL(xt_replace_table); struct xt_table *xt_register_table(struct net *net, const struct xt_table *input_table, struct xt_table_info *bootstrap, struct xt_table_info *newinfo) { struct xt_pernet *xt_net = net_generic(net, xt_pernet_id); struct xt_table_info *private; struct xt_table *t, *table; int ret; /* Don't add one object to multiple lists. */ table = kmemdup(input_table, sizeof(struct xt_table), GFP_KERNEL); if (!table) { ret = -ENOMEM; goto out; } mutex_lock(&xt[table->af].mutex); /* Don't autoload: we'd eat our tail... */ list_for_each_entry(t, &xt_net->tables[table->af], list) { if (strcmp(t->name, table->name) == 0) { ret = -EEXIST; goto unlock; } } /* Simplifies replace_table code. 
*/ table->private = bootstrap; if (!xt_replace_table(table, 0, newinfo, &ret)) goto unlock; private = table->private; pr_debug("table->private->number = %u\n", private->number); /* save number of initial entries */ private->initial_entries = private->number; list_add(&table->list, &xt_net->tables[table->af]); mutex_unlock(&xt[table->af].mutex); return table; unlock: mutex_unlock(&xt[table->af].mutex); kfree(table); out: return ERR_PTR(ret); } EXPORT_SYMBOL_GPL(xt_register_table); void *xt_unregister_table(struct xt_table *table) { struct xt_table_info *private; mutex_lock(&xt[table->af].mutex); private = table->private; list_del(&table->list); mutex_unlock(&xt[table->af].mutex); audit_log_nfcfg(table->name, table->af, private->number, AUDIT_XT_OP_UNREGISTER, GFP_KERNEL); kfree(table->ops); kfree(table); return private; } EXPORT_SYMBOL_GPL(xt_unregister_table); #ifdef CONFIG_PROC_FS static void *xt_table_seq_start(struct seq_file *seq, loff_t *pos) { u8 af = (unsigned long)pde_data(file_inode(seq->file)); struct net *net = seq_file_net(seq); struct xt_pernet *xt_net; xt_net = net_generic(net, xt_pernet_id); mutex_lock(&xt[af].mutex); return seq_list_start(&xt_net->tables[af], *pos); } static void *xt_table_seq_next(struct seq_file *seq, void *v, loff_t *pos) { u8 af = (unsigned long)pde_data(file_inode(seq->file)); struct net *net = seq_file_net(seq); struct xt_pernet *xt_net; xt_net = net_generic(net, xt_pernet_id); return seq_list_next(v, &xt_net->tables[af], pos); } static void xt_table_seq_stop(struct seq_file *seq, void *v) { u_int8_t af = (unsigned long)pde_data(file_inode(seq->file)); mutex_unlock(&xt[af].mutex); } static int xt_table_seq_show(struct seq_file *seq, void *v) { struct xt_table *table = list_entry(v, struct xt_table, list); if (*table->name) seq_printf(seq, "%s\n", table->name); return 0; } static const struct seq_operations xt_table_seq_ops = { .start = xt_table_seq_start, .next = xt_table_seq_next, .stop = xt_table_seq_stop, .show = xt_table_seq_show, }; /* * Traverse state for ip{,6}_{tables,matches} for helping crossing * the multi-AF mutexes. */ struct nf_mttg_trav { struct list_head *head, *curr; uint8_t class; }; enum { MTTG_TRAV_INIT, MTTG_TRAV_NFP_UNSPEC, MTTG_TRAV_NFP_SPEC, MTTG_TRAV_DONE, }; static void *xt_mttg_seq_next(struct seq_file *seq, void *v, loff_t *ppos, bool is_target) { static const uint8_t next_class[] = { [MTTG_TRAV_NFP_UNSPEC] = MTTG_TRAV_NFP_SPEC, [MTTG_TRAV_NFP_SPEC] = MTTG_TRAV_DONE, }; uint8_t nfproto = (unsigned long)pde_data(file_inode(seq->file)); struct nf_mttg_trav *trav = seq->private; if (ppos != NULL) ++(*ppos); switch (trav->class) { case MTTG_TRAV_INIT: trav->class = MTTG_TRAV_NFP_UNSPEC; mutex_lock(&xt[NFPROTO_UNSPEC].mutex); trav->head = trav->curr = is_target ? &xt[NFPROTO_UNSPEC].target : &xt[NFPROTO_UNSPEC].match; break; case MTTG_TRAV_NFP_UNSPEC: trav->curr = trav->curr->next; if (trav->curr != trav->head) break; mutex_unlock(&xt[NFPROTO_UNSPEC].mutex); mutex_lock(&xt[nfproto].mutex); trav->head = trav->curr = is_target ? 
&xt[nfproto].target : &xt[nfproto].match; trav->class = next_class[trav->class]; break; case MTTG_TRAV_NFP_SPEC: trav->curr = trav->curr->next; if (trav->curr != trav->head) break; fallthrough; default: return NULL; } return trav; } static void *xt_mttg_seq_start(struct seq_file *seq, loff_t *pos, bool is_target) { struct nf_mttg_trav *trav = seq->private; unsigned int j; trav->class = MTTG_TRAV_INIT; for (j = 0; j < *pos; ++j) if (xt_mttg_seq_next(seq, NULL, NULL, is_target) == NULL) return NULL; return trav; } static void xt_mttg_seq_stop(struct seq_file *seq, void *v) { uint8_t nfproto = (unsigned long)pde_data(file_inode(seq->file)); struct nf_mttg_trav *trav = seq->private; switch (trav->class) { case MTTG_TRAV_NFP_UNSPEC: mutex_unlock(&xt[NFPROTO_UNSPEC].mutex); break; case MTTG_TRAV_NFP_SPEC: mutex_unlock(&xt[nfproto].mutex); break; } } static void *xt_match_seq_start(struct seq_file *seq, loff_t *pos) { return xt_mttg_seq_start(seq, pos, false); } static void *xt_match_seq_next(struct seq_file *seq, void *v, loff_t *ppos) { return xt_mttg_seq_next(seq, v, ppos, false); } static int xt_match_seq_show(struct seq_file *seq, void *v) { const struct nf_mttg_trav *trav = seq->private; const struct xt_match *match; switch (trav->class) { case MTTG_TRAV_NFP_UNSPEC: case MTTG_TRAV_NFP_SPEC: if (trav->curr == trav->head) return 0; match = list_entry(trav->curr, struct xt_match, list); if (*match->name) seq_printf(seq, "%s\n", match->name); } return 0; } static const struct seq_operations xt_match_seq_ops = { .start = xt_match_seq_start, .next = xt_match_seq_next, .stop = xt_mttg_seq_stop, .show = xt_match_seq_show, }; static void *xt_target_seq_start(struct seq_file *seq, loff_t *pos) { return xt_mttg_seq_start(seq, pos, true); } static void *xt_target_seq_next(struct seq_file *seq, void *v, loff_t *ppos) { return xt_mttg_seq_next(seq, v, ppos, true); } static int xt_target_seq_show(struct seq_file *seq, void *v) { const struct nf_mttg_trav *trav = seq->private; const struct xt_target *target; switch (trav->class) { case MTTG_TRAV_NFP_UNSPEC: case MTTG_TRAV_NFP_SPEC: if (trav->curr == trav->head) return 0; target = list_entry(trav->curr, struct xt_target, list); if (*target->name) seq_printf(seq, "%s\n", target->name); } return 0; } static const struct seq_operations xt_target_seq_ops = { .start = xt_target_seq_start, .next = xt_target_seq_next, .stop = xt_mttg_seq_stop, .show = xt_target_seq_show, }; #define FORMAT_TABLES "_tables_names" #define FORMAT_MATCHES "_tables_matches" #define FORMAT_TARGETS "_tables_targets" #endif /* CONFIG_PROC_FS */ /** * xt_hook_ops_alloc - set up hooks for a new table * @table: table with metadata needed to set up hooks * @fn: Hook function * * This function will create the nf_hook_ops that the x_table needs * to hand to xt_hook_link_net(). 
*/ struct nf_hook_ops * xt_hook_ops_alloc(const struct xt_table *table, nf_hookfn *fn) { unsigned int hook_mask = table->valid_hooks; uint8_t i, num_hooks = hweight32(hook_mask); uint8_t hooknum; struct nf_hook_ops *ops; if (!num_hooks) return ERR_PTR(-EINVAL); ops = kcalloc(num_hooks, sizeof(*ops), GFP_KERNEL); if (ops == NULL) return ERR_PTR(-ENOMEM); for (i = 0, hooknum = 0; i < num_hooks && hook_mask != 0; hook_mask >>= 1, ++hooknum) { if (!(hook_mask & 1)) continue; ops[i].hook = fn; ops[i].pf = table->af; ops[i].hooknum = hooknum; ops[i].priority = table->priority; ++i; } return ops; } EXPORT_SYMBOL_GPL(xt_hook_ops_alloc); int xt_register_template(const struct xt_table *table, int (*table_init)(struct net *net)) { int ret = -EEXIST, af = table->af; struct xt_template *t; mutex_lock(&xt[af].mutex); list_for_each_entry(t, &xt_templates[af], list) { if (WARN_ON_ONCE(strcmp(table->name, t->name) == 0)) goto out_unlock; } ret = -ENOMEM; t = kzalloc(sizeof(*t), GFP_KERNEL); if (!t) goto out_unlock; BUILD_BUG_ON(sizeof(t->name) != sizeof(table->name)); strscpy(t->name, table->name, sizeof(t->name)); t->table_init = table_init; t->me = table->me; list_add(&t->list, &xt_templates[af]); ret = 0; out_unlock: mutex_unlock(&xt[af].mutex); return ret; } EXPORT_SYMBOL_GPL(xt_register_template); void xt_unregister_template(const struct xt_table *table) { struct xt_template *t; int af = table->af; mutex_lock(&xt[af].mutex); list_for_each_entry(t, &xt_templates[af], list) { if (strcmp(table->name, t->name)) continue; list_del(&t->list); mutex_unlock(&xt[af].mutex); kfree(t); return; } mutex_unlock(&xt[af].mutex); WARN_ON_ONCE(1); } EXPORT_SYMBOL_GPL(xt_unregister_template); int xt_proto_init(struct net *net, u_int8_t af) { #ifdef CONFIG_PROC_FS char buf[XT_FUNCTION_MAXNAMELEN]; struct proc_dir_entry *proc; kuid_t root_uid; kgid_t root_gid; #endif if (af >= ARRAY_SIZE(xt_prefix)) return -EINVAL; #ifdef CONFIG_PROC_FS root_uid = make_kuid(net->user_ns, 0); root_gid = make_kgid(net->user_ns, 0); strscpy(buf, xt_prefix[af], sizeof(buf)); strlcat(buf, FORMAT_TABLES, sizeof(buf)); proc = proc_create_net_data(buf, 0440, net->proc_net, &xt_table_seq_ops, sizeof(struct seq_net_private), (void *)(unsigned long)af); if (!proc) goto out; if (uid_valid(root_uid) && gid_valid(root_gid)) proc_set_user(proc, root_uid, root_gid); strscpy(buf, xt_prefix[af], sizeof(buf)); strlcat(buf, FORMAT_MATCHES, sizeof(buf)); proc = proc_create_seq_private(buf, 0440, net->proc_net, &xt_match_seq_ops, sizeof(struct nf_mttg_trav), (void *)(unsigned long)af); if (!proc) goto out_remove_tables; if (uid_valid(root_uid) && gid_valid(root_gid)) proc_set_user(proc, root_uid, root_gid); strscpy(buf, xt_prefix[af], sizeof(buf)); strlcat(buf, FORMAT_TARGETS, sizeof(buf)); proc = proc_create_seq_private(buf, 0440, net->proc_net, &xt_target_seq_ops, sizeof(struct nf_mttg_trav), (void *)(unsigned long)af); if (!proc) goto out_remove_matches; if (uid_valid(root_uid) && gid_valid(root_gid)) proc_set_user(proc, root_uid, root_gid); #endif return 0; #ifdef CONFIG_PROC_FS out_remove_matches: strscpy(buf, xt_prefix[af], sizeof(buf)); strlcat(buf, FORMAT_MATCHES, sizeof(buf)); remove_proc_entry(buf, net->proc_net); out_remove_tables: strscpy(buf, xt_prefix[af], sizeof(buf)); strlcat(buf, FORMAT_TABLES, sizeof(buf)); remove_proc_entry(buf, net->proc_net); out: return -1; #endif } EXPORT_SYMBOL_GPL(xt_proto_init); void xt_proto_fini(struct net *net, u_int8_t af) { #ifdef CONFIG_PROC_FS char buf[XT_FUNCTION_MAXNAMELEN]; strscpy(buf, xt_prefix[af], 
sizeof(buf)); strlcat(buf, FORMAT_TABLES, sizeof(buf)); remove_proc_entry(buf, net->proc_net); strscpy(buf, xt_prefix[af], sizeof(buf)); strlcat(buf, FORMAT_TARGETS, sizeof(buf)); remove_proc_entry(buf, net->proc_net); strscpy(buf, xt_prefix[af], sizeof(buf)); strlcat(buf, FORMAT_MATCHES, sizeof(buf)); remove_proc_entry(buf, net->proc_net); #endif /*CONFIG_PROC_FS*/ } EXPORT_SYMBOL_GPL(xt_proto_fini); /** * xt_percpu_counter_alloc - allocate x_tables rule counter * * @state: pointer to xt_percpu allocation state * @counter: pointer to counter struct inside the ip(6)/arpt_entry struct * * On SMP, the packet counter [ ip(6)t_entry->counters.pcnt ] will then * contain the address of the real (percpu) counter. * * Rule evaluation needs to use xt_get_this_cpu_counter() helper * to fetch the real percpu counter. * * To speed up allocation and improve data locality, a 4kb block is * allocated. Freeing any counter may free an entire block, so all * counters allocated using the same state must be freed at the same * time. * * xt_percpu_counter_alloc_state contains the base address of the * allocated page and the current sub-offset. * * returns false on error. */ bool xt_percpu_counter_alloc(struct xt_percpu_counter_alloc_state *state, struct xt_counters *counter) { BUILD_BUG_ON(XT_PCPU_BLOCK_SIZE < (sizeof(*counter) * 2)); if (nr_cpu_ids <= 1) return true; if (!state->mem) { state->mem = __alloc_percpu(XT_PCPU_BLOCK_SIZE, XT_PCPU_BLOCK_SIZE); if (!state->mem) return false; } counter->pcnt = (__force unsigned long)(state->mem + state->off); state->off += sizeof(*counter); if (state->off > (XT_PCPU_BLOCK_SIZE - sizeof(*counter))) { state->mem = NULL; state->off = 0; } return true; } EXPORT_SYMBOL_GPL(xt_percpu_counter_alloc); void xt_percpu_counter_free(struct xt_counters *counters) { unsigned long pcnt = counters->pcnt; if (nr_cpu_ids > 1 && (pcnt & (XT_PCPU_BLOCK_SIZE - 1)) == 0) free_percpu((void __percpu *)pcnt); } EXPORT_SYMBOL_GPL(xt_percpu_counter_free); static int __net_init xt_net_init(struct net *net) { struct xt_pernet *xt_net = net_generic(net, xt_pernet_id); int i; for (i = 0; i < NFPROTO_NUMPROTO; i++) INIT_LIST_HEAD(&xt_net->tables[i]); return 0; } static void __net_exit xt_net_exit(struct net *net) { struct xt_pernet *xt_net = net_generic(net, xt_pernet_id); int i; for (i = 0; i < NFPROTO_NUMPROTO; i++) WARN_ON_ONCE(!list_empty(&xt_net->tables[i])); } static struct pernet_operations xt_net_ops = { .init = xt_net_init, .exit = xt_net_exit, .id = &xt_pernet_id, .size = sizeof(struct xt_pernet), }; static int __init xt_init(void) { unsigned int i; int rv; for_each_possible_cpu(i) { seqcount_init(&per_cpu(xt_recseq, i)); } xt = kcalloc(NFPROTO_NUMPROTO, sizeof(struct xt_af), GFP_KERNEL); if (!xt) return -ENOMEM; for (i = 0; i < NFPROTO_NUMPROTO; i++) { mutex_init(&xt[i].mutex); #ifdef CONFIG_NETFILTER_XTABLES_COMPAT mutex_init(&xt[i].compat_mutex); xt[i].compat_tab = NULL; #endif INIT_LIST_HEAD(&xt[i].target); INIT_LIST_HEAD(&xt[i].match); INIT_LIST_HEAD(&xt_templates[i]); } rv = register_pernet_subsys(&xt_net_ops); if (rv < 0) kfree(xt); return rv; } static void __exit xt_fini(void) { unregister_pernet_subsys(&xt_net_ops); kfree(xt); } module_init(xt_init); module_exit(xt_fini); |
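/*
 * Illustrative sketch, not part of x_tables.c: how an extension module is
 * expected to use the registration API implemented above. The "foo" match,
 * its callback and its module hooks are hypothetical placeholders; only
 * xt_register_match()/xt_unregister_match(), struct xt_match and the usual
 * module macros are real kernel interfaces.
 */
#include <linux/module.h>
#include <linux/netfilter/x_tables.h>

/* hypothetical match callback: a real one would inspect the skb */
static bool foo_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
	return true;
}

static struct xt_match foo_mt_reg __read_mostly = {
	.name     = "foo",
	.revision = 0,
	.family   = NFPROTO_UNSPEC,	/* goes on the family-independent list */
	.match    = foo_mt,
	.me       = THIS_MODULE,
};

static int __init foo_mt_init(void)
{
	/* adds the match to xt[NFPROTO_UNSPEC].match under the af mutex */
	return xt_register_match(&foo_mt_reg);
}

static void __exit foo_mt_exit(void)
{
	xt_unregister_match(&foo_mt_reg);
}

module_init(foo_mt_init);
module_exit(foo_mt_exit);
MODULE_LICENSE("GPL");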
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (c) 2016-2021 Christoph Hellwig.
 */
#include <linux/fs.h>
#include <linux/iomap.h>
#include "trace.h"

/*
 * Advance to the next range we need to map.
 *
 * If the iomap is marked IOMAP_F_STALE, it means the existing map was not fully
 * processed - it was aborted because the extent the iomap spanned may have been
 * changed during the operation. In this case, the iteration behaviour is to
 * remap the unprocessed range of the iter, and that means we may need to remap
 * even when we've made no progress (i.e. iter->processed = 0). Hence the
 * "finished iterating" case needs to distinguish between
 * (processed = 0) meaning we are done and (processed = 0 && stale) meaning we
 * need to remap the entire remaining range.
 */
static inline int iomap_iter_advance(struct iomap_iter *iter)
{
	bool stale = iter->iomap.flags & IOMAP_F_STALE;
	int ret = 1;

	/* handle the previous iteration (if any) */
	if (iter->iomap.length) {
		if (iter->processed < 0)
			return iter->processed;
		if (WARN_ON_ONCE(iter->processed > iomap_length(iter)))
			return -EIO;
		iter->pos += iter->processed;
		iter->len -= iter->processed;
		if (!iter->len || (!iter->processed && !stale))
			ret = 0;
	}

	/* clear the per iteration state */
	iter->processed = 0;
	memset(&iter->iomap, 0, sizeof(iter->iomap));
	memset(&iter->srcmap, 0, sizeof(iter->srcmap));
	return ret;
}

static inline void iomap_iter_done(struct iomap_iter *iter)
{
	WARN_ON_ONCE(iter->iomap.offset > iter->pos);
	WARN_ON_ONCE(iter->iomap.length == 0);
	WARN_ON_ONCE(iter->iomap.offset + iter->iomap.length <= iter->pos);
	WARN_ON_ONCE(iter->iomap.flags & IOMAP_F_STALE);

	trace_iomap_iter_dstmap(iter->inode, &iter->iomap);
	if (iter->srcmap.type != IOMAP_HOLE)
		trace_iomap_iter_srcmap(iter->inode, &iter->srcmap);
}

/**
 * iomap_iter - iterate over a range in a file
 * @iter: iteration structure
 * @ops: iomap ops provided by the file system
 *
 * Iterate over filesystem-provided space mappings for the provided file range.
 *
 * This function handles cleanup of resources acquired for iteration when the
 * filesystem indicates there are no more space mappings, which means that this
 * function must be called in a loop that continues as long as it returns a
 * positive value. If 0 or a negative value is returned, the caller must not
 * return to the loop body. Within a loop body, there are two ways to break out
 * of the loop body: leave @iter.processed unchanged, or set it to a negative
 * errno.
 */
int iomap_iter(struct iomap_iter *iter, const struct iomap_ops *ops)
{
	int ret;

	if (iter->iomap.length && ops->iomap_end) {
		ret = ops->iomap_end(iter->inode, iter->pos, iomap_length(iter),
				iter->processed > 0 ? iter->processed : 0,
				iter->flags, &iter->iomap);
		if (ret < 0 && !iter->processed)
			return ret;
	}

	trace_iomap_iter(iter, ops, _RET_IP_);
	ret = iomap_iter_advance(iter);
	if (ret <= 0)
		return ret;

	ret = ops->iomap_begin(iter->inode, iter->pos, iter->len, iter->flags,
			       &iter->iomap, &iter->srcmap);
	if (ret < 0)
		return ret;
	iomap_iter_done(iter);
	return 1;
}
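/*
 * Illustrative sketch, not part of iter.c: the loop-based calling convention
 * described in the iomap_iter() kerneldoc above. "example_op" and
 * "iomap_example" are hypothetical stand-ins for a real iomap operation
 * (compare iomap_fiemap() or iomap_zero_range() in fs/iomap/).
 */
#include <linux/fs.h>
#include <linux/iomap.h>

/* process at most iomap_length(iter) bytes starting at iter->pos */
static loff_t example_op(struct iomap_iter *iter)
{
	/* a real operation would act on the current mapping here */
	return iomap_length(iter);	/* bytes handled, or a negative errno */
}

static int iomap_example(struct inode *inode, loff_t pos, loff_t len,
			 const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode = inode,
		.pos   = pos,
		.len   = len,
	};
	int ret;

	/*
	 * Keep calling iomap_iter() while it returns a positive value;
	 * per-mapping progress is reported back through iter.processed.
	 */
	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = example_op(&iter);
	return ret;
}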
/* * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * Copyright (c) 2009-2010, Code Aurora Forum. * Copyright 2016 Intel Corp. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE.
*/ #ifndef _DRM_DRV_H_ #define _DRM_DRV_H_ #include <linux/list.h> #include <linux/irqreturn.h> #include <video/nomodeset.h> #include <drm/drm_device.h> struct dmem_cgroup_region; struct drm_fb_helper; struct drm_fb_helper_surface_size; struct drm_file; struct drm_gem_object; struct drm_master; struct drm_minor; struct dma_buf; struct dma_buf_attachment; struct drm_display_mode; struct drm_mode_create_dumb; struct drm_printer; struct sg_table; /** * enum drm_driver_feature - feature flags * * See &drm_driver.driver_features, drm_device.driver_features and * drm_core_check_feature(). */ enum drm_driver_feature { /** * @DRIVER_GEM: * * Driver use the GEM memory manager. This should be set for all modern * drivers. */ DRIVER_GEM = BIT(0), /** * @DRIVER_MODESET: * * Driver supports mode setting interfaces (KMS). */ DRIVER_MODESET = BIT(1), /** * @DRIVER_RENDER: * * Driver supports dedicated render nodes. See also the :ref:`section on * render nodes <drm_render_node>` for details. */ DRIVER_RENDER = BIT(3), /** * @DRIVER_ATOMIC: * * Driver supports the full atomic modesetting userspace API. Drivers * which only use atomic internally, but do not support the full * userspace API (e.g. not all properties converted to atomic, or * multi-plane updates are not guaranteed to be tear-free) should not * set this flag. */ DRIVER_ATOMIC = BIT(4), /** * @DRIVER_SYNCOBJ: * * Driver supports &drm_syncobj for explicit synchronization of command * submission. */ DRIVER_SYNCOBJ = BIT(5), /** * @DRIVER_SYNCOBJ_TIMELINE: * * Driver supports the timeline flavor of &drm_syncobj for explicit * synchronization of command submission. */ DRIVER_SYNCOBJ_TIMELINE = BIT(6), /** * @DRIVER_COMPUTE_ACCEL: * * Driver supports compute acceleration devices. This flag is mutually exclusive with * @DRIVER_RENDER and @DRIVER_MODESET. Devices that support both graphics and compute * acceleration should be handled by two drivers that are connected using auxiliary bus. */ DRIVER_COMPUTE_ACCEL = BIT(7), /** * @DRIVER_GEM_GPUVA: * * Driver supports user defined GPU VA bindings for GEM objects. */ DRIVER_GEM_GPUVA = BIT(8), /** * @DRIVER_CURSOR_HOTSPOT: * * Driver supports and requires cursor hotspot information in the * cursor plane (e.g. cursor plane has to actually track the mouse * cursor and the clients are required to set hotspot in order for * the cursor planes to work correctly). */ DRIVER_CURSOR_HOTSPOT = BIT(9), /* IMPORTANT: Below are all the legacy flags, add new ones above. */ /** * @DRIVER_USE_AGP: * * Set up DRM AGP support, see drm_agp_init(), the DRM core will manage * AGP resources. New drivers don't need this. */ DRIVER_USE_AGP = BIT(25), /** * @DRIVER_LEGACY: * * Denote a legacy driver using shadow attach. Do not use. */ DRIVER_LEGACY = BIT(26), /** * @DRIVER_PCI_DMA: * * Driver is capable of PCI DMA, mapping of PCI DMA buffers to userspace * will be enabled. Only for legacy drivers. Do not use. */ DRIVER_PCI_DMA = BIT(27), /** * @DRIVER_SG: * * Driver can perform scatter/gather DMA, allocation and mapping of * scatter/gather buffers will be enabled. Only for legacy drivers. Do * not use. */ DRIVER_SG = BIT(28), /** * @DRIVER_HAVE_DMA: * * Driver supports DMA, the userspace DMA API will be supported. Only * for legacy drivers. Do not use. */ DRIVER_HAVE_DMA = BIT(29), /** * @DRIVER_HAVE_IRQ: * * Legacy irq support. Only for legacy drivers. Do not use. */ DRIVER_HAVE_IRQ = BIT(30), }; /** * struct drm_driver - DRM driver structure * * This structure represent the common code for a family of cards. 
There will be * one &struct drm_device for each card present in this family. It contains lots * of vfunc entries, and a pile of those probably should be moved to more * appropriate places like &drm_mode_config_funcs or into a new operations * structure for GEM drivers. */ struct drm_driver { /** * @load: * * Backward-compatible driver callback to complete initialization steps * after the driver is registered. For this reason, may suffer from * race conditions and its use is deprecated for new drivers. It is * therefore only supported for existing drivers not yet converted to * the new scheme. See devm_drm_dev_alloc() and drm_dev_register() for * proper and race-free way to set up a &struct drm_device. * * This is deprecated, do not use! * * Returns: * * Zero on success, non-zero value on failure. */ int (*load) (struct drm_device *, unsigned long flags); /** * @open: * * Driver callback when a new &struct drm_file is opened. Useful for * setting up driver-private data structures like buffer allocators, * execution contexts or similar things. Such driver-private resources * must be released again in @postclose. * * Since the display/modeset side of DRM can only be owned by exactly * one &struct drm_file (see &drm_file.is_master and &drm_device.master) * there should never be a need to set up any modeset related resources * in this callback. Doing so would be a driver design bug. * * Returns: * * 0 on success, a negative error code on failure, which will be * promoted to userspace as the result of the open() system call. */ int (*open) (struct drm_device *, struct drm_file *); /** * @postclose: * * One of the driver callbacks when a new &struct drm_file is closed. * Useful for tearing down driver-private data structures allocated in * @open like buffer allocators, execution contexts or similar things. * * Since the display/modeset side of DRM can only be owned by exactly * one &struct drm_file (see &drm_file.is_master and &drm_device.master) * there should never be a need to tear down any modeset related * resources in this callback. Doing so would be a driver design bug. */ void (*postclose) (struct drm_device *, struct drm_file *); /** * @unload: * * Reverse the effects of the driver load callback. Ideally, * the clean up performed by the driver should happen in the * reverse order of the initialization. Similarly to the load * hook, this handler is deprecated and its usage should be * dropped in favor of an open-coded teardown function at the * driver layer. See drm_dev_unregister() and drm_dev_put() * for the proper way to remove a &struct drm_device. * * The unload() hook is called right after unregistering * the device. * */ void (*unload) (struct drm_device *); /** * @release: * * Optional callback for destroying device data after the final * reference is released, i.e. the device is being destroyed. * * This is deprecated, clean up all memory allocations associated with a * &drm_device using drmm_add_action(), drmm_kmalloc() and related * managed resources functions. */ void (*release) (struct drm_device *); /** * @master_set: * * Called whenever the minor master is set. Only used by vmwgfx. */ void (*master_set)(struct drm_device *dev, struct drm_file *file_priv, bool from_open); /** * @master_drop: * * Called whenever the minor master is dropped. Only used by vmwgfx. */ void (*master_drop)(struct drm_device *dev, struct drm_file *file_priv); /** * @debugfs_init: * * Allows drivers to create driver-specific debugfs files. 
*/ void (*debugfs_init)(struct drm_minor *minor); /** * @gem_create_object: constructor for gem objects * * Hook for allocating the GEM object struct, for use by the CMA * and SHMEM GEM helpers. Returns a GEM object on success, or an * ERR_PTR()-encoded error code otherwise. */ struct drm_gem_object *(*gem_create_object)(struct drm_device *dev, size_t size); /** * @prime_handle_to_fd: * * PRIME export function. Only used by vmwgfx. */ int (*prime_handle_to_fd)(struct drm_device *dev, struct drm_file *file_priv, uint32_t handle, uint32_t flags, int *prime_fd); /** * @prime_fd_to_handle: * * PRIME import function. Only used by vmwgfx. */ int (*prime_fd_to_handle)(struct drm_device *dev, struct drm_file *file_priv, int prime_fd, uint32_t *handle); /** * @gem_prime_import: * * Import hook for GEM drivers. * * This defaults to drm_gem_prime_import() if not set. */ struct drm_gem_object * (*gem_prime_import)(struct drm_device *dev, struct dma_buf *dma_buf); /** * @gem_prime_import_sg_table: * * Optional hook used by the PRIME helper functions * drm_gem_prime_import() respectively drm_gem_prime_import_dev(). */ struct drm_gem_object *(*gem_prime_import_sg_table)( struct drm_device *dev, struct dma_buf_attachment *attach, struct sg_table *sgt); /** * @dumb_create: * * This creates a new dumb buffer in the driver's backing storage manager (GEM, * TTM or something else entirely) and returns the resulting buffer handle. This * handle can then be wrapped up into a framebuffer modeset object. * * Note that userspace is not allowed to use such objects for render * acceleration - drivers must create their own private ioctls for such a use * case. * * Width, height and depth are specified in the &drm_mode_create_dumb * argument. The callback needs to fill the handle, pitch and size for * the created buffer. * * Called by the user via ioctl. * * Returns: * * Zero on success, negative errno on failure. */ int (*dumb_create)(struct drm_file *file_priv, struct drm_device *dev, struct drm_mode_create_dumb *args); /** * @dumb_map_offset: * * Allocate an offset in the drm device node's address space to be able to * memory map a dumb buffer. * * The default implementation is drm_gem_create_mmap_offset(). GEM based * drivers must not overwrite this. * * Called by the user via ioctl. * * Returns: * * Zero on success, negative errno on failure. */ int (*dumb_map_offset)(struct drm_file *file_priv, struct drm_device *dev, uint32_t handle, uint64_t *offset); /** * @fbdev_probe: * * Allocates and initialize the fb_info structure for fbdev emulation. * Furthermore it also needs to allocate the DRM framebuffer used to * back the fbdev. * * This callback is mandatory for fbdev support. * * Returns: * * 0 on success ot a negative error code otherwise. */ int (*fbdev_probe)(struct drm_fb_helper *fbdev_helper, struct drm_fb_helper_surface_size *sizes); /** * @show_fdinfo: * * Print device specific fdinfo. See Documentation/gpu/drm-usage-stats.rst. */ void (*show_fdinfo)(struct drm_printer *p, struct drm_file *f); /** @major: driver major number */ int major; /** @minor: driver minor number */ int minor; /** @patchlevel: driver patch level */ int patchlevel; /** @name: driver name */ char *name; /** @desc: driver description */ char *desc; /** * @driver_features: * Driver features, see &enum drm_driver_feature. Drivers can disable * some features on a per-instance basis using * &drm_device.driver_features. */ u32 driver_features; /** * @ioctls: * * Array of driver-private IOCTL description entries. 
See the chapter on * :ref:`IOCTL support in the userland interfaces * chapter<drm_driver_ioctl>` for the full details. */ const struct drm_ioctl_desc *ioctls; /** @num_ioctls: Number of entries in @ioctls. */ int num_ioctls; /** * @fops: * * File operations for the DRM device node. See the discussion in * :ref:`file operations<drm_driver_fops>` for in-depth coverage and * some examples. */ const struct file_operations *fops; }; void *__devm_drm_dev_alloc(struct device *parent, const struct drm_driver *driver, size_t size, size_t offset); struct dmem_cgroup_region * drmm_cgroup_register_region(struct drm_device *dev, const char *region_name, u64 size); /** * devm_drm_dev_alloc - Resource managed allocation of a &drm_device instance * @parent: Parent device object * @driver: DRM driver * @type: the type of the struct which contains struct &drm_device * @member: the name of the &drm_device within @type. * * This allocates and initialize a new DRM device. No device registration is done. * Call drm_dev_register() to advertice the device to user space and register it * with other core subsystems. This should be done last in the device * initialization sequence to make sure userspace can't access an inconsistent * state. * * The initial ref-count of the object is 1. Use drm_dev_get() and * drm_dev_put() to take and drop further ref-counts. * * It is recommended that drivers embed &struct drm_device into their own device * structure. * * Note that this manages the lifetime of the resulting &drm_device * automatically using devres. The DRM device initialized with this function is * automatically put on driver detach using drm_dev_put(). * * RETURNS: * Pointer to new DRM device, or ERR_PTR on failure. */ #define devm_drm_dev_alloc(parent, driver, type, member) \ ((type *) __devm_drm_dev_alloc(parent, driver, sizeof(type), \ offsetof(type, member))) struct drm_device *drm_dev_alloc(const struct drm_driver *driver, struct device *parent); int drm_dev_register(struct drm_device *dev, unsigned long flags); void drm_dev_unregister(struct drm_device *dev); void drm_dev_get(struct drm_device *dev); void drm_dev_put(struct drm_device *dev); void drm_put_dev(struct drm_device *dev); bool drm_dev_enter(struct drm_device *dev, int *idx); void drm_dev_exit(int idx); void drm_dev_unplug(struct drm_device *dev); /** * drm_dev_is_unplugged - is a DRM device unplugged * @dev: DRM device * * This function can be called to check whether a hotpluggable is unplugged. * Unplugging itself is singalled through drm_dev_unplug(). If a device is * unplugged, these two functions guarantee that any store before calling * drm_dev_unplug() is visible to callers of this function after it completes * * WARNING: This function fundamentally races against drm_dev_unplug(). It is * recommended that drivers instead use the underlying drm_dev_enter() and * drm_dev_exit() function pairs. */ static inline bool drm_dev_is_unplugged(struct drm_device *dev) { int idx; if (drm_dev_enter(dev, &idx)) { drm_dev_exit(idx); return false; } return true; } /** * drm_core_check_all_features - check driver feature flags mask * @dev: DRM device to check * @features: feature flag(s) mask * * This checks @dev for driver features, see &drm_driver.driver_features, * &drm_device.driver_features, and the various &enum drm_driver_feature flags. * * Returns true if all features in the @features mask are supported, false * otherwise. 
*/ static inline bool drm_core_check_all_features(const struct drm_device *dev, u32 features) { u32 supported = dev->driver->driver_features & dev->driver_features; return features && (supported & features) == features; } /** * drm_core_check_feature - check driver feature flags * @dev: DRM device to check * @feature: feature flag * * This checks @dev for driver features, see &drm_driver.driver_features, * &drm_device.driver_features, and the various &enum drm_driver_feature flags. * * Returns true if the @feature is supported, false otherwise. */ static inline bool drm_core_check_feature(const struct drm_device *dev, enum drm_driver_feature feature) { return drm_core_check_all_features(dev, feature); } /** * drm_drv_uses_atomic_modeset - check if the driver implements * atomic_commit() * @dev: DRM device * * This check is useful if drivers do not have DRIVER_ATOMIC set but * have atomic modesetting internally implemented. */ static inline bool drm_drv_uses_atomic_modeset(struct drm_device *dev) { return drm_core_check_feature(dev, DRIVER_ATOMIC) || (dev->mode_config.funcs && dev->mode_config.funcs->atomic_commit != NULL); } /* TODO: Inline drm_firmware_drivers_only() in all its callers. */ static inline bool drm_firmware_drivers_only(void) { return video_firmware_drivers_only(); } #if defined(CONFIG_DEBUG_FS) void drm_debugfs_dev_init(struct drm_device *dev, struct dentry *root); #else static inline void drm_debugfs_dev_init(struct drm_device *dev, struct dentry *root) { } #endif #endif |
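As a rough sketch of how the declarations above fit together (not taken from any real driver), a probe path might embed struct drm_device, allocate it with devm_drm_dev_alloc() and register it last; example_device, example_driver, example_fops and example_probe are invented names:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/module.h>
#include <drm/drm_drv.h>
#include <drm/drm_device.h>
#include <drm/drm_gem.h>

struct example_device {
	struct drm_device drm;		/* embedded, see devm_drm_dev_alloc() */
};

DEFINE_DRM_GEM_FOPS(example_fops);

static const struct drm_driver example_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
	.fops	= &example_fops,
	.name	= "example",
	.desc	= "illustrative example driver",
	.major	= 1,
	.minor	= 0,
};

static int example_probe(struct device *parent)
{
	struct example_device *edev;

	edev = devm_drm_dev_alloc(parent, &example_driver,
				  struct example_device, drm);
	if (IS_ERR(edev))
		return PTR_ERR(edev);

	/* set up modesetting, memory management, etc. here, and only then
	 * expose the device to userspace */
	return drm_dev_register(&edev->drm, 0);
}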
// SPDX-License-Identifier: GPL-2.0-or-later /* * connector.c * * 2004+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net> * All rights reserved. */ #include <linux/compiler.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/list.h> #include <linux/skbuff.h> #include <net/netlink.h> #include <linux/moduleparam.h> #include <linux/connector.h> #include <linux/slab.h> #include <linux/mutex.h> #include <linux/proc_fs.h> #include <linux/spinlock.h> #include <net/sock.h> MODULE_LICENSE("GPL"); MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>"); MODULE_DESCRIPTION("Generic userspace <-> kernelspace connector."); MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_CONNECTOR); static struct cn_dev cdev; static int cn_already_initialized; /* * Sends mult (multiple) cn_msg at a time. * * msg->seq and msg->ack are used to determine message genealogy. * When someone sends a message it puts a locally unique sequence number * and a random acknowledge number there. The sequence number may be copied * into nlmsghdr->nlmsg_seq too. * * The sequence number is incremented with each message to be sent. * * If we expect a reply to our message then the sequence number in the * received message MUST be the same as in the original message, and the * acknowledge number MUST be the same + 1. * * If we receive a message and its sequence number is not equal to the * one we are expecting then it is a new message. * * If we receive a message and its sequence number is the same as one * we are expecting but its acknowledgement number is not equal to * the acknowledgement number in the original message + 1, then it is * a new message. * * If msg->len != len, then additional cn_msg messages are expected following * the first msg. * * The message is sent to the portid if given, to the group if given, to both * if both are given, or, if both are zero, the group is looked up and the * message is sent there.
*/ int cn_netlink_send_mult(struct cn_msg *msg, u16 len, u32 portid, u32 __group, gfp_t gfp_mask, netlink_filter_fn filter, void *filter_data) { struct cn_callback_entry *__cbq; unsigned int size; struct sk_buff *skb; struct nlmsghdr *nlh; struct cn_msg *data; struct cn_dev *dev = &cdev; u32 group = 0; int found = 0; if (portid || __group) { group = __group; } else { spin_lock_bh(&dev->cbdev->queue_lock); list_for_each_entry(__cbq, &dev->cbdev->queue_list, callback_entry) { if (cn_cb_equal(&__cbq->id.id, &msg->id)) { found = 1; group = __cbq->group; break; } } spin_unlock_bh(&dev->cbdev->queue_lock); if (!found) return -ENODEV; } if (!portid && !netlink_has_listeners(dev->nls, group)) return -ESRCH; size = sizeof(*msg) + len; skb = nlmsg_new(size, gfp_mask); if (!skb) return -ENOMEM; nlh = nlmsg_put(skb, 0, msg->seq, NLMSG_DONE, size, 0); if (!nlh) { kfree_skb(skb); return -EMSGSIZE; } data = nlmsg_data(nlh); memcpy(data, msg, size); NETLINK_CB(skb).dst_group = group; if (group) return netlink_broadcast_filtered(dev->nls, skb, portid, group, gfp_mask, filter, (void *)filter_data); return netlink_unicast(dev->nls, skb, portid, !gfpflags_allow_blocking(gfp_mask)); } EXPORT_SYMBOL_GPL(cn_netlink_send_mult); /* same as cn_netlink_send_mult except msg->len is used for len */ int cn_netlink_send(struct cn_msg *msg, u32 portid, u32 __group, gfp_t gfp_mask) { return cn_netlink_send_mult(msg, msg->len, portid, __group, gfp_mask, NULL, NULL); } EXPORT_SYMBOL_GPL(cn_netlink_send); /* * Callback helper - queues work and setup destructor for given data. */ static int cn_call_callback(struct sk_buff *skb) { struct nlmsghdr *nlh; struct cn_callback_entry *i, *cbq = NULL; struct cn_dev *dev = &cdev; struct cn_msg *msg = nlmsg_data(nlmsg_hdr(skb)); struct netlink_skb_parms *nsp = &NETLINK_CB(skb); int err = -ENODEV; /* verify msg->len is within skb */ nlh = nlmsg_hdr(skb); if (nlh->nlmsg_len < NLMSG_HDRLEN + sizeof(struct cn_msg) + msg->len) return -EINVAL; spin_lock_bh(&dev->cbdev->queue_lock); list_for_each_entry(i, &dev->cbdev->queue_list, callback_entry) { if (cn_cb_equal(&i->id.id, &msg->id)) { refcount_inc(&i->refcnt); cbq = i; break; } } spin_unlock_bh(&dev->cbdev->queue_lock); if (cbq != NULL) { cbq->callback(msg, nsp); kfree_skb(skb); cn_queue_release_callback(cbq); err = 0; } return err; } /* * Allow non-root access for NETLINK_CONNECTOR family having CN_IDX_PROC * multicast group. */ static int cn_bind(struct net *net, int group) { unsigned long groups = (unsigned long) group; if (ns_capable(net->user_ns, CAP_NET_ADMIN)) return 0; if (test_bit(CN_IDX_PROC - 1, &groups)) return 0; return -EPERM; } static void cn_release(struct sock *sk, unsigned long *groups) { if (groups && test_bit(CN_IDX_PROC - 1, groups)) { kfree(sk->sk_user_data); sk->sk_user_data = NULL; } } /* * Main netlink receiving function. * * It checks skb, netlink header and msg sizes, and calls callback helper. */ static void cn_rx_skb(struct sk_buff *skb) { struct nlmsghdr *nlh; int len, err; if (skb->len >= NLMSG_HDRLEN) { nlh = nlmsg_hdr(skb); len = nlmsg_len(nlh); if (len < (int)sizeof(struct cn_msg) || skb->len < nlh->nlmsg_len || len > CONNECTOR_MAX_MSG_SIZE) return; err = cn_call_callback(skb_get(skb)); if (err < 0) kfree_skb(skb); } } /* * Callback add routing - adds callback with given ID and name. * If there is registered callback with the same ID it will not be added. * * May sleep. 
*/ int cn_add_callback(const struct cb_id *id, const char *name, void (*callback)(struct cn_msg *, struct netlink_skb_parms *)) { struct cn_dev *dev = &cdev; if (!cn_already_initialized) return -EAGAIN; return cn_queue_add_callback(dev->cbdev, name, id, callback); } EXPORT_SYMBOL_GPL(cn_add_callback); /* * Callback remove routing - removes callback * with given ID. * If there is no registered callback with given * ID nothing happens. * * May sleep while waiting for reference counter to become zero. */ void cn_del_callback(const struct cb_id *id) { struct cn_dev *dev = &cdev; cn_queue_del_callback(dev->cbdev, id); } EXPORT_SYMBOL_GPL(cn_del_callback); static int __maybe_unused cn_proc_show(struct seq_file *m, void *v) { struct cn_queue_dev *dev = cdev.cbdev; struct cn_callback_entry *cbq; seq_printf(m, "Name ID\n"); spin_lock_bh(&dev->queue_lock); list_for_each_entry(cbq, &dev->queue_list, callback_entry) { seq_printf(m, "%-15s %u:%u\n", cbq->id.name, cbq->id.id.idx, cbq->id.id.val); } spin_unlock_bh(&dev->queue_lock); return 0; } static int cn_init(void) { struct cn_dev *dev = &cdev; struct netlink_kernel_cfg cfg = { .groups = CN_NETLINK_USERS + 0xf, .input = cn_rx_skb, .flags = NL_CFG_F_NONROOT_RECV, .bind = cn_bind, .release = cn_release, }; dev->nls = netlink_kernel_create(&init_net, NETLINK_CONNECTOR, &cfg); if (!dev->nls) return -EIO; dev->cbdev = cn_queue_alloc_dev("cqueue", dev->nls); if (!dev->cbdev) { netlink_kernel_release(dev->nls); return -EINVAL; } cn_already_initialized = 1; proc_create_single("connector", S_IRUGO, init_net.proc_net, cn_proc_show); return 0; } static void cn_fini(void) { struct cn_dev *dev = &cdev; cn_already_initialized = 0; remove_proc_entry("connector", init_net.proc_net); cn_queue_free_dev(dev->cbdev); netlink_kernel_release(dev->nls); } subsys_initcall(cn_init); module_exit(cn_fini); |
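A minimal sketch, not from connector.c itself, of how a kernel-side user registers a callback and answers with the seq/ack convention described above; the id, the "example" name and the callback are invented:

#include <linux/connector.h>
#include <linux/slab.h>

static const struct cb_id example_id = { .idx = CN_NETLINK_USERS + 3, .val = 0x1 };

static void example_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
{
	struct cn_msg *reply;

	reply = kzalloc(sizeof(*reply), GFP_KERNEL);
	if (!reply)
		return;

	reply->id = example_id;
	reply->seq = msg->seq;		/* same sequence number ...         */
	reply->ack = msg->ack + 1;	/* ... and ack + 1 marks it a reply */

	cn_netlink_send(reply, 0, example_id.idx, GFP_KERNEL);
	kfree(reply);
}

static int example_register(void)
{
	/* fails with -EAGAIN until the connector core has initialized */
	return cn_add_callback(&example_id, "example", example_cn_callback);
}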
25 25 25 25 25 25 25 25 13 13 13 7 6 17 17 17 14 6 6 17 3 14 14 14 14 14 3 3 14 13 13 13 7 11 13 11 11 5 5 5 17 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 | // SPDX-License-Identifier: GPL-2.0-only /* * dma-fence-util: misc functions for dma_fence objects * * Copyright (C) 2022 Advanced Micro Devices, Inc. * Authors: * Christian König <christian.koenig@amd.com> */ #include <linux/dma-fence.h> #include <linux/dma-fence-array.h> #include <linux/dma-fence-chain.h> #include <linux/dma-fence-unwrap.h> #include <linux/slab.h> #include <linux/sort.h> /* Internal helper to start new array iteration, don't use directly */ static struct dma_fence * __dma_fence_unwrap_array(struct dma_fence_unwrap *cursor) { cursor->array = dma_fence_chain_contained(cursor->chain); cursor->index = 0; return dma_fence_array_first(cursor->array); } /** * dma_fence_unwrap_first - return the first fence from fence containers * @head: the entrypoint into the containers * @cursor: current position inside the containers * * Unwraps potential dma_fence_chain/dma_fence_array containers and return the * first fence. */ struct dma_fence *dma_fence_unwrap_first(struct dma_fence *head, struct dma_fence_unwrap *cursor) { cursor->chain = dma_fence_get(head); return __dma_fence_unwrap_array(cursor); } EXPORT_SYMBOL_GPL(dma_fence_unwrap_first); /** * dma_fence_unwrap_next - return the next fence from a fence containers * @cursor: current position inside the containers * * Continue unwrapping the dma_fence_chain/dma_fence_array containers and return * the next fence from them. */ struct dma_fence *dma_fence_unwrap_next(struct dma_fence_unwrap *cursor) { struct dma_fence *tmp; ++cursor->index; tmp = dma_fence_array_next(cursor->array, cursor->index); if (tmp) return tmp; cursor->chain = dma_fence_chain_walk(cursor->chain); return __dma_fence_unwrap_array(cursor); } EXPORT_SYMBOL_GPL(dma_fence_unwrap_next); static int fence_cmp(const void *_a, const void *_b) { struct dma_fence *a = *(struct dma_fence **)_a; struct dma_fence *b = *(struct dma_fence **)_b; if (a->context < b->context) return -1; else if (a->context > b->context) return 1; if (dma_fence_is_later(b, a)) return 1; else if (dma_fence_is_later(a, b)) return -1; return 0; } /* Implementation for the dma_fence_merge() marco, don't use directly */ struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences, struct dma_fence **fences, struct dma_fence_unwrap *iter) { struct dma_fence_array *result; struct dma_fence *tmp, **array; ktime_t timestamp; int i, j, count; count = 0; timestamp = ns_to_ktime(0); for (i = 0; i < num_fences; ++i) { dma_fence_unwrap_for_each(tmp, &iter[i], fences[i]) { if (!dma_fence_is_signaled(tmp)) { ++count; } else { ktime_t t = dma_fence_timestamp(tmp); if (ktime_after(t, timestamp)) timestamp = t; } } } /* * If we couldn't find a pending fence just return a private signaled * fence with the timestamp of the last signaled one. 
*/ if (count == 0) return dma_fence_allocate_private_stub(timestamp); array = kmalloc_array(count, sizeof(*array), GFP_KERNEL); if (!array) return NULL; count = 0; for (i = 0; i < num_fences; ++i) { dma_fence_unwrap_for_each(tmp, &iter[i], fences[i]) { if (!dma_fence_is_signaled(tmp)) { array[count++] = dma_fence_get(tmp); } else { ktime_t t = dma_fence_timestamp(tmp); if (ktime_after(t, timestamp)) timestamp = t; } } } if (count == 0 || count == 1) goto return_fastpath; sort(array, count, sizeof(*array), fence_cmp, NULL); /* * Only keep the most recent fence for each context. */ j = 0; for (i = 1; i < count; i++) { if (array[i]->context == array[j]->context) dma_fence_put(array[i]); else array[++j] = array[i]; } count = ++j; if (count > 1) { result = dma_fence_array_create(count, array, dma_fence_context_alloc(1), 1, false); if (!result) { for (i = 0; i < count; i++) dma_fence_put(array[i]); tmp = NULL; goto return_tmp; } return &result->base; } return_fastpath: if (count == 0) tmp = dma_fence_allocate_private_stub(timestamp); else tmp = array[0]; return_tmp: kfree(array); return tmp; } EXPORT_SYMBOL_GPL(__dma_fence_unwrap_merge); |
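A small sketch of how a caller might walk a container with the cursor API above; example_count_pending() is an invented helper that mirrors the counting pass in __dma_fence_unwrap_merge():

#include <linux/dma-fence.h>
#include <linux/dma-fence-unwrap.h>

static unsigned int example_count_pending(struct dma_fence *composite)
{
	struct dma_fence_unwrap cursor;
	struct dma_fence *f;
	unsigned int pending = 0;

	/* visits every leaf fence hidden in chains/arrays, or just
	 * 'composite' itself if it is a plain fence */
	dma_fence_unwrap_for_each(f, &cursor, composite) {
		if (!dma_fence_is_signaled(f))
			pending++;
	}

	return pending;
}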
4 4 4 4 4 4 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 | // SPDX-License-Identifier: GPL-2.0-only /* * Generic GPIO card-detect helper * * Copyright (C) 2011, Guennadi Liakhovetski <g.liakhovetski@gmx.de> */ #include <linux/err.h> #include <linux/gpio/consumer.h> #include <linux/interrupt.h> #include <linux/jiffies.h> #include <linux/mmc/host.h> #include <linux/mmc/slot-gpio.h> #include <linux/module.h> #include <linux/slab.h> #include "slot-gpio.h" struct mmc_gpio { struct gpio_desc *ro_gpio; struct gpio_desc *cd_gpio; irq_handler_t cd_gpio_isr; char *ro_label; char *cd_label; u32 cd_debounce_delay_ms; int cd_irq; }; static irqreturn_t mmc_gpio_cd_irqt(int irq, void *dev_id) { /* Schedule a card detection after a debounce timeout */ struct mmc_host *host = dev_id; struct mmc_gpio *ctx = host->slot.handler_priv; host->trigger_card_event = true; mmc_detect_change(host, msecs_to_jiffies(ctx->cd_debounce_delay_ms)); return IRQ_HANDLED; } int mmc_gpio_alloc(struct mmc_host *host) { const char *devname = dev_name(host->parent); struct mmc_gpio *ctx; ctx = devm_kzalloc(host->parent, sizeof(*ctx), GFP_KERNEL); if (!ctx) return -ENOMEM; ctx->cd_debounce_delay_ms = 200; ctx->cd_label = devm_kasprintf(host->parent, GFP_KERNEL, "%s cd", devname); if (!ctx->cd_label) return -ENOMEM; ctx->ro_label = devm_kasprintf(host->parent, GFP_KERNEL, "%s ro", devname); if (!ctx->ro_label) return -ENOMEM; ctx->cd_irq = -EINVAL; host->slot.handler_priv = ctx; host->slot.cd_irq = -EINVAL; return 0; } void mmc_gpio_set_cd_irq(struct mmc_host *host, int irq) { struct mmc_gpio *ctx = host->slot.handler_priv; if (!ctx || irq < 0) return; ctx->cd_irq = irq; } EXPORT_SYMBOL(mmc_gpio_set_cd_irq); int mmc_gpio_get_ro(struct mmc_host *host) { struct mmc_gpio *ctx = host->slot.handler_priv; int cansleep; if (!ctx || !ctx->ro_gpio) return -ENOSYS; cansleep = gpiod_cansleep(ctx->ro_gpio); return cansleep ? gpiod_get_value_cansleep(ctx->ro_gpio) : gpiod_get_value(ctx->ro_gpio); } EXPORT_SYMBOL(mmc_gpio_get_ro); int mmc_gpio_get_cd(struct mmc_host *host) { struct mmc_gpio *ctx = host->slot.handler_priv; int cansleep; if (!ctx || !ctx->cd_gpio) return -ENOSYS; cansleep = gpiod_cansleep(ctx->cd_gpio); return cansleep ? 
gpiod_get_value_cansleep(ctx->cd_gpio) : gpiod_get_value(ctx->cd_gpio); } EXPORT_SYMBOL(mmc_gpio_get_cd); void mmc_gpiod_request_cd_irq(struct mmc_host *host) { struct mmc_gpio *ctx = host->slot.handler_priv; int irq = -EINVAL; int ret; if (host->slot.cd_irq >= 0 || !ctx || !ctx->cd_gpio) return; /* * Do not use IRQ if the platform prefers to poll, e.g., because that * IRQ number is already used by another unit and cannot be shared. */ if (ctx->cd_irq >= 0) irq = ctx->cd_irq; else if (!(host->caps & MMC_CAP_NEEDS_POLL)) irq = gpiod_to_irq(ctx->cd_gpio); if (irq >= 0) { if (!ctx->cd_gpio_isr) ctx->cd_gpio_isr = mmc_gpio_cd_irqt; ret = devm_request_threaded_irq(host->parent, irq, NULL, ctx->cd_gpio_isr, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT, ctx->cd_label, host); if (ret < 0) irq = ret; } host->slot.cd_irq = irq; if (irq < 0) host->caps |= MMC_CAP_NEEDS_POLL; } EXPORT_SYMBOL(mmc_gpiod_request_cd_irq); int mmc_gpio_set_cd_wake(struct mmc_host *host, bool on) { int ret = 0; if (!(host->caps & MMC_CAP_CD_WAKE) || host->slot.cd_irq < 0 || on == host->slot.cd_wake_enabled) return 0; if (on) { ret = enable_irq_wake(host->slot.cd_irq); host->slot.cd_wake_enabled = !ret; } else { disable_irq_wake(host->slot.cd_irq); host->slot.cd_wake_enabled = false; } return ret; } EXPORT_SYMBOL(mmc_gpio_set_cd_wake); /* Register an alternate interrupt service routine for * the card-detect GPIO. */ void mmc_gpio_set_cd_isr(struct mmc_host *host, irq_handler_t isr) { struct mmc_gpio *ctx = host->slot.handler_priv; WARN_ON(ctx->cd_gpio_isr); ctx->cd_gpio_isr = isr; } EXPORT_SYMBOL(mmc_gpio_set_cd_isr); /** * mmc_gpiod_request_cd - request a gpio descriptor for card-detection * @host: mmc host * @con_id: function within the GPIO consumer * @idx: index of the GPIO to obtain in the consumer * @override_active_level: ignore %GPIO_ACTIVE_LOW flag * @debounce: debounce time in microseconds * * Note that this must be called prior to mmc_add_host() * otherwise the caller must also call mmc_gpiod_request_cd_irq(). * * Returns zero on success, else an error. */ int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id, unsigned int idx, bool override_active_level, unsigned int debounce) { struct mmc_gpio *ctx = host->slot.handler_priv; struct gpio_desc *desc; int ret; desc = devm_gpiod_get_index(host->parent, con_id, idx, GPIOD_IN); if (IS_ERR(desc)) return PTR_ERR(desc); /* Update default label if no con_id provided */ if (!con_id) gpiod_set_consumer_name(desc, ctx->cd_label); if (debounce) { ret = gpiod_set_debounce(desc, debounce); if (ret < 0) ctx->cd_debounce_delay_ms = debounce / 1000; } /* override forces default (active-low) polarity ... */ if (override_active_level && !gpiod_is_active_low(desc)) gpiod_toggle_active_low(desc); /* ... or active-high */ if (host->caps2 & MMC_CAP2_CD_ACTIVE_HIGH) gpiod_toggle_active_low(desc); ctx->cd_gpio = desc; return 0; } EXPORT_SYMBOL(mmc_gpiod_request_cd); /** * mmc_gpiod_set_cd_config - set config for card-detection GPIO * @host: mmc host * @config: Generic pinconf config (from pinconf_to_config_packed()) * * This can be used by mmc host drivers to fixup a card-detection GPIO's config * (e.g. set PIN_CONFIG_BIAS_PULL_UP) after acquiring the GPIO descriptor * through mmc_gpiod_request_cd(). * * Returns: * 0 on success, or a negative errno value on error. 
*/ int mmc_gpiod_set_cd_config(struct mmc_host *host, unsigned long config) { struct mmc_gpio *ctx = host->slot.handler_priv; return gpiod_set_config(ctx->cd_gpio, config); } EXPORT_SYMBOL(mmc_gpiod_set_cd_config); bool mmc_can_gpio_cd(struct mmc_host *host) { struct mmc_gpio *ctx = host->slot.handler_priv; return ctx->cd_gpio ? true : false; } EXPORT_SYMBOL(mmc_can_gpio_cd); /** * mmc_gpiod_request_ro - request a gpio descriptor for write protection * @host: mmc host * @con_id: function within the GPIO consumer * @idx: index of the GPIO to obtain in the consumer * @debounce: debounce time in microseconds * * Returns zero on success, else an error. */ int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id, unsigned int idx, unsigned int debounce) { struct mmc_gpio *ctx = host->slot.handler_priv; struct gpio_desc *desc; int ret; desc = devm_gpiod_get_index(host->parent, con_id, idx, GPIOD_IN); if (IS_ERR(desc)) return PTR_ERR(desc); /* Update default label if no con_id provided */ if (!con_id) gpiod_set_consumer_name(desc, ctx->ro_label); if (debounce) { ret = gpiod_set_debounce(desc, debounce); if (ret < 0) return ret; } if (host->caps2 & MMC_CAP2_RO_ACTIVE_HIGH) gpiod_toggle_active_low(desc); ctx->ro_gpio = desc; return 0; } EXPORT_SYMBOL(mmc_gpiod_request_ro); bool mmc_can_gpio_ro(struct mmc_host *host) { struct mmc_gpio *ctx = host->slot.handler_priv; return ctx->ro_gpio ? true : false; } EXPORT_SYMBOL(mmc_can_gpio_ro); |
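A hedged sketch of how a host controller probe might use the slot-gpio helpers above; example_host_probe and the "cd"/"wp" con_ids are placeholders, and real drivers take the names and debounce values from their bindings:

#include <linux/platform_device.h>
#include <linux/mmc/host.h>
#include <linux/mmc/slot-gpio.h>

static int example_host_probe(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	int ret;

	mmc = mmc_alloc_host(0, &pdev->dev);
	if (!mmc)
		return -ENOMEM;

	/* optional card-detect GPIO, 5 ms debounce, polarity from gpiolib */
	ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 5000);
	if (ret && ret != -ENOENT)
		goto free_host;

	/* optional write-protect GPIO */
	ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0);
	if (ret && ret != -ENOENT)
		goto free_host;

	/* requesting CD before mmc_add_host() lets the core set up the IRQ,
	 * so no explicit mmc_gpiod_request_cd_irq() call is needed here */
	ret = mmc_add_host(mmc);
	if (ret)
		goto free_host;

	return 0;

free_host:
	mmc_free_host(mmc);
	return ret;
}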
// SPDX-License-Identifier: GPL-2.0-only /* Industrial I/O event handling * * Copyright (c) 2008 Jonathan Cameron * * Based on elements of hwmon and input subsystems. */ #include <linux/anon_inodes.h> #include <linux/device.h> #include <linux/fs.h> #include <linux/kernel.h> #include <linux/kfifo.h> #include <linux/module.h> #include <linux/poll.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/uaccess.h> #include <linux/wait.h> #include <linux/iio/iio.h> #include <linux/iio/iio-opaque.h> #include "iio_core.h" #include <linux/iio/sysfs.h> #include <linux/iio/events.h> /** * struct iio_event_interface - chrdev interface for an event line * @wait: wait queue to allow blocking reads of events * @det_events: list of detected events * @dev_attr_list: list of event interface sysfs attributes * @flags: file operations related flags including the busy flag.
* @group: event interface sysfs attribute group * @read_lock: lock to protect kfifo read operations * @ioctl_handler: handler for event ioctl() calls */ struct iio_event_interface { wait_queue_head_t wait; DECLARE_KFIFO(det_events, struct iio_event_data, 16); struct list_head dev_attr_list; unsigned long flags; struct attribute_group group; struct mutex read_lock; struct iio_ioctl_handler ioctl_handler; }; bool iio_event_enabled(const struct iio_event_interface *ev_int) { return !!test_bit(IIO_BUSY_BIT_POS, &ev_int->flags); } /** * iio_push_event() - try to add event to the list for userspace reading * @indio_dev: IIO device structure * @ev_code: What event * @timestamp: When the event occurred * * Note: The caller must make sure that this function is not running * concurrently for the same indio_dev more than once. * * This function may be safely used as soon as a valid reference to iio_dev has * been obtained via iio_device_alloc(), but any events that are submitted * before iio_device_register() has successfully completed will be silently * discarded. **/ int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp) { struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); struct iio_event_interface *ev_int = iio_dev_opaque->event_interface; struct iio_event_data ev; int copied; if (!ev_int) return 0; /* Does anyone care? */ if (iio_event_enabled(ev_int)) { ev.id = ev_code; ev.timestamp = timestamp; copied = kfifo_put(&ev_int->det_events, ev); if (copied != 0) wake_up_poll(&ev_int->wait, EPOLLIN); } return 0; } EXPORT_SYMBOL(iio_push_event); /** * iio_event_poll() - poll the event queue to find out if it has data * @filep: File structure pointer to identify the device * @wait: Poll table pointer to add the wait queue on * * Return: (EPOLLIN | EPOLLRDNORM) if data is available for reading * or a negative error code on failure */ static __poll_t iio_event_poll(struct file *filep, struct poll_table_struct *wait) { struct iio_dev *indio_dev = filep->private_data; struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); struct iio_event_interface *ev_int = iio_dev_opaque->event_interface; __poll_t events = 0; if (!indio_dev->info) return events; poll_wait(filep, &ev_int->wait, wait); if (!kfifo_is_empty(&ev_int->det_events)) events = EPOLLIN | EPOLLRDNORM; return events; } static ssize_t iio_event_chrdev_read(struct file *filep, char __user *buf, size_t count, loff_t *f_ps) { struct iio_dev *indio_dev = filep->private_data; struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); struct iio_event_interface *ev_int = iio_dev_opaque->event_interface; unsigned int copied; int ret; if (!indio_dev->info) return -ENODEV; if (count < sizeof(struct iio_event_data)) return -EINVAL; do { if (kfifo_is_empty(&ev_int->det_events)) { if (filep->f_flags & O_NONBLOCK) return -EAGAIN; ret = wait_event_interruptible(ev_int->wait, !kfifo_is_empty(&ev_int->det_events) || indio_dev->info == NULL); if (ret) return ret; if (indio_dev->info == NULL) return -ENODEV; } if (mutex_lock_interruptible(&ev_int->read_lock)) return -ERESTARTSYS; ret = kfifo_to_user(&ev_int->det_events, buf, count, &copied); mutex_unlock(&ev_int->read_lock); if (ret) return ret; /* * If we couldn't read anything from the fifo (a different * thread might have been faster) we either return -EAGAIN if * the file descriptor is non-blocking, otherwise we go back to * sleep and wait for more data to arrive. 
*/ if (copied == 0 && (filep->f_flags & O_NONBLOCK)) return -EAGAIN; } while (copied == 0); return copied; } static int iio_event_chrdev_release(struct inode *inode, struct file *filep) { struct iio_dev *indio_dev = filep->private_data; struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); struct iio_event_interface *ev_int = iio_dev_opaque->event_interface; clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags); iio_device_put(indio_dev); return 0; } static const struct file_operations iio_event_chrdev_fileops = { .read = iio_event_chrdev_read, .poll = iio_event_poll, .release = iio_event_chrdev_release, .owner = THIS_MODULE, .llseek = noop_llseek, }; static int iio_event_getfd(struct iio_dev *indio_dev) { struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); struct iio_event_interface *ev_int = iio_dev_opaque->event_interface; int fd; if (ev_int == NULL) return -ENODEV; fd = mutex_lock_interruptible(&iio_dev_opaque->mlock); if (fd) return fd; if (test_and_set_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) { fd = -EBUSY; goto unlock; } iio_device_get(indio_dev); fd = anon_inode_getfd("iio:event", &iio_event_chrdev_fileops, indio_dev, O_RDONLY | O_CLOEXEC); if (fd < 0) { clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags); iio_device_put(indio_dev); } else { kfifo_reset_out(&ev_int->det_events); } unlock: mutex_unlock(&iio_dev_opaque->mlock); return fd; } static const char * const iio_ev_type_text[] = { [IIO_EV_TYPE_THRESH] = "thresh", [IIO_EV_TYPE_MAG] = "mag", [IIO_EV_TYPE_ROC] = "roc", [IIO_EV_TYPE_THRESH_ADAPTIVE] = "thresh_adaptive", [IIO_EV_TYPE_MAG_ADAPTIVE] = "mag_adaptive", [IIO_EV_TYPE_CHANGE] = "change", [IIO_EV_TYPE_MAG_REFERENCED] = "mag_referenced", [IIO_EV_TYPE_GESTURE] = "gesture", }; static const char * const iio_ev_dir_text[] = { [IIO_EV_DIR_EITHER] = "either", [IIO_EV_DIR_RISING] = "rising", [IIO_EV_DIR_FALLING] = "falling", [IIO_EV_DIR_SINGLETAP] = "singletap", [IIO_EV_DIR_DOUBLETAP] = "doubletap", }; static const char * const iio_ev_info_text[] = { [IIO_EV_INFO_ENABLE] = "en", [IIO_EV_INFO_VALUE] = "value", [IIO_EV_INFO_HYSTERESIS] = "hysteresis", [IIO_EV_INFO_PERIOD] = "period", [IIO_EV_INFO_HIGH_PASS_FILTER_3DB] = "high_pass_filter_3db", [IIO_EV_INFO_LOW_PASS_FILTER_3DB] = "low_pass_filter_3db", [IIO_EV_INFO_TIMEOUT] = "timeout", [IIO_EV_INFO_RESET_TIMEOUT] = "reset_timeout", [IIO_EV_INFO_TAP2_MIN_DELAY] = "tap2_min_delay", [IIO_EV_INFO_RUNNING_PERIOD] = "runningperiod", [IIO_EV_INFO_RUNNING_COUNT] = "runningcount", }; static enum iio_event_direction iio_ev_attr_dir(struct iio_dev_attr *attr) { return attr->c->event_spec[attr->address & 0xffff].dir; } static enum iio_event_type iio_ev_attr_type(struct iio_dev_attr *attr) { return attr->c->event_spec[attr->address & 0xffff].type; } static enum iio_event_info iio_ev_attr_info(struct iio_dev_attr *attr) { return (attr->address >> 16) & 0xffff; } static ssize_t iio_ev_state_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); int ret; bool val; ret = kstrtobool(buf, &val); if (ret < 0) return ret; if (!indio_dev->info->write_event_config) return -EINVAL; ret = indio_dev->info->write_event_config(indio_dev, this_attr->c, iio_ev_attr_type(this_attr), iio_ev_attr_dir(this_attr), val); return (ret < 0) ? 
ret : len; } static ssize_t iio_ev_state_show(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); int val; if (!indio_dev->info->read_event_config) return -EINVAL; val = indio_dev->info->read_event_config(indio_dev, this_attr->c, iio_ev_attr_type(this_attr), iio_ev_attr_dir(this_attr)); if (val < 0) return val; else return sysfs_emit(buf, "%d\n", val); } static ssize_t iio_ev_value_show(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); int val, val2, val_arr[2]; int ret; if (!indio_dev->info->read_event_value) return -EINVAL; ret = indio_dev->info->read_event_value(indio_dev, this_attr->c, iio_ev_attr_type(this_attr), iio_ev_attr_dir(this_attr), iio_ev_attr_info(this_attr), &val, &val2); if (ret < 0) return ret; val_arr[0] = val; val_arr[1] = val2; return iio_format_value(buf, ret, 2, val_arr); } static ssize_t iio_ev_value_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); int val, val2; int ret; if (!indio_dev->info->write_event_value) return -EINVAL; ret = iio_str_to_fixpoint(buf, 100000, &val, &val2); if (ret) return ret; ret = indio_dev->info->write_event_value(indio_dev, this_attr->c, iio_ev_attr_type(this_attr), iio_ev_attr_dir(this_attr), iio_ev_attr_info(this_attr), val, val2); if (ret < 0) return ret; return len; } static ssize_t iio_ev_label_show(struct device *dev, struct device_attribute *attr, char *buf) { struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); if (indio_dev->info->read_event_label) return indio_dev->info->read_event_label(indio_dev, this_attr->c, iio_ev_attr_type(this_attr), iio_ev_attr_dir(this_attr), buf); return -EINVAL; } static int iio_device_add_event(struct iio_dev *indio_dev, const struct iio_chan_spec *chan, unsigned int spec_index, enum iio_event_type type, enum iio_event_direction dir, enum iio_shared_by shared_by, const unsigned long *mask) { struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); ssize_t (*show)(struct device *dev, struct device_attribute *attr, char *buf); ssize_t (*store)(struct device *dev, struct device_attribute *attr, const char *buf, size_t len); unsigned int attrcount = 0; unsigned int i; char *postfix; int ret; for_each_set_bit(i, mask, sizeof(*mask)*8) { if (i >= ARRAY_SIZE(iio_ev_info_text)) return -EINVAL; if (dir != IIO_EV_DIR_NONE) postfix = kasprintf(GFP_KERNEL, "%s_%s_%s", iio_ev_type_text[type], iio_ev_dir_text[dir], iio_ev_info_text[i]); else postfix = kasprintf(GFP_KERNEL, "%s_%s", iio_ev_type_text[type], iio_ev_info_text[i]); if (postfix == NULL) return -ENOMEM; if (i == IIO_EV_INFO_ENABLE) { show = iio_ev_state_show; store = iio_ev_state_store; } else { show = iio_ev_value_show; store = iio_ev_value_store; } ret = __iio_add_chan_devattr(postfix, chan, show, store, (i << 16) | spec_index, shared_by, &indio_dev->dev, NULL, &iio_dev_opaque->event_interface->dev_attr_list); kfree(postfix); if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE)) continue; if (ret) return ret; attrcount++; } return attrcount; } static int iio_device_add_event_label(struct iio_dev *indio_dev, const struct iio_chan_spec *chan, unsigned int spec_index, enum iio_event_type type, enum iio_event_direction 
dir) { struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); char *postfix; int ret; if (!indio_dev->info->read_event_label) return 0; if (dir != IIO_EV_DIR_NONE) postfix = kasprintf(GFP_KERNEL, "%s_%s_label", iio_ev_type_text[type], iio_ev_dir_text[dir]); else postfix = kasprintf(GFP_KERNEL, "%s_label", iio_ev_type_text[type]); if (postfix == NULL) return -ENOMEM; ret = __iio_add_chan_devattr(postfix, chan, &iio_ev_label_show, NULL, spec_index, IIO_SEPARATE, &indio_dev->dev, NULL, &iio_dev_opaque->event_interface->dev_attr_list); kfree(postfix); if (ret < 0) return ret; return 1; } static int iio_device_add_event_sysfs(struct iio_dev *indio_dev, struct iio_chan_spec const *chan) { int ret = 0, i, attrcount = 0; enum iio_event_direction dir; enum iio_event_type type; for (i = 0; i < chan->num_event_specs; i++) { type = chan->event_spec[i].type; dir = chan->event_spec[i].dir; ret = iio_device_add_event(indio_dev, chan, i, type, dir, IIO_SEPARATE, &chan->event_spec[i].mask_separate); if (ret < 0) return ret; attrcount += ret; ret = iio_device_add_event(indio_dev, chan, i, type, dir, IIO_SHARED_BY_TYPE, &chan->event_spec[i].mask_shared_by_type); if (ret < 0) return ret; attrcount += ret; ret = iio_device_add_event(indio_dev, chan, i, type, dir, IIO_SHARED_BY_DIR, &chan->event_spec[i].mask_shared_by_dir); if (ret < 0) return ret; attrcount += ret; ret = iio_device_add_event(indio_dev, chan, i, type, dir, IIO_SHARED_BY_ALL, &chan->event_spec[i].mask_shared_by_all); if (ret < 0) return ret; attrcount += ret; ret = iio_device_add_event_label(indio_dev, chan, i, type, dir); if (ret < 0) return ret; attrcount += ret; } ret = attrcount; return ret; } static inline int __iio_add_event_config_attrs(struct iio_dev *indio_dev) { int j, ret, attrcount = 0; /* Dynamically created from the channels array */ for (j = 0; j < indio_dev->num_channels; j++) { ret = iio_device_add_event_sysfs(indio_dev, &indio_dev->channels[j]); if (ret < 0) return ret; attrcount += ret; } return attrcount; } static bool iio_check_for_dynamic_events(struct iio_dev *indio_dev) { int j; for (j = 0; j < indio_dev->num_channels; j++) { if (indio_dev->channels[j].num_event_specs != 0) return true; } return false; } static void iio_setup_ev_int(struct iio_event_interface *ev_int) { INIT_KFIFO(ev_int->det_events); init_waitqueue_head(&ev_int->wait); mutex_init(&ev_int->read_lock); } static long iio_event_ioctl(struct iio_dev *indio_dev, struct file *filp, unsigned int cmd, unsigned long arg) { int __user *ip = (int __user *)arg; int fd; if (cmd == IIO_GET_EVENT_FD_IOCTL) { fd = iio_event_getfd(indio_dev); if (fd < 0) return fd; if (copy_to_user(ip, &fd, sizeof(fd))) return -EFAULT; return 0; } return IIO_IOCTL_UNHANDLED; } static const char *iio_event_group_name = "events"; int iio_device_register_eventset(struct iio_dev *indio_dev) { struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); struct iio_event_interface *ev_int; struct iio_dev_attr *p; int ret = 0, attrcount_orig = 0, attrcount, attrn; struct attribute **attr; if (!(indio_dev->info->event_attrs || iio_check_for_dynamic_events(indio_dev))) return 0; ev_int = kzalloc(sizeof(*ev_int), GFP_KERNEL); if (!ev_int) return -ENOMEM; iio_dev_opaque->event_interface = ev_int; INIT_LIST_HEAD(&ev_int->dev_attr_list); iio_setup_ev_int(ev_int); if (indio_dev->info->event_attrs != NULL) { attr = indio_dev->info->event_attrs->attrs; while (*attr++ != NULL) attrcount_orig++; } attrcount = attrcount_orig; if (indio_dev->channels) { ret = 
static const char *iio_event_group_name = "events";
int iio_device_register_eventset(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_event_interface *ev_int;
	struct iio_dev_attr *p;
	int ret = 0, attrcount_orig = 0, attrcount, attrn;
	struct attribute **attr;

	if (!(indio_dev->info->event_attrs ||
	      iio_check_for_dynamic_events(indio_dev)))
		return 0;

	ev_int = kzalloc(sizeof(*ev_int), GFP_KERNEL);
	if (!ev_int)
		return -ENOMEM;

	iio_dev_opaque->event_interface = ev_int;

	INIT_LIST_HEAD(&ev_int->dev_attr_list);

	iio_setup_ev_int(ev_int);
	if (indio_dev->info->event_attrs != NULL) {
		attr = indio_dev->info->event_attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	if (indio_dev->channels) {
		ret = __iio_add_event_config_attrs(indio_dev);
		if (ret < 0)
			goto error_free_setup_event_lines;
		attrcount += ret;
	}

	ev_int->group.name = iio_event_group_name;
	ev_int->group.attrs = kcalloc(attrcount + 1,
				      sizeof(ev_int->group.attrs[0]),
				      GFP_KERNEL);
	if (ev_int->group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_free_setup_event_lines;
	}
	if (indio_dev->info->event_attrs)
		memcpy(ev_int->group.attrs,
		       indio_dev->info->event_attrs->attrs,
		       sizeof(ev_int->group.attrs[0]) * attrcount_orig);
	attrn = attrcount_orig;
	/* Add all elements from the list. */
	list_for_each_entry(p, &ev_int->dev_attr_list, l)
		ev_int->group.attrs[attrn++] = &p->dev_attr.attr;

	ret = iio_device_register_sysfs_group(indio_dev, &ev_int->group);
	if (ret)
		goto error_free_group_attrs;

	ev_int->ioctl_handler.ioctl = iio_event_ioctl;
	iio_device_ioctl_handler_register(&iio_dev_opaque->indio_dev,
					  &ev_int->ioctl_handler);

	return 0;

error_free_group_attrs:
	kfree(ev_int->group.attrs);
error_free_setup_event_lines:
	iio_free_chan_devattr_list(&ev_int->dev_attr_list);
	kfree(ev_int);
	iio_dev_opaque->event_interface = NULL;
	return ret;
}

/**
 * iio_device_wakeup_eventset - Wakes up the event waitqueue
 * @indio_dev: The IIO device
 *
 * Wakes up the event waitqueue used for poll() and blocking read().
 * Should usually be called when the device is unregistered.
 */
void iio_device_wakeup_eventset(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	if (iio_dev_opaque->event_interface == NULL)
		return;
	wake_up(&iio_dev_opaque->event_interface->wait);
}

void iio_device_unregister_eventset(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_event_interface *ev_int = iio_dev_opaque->event_interface;

	if (ev_int == NULL)
		return;

	iio_device_ioctl_handler_unregister(&ev_int->ioctl_handler);
	iio_free_chan_devattr_list(&ev_int->dev_attr_list);
	kfree(ev_int->group.attrs);
	kfree(ev_int);
	iio_dev_opaque->event_interface = NULL;
}
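To round out the picture, a hedged sketch of the struct iio_info callbacks that the sysfs handlers earlier in this file invoke. The parameter lists mirror the call sites above (read_event_config, read_event_value, write_event_value); the driver state is hypothetical, and a real driver would also implement write_event_config so the *_en attributes can be written.

#include <linux/iio/iio.h>
#include <linux/iio/events.h>

/* Hypothetical driver state, purely for illustration. */
struct example_state {
	bool thresh_en;
	int thresh_val;
};

static int example_read_event_config(struct iio_dev *indio_dev,
				     const struct iio_chan_spec *chan,
				     enum iio_event_type type,
				     enum iio_event_direction dir)
{
	struct example_state *st = iio_priv(indio_dev);

	return st->thresh_en;	/* 0 = disabled, 1 = enabled */
}

static int example_read_event_value(struct iio_dev *indio_dev,
				    const struct iio_chan_spec *chan,
				    enum iio_event_type type,
				    enum iio_event_direction dir,
				    enum iio_event_info info,
				    int *val, int *val2)
{
	struct example_state *st = iio_priv(indio_dev);

	*val = st->thresh_val;
	return IIO_VAL_INT;	/* tells iio_format_value() how to print it */
}

static int example_write_event_value(struct iio_dev *indio_dev,
				     const struct iio_chan_spec *chan,
				     enum iio_event_type type,
				     enum iio_event_direction dir,
				     enum iio_event_info info,
				     int val, int val2)
{
	struct example_state *st = iio_priv(indio_dev);

	st->thresh_val = val;
	return 0;
}

static const struct iio_info example_info = {
	.read_event_config = example_read_event_config,
	.read_event_value = example_read_event_value,
	.write_event_value = example_write_event_value,
};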
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net-sysfs.c - network device class and attributes
 *
 * Copyright (c) 2003 Stephen Hemminger <shemminger@osdl.org>
 */

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/sched/isolation.h>
#include <linux/nsproxy.h>
#include <net/sock.h>
#include <net/net_namespace.h>
#include <linux/rtnetlink.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/jiffies.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/cpu.h>
#include <net/netdev_rx_queue.h>
#include <net/rps.h>

#include "dev.h"
#include "net-sysfs.h"

#ifdef CONFIG_SYSFS
static const char fmt_hex[] = "%#x\n";
static const char fmt_dec[] = "%d\n";
static const char fmt_uint[] = "%u\n";
static const char fmt_ulong[] = "%lu\n";
static const char fmt_u64[] = "%llu\n";

/* Caller holds RTNL, netdev->lock or RCU */
static inline int dev_isalive(const struct net_device *dev)
{
	return READ_ONCE(dev->reg_state) <= NETREG_REGISTERED;
}

/* use same locking rules as GIF* ioctl's */
static ssize_t netdev_show(const struct device *dev,
			   struct device_attribute *attr, char *buf,
			   ssize_t (*format)(const struct net_device *, char *))
{
	struct net_device *ndev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	rcu_read_lock();
	if (dev_isalive(ndev))
		ret = (*format)(ndev, buf);
	rcu_read_unlock();

	return ret;
}

/* generate a show function for simple field */
#define NETDEVICE_SHOW(field, format_string)				\
static ssize_t format_##field(const struct net_device *dev, char *buf)	\
{									\
	return sysfs_emit(buf, format_string, READ_ONCE(dev->field));	\
}									\
static ssize_t field##_show(struct device *dev,				\
			    struct device_attribute *attr, char *buf)	\
{									\
	return netdev_show(dev, attr, buf, format_##field);		\
}									\

#define NETDEVICE_SHOW_RO(field, format_string)				\
NETDEVICE_SHOW(field, format_string);					\
static DEVICE_ATTR_RO(field)

#define NETDEVICE_SHOW_RW(field, format_string)				\
NETDEVICE_SHOW(field, format_string);					\
static DEVICE_ATTR_RW(field)

/* use same locking and permission rules as SIF* ioctl's */
static ssize_t netdev_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t len,
			    int (*set)(struct net_device *, unsigned long))
{
	struct net_device *netdev = to_net_dev(dev);
	struct net *net = dev_net(netdev);
	unsigned long new;
	int ret;

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	ret = kstrtoul(buf, 0, &new);
	if (ret)
		goto err;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		ret = (*set)(netdev, new);
		if (ret == 0)
			ret = len;
	}
	rtnl_unlock();
 err:
	return ret;
}

/* Same as netdev_store() but takes netdev_lock() instead of rtnl_lock() */
static ssize_t
netdev_lock_store(struct device *dev, struct device_attribute *attr,
		  const char *buf, size_t len,
		  int (*set)(struct net_device *, unsigned long))
{
	struct net_device *netdev = to_net_dev(dev);
	struct net *net = dev_net(netdev);
	unsigned long new;
	int ret;

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	ret = kstrtoul(buf, 0, &new);
	if (ret)
		return ret;

	netdev_lock(netdev);

	if (dev_isalive(netdev)) {
		ret = (*set)(netdev, new);
		if (ret == 0)
			ret = len;
	}
	netdev_unlock(netdev);

	return ret;
}

NETDEVICE_SHOW_RO(dev_id, fmt_hex);
NETDEVICE_SHOW_RO(dev_port, fmt_dec);
NETDEVICE_SHOW_RO(addr_assign_type, fmt_dec);
NETDEVICE_SHOW_RO(addr_len, fmt_dec);
NETDEVICE_SHOW_RO(ifindex, fmt_dec);
NETDEVICE_SHOW_RO(type, fmt_dec);
NETDEVICE_SHOW_RO(link_mode, fmt_dec);

static ssize_t iflink_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct net_device *ndev = to_net_dev(dev);

	return sysfs_emit(buf, fmt_dec, dev_get_iflink(ndev));
}
static DEVICE_ATTR_RO(iflink);
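As a reading aid, this is roughly what one of the NETDEVICE_SHOW_RO() invocations above (dev_id with fmt_hex) expands to after preprocessing. It is shown for illustration only and is not additional code in the file.

/* Illustration: expansion of NETDEVICE_SHOW_RO(dev_id, fmt_hex). */
static ssize_t format_dev_id(const struct net_device *dev, char *buf)
{
	return sysfs_emit(buf, fmt_hex, READ_ONCE(dev->dev_id));
}
static ssize_t dev_id_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	return netdev_show(dev, attr, buf, format_dev_id);
}
/* Declares struct device_attribute dev_attr_dev_id with .show = dev_id_show. */
static DEVICE_ATTR_RO(dev_id);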
static ssize_t format_name_assign_type(const struct net_device *dev, char *buf)
{
	return sysfs_emit(buf, fmt_dec, READ_ONCE(dev->name_assign_type));
}

static ssize_t name_assign_type_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct net_device *ndev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	if (READ_ONCE(ndev->name_assign_type) != NET_NAME_UNKNOWN)
		ret = netdev_show(dev, attr, buf, format_name_assign_type);

	return ret;
}
static DEVICE_ATTR_RO(name_assign_type);

/* use same locking rules as GIFHWADDR ioctl's (dev_get_mac_address()) */
static ssize_t address_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct net_device *ndev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	down_read(&dev_addr_sem);

	rcu_read_lock();
	if (dev_isalive(ndev))
		ret = sysfs_format_mac(buf, ndev->dev_addr, ndev->addr_len);
	rcu_read_unlock();

	up_read(&dev_addr_sem);
	return ret;
}
static DEVICE_ATTR_RO(address);

static ssize_t broadcast_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct net_device *ndev = to_net_dev(dev);
	int ret = -EINVAL;

	rcu_read_lock();
	if (dev_isalive(ndev))
		ret = sysfs_format_mac(buf, ndev->broadcast, ndev->addr_len);
	rcu_read_unlock();
	return ret;
}
static DEVICE_ATTR_RO(broadcast);

static int change_carrier(struct net_device *dev, unsigned long new_carrier)
{
	if (!netif_running(dev))
		return -EINVAL;
	return dev_change_carrier(dev, (bool)new_carrier);
}

static ssize_t carrier_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t len)
{
	struct net_device *netdev = to_net_dev(dev);

	/* The check is also done in change_carrier; this helps returning early
	 * without hitting the trylock/restart in netdev_store.
	 */
	if (!netdev->netdev_ops->ndo_change_carrier)
		return -EOPNOTSUPP;

	return netdev_store(dev, attr, buf, len, change_carrier);
}

static ssize_t carrier_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	int ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (netif_running(netdev)) {
		/* Synchronize carrier state with link watch,
		 * see also rtnl_getlink().
		 */
		linkwatch_sync_dev(netdev);

		ret = sysfs_emit(buf, fmt_dec, !!netif_carrier_ok(netdev));
	}
	rtnl_unlock();

	return ret;
}
static DEVICE_ATTR_RW(carrier);

static ssize_t speed_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	int ret = -EINVAL;

	/* The check is also done in __ethtool_get_link_ksettings; this helps
	 * returning early without hitting the trylock/restart below.
	 */
	if (!netdev->ethtool_ops->get_link_ksettings)
		return ret;

	if (!rtnl_trylock())
		return restart_syscall();

	if (netif_running(netdev)) {
		struct ethtool_link_ksettings cmd;

		if (!__ethtool_get_link_ksettings(netdev, &cmd))
			ret = sysfs_emit(buf, fmt_dec, cmd.base.speed);
	}
	rtnl_unlock();
	return ret;
}
static DEVICE_ATTR_RO(speed);
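The show/store handlers in this file back plain files under /sys/class/net/<interface>/ (address, carrier, speed above; operstate, mtu and others below). A small userspace sketch, assuming an interface named eth0, shows how they are typically consumed.

/* Illustrative userspace sketch: read a couple of net-sysfs attributes. */
#include <stdio.h>
#include <string.h>

static int read_attr(const char *iface, const char *attr, char *out, int len)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/class/net/%s/%s", iface, attr);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (!fgets(out, len, f)) {
		fclose(f);
		return -1;
	}
	fclose(f);
	out[strcspn(out, "\n")] = '\0';	/* strip the trailing newline */
	return 0;
}

int main(void)
{
	char val[64];

	if (read_attr("eth0", "operstate", val, sizeof(val)) == 0)
		printf("operstate: %s\n", val);	/* one of the operstates[] strings */
	if (read_attr("eth0", "mtu", val, sizeof(val)) == 0)
		printf("mtu: %s\n", val);
	/* Writing, e.g. a new MTU, goes through netdev_store() and requires
	 * CAP_NET_ADMIN:  echo 1400 > /sys/class/net/eth0/mtu
	 */
	return 0;
}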
static ssize_t duplex_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	int ret = -EINVAL;

	/* The check is also done in __ethtool_get_link_ksettings; this helps
	 * returning early without hitting the trylock/restart below.
	 */
	if (!netdev->ethtool_ops->get_link_ksettings)
		return ret;

	if (!rtnl_trylock())
		return restart_syscall();

	if (netif_running(netdev)) {
		struct ethtool_link_ksettings cmd;

		if (!__ethtool_get_link_ksettings(netdev, &cmd)) {
			const char *duplex;

			switch (cmd.base.duplex) {
			case DUPLEX_HALF:
				duplex = "half";
				break;
			case DUPLEX_FULL:
				duplex = "full";
				break;
			default:
				duplex = "unknown";
				break;
			}

			ret = sysfs_emit(buf, "%s\n", duplex);
		}
	}
	rtnl_unlock();
	return ret;
}
static DEVICE_ATTR_RO(duplex);

static ssize_t testing_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	if (netif_running(netdev))
		return sysfs_emit(buf, fmt_dec, !!netif_testing(netdev));

	return -EINVAL;
}
static DEVICE_ATTR_RO(testing);

static ssize_t dormant_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	if (netif_running(netdev))
		return sysfs_emit(buf, fmt_dec, !!netif_dormant(netdev));

	return -EINVAL;
}
static DEVICE_ATTR_RO(dormant);

static const char *const operstates[] = {
	"unknown",
	"notpresent", /* currently unused */
	"down",
	"lowerlayerdown",
	"testing",
	"dormant",
	"up"
};

static ssize_t operstate_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	const struct net_device *netdev = to_net_dev(dev);
	unsigned char operstate;

	operstate = READ_ONCE(netdev->operstate);
	if (!netif_running(netdev))
		operstate = IF_OPER_DOWN;

	if (operstate >= ARRAY_SIZE(operstates))
		return -EINVAL; /* should not happen */

	return sysfs_emit(buf, "%s\n", operstates[operstate]);
}
static DEVICE_ATTR_RO(operstate);

static ssize_t carrier_changes_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	return sysfs_emit(buf, fmt_dec,
			  atomic_read(&netdev->carrier_up_count) +
			  atomic_read(&netdev->carrier_down_count));
}
static DEVICE_ATTR_RO(carrier_changes);

static ssize_t carrier_up_count_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	return sysfs_emit(buf, fmt_dec, atomic_read(&netdev->carrier_up_count));
}
static DEVICE_ATTR_RO(carrier_up_count);

static ssize_t carrier_down_count_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	return sysfs_emit(buf, fmt_dec, atomic_read(&netdev->carrier_down_count));
}
static DEVICE_ATTR_RO(carrier_down_count);

/* read-write attributes */

static int change_mtu(struct net_device *dev, unsigned long new_mtu)
{
	return dev_set_mtu(dev, (int)new_mtu);
}

static ssize_t mtu_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_mtu);
}
NETDEVICE_SHOW_RW(mtu, fmt_dec);

static int change_flags(struct net_device *dev, unsigned long new_flags)
{
	return dev_change_flags(dev, (unsigned int)new_flags, NULL);
}

static ssize_t flags_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_flags);
}
NETDEVICE_SHOW_RW(flags, fmt_hex);

static ssize_t tx_queue_len_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t len)
{
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	return netdev_store(dev, attr, buf, len, dev_change_tx_queue_len);
}
NETDEVICE_SHOW_RW(tx_queue_len, fmt_dec);

static int change_gro_flush_timeout(struct net_device *dev, unsigned long val)
{
	netdev_set_gro_flush_timeout(dev, val);
	return 0;
}
static ssize_t gro_flush_timeout_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t len)
{
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	return netdev_lock_store(dev, attr, buf, len, change_gro_flush_timeout);
}
NETDEVICE_SHOW_RW(gro_flush_timeout, fmt_ulong);

static int change_napi_defer_hard_irqs(struct net_device *dev, unsigned long val)
{
	if (val > S32_MAX)
		return -ERANGE;

	netdev_set_defer_hard_irqs(dev, (u32)val);
	return 0;
}

st