Total coverage: 375105 (19%) of 2039179
// SPDX-License-Identifier: GPL-2.0
#include <linux/swap_cgroup.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>

#include <linux/swapops.h> /* depends on mm.h include */

static DEFINE_MUTEX(swap_cgroup_mutex);

/* Pack two cgroup id (short) of two entries in one swap_cgroup (atomic_t) */
#define ID_PER_SC (sizeof(struct swap_cgroup) / sizeof(unsigned short))
#define ID_SHIFT (BITS_PER_TYPE(unsigned short))
#define ID_MASK (BIT(ID_SHIFT) - 1)
struct swap_cgroup {
        atomic_t ids;
};

struct swap_cgroup_ctrl {
        struct swap_cgroup *map;
};

static struct swap_cgroup_ctrl swap_cgroup_ctrl[MAX_SWAPFILES];

static unsigned short __swap_cgroup_id_lookup(struct swap_cgroup *map,
                                              pgoff_t offset)
{
        unsigned int shift = (offset % ID_PER_SC) * ID_SHIFT;
        unsigned int old_ids = atomic_read(&map[offset / ID_PER_SC].ids);

        BUILD_BUG_ON(!is_power_of_2(ID_PER_SC));
        BUILD_BUG_ON(sizeof(struct swap_cgroup) != sizeof(atomic_t));

        return (old_ids >> shift) & ID_MASK;
}

static unsigned short __swap_cgroup_id_xchg(struct swap_cgroup *map,
                                            pgoff_t offset,
                                            unsigned short new_id)
{
        unsigned short old_id;
        struct swap_cgroup *sc = &map[offset / ID_PER_SC];
        unsigned int shift = (offset % ID_PER_SC) * ID_SHIFT;
        unsigned int new_ids, old_ids = atomic_read(&sc->ids);

        do {
                old_id = (old_ids >> shift) & ID_MASK;
                new_ids = (old_ids & ~(ID_MASK << shift));
                new_ids |= ((unsigned int)new_id) << shift;
        } while (!atomic_try_cmpxchg(&sc->ids, &old_ids, new_ids));

        return old_id;
}

/**
 * swap_cgroup_record - record mem_cgroup for a set of swap entries.
 * These entries must belong to one single folio, and that folio
 * must be being charged for swap space (swap out), and these
 * entries must not have been charged.
 *
 * @folio: the folio that the swap entry belongs to
 * @id: mem_cgroup ID to be recorded
 * @ent: the first swap entry to be recorded
 */
void swap_cgroup_record(struct folio *folio, unsigned short id,
                        swp_entry_t ent)
{
        unsigned int nr_ents = folio_nr_pages(folio);
        struct swap_cgroup *map;
        pgoff_t offset, end;
        unsigned short old;

        offset = swp_offset(ent);
        end = offset + nr_ents;
        map = swap_cgroup_ctrl[swp_type(ent)].map;

        do {
                old = __swap_cgroup_id_xchg(map, offset, id);
                VM_BUG_ON(old);
        } while (++offset != end);
}

/**
 * swap_cgroup_clear - clear mem_cgroup for a set of swap entries.
 * These entries must be being uncharged from swap. They either
 * belong to one single folio in the swap cache (swap in for
 * cgroup v1), or no longer have any users (slot freeing).
 *
 * @ent: the first swap entry to be recorded into
 * @nr_ents: number of swap entries to be recorded
 *
 * Returns the existing old value.
*/ unsigned short swap_cgroup_clear(swp_entry_t ent, unsigned int nr_ents) { pgoff_t offset, end; struct swap_cgroup *map; unsigned short old, iter = 0; offset = swp_offset(ent); end = offset + nr_ents; map = swap_cgroup_ctrl[swp_type(ent)].map; do { old = __swap_cgroup_id_xchg(map, offset, 0); if (!iter) iter = old; VM_BUG_ON(iter != old); } while (++offset != end); return old; } /** * lookup_swap_cgroup_id - lookup mem_cgroup id tied to swap entry * @ent: swap entry to be looked up. * * Returns ID of mem_cgroup at success. 0 at failure. (0 is invalid ID) */ unsigned short lookup_swap_cgroup_id(swp_entry_t ent) { struct swap_cgroup_ctrl *ctrl; if (mem_cgroup_disabled()) return 0; ctrl = &swap_cgroup_ctrl[swp_type(ent)]; return __swap_cgroup_id_lookup(ctrl->map, swp_offset(ent)); } int swap_cgroup_swapon(int type, unsigned long max_pages) { struct swap_cgroup *map; struct swap_cgroup_ctrl *ctrl; if (mem_cgroup_disabled()) return 0; BUILD_BUG_ON(sizeof(unsigned short) * ID_PER_SC != sizeof(struct swap_cgroup)); map = vzalloc(DIV_ROUND_UP(max_pages, ID_PER_SC) * sizeof(struct swap_cgroup)); if (!map) goto nomem; ctrl = &swap_cgroup_ctrl[type]; mutex_lock(&swap_cgroup_mutex); ctrl->map = map; mutex_unlock(&swap_cgroup_mutex); return 0; nomem: pr_info("couldn't allocate enough memory for swap_cgroup\n"); pr_info("swap_cgroup can be disabled by swapaccount=0 boot option\n"); return -ENOMEM; } void swap_cgroup_swapoff(int type) { struct swap_cgroup *map; struct swap_cgroup_ctrl *ctrl; if (mem_cgroup_disabled()) return; mutex_lock(&swap_cgroup_mutex); ctrl = &swap_cgroup_ctrl[type]; map = ctrl->map; ctrl->map = NULL; mutex_unlock(&swap_cgroup_mutex); vfree(map); }
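The map above packs ID_PER_SC 16-bit mem_cgroup IDs into each atomic_t word, and the lookup/exchange helpers only do shift-and-mask arithmetic on that word. A minimal userspace sketch of the same packing math (not kernel code: the atomic cmpxchg loop is replaced by a plain read-modify-write on an unsigned int, and the helper name is made up):

#include <assert.h>
#include <stdio.h>

#define ID_SHIFT  16                      /* bits in an unsigned short */
#define ID_PER_SC (32 / ID_SHIFT)         /* two ids per 32-bit word */
#define ID_MASK   ((1u << ID_SHIFT) - 1)

/* Emulates __swap_cgroup_id_xchg() on a plain word: returns the old id. */
static unsigned short id_xchg(unsigned int *word, unsigned long offset,
                              unsigned short new_id)
{
        unsigned int shift = (offset % ID_PER_SC) * ID_SHIFT;
        unsigned short old = (*word >> shift) & ID_MASK;

        *word = (*word & ~(ID_MASK << shift)) |
                ((unsigned int)new_id << shift);
        return old;
}

int main(void)
{
        unsigned int word = 0;

        assert(id_xchg(&word, 0, 0x1111) == 0);   /* slot 0 */
        assert(id_xchg(&word, 1, 0x2222) == 0);   /* slot 1, same word */
        assert(((word >> 16) & ID_MASK) == 0x2222);
        printf("packed word: 0x%08x\n", word);    /* prints 0x22221111 */
        return 0;
}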
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PCBC: Propagating Cipher Block Chaining mode
 *
 * Copyright (C) 2006 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * Derived from cbc.c
 * - Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <crypto/algapi.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>

static int crypto_pcbc_encrypt_segment(struct skcipher_request *req,
                                       struct skcipher_walk *walk,
                                       struct crypto_cipher *tfm)
{
        int bsize = crypto_cipher_blocksize(tfm);
        const u8 *src = walk->src.virt.addr;
        unsigned int nbytes = walk->nbytes;
        u8 *dst = walk->dst.virt.addr;
        u8 * const iv = walk->iv;

        do {
                crypto_xor(iv, src, bsize);
                crypto_cipher_encrypt_one(tfm, dst, iv);
                crypto_xor_cpy(iv, dst, src, bsize);

                src += bsize;
                dst += bsize;
        } while ((nbytes -= bsize) >= bsize);

        return nbytes;
}

static int crypto_pcbc_encrypt_inplace(struct skcipher_request *req,
                                       struct skcipher_walk *walk,
                                       struct crypto_cipher *tfm)
{
        int bsize = crypto_cipher_blocksize(tfm);
        unsigned int nbytes = walk->nbytes;
        u8 *dst = walk->dst.virt.addr;
        u8 * const iv = walk->iv;
        u8 tmpbuf[MAX_CIPHER_BLOCKSIZE];

        do {
                memcpy(tmpbuf, dst, bsize);
                crypto_xor(iv, dst, bsize);
                crypto_cipher_encrypt_one(tfm, dst, iv);
                crypto_xor_cpy(iv, tmpbuf, dst, bsize);

                dst += bsize;
        } while ((nbytes -= bsize) >= bsize);

        return nbytes;
}

static int crypto_pcbc_encrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);
        struct skcipher_walk walk;
        unsigned int nbytes;
        int err;

        err = skcipher_walk_virt(&walk, req, false);

        while (walk.nbytes) {
                if (walk.src.virt.addr == walk.dst.virt.addr)
                        nbytes = crypto_pcbc_encrypt_inplace(req, &walk, cipher);
                else
                        nbytes = crypto_pcbc_encrypt_segment(req, &walk, cipher);
                err = skcipher_walk_done(&walk, nbytes);
        }

        return err;
}

static int crypto_pcbc_decrypt_segment(struct skcipher_request *req,
                                       struct skcipher_walk *walk,
                                       struct crypto_cipher *tfm)
{
        int bsize = crypto_cipher_blocksize(tfm);
        const u8 *src = walk->src.virt.addr;
        unsigned int nbytes = walk->nbytes;
        u8 *dst = walk->dst.virt.addr;
        u8 * const iv = walk->iv;

        do {
                crypto_cipher_decrypt_one(tfm, dst, src);
                crypto_xor(dst, iv, bsize);
                crypto_xor_cpy(iv, dst, src, bsize);

                src += bsize;
                dst += bsize;
        } while ((nbytes -= bsize) >= bsize);

        return nbytes;
}

static int crypto_pcbc_decrypt_inplace(struct skcipher_request *req,
                                       struct skcipher_walk *walk,
                                       struct crypto_cipher *tfm)
{
        int bsize = crypto_cipher_blocksize(tfm);
        unsigned int nbytes = walk->nbytes;
        u8 *dst = walk->dst.virt.addr;
        u8 * const iv = walk->iv;
        u8 tmpbuf[MAX_CIPHER_BLOCKSIZE] __aligned(__alignof__(u32));

        do {
memcpy(tmpbuf, dst, bsize); crypto_cipher_decrypt_one(tfm, dst, dst); crypto_xor(dst, iv, bsize); crypto_xor_cpy(iv, dst, tmpbuf, bsize); dst += bsize; } while ((nbytes -= bsize) >= bsize); return nbytes; } static int crypto_pcbc_decrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct crypto_cipher *cipher = skcipher_cipher_simple(tfm); struct skcipher_walk walk; unsigned int nbytes; int err; err = skcipher_walk_virt(&walk, req, false); while (walk.nbytes) { if (walk.src.virt.addr == walk.dst.virt.addr) nbytes = crypto_pcbc_decrypt_inplace(req, &walk, cipher); else nbytes = crypto_pcbc_decrypt_segment(req, &walk, cipher); err = skcipher_walk_done(&walk, nbytes); } return err; } static int crypto_pcbc_create(struct crypto_template *tmpl, struct rtattr **tb) { struct skcipher_instance *inst; int err; inst = skcipher_alloc_instance_simple(tmpl, tb); if (IS_ERR(inst)) return PTR_ERR(inst); inst->alg.encrypt = crypto_pcbc_encrypt; inst->alg.decrypt = crypto_pcbc_decrypt; err = skcipher_register_instance(tmpl, inst); if (err) inst->free(inst); return err; } static struct crypto_template crypto_pcbc_tmpl = { .name = "pcbc", .create = crypto_pcbc_create, .module = THIS_MODULE, }; static int __init crypto_pcbc_module_init(void) { return crypto_register_template(&crypto_pcbc_tmpl); } static void __exit crypto_pcbc_module_exit(void) { crypto_unregister_template(&crypto_pcbc_tmpl); } module_init(crypto_pcbc_module_init); module_exit(crypto_pcbc_module_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("PCBC block cipher mode of operation"); MODULE_ALIAS_CRYPTO("pcbc"); MODULE_IMPORT_NS("CRYPTO_INTERNAL");
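PCBC differs from plain CBC in that both the plaintext and the ciphertext of a block are folded into the chaining value for the next block, which is exactly what the crypto_xor()/crypto_xor_cpy() pairs above implement. Below is a small userspace sketch of one encryption step, with a stand-in "cipher" (byte inversion) in place of crypto_cipher_encrypt_one(), purely to show the chaining; it is illustrative and not kernel code:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BSIZE 16

/* Placeholder one-block "cipher": inverts the bytes. Stands in for the
 * real block cipher so only the chaining is demonstrated. */
static void toy_encrypt(uint8_t *dst, const uint8_t *src)
{
        for (int i = 0; i < BSIZE; i++)
                dst[i] = ~src[i];
}

/* One PCBC step, mirroring crypto_pcbc_encrypt_segment():
 *   tmp = P ^ IV;  C = E(tmp);  IV' = P ^ C */
static void pcbc_encrypt_block(uint8_t *iv, const uint8_t *src, uint8_t *dst)
{
        uint8_t tmp[BSIZE];

        for (int i = 0; i < BSIZE; i++)
                tmp[i] = iv[i] ^ src[i];
        toy_encrypt(dst, tmp);
        for (int i = 0; i < BSIZE; i++)
                iv[i] = src[i] ^ dst[i];  /* plaintext and ciphertext both propagate */
}

int main(void)
{
        uint8_t iv[BSIZE] = { 0 }, pt[2][BSIZE], ct[2][BSIZE];

        memset(pt[0], 0xaa, BSIZE);
        memset(pt[1], 0xaa, BSIZE);        /* identical plaintext blocks... */
        pcbc_encrypt_block(iv, pt[0], ct[0]);
        pcbc_encrypt_block(iv, pt[1], ct[1]);
        printf("blocks differ: %s\n",      /* ...still encrypt differently */
               memcmp(ct[0], ct[1], BSIZE) ? "yes" : "no");
        return 0;
}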
// SPDX-License-Identifier: GPL-2.0-or-later
/* SCTP kernel implementation
 * (C) Copyright Red Hat Inc. 2017
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions manipulate sctp stream queue/scheduling.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
 */

#include <linux/list.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <net/sctp/stream_sched.h>

/* Priority handling
 * RFC DRAFT ndata section 3.4
 */

static void sctp_sched_prio_unsched_all(struct sctp_stream *stream);

static struct sctp_stream_priorities *sctp_sched_prio_head_get(struct sctp_stream_priorities *p)
{
        p->users++;

        return p;
}

static void sctp_sched_prio_head_put(struct sctp_stream_priorities *p)
{
        if (p && --p->users == 0)
                kfree(p);
}

static struct sctp_stream_priorities *sctp_sched_prio_new_head(
                        struct sctp_stream *stream, int prio, gfp_t gfp)
{
        struct sctp_stream_priorities *p;

        p = kmalloc(sizeof(*p), gfp);
        if (!p)
                return NULL;

        INIT_LIST_HEAD(&p->prio_sched);
        INIT_LIST_HEAD(&p->active);
        p->next = NULL;
        p->prio = prio;
        p->users = 1;

        return p;
}

static struct sctp_stream_priorities *sctp_sched_prio_get_head(
                        struct sctp_stream *stream, int prio, gfp_t gfp)
{
        struct sctp_stream_priorities *p;
        int i;

        /* Look into scheduled priorities first, as they are sorted and
         * we can find it fast IF it's scheduled.
         */
        list_for_each_entry(p, &stream->prio_list, prio_sched) {
                if (p->prio == prio)
                        return sctp_sched_prio_head_get(p);
                if (p->prio > prio)
                        break;
        }

        /* No luck. So we search on all streams now. */
        for (i = 0; i < stream->outcnt; i++) {
                if (!SCTP_SO(stream, i)->ext)
                        continue;

                p = SCTP_SO(stream, i)->ext->prio_head;
                if (!p)
                        /* Means all other streams won't be initialized
                         * as well.
                         */
                        break;
                if (p->prio == prio)
                        return sctp_sched_prio_head_get(p);
        }

        /* If not even there, allocate a new one.
*/ return sctp_sched_prio_new_head(stream, prio, gfp); } static void sctp_sched_prio_next_stream(struct sctp_stream_priorities *p) { struct list_head *pos; pos = p->next->prio_list.next; if (pos == &p->active) pos = pos->next; p->next = list_entry(pos, struct sctp_stream_out_ext, prio_list); } static bool sctp_sched_prio_unsched(struct sctp_stream_out_ext *soute) { bool scheduled = false; if (!list_empty(&soute->prio_list)) { struct sctp_stream_priorities *prio_head = soute->prio_head; /* Scheduled */ scheduled = true; if (prio_head->next == soute) /* Try to move to the next stream */ sctp_sched_prio_next_stream(prio_head); list_del_init(&soute->prio_list); /* Also unsched the priority if this was the last stream */ if (list_empty(&prio_head->active)) { list_del_init(&prio_head->prio_sched); /* If there is no stream left, clear next */ prio_head->next = NULL; } } return scheduled; } static void sctp_sched_prio_sched(struct sctp_stream *stream, struct sctp_stream_out_ext *soute) { struct sctp_stream_priorities *prio, *prio_head; prio_head = soute->prio_head; /* Nothing to do if already scheduled */ if (!list_empty(&soute->prio_list)) return; /* Schedule the stream. If there is a next, we schedule the new * one before it, so it's the last in round robin order. * If there isn't, we also have to schedule the priority. */ if (prio_head->next) { list_add(&soute->prio_list, prio_head->next->prio_list.prev); return; } list_add(&soute->prio_list, &prio_head->active); prio_head->next = soute; list_for_each_entry(prio, &stream->prio_list, prio_sched) { if (prio->prio > prio_head->prio) { list_add(&prio_head->prio_sched, prio->prio_sched.prev); return; } } list_add_tail(&prio_head->prio_sched, &stream->prio_list); } static int sctp_sched_prio_set(struct sctp_stream *stream, __u16 sid, __u16 prio, gfp_t gfp) { struct sctp_stream_out *sout = SCTP_SO(stream, sid); struct sctp_stream_out_ext *soute = sout->ext; struct sctp_stream_priorities *prio_head, *old; bool reschedule = false; old = soute->prio_head; if (old && old->prio == prio) return 0; prio_head = sctp_sched_prio_get_head(stream, prio, gfp); if (!prio_head) return -ENOMEM; reschedule = sctp_sched_prio_unsched(soute); soute->prio_head = prio_head; if (reschedule) sctp_sched_prio_sched(stream, soute); sctp_sched_prio_head_put(old); return 0; } static int sctp_sched_prio_get(struct sctp_stream *stream, __u16 sid, __u16 *value) { *value = SCTP_SO(stream, sid)->ext->prio_head->prio; return 0; } static int sctp_sched_prio_init(struct sctp_stream *stream) { INIT_LIST_HEAD(&stream->prio_list); return 0; } static int sctp_sched_prio_init_sid(struct sctp_stream *stream, __u16 sid, gfp_t gfp) { INIT_LIST_HEAD(&SCTP_SO(stream, sid)->ext->prio_list); return sctp_sched_prio_set(stream, sid, 0, gfp); } static void sctp_sched_prio_free_sid(struct sctp_stream *stream, __u16 sid) { sctp_sched_prio_head_put(SCTP_SO(stream, sid)->ext->prio_head); SCTP_SO(stream, sid)->ext->prio_head = NULL; } static void sctp_sched_prio_enqueue(struct sctp_outq *q, struct sctp_datamsg *msg) { struct sctp_stream *stream; struct sctp_chunk *ch; __u16 sid; ch = list_first_entry(&msg->chunks, struct sctp_chunk, frag_list); sid = sctp_chunk_stream_no(ch); stream = &q->asoc->stream; sctp_sched_prio_sched(stream, SCTP_SO(stream, sid)->ext); } static struct sctp_chunk *sctp_sched_prio_dequeue(struct sctp_outq *q) { struct sctp_stream *stream = &q->asoc->stream; struct sctp_stream_priorities *prio; struct sctp_stream_out_ext *soute; struct sctp_chunk *ch = NULL; /* Bail out quickly if queue 
is empty */ if (list_empty(&q->out_chunk_list)) goto out; /* Find which chunk is next. It's easy, it's either the current * one or the first chunk on the next active stream. */ if (stream->out_curr) { soute = stream->out_curr->ext; } else { prio = list_entry(stream->prio_list.next, struct sctp_stream_priorities, prio_sched); soute = prio->next; } ch = list_entry(soute->outq.next, struct sctp_chunk, stream_list); sctp_sched_dequeue_common(q, ch); out: return ch; } static void sctp_sched_prio_dequeue_done(struct sctp_outq *q, struct sctp_chunk *ch) { struct sctp_stream_priorities *prio; struct sctp_stream_out_ext *soute; __u16 sid; /* Last chunk on that msg, move to the next stream on * this priority. */ sid = sctp_chunk_stream_no(ch); soute = SCTP_SO(&q->asoc->stream, sid)->ext; prio = soute->prio_head; sctp_sched_prio_next_stream(prio); if (list_empty(&soute->outq)) sctp_sched_prio_unsched(soute); } static void sctp_sched_prio_sched_all(struct sctp_stream *stream) { struct sctp_association *asoc; struct sctp_stream_out *sout; struct sctp_chunk *ch; asoc = container_of(stream, struct sctp_association, stream); list_for_each_entry(ch, &asoc->outqueue.out_chunk_list, list) { __u16 sid; sid = sctp_chunk_stream_no(ch); sout = SCTP_SO(stream, sid); if (sout->ext) sctp_sched_prio_sched(stream, sout->ext); } } static void sctp_sched_prio_unsched_all(struct sctp_stream *stream) { struct sctp_stream_priorities *p, *tmp; struct sctp_stream_out_ext *soute, *souttmp; list_for_each_entry_safe(p, tmp, &stream->prio_list, prio_sched) list_for_each_entry_safe(soute, souttmp, &p->active, prio_list) sctp_sched_prio_unsched(soute); } static const struct sctp_sched_ops sctp_sched_prio = { .set = sctp_sched_prio_set, .get = sctp_sched_prio_get, .init = sctp_sched_prio_init, .init_sid = sctp_sched_prio_init_sid, .free_sid = sctp_sched_prio_free_sid, .enqueue = sctp_sched_prio_enqueue, .dequeue = sctp_sched_prio_dequeue, .dequeue_done = sctp_sched_prio_dequeue_done, .sched_all = sctp_sched_prio_sched_all, .unsched_all = sctp_sched_prio_unsched_all, }; void sctp_sched_ops_prio_init(void) { sctp_sched_ops_register(SCTP_SS_PRIO, &sctp_sched_prio); }
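From userspace, this scheduler is selected per association with the SCTP_STREAM_SCHEDULER socket option, and per-stream priorities are assigned with SCTP_STREAM_SCHEDULER_VALUE; lower values are served first because the priority heads are kept sorted in ascending order. A sketch, assuming the usual lksctp uapi definitions (struct sctp_assoc_value, struct sctp_stream_value, SCTP_SS_PRIO) are available via <netinet/sctp.h>:

#include <netinet/in.h>
#include <netinet/sctp.h>
#include <sys/socket.h>

/* Switch an SCTP socket to the priority scheduler implemented above and
 * give stream 1 an explicit priority value. Illustrative only. */
static int use_prio_sched(int fd)
{
        struct sctp_assoc_value sched = {
                .assoc_id = 0,                /* current/default association */
                .assoc_value = SCTP_SS_PRIO,
        };
        struct sctp_stream_value val = {
                .assoc_id = 0,
                .stream_id = 1,
                .stream_value = 10,           /* becomes sctp_stream_priorities->prio */
        };

        if (setsockopt(fd, IPPROTO_SCTP, SCTP_STREAM_SCHEDULER,
                       &sched, sizeof(sched)) < 0)
                return -1;
        return setsockopt(fd, IPPROTO_SCTP, SCTP_STREAM_SCHEDULER_VALUE,
                          &val, sizeof(val));
}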
#undef TRACE_SYSTEM
#define TRACE_SYSTEM rtc

#if !defined(_TRACE_RTC_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_RTC_H

#include <linux/rtc.h>
#include <linux/tracepoint.h>

DECLARE_EVENT_CLASS(rtc_time_alarm_class,
        TP_PROTO(time64_t secs, int err),
        TP_ARGS(secs, err),
        TP_STRUCT__entry(
                __field(time64_t, secs)
                __field(int, err)
        ),
        TP_fast_assign(
                __entry->secs = secs;
                __entry->err = err;
        ),
        TP_printk("UTC (%lld) (%d)",
                  __entry->secs, __entry->err
        )
);

DEFINE_EVENT(rtc_time_alarm_class, rtc_set_time,
        TP_PROTO(time64_t secs, int err),
        TP_ARGS(secs, err)
);

DEFINE_EVENT(rtc_time_alarm_class, rtc_read_time,
        TP_PROTO(time64_t secs, int err),
        TP_ARGS(secs, err)
);

DEFINE_EVENT(rtc_time_alarm_class, rtc_set_alarm,
        TP_PROTO(time64_t secs, int err),
        TP_ARGS(secs, err)
);

DEFINE_EVENT(rtc_time_alarm_class, rtc_read_alarm,
        TP_PROTO(time64_t secs, int err),
        TP_ARGS(secs, err)
);

TRACE_EVENT(rtc_irq_set_freq,
        TP_PROTO(int freq, int err),
        TP_ARGS(freq, err),
        TP_STRUCT__entry(
                __field(int, freq)
                __field(int, err)
        ),
        TP_fast_assign(
                __entry->freq = freq;
                __entry->err = err;
        ),
        TP_printk("set RTC periodic IRQ frequency:%u (%d)",
                  __entry->freq, __entry->err
        )
);

TRACE_EVENT(rtc_irq_set_state,
        TP_PROTO(int enabled, int err),
        TP_ARGS(enabled, err),
        TP_STRUCT__entry(
                __field(int, enabled)
                __field(int, err)
        ),
        TP_fast_assign(
                __entry->enabled = enabled;
                __entry->err = err;
        ),
        TP_printk("%s RTC 2^N Hz periodic IRQs (%d)",
                  __entry->enabled ? "enable" : "disable",
                  __entry->err
        )
);

TRACE_EVENT(rtc_alarm_irq_enable,
        TP_PROTO(unsigned int enabled, int err),
        TP_ARGS(enabled, err),
        TP_STRUCT__entry(
                __field(unsigned int, enabled)
                __field(int, err)
        ),
        TP_fast_assign(
                __entry->enabled = enabled;
                __entry->err = err;
        ),
        TP_printk("%s RTC alarm IRQ (%d)",
                  __entry->enabled ?
"enable" : "disable", __entry->err ) ); DECLARE_EVENT_CLASS(rtc_offset_class, TP_PROTO(long offset, int err), TP_ARGS(offset, err), TP_STRUCT__entry( __field(long, offset) __field(int, err) ), TP_fast_assign( __entry->offset = offset; __entry->err = err; ), TP_printk("RTC offset: %ld (%d)", __entry->offset, __entry->err ) ); DEFINE_EVENT(rtc_offset_class, rtc_set_offset, TP_PROTO(long offset, int err), TP_ARGS(offset, err) ); DEFINE_EVENT(rtc_offset_class, rtc_read_offset, TP_PROTO(long offset, int err), TP_ARGS(offset, err) ); DECLARE_EVENT_CLASS(rtc_timer_class, TP_PROTO(struct rtc_timer *timer), TP_ARGS(timer), TP_STRUCT__entry( __field(struct rtc_timer *, timer) __field(ktime_t, expires) __field(ktime_t, period) ), TP_fast_assign( __entry->timer = timer; __entry->expires = timer->node.expires; __entry->period = timer->period; ), TP_printk("RTC timer:(%p) expires:%lld period:%lld", __entry->timer, __entry->expires, __entry->period ) ); DEFINE_EVENT(rtc_timer_class, rtc_timer_enqueue, TP_PROTO(struct rtc_timer *timer), TP_ARGS(timer) ); DEFINE_EVENT(rtc_timer_class, rtc_timer_dequeue, TP_PROTO(struct rtc_timer *timer), TP_ARGS(timer) ); DEFINE_EVENT(rtc_timer_class, rtc_timer_fired, TP_PROTO(struct rtc_timer *timer), TP_ARGS(timer) ); #endif /* _TRACE_RTC_H */ /* This part must be outside protection */ #include <trace/define_trace.h>
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 *  Bluetooth virtual HCI driver
 *
 *  Copyright (C) 2000-2001  Qualcomm Incorporated
 *  Copyright (C) 2002-2003  Maxim Krasnyansky <maxk@qualcomm.com>
 *  Copyright (C) 2004-2006  Marcel Holtmann <marcel@holtmann.org>
 */

#include <linux/module.h>
#include <linux/unaligned.h>

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/poll.h>
#include <linux/skbuff.h>
#include <linux/miscdevice.h>
#include <linux/debugfs.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#define VERSION "1.5"

static bool amp;
struct vhci_data { struct hci_dev *hdev; wait_queue_head_t read_wait; struct sk_buff_head readq; struct mutex open_mutex; struct delayed_work open_timeout; struct work_struct suspend_work; bool suspended; bool wakeup; __u16 msft_opcode; bool aosp_capable; atomic_t initialized; }; static int vhci_open_dev(struct hci_dev *hdev) { return 0; } static int vhci_close_dev(struct hci_dev *hdev) { struct vhci_data *data = hci_get_drvdata(hdev); skb_queue_purge(&data->readq); return 0; } static int vhci_flush(struct hci_dev *hdev) { struct vhci_data *data = hci_get_drvdata(hdev); skb_queue_purge(&data->readq); return 0; } static int vhci_send_frame(struct hci_dev *hdev, struct sk_buff *skb) { struct vhci_data *data = hci_get_drvdata(hdev); memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1); skb_queue_tail(&data->readq, skb); if (atomic_read(&data->initialized)) wake_up_interruptible(&data->read_wait); return 0; } static int vhci_get_data_path_id(struct hci_dev *hdev, u8 *data_path_id) { *data_path_id = 0; return 0; } static int vhci_get_codec_config_data(struct hci_dev *hdev, __u8 type, struct bt_codec *codec, __u8 *vnd_len, __u8 **vnd_data) { if (type != ESCO_LINK) return -EINVAL; *vnd_len = 0; *vnd_data = NULL; return 0; } static bool vhci_wakeup(struct hci_dev *hdev) { struct vhci_data *data = hci_get_drvdata(hdev); return data->wakeup; } static ssize_t force_suspend_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct vhci_data *data = file->private_data; char buf[3]; buf[0] = data->suspended ? 'Y' : 'N'; buf[1] = '\n'; buf[2] = '\0'; return simple_read_from_buffer(user_buf, count, ppos, buf, 2); } static void vhci_suspend_work(struct work_struct *work) { struct vhci_data *data = container_of(work, struct vhci_data, suspend_work); if (data->suspended) hci_suspend_dev(data->hdev); else hci_resume_dev(data->hdev); } static ssize_t force_suspend_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct vhci_data *data = file->private_data; bool enable; int err; err = kstrtobool_from_user(user_buf, count, &enable); if (err) return err; if (data->suspended == enable) return -EALREADY; data->suspended = enable; schedule_work(&data->suspend_work); return count; } static const struct file_operations force_suspend_fops = { .open = simple_open, .read = force_suspend_read, .write = force_suspend_write, .llseek = default_llseek, }; static ssize_t force_wakeup_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct vhci_data *data = file->private_data; char buf[3]; buf[0] = data->wakeup ? 
'Y' : 'N'; buf[1] = '\n'; buf[2] = '\0'; return simple_read_from_buffer(user_buf, count, ppos, buf, 2); } static ssize_t force_wakeup_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct vhci_data *data = file->private_data; bool enable; int err; err = kstrtobool_from_user(user_buf, count, &enable); if (err) return err; if (data->wakeup == enable) return -EALREADY; data->wakeup = enable; return count; } static const struct file_operations force_wakeup_fops = { .open = simple_open, .read = force_wakeup_read, .write = force_wakeup_write, .llseek = default_llseek, }; static int msft_opcode_set(void *data, u64 val) { struct vhci_data *vhci = data; if (val > 0xffff || hci_opcode_ogf(val) != 0x3f) return -EINVAL; if (vhci->msft_opcode) return -EALREADY; vhci->msft_opcode = val; return 0; } static int msft_opcode_get(void *data, u64 *val) { struct vhci_data *vhci = data; *val = vhci->msft_opcode; return 0; } DEFINE_DEBUGFS_ATTRIBUTE(msft_opcode_fops, msft_opcode_get, msft_opcode_set, "%llu\n"); static ssize_t aosp_capable_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct vhci_data *vhci = file->private_data; char buf[3]; buf[0] = vhci->aosp_capable ? 'Y' : 'N'; buf[1] = '\n'; buf[2] = '\0'; return simple_read_from_buffer(user_buf, count, ppos, buf, 2); } static ssize_t aosp_capable_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct vhci_data *vhci = file->private_data; bool enable; int err; err = kstrtobool_from_user(user_buf, count, &enable); if (err) return err; if (!enable) return -EINVAL; if (vhci->aosp_capable) return -EALREADY; vhci->aosp_capable = enable; return count; } static const struct file_operations aosp_capable_fops = { .open = simple_open, .read = aosp_capable_read, .write = aosp_capable_write, .llseek = default_llseek, }; static int vhci_setup(struct hci_dev *hdev) { struct vhci_data *vhci = hci_get_drvdata(hdev); if (vhci->msft_opcode) hci_set_msft_opcode(hdev, vhci->msft_opcode); if (vhci->aosp_capable) hci_set_aosp_capable(hdev); return 0; } static void vhci_coredump(struct hci_dev *hdev) { /* No need to do anything */ } static void vhci_coredump_hdr(struct hci_dev *hdev, struct sk_buff *skb) { const char *buf; buf = "Controller Name: vhci_ctrl\n"; skb_put_data(skb, buf, strlen(buf)); buf = "Firmware Version: vhci_fw\n"; skb_put_data(skb, buf, strlen(buf)); buf = "Driver: vhci_drv\n"; skb_put_data(skb, buf, strlen(buf)); buf = "Vendor: vhci\n"; skb_put_data(skb, buf, strlen(buf)); } #define MAX_COREDUMP_LINE_LEN 40 struct devcoredump_test_data { enum devcoredump_state state; unsigned int timeout; char data[MAX_COREDUMP_LINE_LEN]; }; static inline void force_devcd_timeout(struct hci_dev *hdev, unsigned int timeout) { #ifdef CONFIG_DEV_COREDUMP hdev->dump.timeout = secs_to_jiffies(timeout); #endif } static ssize_t force_devcd_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct vhci_data *data = file->private_data; struct hci_dev *hdev = data->hdev; struct sk_buff *skb = NULL; struct devcoredump_test_data dump_data; size_t data_size; int ret; if (count < offsetof(struct devcoredump_test_data, data) || count > sizeof(dump_data)) return -EINVAL; if (copy_from_user(&dump_data, user_buf, count)) return -EFAULT; data_size = count - offsetof(struct devcoredump_test_data, data); skb = alloc_skb(data_size, GFP_ATOMIC); if (!skb) return -ENOMEM; skb_put_data(skb, &dump_data.data, data_size); hci_devcd_register(hdev, vhci_coredump, 
vhci_coredump_hdr, NULL); /* Force the devcoredump timeout */ if (dump_data.timeout) force_devcd_timeout(hdev, dump_data.timeout); ret = hci_devcd_init(hdev, skb->len); if (ret) { BT_ERR("Failed to generate devcoredump"); kfree_skb(skb); return ret; } hci_devcd_append(hdev, skb); switch (dump_data.state) { case HCI_DEVCOREDUMP_DONE: hci_devcd_complete(hdev); break; case HCI_DEVCOREDUMP_ABORT: hci_devcd_abort(hdev); break; case HCI_DEVCOREDUMP_TIMEOUT: /* Do nothing */ break; default: return -EINVAL; } return count; } static const struct file_operations force_devcoredump_fops = { .open = simple_open, .write = force_devcd_write, }; static void vhci_debugfs_init(struct vhci_data *data) { struct hci_dev *hdev = data->hdev; debugfs_create_file("force_suspend", 0644, hdev->debugfs, data, &force_suspend_fops); debugfs_create_file("force_wakeup", 0644, hdev->debugfs, data, &force_wakeup_fops); if (IS_ENABLED(CONFIG_BT_MSFTEXT)) debugfs_create_file("msft_opcode", 0644, hdev->debugfs, data, &msft_opcode_fops); if (IS_ENABLED(CONFIG_BT_AOSPEXT)) debugfs_create_file("aosp_capable", 0644, hdev->debugfs, data, &aosp_capable_fops); debugfs_create_file("force_devcoredump", 0644, hdev->debugfs, data, &force_devcoredump_fops); } static int __vhci_create_device(struct vhci_data *data, __u8 opcode) { struct hci_dev *hdev; struct sk_buff *skb; if (data->hdev) return -EBADFD; /* bits 2-5 are reserved (must be zero) */ if (opcode & 0x3c) return -EINVAL; skb = bt_skb_alloc(4, GFP_KERNEL); if (!skb) return -ENOMEM; hdev = hci_alloc_dev(); if (!hdev) { kfree_skb(skb); return -ENOMEM; } data->hdev = hdev; hdev->bus = HCI_VIRTUAL; hci_set_drvdata(hdev, data); hdev->open = vhci_open_dev; hdev->close = vhci_close_dev; hdev->flush = vhci_flush; hdev->send = vhci_send_frame; hdev->get_data_path_id = vhci_get_data_path_id; hdev->get_codec_config_data = vhci_get_codec_config_data; hdev->wakeup = vhci_wakeup; hdev->setup = vhci_setup; hci_set_quirk(hdev, HCI_QUIRK_NON_PERSISTENT_SETUP); hci_set_quirk(hdev, HCI_QUIRK_SYNC_FLOWCTL_SUPPORTED); /* bit 6 is for external configuration */ if (opcode & 0x40) hci_set_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG); /* bit 7 is for raw device */ if (opcode & 0x80) hci_set_quirk(hdev, HCI_QUIRK_RAW_DEVICE); if (hci_register_dev(hdev) < 0) { BT_ERR("Can't register HCI device"); hci_free_dev(hdev); data->hdev = NULL; kfree_skb(skb); return -EBUSY; } if (!IS_ERR_OR_NULL(hdev->debugfs)) vhci_debugfs_init(data); hci_skb_pkt_type(skb) = HCI_VENDOR_PKT; skb_put_u8(skb, 0xff); skb_put_u8(skb, opcode); put_unaligned_le16(hdev->id, skb_put(skb, 2)); skb_queue_head(&data->readq, skb); atomic_inc(&data->initialized); wake_up_interruptible(&data->read_wait); return 0; } static int vhci_create_device(struct vhci_data *data, __u8 opcode) { int err; mutex_lock(&data->open_mutex); err = __vhci_create_device(data, opcode); mutex_unlock(&data->open_mutex); return err; } static inline ssize_t vhci_get_user(struct vhci_data *data, struct iov_iter *from) { size_t len = iov_iter_count(from); struct sk_buff *skb; __u8 pkt_type, opcode; int ret; if (len < 2 || len > HCI_MAX_FRAME_SIZE) return -EINVAL; skb = bt_skb_alloc(len, GFP_KERNEL); if (!skb) return -ENOMEM; if (!copy_from_iter_full(skb_put(skb, len), len, from)) { kfree_skb(skb); return -EFAULT; } pkt_type = *((__u8 *) skb->data); skb_pull(skb, 1); switch (pkt_type) { case HCI_EVENT_PKT: case HCI_ACLDATA_PKT: case HCI_SCODATA_PKT: case HCI_ISODATA_PKT: if (!data->hdev) { kfree_skb(skb); return -ENODEV; } hci_skb_pkt_type(skb) = pkt_type; ret = 
hci_recv_frame(data->hdev, skb); break; case HCI_VENDOR_PKT: cancel_delayed_work_sync(&data->open_timeout); opcode = *((__u8 *) skb->data); skb_pull(skb, 1); if (skb->len > 0) { kfree_skb(skb); return -EINVAL; } kfree_skb(skb); ret = vhci_create_device(data, opcode); break; default: kfree_skb(skb); return -EINVAL; } return (ret < 0) ? ret : len; } static inline ssize_t vhci_put_user(struct vhci_data *data, struct sk_buff *skb, char __user *buf, int count) { char __user *ptr = buf; int len; len = min_t(unsigned int, skb->len, count); if (copy_to_user(ptr, skb->data, len)) return -EFAULT; if (!data->hdev) return len; data->hdev->stat.byte_tx += len; switch (hci_skb_pkt_type(skb)) { case HCI_COMMAND_PKT: data->hdev->stat.cmd_tx++; break; case HCI_ACLDATA_PKT: data->hdev->stat.acl_tx++; break; case HCI_SCODATA_PKT: data->hdev->stat.sco_tx++; break; } return len; } static ssize_t vhci_read(struct file *file, char __user *buf, size_t count, loff_t *pos) { struct vhci_data *data = file->private_data; struct sk_buff *skb; ssize_t ret = 0; while (count) { skb = skb_dequeue(&data->readq); if (skb) { ret = vhci_put_user(data, skb, buf, count); if (ret < 0) skb_queue_head(&data->readq, skb); else kfree_skb(skb); break; } if (file->f_flags & O_NONBLOCK) { ret = -EAGAIN; break; } ret = wait_event_interruptible(data->read_wait, !skb_queue_empty(&data->readq)); if (ret < 0) break; } return ret; } static ssize_t vhci_write(struct kiocb *iocb, struct iov_iter *from) { struct file *file = iocb->ki_filp; struct vhci_data *data = file->private_data; return vhci_get_user(data, from); } static __poll_t vhci_poll(struct file *file, poll_table *wait) { struct vhci_data *data = file->private_data; poll_wait(file, &data->read_wait, wait); if (!skb_queue_empty(&data->readq)) return EPOLLIN | EPOLLRDNORM; return EPOLLOUT | EPOLLWRNORM; } static void vhci_open_timeout(struct work_struct *work) { struct vhci_data *data = container_of(work, struct vhci_data, open_timeout.work); vhci_create_device(data, 0x00); } static int vhci_open(struct inode *inode, struct file *file) { struct vhci_data *data; data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; skb_queue_head_init(&data->readq); init_waitqueue_head(&data->read_wait); mutex_init(&data->open_mutex); INIT_DELAYED_WORK(&data->open_timeout, vhci_open_timeout); INIT_WORK(&data->suspend_work, vhci_suspend_work); file->private_data = data; nonseekable_open(inode, file); schedule_delayed_work(&data->open_timeout, secs_to_jiffies(1)); return 0; } static void vhci_debugfs_remove(struct hci_dev *hdev) { debugfs_lookup_and_remove("force_suspend", hdev->debugfs); debugfs_lookup_and_remove("force_wakeup", hdev->debugfs); if (IS_ENABLED(CONFIG_BT_MSFTEXT)) debugfs_lookup_and_remove("msft_opcode", hdev->debugfs); if (IS_ENABLED(CONFIG_BT_AOSPEXT)) debugfs_lookup_and_remove("aosp_capable", hdev->debugfs); debugfs_lookup_and_remove("force_devcoredump", hdev->debugfs); } static int vhci_release(struct inode *inode, struct file *file) { struct vhci_data *data = file->private_data; struct hci_dev *hdev; cancel_delayed_work_sync(&data->open_timeout); flush_work(&data->suspend_work); hdev = data->hdev; if (hdev) { if (!IS_ERR_OR_NULL(hdev->debugfs)) vhci_debugfs_remove(hdev); hci_unregister_dev(hdev); hci_free_dev(hdev); } skb_queue_purge(&data->readq); file->private_data = NULL; kfree(data); return 0; } static const struct file_operations vhci_fops = { .owner = THIS_MODULE, .read = vhci_read, .write_iter = vhci_write, .poll = vhci_poll, .open = vhci_open, .release = 
          vhci_release,
};

static struct miscdevice vhci_miscdev = {
        .name  = "vhci",
        .fops  = &vhci_fops,
        .minor = VHCI_MINOR,
};

module_misc_device(vhci_miscdev);

module_param(amp, bool, 0644);
MODULE_PARM_DESC(amp, "Create AMP controller device");

MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth virtual HCI driver ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS("devname:vhci");
MODULE_ALIAS_MISCDEV(VHCI_MINOR);
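A userspace sketch of how this device is typically driven: writing a vendor packet (type 0xff) followed by opcode 0x00 creates a primary controller, and the driver queues back a four-byte vendor packet carrying the new HCI index, as seen in __vhci_create_device(). The program below is illustrative, not part of the tree:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        uint8_t req[2] = { 0xff, 0x00 };  /* HCI_VENDOR_PKT + opcode 0x00 */
        uint8_t resp[4];                  /* [0xff, opcode, index_lo, index_hi] */
        int fd = open("/dev/vhci", O_RDWR);

        if (fd < 0 || write(fd, req, sizeof(req)) != sizeof(req))
                return 1;
        if (read(fd, resp, sizeof(resp)) != sizeof(resp))
                return 1;
        printf("created hci%u\n", resp[2] | (resp[3] << 8));
        /* keep fd open: closing it tears the controller down (vhci_release) */
        pause();
        return 0;
}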
// SPDX-License-Identifier: GPL-2.0-only
/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
 */

#include <linux/types.h>
#include <linux/ip.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <net/netns/generic.h>
#include <net/route.h>
#include <net/ip.h>

#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <net/netfilter/ipv4/nf_defrag_ipv4.h>
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <net/netfilter/nf_conntrack.h>
#endif
#include <net/netfilter/nf_conntrack_zones.h>

static DEFINE_MUTEX(defrag4_mutex);

static int nf_ct_ipv4_gather_frags(struct net *net, struct sk_buff *skb,
                                   u_int32_t user)
{
        int err;

        local_bh_disable();
        err = ip_defrag(net, skb, user);
        local_bh_enable();

        if (!err)
                skb->ignore_df = 1;

        return err;
}

static enum ip_defrag_users nf_ct_defrag_user(unsigned int hooknum,
                                              struct sk_buff *skb)
{
        u16 zone_id = NF_CT_DEFAULT_ZONE_ID;
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
        if (skb_nfct(skb)) {
                enum ip_conntrack_info ctinfo;
                const struct nf_conn *ct = nf_ct_get(skb, &ctinfo);

                zone_id = nf_ct_zone_id(nf_ct_zone(ct), CTINFO2DIR(ctinfo));
        }
#endif
        if (nf_bridge_in_prerouting(skb))
                return IP_DEFRAG_CONNTRACK_BRIDGE_IN + zone_id;

        if (hooknum == NF_INET_PRE_ROUTING)
                return IP_DEFRAG_CONNTRACK_IN + zone_id;
        else
                return IP_DEFRAG_CONNTRACK_OUT + zone_id;
}

static unsigned int ipv4_conntrack_defrag(void *priv,
                                          struct sk_buff *skb,
                                          const struct nf_hook_state *state)
{
        struct sock *sk = skb->sk;

        if (sk && sk_fullsock(sk) && (sk->sk_family == PF_INET) &&
            inet_test_bit(NODEFRAG, sk))
                return NF_ACCEPT;

#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#if !IS_ENABLED(CONFIG_NF_NAT)
        /* Previously seen (loopback)? Ignore. Do this before fragment check. */
        if (skb_nfct(skb) && !nf_ct_is_template((struct nf_conn *)skb_nfct(skb)))
                return NF_ACCEPT;
#endif
        if (skb->_nfct == IP_CT_UNTRACKED)
                return NF_ACCEPT;
#endif
        /* Gather fragments.
*/ if (ip_is_fragment(ip_hdr(skb))) { enum ip_defrag_users user = nf_ct_defrag_user(state->hook, skb); if (nf_ct_ipv4_gather_frags(state->net, skb, user)) return NF_STOLEN; } return NF_ACCEPT; } static const struct nf_hook_ops ipv4_defrag_ops[] = { { .hook = ipv4_conntrack_defrag, .pf = NFPROTO_IPV4, .hooknum = NF_INET_PRE_ROUTING, .priority = NF_IP_PRI_CONNTRACK_DEFRAG, }, { .hook = ipv4_conntrack_defrag, .pf = NFPROTO_IPV4, .hooknum = NF_INET_LOCAL_OUT, .priority = NF_IP_PRI_CONNTRACK_DEFRAG, }, }; static void __net_exit defrag4_net_exit(struct net *net) { if (net->nf.defrag_ipv4_users) { nf_unregister_net_hooks(net, ipv4_defrag_ops, ARRAY_SIZE(ipv4_defrag_ops)); net->nf.defrag_ipv4_users = 0; } } static const struct nf_defrag_hook defrag_hook = { .owner = THIS_MODULE, .enable = nf_defrag_ipv4_enable, .disable = nf_defrag_ipv4_disable, }; static struct pernet_operations defrag4_net_ops = { .exit = defrag4_net_exit, }; static int __init nf_defrag_init(void) { int err; err = register_pernet_subsys(&defrag4_net_ops); if (err) return err; rcu_assign_pointer(nf_defrag_v4_hook, &defrag_hook); return err; } static void __exit nf_defrag_fini(void) { rcu_assign_pointer(nf_defrag_v4_hook, NULL); unregister_pernet_subsys(&defrag4_net_ops); } int nf_defrag_ipv4_enable(struct net *net) { int err = 0; mutex_lock(&defrag4_mutex); if (net->nf.defrag_ipv4_users == UINT_MAX) { err = -EOVERFLOW; goto out_unlock; } if (net->nf.defrag_ipv4_users) { net->nf.defrag_ipv4_users++; goto out_unlock; } err = nf_register_net_hooks(net, ipv4_defrag_ops, ARRAY_SIZE(ipv4_defrag_ops)); if (err == 0) net->nf.defrag_ipv4_users = 1; out_unlock: mutex_unlock(&defrag4_mutex); return err; } EXPORT_SYMBOL_GPL(nf_defrag_ipv4_enable); void nf_defrag_ipv4_disable(struct net *net) { mutex_lock(&defrag4_mutex); if (net->nf.defrag_ipv4_users) { net->nf.defrag_ipv4_users--; if (net->nf.defrag_ipv4_users == 0) nf_unregister_net_hooks(net, ipv4_defrag_ops, ARRAY_SIZE(ipv4_defrag_ops)); } mutex_unlock(&defrag4_mutex); } EXPORT_SYMBOL_GPL(nf_defrag_ipv4_disable); module_init(nf_defrag_init); module_exit(nf_defrag_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("IPv4 defragmentation support");
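Other subsystems take a reference on these defrag hooks through the exported nf_defrag_ipv4_enable()/nf_defrag_ipv4_disable() pair shown above. A minimal sketch of such a consumer module (names are made up; error handling trimmed):

#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/netfilter/ipv4/nf_defrag_ipv4.h>

static int __net_init example_net_init(struct net *net)
{
        /* registers the defrag hooks in this netns on the first user */
        return nf_defrag_ipv4_enable(net);
}

static void __net_exit example_net_exit(struct net *net)
{
        /* unregisters them again when the user count drops to zero */
        nf_defrag_ipv4_disable(net);
}

static struct pernet_operations example_net_ops = {
        .init = example_net_init,
        .exit = example_net_exit,
};

static int __init example_init(void)
{
        return register_pernet_subsys(&example_net_ops);
}

static void __exit example_exit(void)
{
        unregister_pernet_subsys(&example_net_ops);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");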
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PID_NS_H
#define _LINUX_PID_NS_H

#include <linux/sched.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/workqueue.h>
#include <linux/threads.h>
#include <linux/nsproxy.h>
#include <linux/ns_common.h>
#include <linux/idr.h>

/* MAX_PID_NS_LEVEL is needed for limiting size of 'struct pid' */
#define MAX_PID_NS_LEVEL 32

struct fs_pin;

#if defined(CONFIG_SYSCTL) && defined(CONFIG_MEMFD_CREATE)
/* modes for vm.memfd_noexec sysctl */
#define MEMFD_NOEXEC_SCOPE_EXEC			0 /* MFD_EXEC implied if unset */
#define MEMFD_NOEXEC_SCOPE_NOEXEC_SEAL		1 /* MFD_NOEXEC_SEAL implied if unset */
#define MEMFD_NOEXEC_SCOPE_NOEXEC_ENFORCED	2 /* same as 1, except MFD_EXEC rejected */
#endif

struct pid_namespace {
        struct idr idr;
        struct rcu_head rcu;
        unsigned int pid_allocated;
        struct task_struct *child_reaper;
        struct kmem_cache *pid_cachep;
        unsigned int level;
        int pid_max;
        struct pid_namespace *parent;
#ifdef CONFIG_BSD_PROCESS_ACCT
        struct fs_pin *bacct;
#endif
        struct user_namespace *user_ns;
        struct ucounts *ucounts;
        int reboot;	/* group exit code if this pidns was rebooted */
        struct ns_common ns;
        struct work_struct work;
#ifdef CONFIG_SYSCTL
        struct ctl_table_set set;
        struct ctl_table_header *sysctls;
#if defined(CONFIG_MEMFD_CREATE)
        int memfd_noexec_scope;
#endif
#endif
} __randomize_layout;

extern struct pid_namespace init_pid_ns;

#define PIDNS_ADDING (1U << 31)

#ifdef CONFIG_PID_NS
static inline struct pid_namespace *to_pid_ns(struct ns_common *ns)
{
        return container_of(ns, struct pid_namespace, ns);
}

static inline struct pid_namespace *get_pid_ns(struct pid_namespace *ns)
{
        ns_ref_inc(ns);
        return ns;
}

#if defined(CONFIG_SYSCTL) && defined(CONFIG_MEMFD_CREATE)
static inline int pidns_memfd_noexec_scope(struct pid_namespace *ns)
{
        int scope = MEMFD_NOEXEC_SCOPE_EXEC;

        for (; ns; ns = ns->parent)
                scope = max(scope, READ_ONCE(ns->memfd_noexec_scope));

        return scope;
}
#else
static inline int pidns_memfd_noexec_scope(struct pid_namespace *ns)
{
        return 0;
}
#endif

extern struct pid_namespace *copy_pid_ns(u64 flags,
        struct user_namespace *user_ns, struct pid_namespace *ns);
extern void zap_pid_ns_processes(struct pid_namespace *pid_ns);
extern int reboot_pid_ns(struct pid_namespace *pid_ns, int cmd);
extern void put_pid_ns(struct pid_namespace *ns);
extern bool pidns_is_ancestor(struct pid_namespace *child,
                              struct pid_namespace *ancestor);

#else /* !CONFIG_PID_NS */
#include <linux/err.h>

static inline struct pid_namespace *get_pid_ns(struct pid_namespace *ns)
{
        return ns;
}

static inline int pidns_memfd_noexec_scope(struct pid_namespace *ns)
{
        return 0;
}

static inline struct pid_namespace *copy_pid_ns(u64 flags,
        struct user_namespace *user_ns, struct pid_namespace *ns)
{
        if (flags & CLONE_NEWPID)
                ns = ERR_PTR(-EINVAL);
        return ns;
}

static inline void put_pid_ns(struct pid_namespace *ns)
{
}

static inline void zap_pid_ns_processes(struct pid_namespace *ns)
{
        BUG();
}

static inline int reboot_pid_ns(struct pid_namespace *pid_ns, int cmd)
{
        return 0;
}

static inline bool
pidns_is_ancestor(struct pid_namespace *child, struct pid_namespace *ancestor)
{
        return false;
}
#endif /* CONFIG_PID_NS */

extern struct pid_namespace *task_active_pid_ns(struct task_struct *tsk);
void pidhash_init(void);
void pid_idr_init(void);
int register_pidns_sysctls(struct pid_namespace *pidns);
void unregister_pidns_sysctls(struct pid_namespace *pidns);

static inline bool task_is_in_init_pid_ns(struct task_struct *tsk)
{
        return task_active_pid_ns(tsk) == &init_pid_ns;
}

#endif /* _LINUX_PID_NS_H */
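A short illustrative use of the helpers declared in this header: take a reference on the caller's active pid namespace, inspect it, and drop the reference again. A sketch with a made-up function name, not code from the tree:

#include <linux/pid_namespace.h>
#include <linux/printk.h>
#include <linux/sched.h>

static void example_inspect_pidns(struct task_struct *tsk)
{
        /* get_pid_ns() pins the namespace while we look at it */
        struct pid_namespace *ns = get_pid_ns(task_active_pid_ns(tsk));

        pr_info("task %d runs at pid-ns level %u (%sinit ns)\n",
                tsk->pid, ns->level,
                task_is_in_init_pid_ns(tsk) ? "" : "not ");

        put_pid_ns(ns);
}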
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_NETFILTER_H
#define __LINUX_NETFILTER_H

#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/net.h>
#include <linux/if.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/static_key.h>
#include <linux/module.h>
#include <linux/netfilter_defs.h>
#include <linux/netdevice.h>
#include <linux/sockptr.h>
#include <net/net_namespace.h>

static inline int NF_DROP_GETERR(int verdict)
{
        return -(verdict >> NF_VERDICT_QBITS);
}

static __always_inline int
NF_DROP_REASON(struct sk_buff *skb, enum skb_drop_reason reason, u32 err)
{
        BUILD_BUG_ON(err > 0xffff);

        kfree_skb_reason(skb, reason);

        return ((err << 16) | NF_STOLEN);
}

static inline int nf_inet_addr_cmp(const union nf_inet_addr *a1,
                                   const union nf_inet_addr *a2)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
        const unsigned long *ul1 = (const unsigned long *)a1;
        const unsigned long *ul2 = (const unsigned long *)a2;

        return ((ul1[0] ^ ul2[0]) | (ul1[1] ^ ul2[1])) == 0UL;
#else
        return a1->all[0] == a2->all[0] &&
               a1->all[1] == a2->all[1] &&
               a1->all[2] == a2->all[2] &&
               a1->all[3] == a2->all[3];
#endif
}

static inline void nf_inet_addr_mask(const union nf_inet_addr *a1,
                                     union nf_inet_addr *result,
                                     const union nf_inet_addr *mask)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
        const unsigned long *ua = (const unsigned long *)a1;
        unsigned long *ur
= (unsigned long *)result; const unsigned long *um = (const unsigned long *)mask; ur[0] = ua[0] & um[0]; ur[1] = ua[1] & um[1]; #else result->all[0] = a1->all[0] & mask->all[0]; result->all[1] = a1->all[1] & mask->all[1]; result->all[2] = a1->all[2] & mask->all[2]; result->all[3] = a1->all[3] & mask->all[3]; #endif } int netfilter_init(void); struct sk_buff; struct nf_hook_ops; struct sock; struct nf_hook_state { u8 hook; u8 pf; struct net_device *in; struct net_device *out; struct sock *sk; struct net *net; int (*okfn)(struct net *, struct sock *, struct sk_buff *); }; typedef unsigned int nf_hookfn(void *priv, struct sk_buff *skb, const struct nf_hook_state *state); enum nf_hook_ops_type { NF_HOOK_OP_UNDEFINED, NF_HOOK_OP_NF_TABLES, NF_HOOK_OP_BPF, NF_HOOK_OP_NFT_FT, }; struct nf_hook_ops { struct list_head list; struct rcu_head rcu; /* User fills in from here down. */ nf_hookfn *hook; struct net_device *dev; void *priv; u8 pf; enum nf_hook_ops_type hook_ops_type:8; unsigned int hooknum; /* Hooks are ordered in ascending priority. */ int priority; }; struct nf_hook_entry { nf_hookfn *hook; void *priv; }; struct nf_hook_entries_rcu_head { struct rcu_head head; void *allocation; }; struct nf_hook_entries { u16 num_hook_entries; /* padding */ struct nf_hook_entry hooks[]; /* trailer: pointers to original orig_ops of each hook, * followed by rcu_head and scratch space used for freeing * the structure via call_rcu. * * This is not part of struct nf_hook_entry since its only * needed in slow path (hook register/unregister): * const struct nf_hook_ops *orig_ops[] * * For the same reason, we store this at end -- its * only needed when a hook is deleted, not during * packet path processing: * struct nf_hook_entries_rcu_head head */ }; #ifdef CONFIG_NETFILTER static inline struct nf_hook_ops **nf_hook_entries_get_hook_ops(const struct nf_hook_entries *e) { unsigned int n = e->num_hook_entries; const void *hook_end; hook_end = &e->hooks[n]; /* this is *past* ->hooks[]! */ return (struct nf_hook_ops **)hook_end; } static inline int nf_hook_entry_hookfn(const struct nf_hook_entry *entry, struct sk_buff *skb, struct nf_hook_state *state) { return entry->hook(entry->priv, skb, state); } static inline void nf_hook_state_init(struct nf_hook_state *p, unsigned int hook, u_int8_t pf, struct net_device *indev, struct net_device *outdev, struct sock *sk, struct net *net, int (*okfn)(struct net *, struct sock *, struct sk_buff *)) { p->hook = hook; p->pf = pf; p->in = indev; p->out = outdev; p->sk = sk; p->net = net; p->okfn = okfn; } struct nf_sockopt_ops { struct list_head list; u_int8_t pf; /* Non-inclusive ranges: use 0/0/NULL to never get called. */ int set_optmin; int set_optmax; int (*set)(struct sock *sk, int optval, sockptr_t arg, unsigned int len); int get_optmin; int get_optmax; int (*get)(struct sock *sk, int optval, void __user *user, int *len); /* Use the module struct to lock set/get code in place */ struct module *owner; }; /* Function to register/unregister hook points. */ int nf_register_net_hook(struct net *net, const struct nf_hook_ops *ops); void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *ops); int nf_register_net_hooks(struct net *net, const struct nf_hook_ops *reg, unsigned int n); void nf_unregister_net_hooks(struct net *net, const struct nf_hook_ops *reg, unsigned int n); /* Functions to register get/setsockopt ranges (non-inclusive). You need to check permissions yourself! 
*/ int nf_register_sockopt(struct nf_sockopt_ops *reg); void nf_unregister_sockopt(struct nf_sockopt_ops *reg); #ifdef CONFIG_JUMP_LABEL extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS]; #endif int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state, const struct nf_hook_entries *e, unsigned int i); void nf_hook_slow_list(struct list_head *head, struct nf_hook_state *state, const struct nf_hook_entries *e); /** * nf_hook - call a netfilter hook * * Returns 1 if the hook has allowed the packet to pass. The function * okfn must be invoked by the caller in this case. Any other return * value indicates the packet has been consumed by the hook. */ static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net, struct sock *sk, struct sk_buff *skb, struct net_device *indev, struct net_device *outdev, int (*okfn)(struct net *, struct sock *, struct sk_buff *)) { struct nf_hook_entries *hook_head = NULL; int ret = 1; #ifdef CONFIG_JUMP_LABEL if (__builtin_constant_p(pf) && __builtin_constant_p(hook) && !static_key_false(&nf_hooks_needed[pf][hook])) return 1; #endif rcu_read_lock(); switch (pf) { case NFPROTO_IPV4: hook_head = rcu_dereference(net->nf.hooks_ipv4[hook]); break; case NFPROTO_IPV6: hook_head = rcu_dereference(net->nf.hooks_ipv6[hook]); break; case NFPROTO_ARP: #ifdef CONFIG_NETFILTER_FAMILY_ARP if (WARN_ON_ONCE(hook >= ARRAY_SIZE(net->nf.hooks_arp))) break; hook_head = rcu_dereference(net->nf.hooks_arp[hook]); #endif break; case NFPROTO_BRIDGE: #ifdef CONFIG_NETFILTER_FAMILY_BRIDGE hook_head = rcu_dereference(net->nf.hooks_bridge[hook]); #endif break; default: WARN_ON_ONCE(1); break; } if (hook_head) { struct nf_hook_state state; nf_hook_state_init(&state, hook, pf, indev, outdev, sk, net, okfn); ret = nf_hook_slow(skb, &state, hook_head, 0); } rcu_read_unlock(); return ret; } /* Activate hook; either okfn or kfree_skb called, unless a hook returns NF_STOLEN (in which case, it's up to the hook to deal with the consequences). Returns -ERRNO if packet dropped. Zero means queued, stolen or accepted. */ /* RR: > I don't want nf_hook to return anything because people might forget > about async and trust the return value to mean "packet was ok". 
AK: Just document it clearly, then you can expect some sense from kernel coders :) */ static inline int NF_HOOK_COND(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, struct sk_buff *skb, struct net_device *in, struct net_device *out, int (*okfn)(struct net *, struct sock *, struct sk_buff *), bool cond) { int ret; if (!cond || ((ret = nf_hook(pf, hook, net, sk, skb, in, out, okfn)) == 1)) ret = okfn(net, sk, skb); return ret; } static inline int NF_HOOK(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, struct sk_buff *skb, struct net_device *in, struct net_device *out, int (*okfn)(struct net *, struct sock *, struct sk_buff *)) { int ret = nf_hook(pf, hook, net, sk, skb, in, out, okfn); if (ret == 1) ret = okfn(net, sk, skb); return ret; } static inline void NF_HOOK_LIST(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, struct list_head *head, struct net_device *in, struct net_device *out, int (*okfn)(struct net *, struct sock *, struct sk_buff *)) { struct nf_hook_entries *hook_head = NULL; #ifdef CONFIG_JUMP_LABEL if (__builtin_constant_p(pf) && __builtin_constant_p(hook) && !static_key_false(&nf_hooks_needed[pf][hook])) return; #endif rcu_read_lock(); switch (pf) { case NFPROTO_IPV4: hook_head = rcu_dereference(net->nf.hooks_ipv4[hook]); break; case NFPROTO_IPV6: hook_head = rcu_dereference(net->nf.hooks_ipv6[hook]); break; default: WARN_ON_ONCE(1); break; } if (hook_head) { struct nf_hook_state state; nf_hook_state_init(&state, hook, pf, in, out, sk, net, okfn); nf_hook_slow_list(head, &state, hook_head); } rcu_read_unlock(); } /* Call setsockopt() */ int nf_setsockopt(struct sock *sk, u_int8_t pf, int optval, sockptr_t opt, unsigned int len); int nf_getsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt, int *len); struct flowi; struct nf_queue_entry; __sum16 nf_checksum(struct sk_buff *skb, unsigned int hook, unsigned int dataoff, u_int8_t protocol, unsigned short family); __sum16 nf_checksum_partial(struct sk_buff *skb, unsigned int hook, unsigned int dataoff, unsigned int len, u_int8_t protocol, unsigned short family); int nf_route(struct net *net, struct dst_entry **dst, struct flowi *fl, bool strict, unsigned short family); #include <net/flow.h> struct nf_conn; enum nf_nat_manip_type; struct nlattr; struct nf_nat_hook { int (*parse_nat_setup)(struct nf_conn *ct, enum nf_nat_manip_type manip, const struct nlattr *attr); void (*decode_session)(struct sk_buff *skb, struct flowi *fl); void (*remove_nat_bysrc)(struct nf_conn *ct); }; extern const struct nf_nat_hook __rcu *nf_nat_hook; static inline void nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family) { #if IS_ENABLED(CONFIG_NF_NAT) const struct nf_nat_hook *nat_hook; rcu_read_lock(); nat_hook = rcu_dereference(nf_nat_hook); if (nat_hook && nat_hook->decode_session) nat_hook->decode_session(skb, fl); rcu_read_unlock(); #endif } #else /* !CONFIG_NETFILTER */ static inline int NF_HOOK_COND(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, struct sk_buff *skb, struct net_device *in, struct net_device *out, int (*okfn)(struct net *, struct sock *, struct sk_buff *), bool cond) { return okfn(net, sk, skb); } static inline int NF_HOOK(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, struct sk_buff *skb, struct net_device *in, struct net_device *out, int (*okfn)(struct net *, struct sock *, struct sk_buff *)) { return okfn(net, sk, skb); } static inline void NF_HOOK_LIST(uint8_t pf, unsigned int hook, struct net *net, 
struct sock *sk, struct list_head *head, struct net_device *in, struct net_device *out, int (*okfn)(struct net *, struct sock *, struct sk_buff *)) { /* nothing to do */ } static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net, struct sock *sk, struct sk_buff *skb, struct net_device *indev, struct net_device *outdev, int (*okfn)(struct net *, struct sock *, struct sk_buff *)) { return 1; } struct flowi; static inline void nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family) { } #endif /*CONFIG_NETFILTER*/ #if IS_ENABLED(CONFIG_NF_CONNTRACK) #include <linux/netfilter/nf_conntrack_zones_common.h> void nf_ct_attach(struct sk_buff *, const struct sk_buff *); void nf_ct_set_closing(struct nf_conntrack *nfct); struct nf_conntrack_tuple; bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple, const struct sk_buff *skb); #else static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {} static inline void nf_ct_set_closing(struct nf_conntrack *nfct) {} struct nf_conntrack_tuple; static inline bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple, const struct sk_buff *skb) { return false; } #endif struct nf_conn; enum ip_conntrack_info; struct nf_ct_hook { int (*update)(struct net *net, struct sk_buff *skb); void (*destroy)(struct nf_conntrack *); bool (*get_tuple_skb)(struct nf_conntrack_tuple *, const struct sk_buff *); void (*attach)(struct sk_buff *nskb, const struct sk_buff *skb); void (*set_closing)(struct nf_conntrack *nfct); int (*confirm)(struct sk_buff *skb); u32 (*get_id)(const struct nf_conntrack *nfct); }; extern const struct nf_ct_hook __rcu *nf_ct_hook; struct nlattr; struct nfnl_ct_hook { size_t (*build_size)(const struct nf_conn *ct); int (*build)(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, u_int16_t ct_attr, u_int16_t ct_info_attr); int (*parse)(const struct nlattr *attr, struct nf_conn *ct); int (*attach_expect)(const struct nlattr *attr, struct nf_conn *ct, u32 portid, u32 report); void (*seq_adjust)(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, s32 off); }; extern const struct nfnl_ct_hook __rcu *nfnl_ct_hook; struct nf_defrag_hook { struct module *owner; int (*enable)(struct net *net); void (*disable)(struct net *net); }; extern const struct nf_defrag_hook __rcu *nf_defrag_v4_hook; extern const struct nf_defrag_hook __rcu *nf_defrag_v6_hook; /* * Contains bitmask of ctnetlink event subscribers, if any. * Can't be pernet due to NETLINK_LISTEN_ALL_NSID setsockopt flag. */ extern u8 nf_ctnetlink_has_listener; #endif /*__LINUX_NETFILTER_H*/
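/*
 * Illustrative sketch, not part of the header above: a minimal module that
 * registers a packet hook with nf_register_net_hook(). The identifiers
 * sample_hook_fn, sample_nf_ops, sample_nf_init and sample_nf_exit are
 * hypothetical names for this example; only the netfilter types and
 * registration calls come from the header. Per-netns registration beyond
 * init_net is omitted.
 */
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <net/net_namespace.h>

static unsigned int sample_hook_fn(void *priv, struct sk_buff *skb,
				   const struct nf_hook_state *state)
{
	/* A real hook would inspect or mangle skb here. */
	return NF_ACCEPT;
}

static const struct nf_hook_ops sample_nf_ops = {
	.hook		= sample_hook_fn,
	.pf		= NFPROTO_IPV4,
	.hooknum	= NF_INET_LOCAL_IN,
	.priority	= NF_IP_PRI_FIRST,
};

static int __init sample_nf_init(void)
{
	/* Register the hook in the initial network namespace only. */
	return nf_register_net_hook(&init_net, &sample_nf_ops);
}

static void __exit sample_nf_exit(void)
{
	nf_unregister_net_hook(&init_net, &sample_nf_ops);
}

module_init(sample_nf_init);
module_exit(sample_nf_exit);
MODULE_LICENSE("GPL");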
/* * linux/drivers/video/fbcmap.c -- Colormap handling for frame buffer devices * * Created 15 Jun 1997 by Geert Uytterhoeven * * 2001 - Documented with DocBook * - Brad Douglas <brad@neruo.com> * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details.
*/ #include <linux/export.h> #include <linux/string.h> #include <linux/module.h> #include <linux/fb.h> #include <linux/slab.h> #include <linux/uaccess.h> static u16 red2[] __read_mostly = { 0x0000, 0xaaaa }; static u16 green2[] __read_mostly = { 0x0000, 0xaaaa }; static u16 blue2[] __read_mostly = { 0x0000, 0xaaaa }; static u16 red4[] __read_mostly = { 0x0000, 0xaaaa, 0x5555, 0xffff }; static u16 green4[] __read_mostly = { 0x0000, 0xaaaa, 0x5555, 0xffff }; static u16 blue4[] __read_mostly = { 0x0000, 0xaaaa, 0x5555, 0xffff }; static u16 red8[] __read_mostly = { 0x0000, 0x0000, 0x0000, 0x0000, 0xaaaa, 0xaaaa, 0xaaaa, 0xaaaa }; static u16 green8[] __read_mostly = { 0x0000, 0x0000, 0xaaaa, 0xaaaa, 0x0000, 0x0000, 0x5555, 0xaaaa }; static u16 blue8[] __read_mostly = { 0x0000, 0xaaaa, 0x0000, 0xaaaa, 0x0000, 0xaaaa, 0x0000, 0xaaaa }; static u16 red16[] __read_mostly = { 0x0000, 0x0000, 0x0000, 0x0000, 0xaaaa, 0xaaaa, 0xaaaa, 0xaaaa, 0x5555, 0x5555, 0x5555, 0x5555, 0xffff, 0xffff, 0xffff, 0xffff }; static u16 green16[] __read_mostly = { 0x0000, 0x0000, 0xaaaa, 0xaaaa, 0x0000, 0x0000, 0x5555, 0xaaaa, 0x5555, 0x5555, 0xffff, 0xffff, 0x5555, 0x5555, 0xffff, 0xffff }; static u16 blue16[] __read_mostly = { 0x0000, 0xaaaa, 0x0000, 0xaaaa, 0x0000, 0xaaaa, 0x0000, 0xaaaa, 0x5555, 0xffff, 0x5555, 0xffff, 0x5555, 0xffff, 0x5555, 0xffff }; static const struct fb_cmap default_2_colors = { .len=2, .red=red2, .green=green2, .blue=blue2 }; static const struct fb_cmap default_8_colors = { .len=8, .red=red8, .green=green8, .blue=blue8 }; static const struct fb_cmap default_4_colors = { .len=4, .red=red4, .green=green4, .blue=blue4 }; static const struct fb_cmap default_16_colors = { .len=16, .red=red16, .green=green16, .blue=blue16 }; /** * fb_alloc_cmap_gfp - allocate a colormap * @cmap: frame buffer colormap structure * @len: length of @cmap * @transp: boolean, 1 if there is transparency, 0 otherwise * @flags: flags for kmalloc memory allocation * * Allocates memory for a colormap @cmap. @len is the * number of entries in the palette. * * Returns negative errno on error, or zero on success. * */ int fb_alloc_cmap_gfp(struct fb_cmap *cmap, int len, int transp, gfp_t flags) { int size = len * sizeof(u16); int ret = -ENOMEM; flags |= __GFP_NOWARN; if (cmap->len != len) { fb_dealloc_cmap(cmap); if (!len) return 0; cmap->red = kzalloc(size, flags); if (!cmap->red) goto fail; cmap->green = kzalloc(size, flags); if (!cmap->green) goto fail; cmap->blue = kzalloc(size, flags); if (!cmap->blue) goto fail; if (transp) { cmap->transp = kzalloc(size, flags); if (!cmap->transp) goto fail; } else { cmap->transp = NULL; } } cmap->start = 0; cmap->len = len; ret = fb_copy_cmap(fb_default_cmap(len), cmap); if (ret) goto fail; return 0; fail: fb_dealloc_cmap(cmap); return ret; } int fb_alloc_cmap(struct fb_cmap *cmap, int len, int transp) { return fb_alloc_cmap_gfp(cmap, len, transp, GFP_ATOMIC); } /** * fb_dealloc_cmap - deallocate a colormap * @cmap: frame buffer colormap structure * * Deallocates a colormap that was previously allocated with * fb_alloc_cmap(). * */ void fb_dealloc_cmap(struct fb_cmap *cmap) { kfree(cmap->red); kfree(cmap->green); kfree(cmap->blue); kfree(cmap->transp); cmap->red = cmap->green = cmap->blue = cmap->transp = NULL; cmap->len = 0; } /** * fb_copy_cmap - copy a colormap * @from: frame buffer colormap structure * @to: frame buffer colormap structure * * Copy contents of colormap from @from to @to. 
*/ int fb_copy_cmap(const struct fb_cmap *from, struct fb_cmap *to) { unsigned int tooff = 0, fromoff = 0; size_t size; if (to->start > from->start) fromoff = to->start - from->start; else tooff = from->start - to->start; if (fromoff >= from->len || tooff >= to->len) return -EINVAL; size = min_t(size_t, to->len - tooff, from->len - fromoff); if (size == 0) return -EINVAL; size *= sizeof(u16); memcpy(to->red+tooff, from->red+fromoff, size); memcpy(to->green+tooff, from->green+fromoff, size); memcpy(to->blue+tooff, from->blue+fromoff, size); if (from->transp && to->transp) memcpy(to->transp+tooff, from->transp+fromoff, size); return 0; } int fb_cmap_to_user(const struct fb_cmap *from, struct fb_cmap_user *to) { unsigned int tooff = 0, fromoff = 0; size_t size; if (to->start > from->start) fromoff = to->start - from->start; else tooff = from->start - to->start; if (fromoff >= from->len || tooff >= to->len) return -EINVAL; size = min_t(size_t, to->len - tooff, from->len - fromoff); if (size == 0) return -EINVAL; size *= sizeof(u16); if (copy_to_user(to->red+tooff, from->red+fromoff, size)) return -EFAULT; if (copy_to_user(to->green+tooff, from->green+fromoff, size)) return -EFAULT; if (copy_to_user(to->blue+tooff, from->blue+fromoff, size)) return -EFAULT; if (from->transp && to->transp) if (copy_to_user(to->transp+tooff, from->transp+fromoff, size)) return -EFAULT; return 0; } /** * fb_set_cmap - set the colormap * @cmap: frame buffer colormap structure * @info: frame buffer info structure * * Sets the colormap @cmap for a screen of device @info. * * Returns negative errno on error, or zero on success. * */ int fb_set_cmap(struct fb_cmap *cmap, struct fb_info *info) { int i, start, rc = 0; u16 *red, *green, *blue, *transp; u_int hred, hgreen, hblue, htransp = 0xffff; red = cmap->red; green = cmap->green; blue = cmap->blue; transp = cmap->transp; start = cmap->start; if (start < 0 || (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap)) return -EINVAL; if (info->fbops->fb_setcmap) { rc = info->fbops->fb_setcmap(cmap, info); } else { for (i = 0; i < cmap->len; i++) { hred = *red++; hgreen = *green++; hblue = *blue++; if (transp) htransp = *transp++; if (info->fbops->fb_setcolreg(start++, hred, hgreen, hblue, htransp, info)) break; } } if (rc == 0) fb_copy_cmap(cmap, &info->cmap); return rc; } int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info) { int rc, size = cmap->len * sizeof(u16); struct fb_cmap umap; if (size < 0 || size < cmap->len) return -E2BIG; memset(&umap, 0, sizeof(struct fb_cmap)); rc = fb_alloc_cmap_gfp(&umap, cmap->len, cmap->transp != NULL, GFP_KERNEL); if (rc) return rc; if (copy_from_user(umap.red, cmap->red, size) || copy_from_user(umap.green, cmap->green, size) || copy_from_user(umap.blue, cmap->blue, size) || (cmap->transp && copy_from_user(umap.transp, cmap->transp, size))) { rc = -EFAULT; goto out; } umap.start = cmap->start; lock_fb_info(info); rc = fb_set_cmap(&umap, info); unlock_fb_info(info); out: fb_dealloc_cmap(&umap); return rc; } /** * fb_default_cmap - get default colormap * @len: size of palette for a depth * * Gets the default colormap for a specific screen depth. @len * is the size of the palette for a particular screen depth. * * Returns pointer to a frame buffer colormap structure. 
* */ const struct fb_cmap *fb_default_cmap(int len) { if (len <= 2) return &default_2_colors; if (len <= 4) return &default_4_colors; if (len <= 8) return &default_8_colors; return &default_16_colors; } /** * fb_invert_cmaps - invert all defaults colormaps * * Invert all default colormaps. * */ void fb_invert_cmaps(void) { u_int i; for (i = 0; i < ARRAY_SIZE(red2); i++) { red2[i] = ~red2[i]; green2[i] = ~green2[i]; blue2[i] = ~blue2[i]; } for (i = 0; i < ARRAY_SIZE(red4); i++) { red4[i] = ~red4[i]; green4[i] = ~green4[i]; blue4[i] = ~blue4[i]; } for (i = 0; i < ARRAY_SIZE(red8); i++) { red8[i] = ~red8[i]; green8[i] = ~green8[i]; blue8[i] = ~blue8[i]; } for (i = 0; i < ARRAY_SIZE(red16); i++) { red16[i] = ~red16[i]; green16[i] = ~green16[i]; blue16[i] = ~blue16[i]; } } /* * Visible symbols for modules */ EXPORT_SYMBOL(fb_alloc_cmap); EXPORT_SYMBOL(fb_dealloc_cmap); EXPORT_SYMBOL(fb_copy_cmap); EXPORT_SYMBOL(fb_set_cmap); EXPORT_SYMBOL(fb_default_cmap); EXPORT_SYMBOL(fb_invert_cmaps);
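/*
 * Illustrative sketch, not part of fbcmap.c above: how a hypothetical driver
 * init path might use the exported colormap helpers. The function name
 * sample_fb_init_cmap and the "depth" parameter are assumptions for this
 * example; the locking and error handling of a real driver are omitted.
 */
#include <linux/fb.h>

static int sample_fb_init_cmap(struct fb_info *info, int depth)
{
	int len = 1 << depth;		/* e.g. 256 entries at 8 bpp */
	int ret;

	/*
	 * Allocate a colormap without a transparency channel;
	 * fb_alloc_cmap() pre-fills it from fb_default_cmap(len).
	 */
	ret = fb_alloc_cmap(&info->cmap, len, 0);
	if (ret)
		return ret;

	/* Push the palette to hardware via fb_setcmap()/fb_setcolreg(). */
	ret = fb_set_cmap(&info->cmap, info);
	if (ret)
		fb_dealloc_cmap(&info->cmap);

	return ret;
}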
/* * net/tipc/link.c: TIPC link code * * Copyright (c) 1996-2007, 2012-2016, Ericsson AB * Copyright (c) 2004-2007, 2010-2013, Wind River Systems * All rights reserved.
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the names of the copyright holders nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include "core.h" #include "subscr.h" #include "link.h" #include "bcast.h" #include "socket.h" #include "name_distr.h" #include "discover.h" #include "netlink.h" #include "monitor.h" #include "trace.h" #include "crypto.h" #include <linux/pkt_sched.h> struct tipc_stats { u32 sent_pkts; u32 recv_pkts; u32 sent_states; u32 recv_states; u32 sent_probes; u32 recv_probes; u32 sent_nacks; u32 recv_nacks; u32 sent_acks; u32 sent_bundled; u32 sent_bundles; u32 recv_bundled; u32 recv_bundles; u32 retransmitted; u32 sent_fragmented; u32 sent_fragments; u32 recv_fragmented; u32 recv_fragments; u32 link_congs; /* # port sends blocked by congestion */ u32 deferred_recv; u32 duplicates; u32 max_queue_sz; /* send queue size high water mark */ u32 accu_queue_sz; /* used for send queue size profiling */ u32 queue_sz_counts; /* used for send queue size profiling */ u32 msg_length_counts; /* used for message length profiling */ u32 msg_lengths_total; /* used for message length profiling */ u32 msg_length_profile[7]; /* used for msg. 
length profiling */ }; /** * struct tipc_link - TIPC link data structure * @addr: network address of link's peer node * @name: link name character string * @net: pointer to namespace struct * @peer_session: link session # being used by peer end of link * @peer_bearer_id: bearer id used by link's peer endpoint * @bearer_id: local bearer id used by link * @tolerance: minimum link continuity loss needed to reset link [in ms] * @abort_limit: # of unacknowledged continuity probes needed to reset link * @state: current state of link FSM * @peer_caps: bitmap describing capabilities of peer node * @silent_intv_cnt: # of timer intervals without any reception from peer * @priority: current link priority * @net_plane: current link network plane ('A' through 'H') * @mon_state: cookie with information needed by link monitor * @mtu: current maximum packet size for this link * @advertised_mtu: advertised own mtu when link is being established * @backlogq: queue for messages waiting to be sent * @ackers: # of peers that needs to ack each packet before it can be released * @acked: # last packet acked by a certain peer. Used for broadcast. * @rcv_nxt: next sequence number to expect for inbound messages * @inputq: buffer queue for messages to be delivered upwards * @namedq: buffer queue for name table messages to be delivered upwards * @wakeupq: linked list of wakeup msgs waiting for link congestion to abate * @reasm_buf: head of partially reassembled inbound message fragments * @stats: collects statistics regarding link activity * @session: session to be used by link * @snd_nxt_state: next send seq number * @rcv_nxt_state: next rcv seq number * @in_session: have received ACTIVATE_MSG from peer * @active: link is active * @if_name: associated interface name * @rst_cnt: link reset counter * @drop_point: seq number for failover handling (FIXME) * @failover_reasm_skb: saved failover msg ptr (FIXME) * @failover_deferdq: deferred message queue for failover processing (FIXME) * @transmq: the link's transmit queue * @backlog: link's backlog by priority (importance) * @snd_nxt: next sequence number to be used * @rcv_unacked: # messages read by user, but not yet acked back to peer * @deferdq: deferred receive queue * @window: sliding window size for congestion handling * @min_win: minimal send window to be used by link * @ssthresh: slow start threshold for congestion handling * @max_win: maximal send window to be used by link * @cong_acks: congestion acks for congestion avoidance (FIXME) * @checkpoint: seq number for congestion window size handling * @reasm_tnlmsg: fragmentation/reassembly area for tunnel protocol message * @last_gap: last gap ack blocks for bcast (FIXME) * @last_ga: ptr to gap ack blocks * @bc_rcvlink: the peer specific link used for broadcast reception * @bc_sndlink: the namespace global link used for broadcast sending * @nack_state: bcast nack state * @bc_peer_is_up: peer has acked the bcast init msg */ struct tipc_link { u32 addr; char name[TIPC_MAX_LINK_NAME]; struct net *net; /* Management and link supervision data */ u16 peer_session; u16 session; u16 snd_nxt_state; u16 rcv_nxt_state; u32 peer_bearer_id; u32 bearer_id; u32 tolerance; u32 abort_limit; u32 state; u16 peer_caps; bool in_session; bool active; u32 silent_intv_cnt; char if_name[TIPC_MAX_IF_NAME]; u32 priority; char net_plane; struct tipc_mon_state mon_state; u16 rst_cnt; /* Failover/synch */ u16 drop_point; struct sk_buff *failover_reasm_skb; struct sk_buff_head failover_deferdq; /* Max packet negotiation */ u16 mtu; u16 
advertised_mtu; /* Sending */ struct sk_buff_head transmq; struct sk_buff_head backlogq; struct { u16 len; u16 limit; struct sk_buff *target_bskb; } backlog[5]; u16 snd_nxt; /* Reception */ u16 rcv_nxt; u32 rcv_unacked; struct sk_buff_head deferdq; struct sk_buff_head *inputq; struct sk_buff_head *namedq; /* Congestion handling */ struct sk_buff_head wakeupq; u16 window; u16 min_win; u16 ssthresh; u16 max_win; u16 cong_acks; u16 checkpoint; /* Fragmentation/reassembly */ struct sk_buff *reasm_buf; struct sk_buff *reasm_tnlmsg; /* Broadcast */ u16 ackers; u16 acked; u16 last_gap; struct tipc_gap_ack_blks *last_ga; struct tipc_link *bc_rcvlink; struct tipc_link *bc_sndlink; u8 nack_state; bool bc_peer_is_up; /* Statistics */ struct tipc_stats stats; }; /* * Error message prefixes */ static const char *link_co_err = "Link tunneling error, "; static const char *link_rst_msg = "Resetting link "; /* Send states for broadcast NACKs */ enum { BC_NACK_SND_CONDITIONAL, BC_NACK_SND_UNCONDITIONAL, BC_NACK_SND_SUPPRESS, }; #define TIPC_BC_RETR_LIM (jiffies + msecs_to_jiffies(10)) #define TIPC_UC_RETR_TIME (jiffies + msecs_to_jiffies(1)) /* Link FSM states: */ enum { LINK_ESTABLISHED = 0xe, LINK_ESTABLISHING = 0xe << 4, LINK_RESET = 0x1 << 8, LINK_RESETTING = 0x2 << 12, LINK_PEER_RESET = 0xd << 16, LINK_FAILINGOVER = 0xf << 20, LINK_SYNCHING = 0xc << 24 }; static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb, struct sk_buff_head *xmitq); static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe, bool probe_reply, u16 rcvgap, int tolerance, int priority, struct sk_buff_head *xmitq); static void link_print(struct tipc_link *l, const char *str); static int tipc_link_build_nack_msg(struct tipc_link *l, struct sk_buff_head *xmitq); static void tipc_link_build_bc_init_msg(struct tipc_link *l, struct sk_buff_head *xmitq); static u8 __tipc_build_gap_ack_blks(struct tipc_gap_ack_blks *ga, struct tipc_link *l, u8 start_index); static u16 tipc_build_gap_ack_blks(struct tipc_link *l, struct tipc_msg *hdr); static int tipc_link_advance_transmq(struct tipc_link *l, struct tipc_link *r, u16 acked, u16 gap, struct tipc_gap_ack_blks *ga, struct sk_buff_head *xmitq, bool *retransmitted, int *rc); static void tipc_link_update_cwin(struct tipc_link *l, int released, bool retransmitted); /* * Simple non-static link routines (i.e. 
referenced outside this file) */ bool tipc_link_is_up(struct tipc_link *l) { return l->state & (LINK_ESTABLISHED | LINK_SYNCHING); } bool tipc_link_peer_is_down(struct tipc_link *l) { return l->state == LINK_PEER_RESET; } bool tipc_link_is_reset(struct tipc_link *l) { return l->state & (LINK_RESET | LINK_FAILINGOVER | LINK_ESTABLISHING); } bool tipc_link_is_establishing(struct tipc_link *l) { return l->state == LINK_ESTABLISHING; } bool tipc_link_is_synching(struct tipc_link *l) { return l->state == LINK_SYNCHING; } bool tipc_link_is_failingover(struct tipc_link *l) { return l->state == LINK_FAILINGOVER; } bool tipc_link_is_blocked(struct tipc_link *l) { return l->state & (LINK_RESETTING | LINK_PEER_RESET | LINK_FAILINGOVER); } static bool link_is_bc_sndlink(struct tipc_link *l) { return !l->bc_sndlink; } static bool link_is_bc_rcvlink(struct tipc_link *l) { return ((l->bc_rcvlink == l) && !link_is_bc_sndlink(l)); } void tipc_link_set_active(struct tipc_link *l, bool active) { l->active = active; } u32 tipc_link_id(struct tipc_link *l) { return l->peer_bearer_id << 16 | l->bearer_id; } int tipc_link_min_win(struct tipc_link *l) { return l->min_win; } int tipc_link_max_win(struct tipc_link *l) { return l->max_win; } int tipc_link_prio(struct tipc_link *l) { return l->priority; } unsigned long tipc_link_tolerance(struct tipc_link *l) { return l->tolerance; } struct sk_buff_head *tipc_link_inputq(struct tipc_link *l) { return l->inputq; } char tipc_link_plane(struct tipc_link *l) { return l->net_plane; } struct net *tipc_link_net(struct tipc_link *l) { return l->net; } void tipc_link_update_caps(struct tipc_link *l, u16 capabilities) { l->peer_caps = capabilities; } void tipc_link_add_bc_peer(struct tipc_link *snd_l, struct tipc_link *uc_l, struct sk_buff_head *xmitq) { struct tipc_link *rcv_l = uc_l->bc_rcvlink; snd_l->ackers++; rcv_l->acked = snd_l->snd_nxt - 1; snd_l->state = LINK_ESTABLISHED; tipc_link_build_bc_init_msg(uc_l, xmitq); } void tipc_link_remove_bc_peer(struct tipc_link *snd_l, struct tipc_link *rcv_l, struct sk_buff_head *xmitq) { u16 ack = snd_l->snd_nxt - 1; snd_l->ackers--; rcv_l->bc_peer_is_up = true; rcv_l->state = LINK_ESTABLISHED; tipc_link_bc_ack_rcv(rcv_l, ack, 0, NULL, xmitq, NULL); trace_tipc_link_reset(rcv_l, TIPC_DUMP_ALL, "bclink removed!"); tipc_link_reset(rcv_l); rcv_l->state = LINK_RESET; if (!snd_l->ackers) { trace_tipc_link_reset(snd_l, TIPC_DUMP_ALL, "zero ackers!"); tipc_link_reset(snd_l); snd_l->state = LINK_RESET; __skb_queue_purge(xmitq); } } int tipc_link_bc_peers(struct tipc_link *l) { return l->ackers; } static u16 link_bc_rcv_gap(struct tipc_link *l) { struct sk_buff *skb = skb_peek(&l->deferdq); u16 gap = 0; if (more(l->snd_nxt, l->rcv_nxt)) gap = l->snd_nxt - l->rcv_nxt; if (skb) gap = buf_seqno(skb) - l->rcv_nxt; return gap; } void tipc_link_set_mtu(struct tipc_link *l, int mtu) { l->mtu = mtu; } int tipc_link_mtu(struct tipc_link *l) { return l->mtu; } int tipc_link_mss(struct tipc_link *l) { #ifdef CONFIG_TIPC_CRYPTO return l->mtu - INT_H_SIZE - EMSG_OVERHEAD; #else return l->mtu - INT_H_SIZE; #endif } u16 tipc_link_rcv_nxt(struct tipc_link *l) { return l->rcv_nxt; } u16 tipc_link_acked(struct tipc_link *l) { return l->acked; } char *tipc_link_name(struct tipc_link *l) { return l->name; } u32 tipc_link_state(struct tipc_link *l) { return l->state; } /** * tipc_link_create - create a new link * @net: pointer to associated network namespace * @if_name: associated interface name * @bearer_id: id (index) of associated bearer * @tolerance: link 
tolerance to be used by link * @net_plane: network plane (A,B,c..) this link belongs to * @mtu: mtu to be advertised by link * @priority: priority to be used by link * @min_win: minimal send window to be used by link * @max_win: maximal send window to be used by link * @session: session to be used by link * @peer: node id of peer node * @peer_caps: bitmap describing peer node capabilities * @bc_sndlink: the namespace global link used for broadcast sending * @bc_rcvlink: the peer specific link used for broadcast reception * @inputq: queue to put messages ready for delivery * @namedq: queue to put binding table update messages ready for delivery * @link: return value, pointer to put the created link * @self: local unicast link id * @peer_id: 128-bit ID of peer * * Return: true if link was created, otherwise false */ bool tipc_link_create(struct net *net, char *if_name, int bearer_id, int tolerance, char net_plane, u32 mtu, int priority, u32 min_win, u32 max_win, u32 session, u32 self, u32 peer, u8 *peer_id, u16 peer_caps, struct tipc_link *bc_sndlink, struct tipc_link *bc_rcvlink, struct sk_buff_head *inputq, struct sk_buff_head *namedq, struct tipc_link **link) { char peer_str[NODE_ID_STR_LEN] = {0,}; char self_str[NODE_ID_STR_LEN] = {0,}; struct tipc_link *l; l = kzalloc(sizeof(*l), GFP_ATOMIC); if (!l) return false; *link = l; l->session = session; /* Set link name for unicast links only */ if (peer_id) { if (tipc_nodeid2string(self_str, tipc_own_id(net)) > NODE_ID_LEN) sprintf(self_str, "%x", self); if (tipc_nodeid2string(peer_str, peer_id) > NODE_ID_LEN) sprintf(peer_str, "%x", peer); } /* Peer i/f name will be completed by reset/activate message */ snprintf(l->name, sizeof(l->name), "%s:%s-%s:unknown", self_str, if_name, peer_str); strcpy(l->if_name, if_name); l->addr = peer; l->peer_caps = peer_caps; l->net = net; l->in_session = false; l->bearer_id = bearer_id; l->tolerance = tolerance; if (bc_rcvlink) bc_rcvlink->tolerance = tolerance; l->net_plane = net_plane; l->advertised_mtu = mtu; l->mtu = mtu; l->priority = priority; tipc_link_set_queue_limits(l, min_win, max_win); l->ackers = 1; l->bc_sndlink = bc_sndlink; l->bc_rcvlink = bc_rcvlink; l->inputq = inputq; l->namedq = namedq; l->state = LINK_RESETTING; __skb_queue_head_init(&l->transmq); __skb_queue_head_init(&l->backlogq); __skb_queue_head_init(&l->deferdq); __skb_queue_head_init(&l->failover_deferdq); skb_queue_head_init(&l->wakeupq); skb_queue_head_init(l->inputq); return true; } /** * tipc_link_bc_create - create new link to be used for broadcast * @net: pointer to associated network namespace * @mtu: mtu to be used initially if no peers * @min_win: minimal send window to be used by link * @max_win: maximal send window to be used by link * @inputq: queue to put messages ready for delivery * @namedq: queue to put binding table update messages ready for delivery * @link: return value, pointer to put the created link * @ownnode: identity of own node * @peer: node id of peer node * @peer_id: 128-bit ID of peer * @peer_caps: bitmap describing peer node capabilities * @bc_sndlink: the namespace global link used for broadcast sending * * Return: true if link was created, otherwise false */ bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer, u8 *peer_id, int mtu, u32 min_win, u32 max_win, u16 peer_caps, struct sk_buff_head *inputq, struct sk_buff_head *namedq, struct tipc_link *bc_sndlink, struct tipc_link **link) { struct tipc_link *l; if (!tipc_link_create(net, "", MAX_BEARERS, 0, 'Z', mtu, 0, min_win, max_win, 0, 
ownnode, peer, NULL, peer_caps, bc_sndlink, NULL, inputq, namedq, link)) return false; l = *link; if (peer_id) { char peer_str[NODE_ID_STR_LEN] = {0,}; if (tipc_nodeid2string(peer_str, peer_id) > NODE_ID_LEN) sprintf(peer_str, "%x", peer); /* Broadcast receiver link name: "broadcast-link:<peer>" */ snprintf(l->name, sizeof(l->name), "%s:%s", tipc_bclink_name, peer_str); } else { strcpy(l->name, tipc_bclink_name); } trace_tipc_link_reset(l, TIPC_DUMP_ALL, "bclink created!"); tipc_link_reset(l); l->state = LINK_RESET; l->ackers = 0; l->bc_rcvlink = l; /* Broadcast send link is always up */ if (link_is_bc_sndlink(l)) l->state = LINK_ESTABLISHED; /* Disable replicast if even a single peer doesn't support it */ if (link_is_bc_rcvlink(l) && !(peer_caps & TIPC_BCAST_RCAST)) tipc_bcast_toggle_rcast(net, false); return true; } /** * tipc_link_fsm_evt - link finite state machine * @l: pointer to link * @evt: state machine event to be processed */ int tipc_link_fsm_evt(struct tipc_link *l, int evt) { int rc = 0; int old_state = l->state; switch (l->state) { case LINK_RESETTING: switch (evt) { case LINK_PEER_RESET_EVT: l->state = LINK_PEER_RESET; break; case LINK_RESET_EVT: l->state = LINK_RESET; break; case LINK_FAILURE_EVT: case LINK_FAILOVER_BEGIN_EVT: case LINK_ESTABLISH_EVT: case LINK_FAILOVER_END_EVT: case LINK_SYNCH_BEGIN_EVT: case LINK_SYNCH_END_EVT: default: goto illegal_evt; } break; case LINK_RESET: switch (evt) { case LINK_PEER_RESET_EVT: l->state = LINK_ESTABLISHING; break; case LINK_FAILOVER_BEGIN_EVT: l->state = LINK_FAILINGOVER; break; case LINK_FAILURE_EVT: case LINK_RESET_EVT: case LINK_ESTABLISH_EVT: case LINK_FAILOVER_END_EVT: break; case LINK_SYNCH_BEGIN_EVT: case LINK_SYNCH_END_EVT: default: goto illegal_evt; } break; case LINK_PEER_RESET: switch (evt) { case LINK_RESET_EVT: l->state = LINK_ESTABLISHING; break; case LINK_PEER_RESET_EVT: case LINK_ESTABLISH_EVT: case LINK_FAILURE_EVT: break; case LINK_SYNCH_BEGIN_EVT: case LINK_SYNCH_END_EVT: case LINK_FAILOVER_BEGIN_EVT: case LINK_FAILOVER_END_EVT: default: goto illegal_evt; } break; case LINK_FAILINGOVER: switch (evt) { case LINK_FAILOVER_END_EVT: l->state = LINK_RESET; break; case LINK_PEER_RESET_EVT: case LINK_RESET_EVT: case LINK_ESTABLISH_EVT: case LINK_FAILURE_EVT: break; case LINK_FAILOVER_BEGIN_EVT: case LINK_SYNCH_BEGIN_EVT: case LINK_SYNCH_END_EVT: default: goto illegal_evt; } break; case LINK_ESTABLISHING: switch (evt) { case LINK_ESTABLISH_EVT: l->state = LINK_ESTABLISHED; break; case LINK_FAILOVER_BEGIN_EVT: l->state = LINK_FAILINGOVER; break; case LINK_RESET_EVT: l->state = LINK_RESET; break; case LINK_FAILURE_EVT: case LINK_PEER_RESET_EVT: case LINK_SYNCH_BEGIN_EVT: case LINK_FAILOVER_END_EVT: break; case LINK_SYNCH_END_EVT: default: goto illegal_evt; } break; case LINK_ESTABLISHED: switch (evt) { case LINK_PEER_RESET_EVT: l->state = LINK_PEER_RESET; rc |= TIPC_LINK_DOWN_EVT; break; case LINK_FAILURE_EVT: l->state = LINK_RESETTING; rc |= TIPC_LINK_DOWN_EVT; break; case LINK_RESET_EVT: l->state = LINK_RESET; break; case LINK_ESTABLISH_EVT: case LINK_SYNCH_END_EVT: break; case LINK_SYNCH_BEGIN_EVT: l->state = LINK_SYNCHING; break; case LINK_FAILOVER_BEGIN_EVT: case LINK_FAILOVER_END_EVT: default: goto illegal_evt; } break; case LINK_SYNCHING: switch (evt) { case LINK_PEER_RESET_EVT: l->state = LINK_PEER_RESET; rc |= TIPC_LINK_DOWN_EVT; break; case LINK_FAILURE_EVT: l->state = LINK_RESETTING; rc |= TIPC_LINK_DOWN_EVT; break; case LINK_RESET_EVT: l->state = LINK_RESET; break; case LINK_ESTABLISH_EVT: case 
LINK_SYNCH_BEGIN_EVT: break; case LINK_SYNCH_END_EVT: l->state = LINK_ESTABLISHED; break; case LINK_FAILOVER_BEGIN_EVT: case LINK_FAILOVER_END_EVT: default: goto illegal_evt; } break; default: pr_err("Unknown FSM state %x in %s\n", l->state, l->name); } trace_tipc_link_fsm(l->name, old_state, l->state, evt); return rc; illegal_evt: pr_err("Illegal FSM event %x in state %x on link %s\n", evt, l->state, l->name); trace_tipc_link_fsm(l->name, old_state, l->state, evt); return rc; } /* link_profile_stats - update statistical profiling of traffic */ static void link_profile_stats(struct tipc_link *l) { struct sk_buff *skb; struct tipc_msg *msg; int length; /* Update counters used in statistical profiling of send traffic */ l->stats.accu_queue_sz += skb_queue_len(&l->transmq); l->stats.queue_sz_counts++; skb = skb_peek(&l->transmq); if (!skb) return; msg = buf_msg(skb); length = msg_size(msg); if (msg_user(msg) == MSG_FRAGMENTER) { if (msg_type(msg) != FIRST_FRAGMENT) return; length = msg_size(msg_inner_hdr(msg)); } l->stats.msg_lengths_total += length; l->stats.msg_length_counts++; if (length <= 64) l->stats.msg_length_profile[0]++; else if (length <= 256) l->stats.msg_length_profile[1]++; else if (length <= 1024) l->stats.msg_length_profile[2]++; else if (length <= 4096) l->stats.msg_length_profile[3]++; else if (length <= 16384) l->stats.msg_length_profile[4]++; else if (length <= 32768) l->stats.msg_length_profile[5]++; else l->stats.msg_length_profile[6]++; } /** * tipc_link_too_silent - check if link is "too silent" * @l: tipc link to be checked * * Return: true if the link 'silent_intv_cnt' is about to reach the * 'abort_limit' value, otherwise false */ bool tipc_link_too_silent(struct tipc_link *l) { return (l->silent_intv_cnt + 2 > l->abort_limit); } /* tipc_link_timeout - perform periodic task as instructed from node timeout */ int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq) { int mtyp = 0; int rc = 0; bool state = false; bool probe = false; bool setup = false; u16 bc_snt = l->bc_sndlink->snd_nxt - 1; u16 bc_acked = l->bc_rcvlink->acked; struct tipc_mon_state *mstate = &l->mon_state; trace_tipc_link_timeout(l, TIPC_DUMP_NONE, " "); trace_tipc_link_too_silent(l, TIPC_DUMP_ALL, " "); switch (l->state) { case LINK_ESTABLISHED: case LINK_SYNCHING: mtyp = STATE_MSG; link_profile_stats(l); tipc_mon_get_state(l->net, l->addr, mstate, l->bearer_id); if (mstate->reset || (l->silent_intv_cnt > l->abort_limit)) return tipc_link_fsm_evt(l, LINK_FAILURE_EVT); state = bc_acked != bc_snt; state |= l->bc_rcvlink->rcv_unacked; state |= l->rcv_unacked; state |= !skb_queue_empty(&l->transmq); probe = mstate->probing; probe |= l->silent_intv_cnt; if (probe || mstate->monitoring) l->silent_intv_cnt++; probe |= !skb_queue_empty(&l->deferdq); if (l->snd_nxt == l->checkpoint) { tipc_link_update_cwin(l, 0, 0); probe = true; } l->checkpoint = l->snd_nxt; break; case LINK_RESET: setup = l->rst_cnt++ <= 4; setup |= !(l->rst_cnt % 16); mtyp = RESET_MSG; break; case LINK_ESTABLISHING: setup = true; mtyp = ACTIVATE_MSG; break; case LINK_PEER_RESET: case LINK_RESETTING: case LINK_FAILINGOVER: break; default: break; } if (state || probe || setup) tipc_link_build_proto_msg(l, mtyp, probe, 0, 0, 0, 0, xmitq); return rc; } /** * link_schedule_user - schedule a message sender for wakeup after congestion * @l: congested link * @hdr: header of message that is being sent * Create pseudo msg to send back to user when congestion abates */ static int link_schedule_user(struct tipc_link *l, struct tipc_msg 
*hdr) { u32 dnode = tipc_own_addr(l->net); u32 dport = msg_origport(hdr); struct sk_buff *skb; /* Create and schedule wakeup pseudo message */ skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0, dnode, l->addr, dport, 0, 0); if (!skb) return -ENOBUFS; msg_set_dest_droppable(buf_msg(skb), true); TIPC_SKB_CB(skb)->chain_imp = msg_importance(hdr); skb_queue_tail(&l->wakeupq, skb); l->stats.link_congs++; trace_tipc_link_conges(l, TIPC_DUMP_ALL, "wakeup scheduled!"); return -ELINKCONG; } /** * link_prepare_wakeup - prepare users for wakeup after congestion * @l: congested link * Wake up a number of waiting users, as permitted by available space * in the send queue */ static void link_prepare_wakeup(struct tipc_link *l) { struct sk_buff_head *wakeupq = &l->wakeupq; struct sk_buff_head *inputq = l->inputq; struct sk_buff *skb, *tmp; struct sk_buff_head tmpq; int avail[5] = {0,}; int imp = 0; __skb_queue_head_init(&tmpq); for (; imp <= TIPC_SYSTEM_IMPORTANCE; imp++) avail[imp] = l->backlog[imp].limit - l->backlog[imp].len; skb_queue_walk_safe(wakeupq, skb, tmp) { imp = TIPC_SKB_CB(skb)->chain_imp; if (avail[imp] <= 0) continue; avail[imp]--; __skb_unlink(skb, wakeupq); __skb_queue_tail(&tmpq, skb); } spin_lock_bh(&inputq->lock); skb_queue_splice_tail(&tmpq, inputq); spin_unlock_bh(&inputq->lock); } /** * tipc_link_set_skb_retransmit_time - set the time at which retransmission of * the given skb should be next attempted * @skb: skb to set a future retransmission time for * @l: link the skb will be transmitted on */ static void tipc_link_set_skb_retransmit_time(struct sk_buff *skb, struct tipc_link *l) { if (link_is_bc_sndlink(l)) TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM; else TIPC_SKB_CB(skb)->nxt_retr = TIPC_UC_RETR_TIME; } void tipc_link_reset(struct tipc_link *l) { struct sk_buff_head list; u32 imp; __skb_queue_head_init(&list); l->in_session = false; /* Force re-synch of peer session number before establishing */ l->peer_session--; l->session++; l->mtu = l->advertised_mtu; spin_lock_bh(&l->wakeupq.lock); skb_queue_splice_init(&l->wakeupq, &list); spin_unlock_bh(&l->wakeupq.lock); spin_lock_bh(&l->inputq->lock); skb_queue_splice_init(&list, l->inputq); spin_unlock_bh(&l->inputq->lock); __skb_queue_purge(&l->transmq); __skb_queue_purge(&l->deferdq); __skb_queue_purge(&l->backlogq); __skb_queue_purge(&l->failover_deferdq); for (imp = 0; imp <= TIPC_SYSTEM_IMPORTANCE; imp++) { l->backlog[imp].len = 0; l->backlog[imp].target_bskb = NULL; } kfree_skb(l->reasm_buf); kfree_skb(l->reasm_tnlmsg); kfree_skb(l->failover_reasm_skb); l->reasm_buf = NULL; l->reasm_tnlmsg = NULL; l->failover_reasm_skb = NULL; l->rcv_unacked = 0; l->snd_nxt = 1; l->rcv_nxt = 1; l->snd_nxt_state = 1; l->rcv_nxt_state = 1; l->acked = 0; l->last_gap = 0; kfree(l->last_ga); l->last_ga = NULL; l->silent_intv_cnt = 0; l->rst_cnt = 0; l->bc_peer_is_up = false; memset(&l->mon_state, 0, sizeof(l->mon_state)); tipc_link_reset_stats(l); } /** * tipc_link_xmit(): enqueue buffer list according to queue situation * @l: link to use * @list: chain of buffers containing message * @xmitq: returned list of packets to be sent by caller * * Consumes the buffer chain. 
* Messages at TIPC_SYSTEM_IMPORTANCE are always accepted * Return: 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS */ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list, struct sk_buff_head *xmitq) { struct sk_buff_head *backlogq = &l->backlogq; struct sk_buff_head *transmq = &l->transmq; struct sk_buff *skb, *_skb; u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1; u16 ack = l->rcv_nxt - 1; u16 seqno = l->snd_nxt; int pkt_cnt = skb_queue_len(list); unsigned int mss = tipc_link_mss(l); unsigned int cwin = l->window; unsigned int mtu = l->mtu; struct tipc_msg *hdr; bool new_bundle; int rc = 0; int imp; if (pkt_cnt <= 0) return 0; hdr = buf_msg(skb_peek(list)); if (unlikely(msg_size(hdr) > mtu)) { pr_warn("Too large msg, purging xmit list %d %d %d %d %d!\n", skb_queue_len(list), msg_user(hdr), msg_type(hdr), msg_size(hdr), mtu); __skb_queue_purge(list); return -EMSGSIZE; } imp = msg_importance(hdr); /* Allow oversubscription of one data msg per source at congestion */ if (unlikely(l->backlog[imp].len >= l->backlog[imp].limit)) { if (imp == TIPC_SYSTEM_IMPORTANCE) { pr_warn("%s<%s>, link overflow", link_rst_msg, l->name); __skb_queue_purge(list); return -ENOBUFS; } rc = link_schedule_user(l, hdr); } if (pkt_cnt > 1) { l->stats.sent_fragmented++; l->stats.sent_fragments += pkt_cnt; } /* Prepare each packet for sending, and add to relevant queue: */ while ((skb = __skb_dequeue(list))) { if (likely(skb_queue_len(transmq) < cwin)) { hdr = buf_msg(skb); msg_set_seqno(hdr, seqno); msg_set_ack(hdr, ack); msg_set_bcast_ack(hdr, bc_ack); _skb = skb_clone(skb, GFP_ATOMIC); if (!_skb) { kfree_skb(skb); __skb_queue_purge(list); return -ENOBUFS; } __skb_queue_tail(transmq, skb); tipc_link_set_skb_retransmit_time(skb, l); __skb_queue_tail(xmitq, _skb); TIPC_SKB_CB(skb)->ackers = l->ackers; l->rcv_unacked = 0; l->stats.sent_pkts++; seqno++; continue; } if (tipc_msg_try_bundle(l->backlog[imp].target_bskb, &skb, mss, l->addr, &new_bundle)) { if (skb) { /* Keep a ref. 
to the skb for next try */ l->backlog[imp].target_bskb = skb; l->backlog[imp].len++; __skb_queue_tail(backlogq, skb); } else { if (new_bundle) { l->stats.sent_bundles++; l->stats.sent_bundled++; } l->stats.sent_bundled++; } continue; } l->backlog[imp].target_bskb = NULL; l->backlog[imp].len += (1 + skb_queue_len(list)); __skb_queue_tail(backlogq, skb); skb_queue_splice_tail_init(list, backlogq); } l->snd_nxt = seqno; return rc; } static void tipc_link_update_cwin(struct tipc_link *l, int released, bool retransmitted) { int bklog_len = skb_queue_len(&l->backlogq); struct sk_buff_head *txq = &l->transmq; int txq_len = skb_queue_len(txq); u16 cwin = l->window; /* Enter fast recovery */ if (unlikely(retransmitted)) { l->ssthresh = max_t(u16, l->window / 2, 300); l->window = min_t(u16, l->ssthresh, l->window); return; } /* Enter slow start */ if (unlikely(!released)) { l->ssthresh = max_t(u16, l->window / 2, 300); l->window = l->min_win; return; } /* Don't increase window if no pressure on the transmit queue */ if (txq_len + bklog_len < cwin) return; /* Don't increase window if there are holes the transmit queue */ if (txq_len && l->snd_nxt - buf_seqno(skb_peek(txq)) != txq_len) return; l->cong_acks += released; /* Slow start */ if (cwin <= l->ssthresh) { l->window = min_t(u16, cwin + released, l->max_win); return; } /* Congestion avoidance */ if (l->cong_acks < cwin) return; l->window = min_t(u16, ++cwin, l->max_win); l->cong_acks = 0; } static void tipc_link_advance_backlog(struct tipc_link *l, struct sk_buff_head *xmitq) { u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1; struct sk_buff_head *txq = &l->transmq; struct sk_buff *skb, *_skb; u16 ack = l->rcv_nxt - 1; u16 seqno = l->snd_nxt; struct tipc_msg *hdr; u16 cwin = l->window; u32 imp; while (skb_queue_len(txq) < cwin) { skb = skb_peek(&l->backlogq); if (!skb) break; _skb = skb_clone(skb, GFP_ATOMIC); if (!_skb) break; __skb_dequeue(&l->backlogq); hdr = buf_msg(skb); imp = msg_importance(hdr); l->backlog[imp].len--; if (unlikely(skb == l->backlog[imp].target_bskb)) l->backlog[imp].target_bskb = NULL; __skb_queue_tail(&l->transmq, skb); tipc_link_set_skb_retransmit_time(skb, l); __skb_queue_tail(xmitq, _skb); TIPC_SKB_CB(skb)->ackers = l->ackers; msg_set_seqno(hdr, seqno); msg_set_ack(hdr, ack); msg_set_bcast_ack(hdr, bc_ack); l->rcv_unacked = 0; l->stats.sent_pkts++; seqno++; } l->snd_nxt = seqno; } /** * link_retransmit_failure() - Detect repeated retransmit failures * @l: tipc link sender * @r: tipc link receiver (= l in case of unicast) * @rc: returned code * * Return: true if the repeated retransmit failures happens, otherwise * false */ static bool link_retransmit_failure(struct tipc_link *l, struct tipc_link *r, int *rc) { struct sk_buff *skb = skb_peek(&l->transmq); struct tipc_msg *hdr; if (!skb) return false; if (!TIPC_SKB_CB(skb)->retr_cnt) return false; if (!time_after(jiffies, TIPC_SKB_CB(skb)->retr_stamp + msecs_to_jiffies(r->tolerance * 10))) return false; hdr = buf_msg(skb); if (link_is_bc_sndlink(l) && !less(r->acked, msg_seqno(hdr))) return false; pr_warn("Retransmission failure on link <%s>\n", l->name); link_print(l, "State of link "); pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n", msg_user(hdr), msg_type(hdr), msg_size(hdr), msg_errcode(hdr)); pr_info("sqno %u, prev: %x, dest: %x\n", msg_seqno(hdr), msg_prevnode(hdr), msg_destnode(hdr)); pr_info("retr_stamp %d, retr_cnt %d\n", jiffies_to_msecs(TIPC_SKB_CB(skb)->retr_stamp), TIPC_SKB_CB(skb)->retr_cnt); trace_tipc_list_dump(&l->transmq, true, "retrans failure!"); 
trace_tipc_link_dump(l, TIPC_DUMP_NONE, "retrans failure!"); trace_tipc_link_dump(r, TIPC_DUMP_NONE, "retrans failure!"); if (link_is_bc_sndlink(l)) { r->state = LINK_RESET; *rc |= TIPC_LINK_DOWN_EVT; } else { *rc |= tipc_link_fsm_evt(l, LINK_FAILURE_EVT); } return true; } /* tipc_data_input - deliver data and name distr msgs to upper layer * * Consumes buffer if message is of right type * Node lock must be held */ static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb, struct sk_buff_head *inputq) { struct sk_buff_head *mc_inputq = l->bc_rcvlink->inputq; struct tipc_msg *hdr = buf_msg(skb); switch (msg_user(hdr)) { case TIPC_LOW_IMPORTANCE: case TIPC_MEDIUM_IMPORTANCE: case TIPC_HIGH_IMPORTANCE: case TIPC_CRITICAL_IMPORTANCE: if (unlikely(msg_in_group(hdr) || msg_mcast(hdr))) { skb_queue_tail(mc_inputq, skb); return true; } fallthrough; case CONN_MANAGER: skb_queue_tail(inputq, skb); return true; case GROUP_PROTOCOL: skb_queue_tail(mc_inputq, skb); return true; case NAME_DISTRIBUTOR: l->bc_rcvlink->state = LINK_ESTABLISHED; skb_queue_tail(l->namedq, skb); return true; case MSG_BUNDLER: case TUNNEL_PROTOCOL: case MSG_FRAGMENTER: case BCAST_PROTOCOL: return false; #ifdef CONFIG_TIPC_CRYPTO case MSG_CRYPTO: if (sysctl_tipc_key_exchange_enabled && TIPC_SKB_CB(skb)->decrypted) { tipc_crypto_msg_rcv(l->net, skb); return true; } fallthrough; #endif default: pr_warn("Dropping received illegal msg type\n"); kfree_skb(skb); return true; } } /* tipc_link_input - process packet that has passed link protocol check * * Consumes buffer */ static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb, struct sk_buff_head *inputq, struct sk_buff **reasm_skb) { struct tipc_msg *hdr = buf_msg(skb); struct sk_buff *iskb; struct sk_buff_head tmpq; int usr = msg_user(hdr); int pos = 0; if (usr == MSG_BUNDLER) { skb_queue_head_init(&tmpq); l->stats.recv_bundles++; l->stats.recv_bundled += msg_msgcnt(hdr); while (tipc_msg_extract(skb, &iskb, &pos)) tipc_data_input(l, iskb, &tmpq); tipc_skb_queue_splice_tail(&tmpq, inputq); return 0; } else if (usr == MSG_FRAGMENTER) { l->stats.recv_fragments++; if (tipc_buf_append(reasm_skb, &skb)) { l->stats.recv_fragmented++; tipc_data_input(l, skb, inputq); } else if (!*reasm_skb && !link_is_bc_rcvlink(l)) { pr_warn_ratelimited("Unable to build fragment list\n"); return tipc_link_fsm_evt(l, LINK_FAILURE_EVT); } return 0; } else if (usr == BCAST_PROTOCOL) { tipc_bcast_lock(l->net); tipc_link_bc_init_rcv(l->bc_rcvlink, hdr); tipc_bcast_unlock(l->net); } kfree_skb(skb); return 0; } /* tipc_link_tnl_rcv() - receive TUNNEL_PROTOCOL message, drop or process the * inner message along with the ones in the old link's * deferdq * @l: tunnel link * @skb: TUNNEL_PROTOCOL message * @inputq: queue to put messages ready for delivery */ static int tipc_link_tnl_rcv(struct tipc_link *l, struct sk_buff *skb, struct sk_buff_head *inputq) { struct sk_buff **reasm_skb = &l->failover_reasm_skb; struct sk_buff **reasm_tnlmsg = &l->reasm_tnlmsg; struct sk_buff_head *fdefq = &l->failover_deferdq; struct tipc_msg *hdr = buf_msg(skb); struct sk_buff *iskb; int ipos = 0; int rc = 0; u16 seqno; if (msg_type(hdr) == SYNCH_MSG) { kfree_skb(skb); return 0; } /* Not a fragment? 
*/ if (likely(!msg_nof_fragms(hdr))) { if (unlikely(!tipc_msg_extract(skb, &iskb, &ipos))) { pr_warn_ratelimited("Unable to extract msg, defq: %d\n", skb_queue_len(fdefq)); return 0; } kfree_skb(skb); } else { /* Set fragment type for buf_append */ if (msg_fragm_no(hdr) == 1) msg_set_type(hdr, FIRST_FRAGMENT); else if (msg_fragm_no(hdr) < msg_nof_fragms(hdr)) msg_set_type(hdr, FRAGMENT); else msg_set_type(hdr, LAST_FRAGMENT); if (!tipc_buf_append(reasm_tnlmsg, &skb)) { /* Successful but non-complete reassembly? */ if (*reasm_tnlmsg || link_is_bc_rcvlink(l)) return 0; pr_warn_ratelimited("Unable to reassemble tunnel msg\n"); return tipc_link_fsm_evt(l, LINK_FAILURE_EVT); } iskb = skb; } do { seqno = buf_seqno(iskb); if (unlikely(less(seqno, l->drop_point))) { kfree_skb(iskb); continue; } if (unlikely(seqno != l->drop_point)) { __tipc_skb_queue_sorted(fdefq, seqno, iskb); continue; } l->drop_point++; if (!tipc_data_input(l, iskb, inputq)) rc |= tipc_link_input(l, iskb, inputq, reasm_skb); if (unlikely(rc)) break; } while ((iskb = __tipc_skb_dequeue(fdefq, l->drop_point))); return rc; } /** * tipc_get_gap_ack_blks - get Gap ACK blocks from PROTOCOL/STATE_MSG * @ga: returned pointer to the Gap ACK blocks if any * @l: the tipc link * @hdr: the PROTOCOL/STATE_MSG header * @uc: desired Gap ACK blocks type, i.e. unicast (= 1) or broadcast (= 0) * * Return: the total Gap ACK blocks size */ u16 tipc_get_gap_ack_blks(struct tipc_gap_ack_blks **ga, struct tipc_link *l, struct tipc_msg *hdr, bool uc) { struct tipc_gap_ack_blks *p; u16 sz = 0; /* Does peer support the Gap ACK blocks feature? */ if (l->peer_caps & TIPC_GAP_ACK_BLOCK) { p = (struct tipc_gap_ack_blks *)msg_data(hdr); sz = ntohs(p->len); /* Sanity check */ if (sz == struct_size(p, gacks, size_add(p->ugack_cnt, p->bgack_cnt))) { /* Good, check if the desired type exists */ if ((uc && p->ugack_cnt) || (!uc && p->bgack_cnt)) goto ok; /* Backward compatible: peer might not support bc, but uc? */ } else if (uc && sz == struct_size(p, gacks, p->ugack_cnt)) { if (p->ugack_cnt) { p->bgack_cnt = 0; goto ok; } } } /* Other cases: ignore! */ p = NULL; ok: *ga = p; return sz; } static u8 __tipc_build_gap_ack_blks(struct tipc_gap_ack_blks *ga, struct tipc_link *l, u8 start_index) { struct tipc_gap_ack *gacks = &ga->gacks[start_index]; struct sk_buff *skb = skb_peek(&l->deferdq); u16 expect, seqno = 0; u8 n = 0; if (!skb) return 0; expect = buf_seqno(skb); skb_queue_walk(&l->deferdq, skb) { seqno = buf_seqno(skb); if (unlikely(more(seqno, expect))) { gacks[n].ack = htons(expect - 1); gacks[n].gap = htons(seqno - expect); if (++n >= MAX_GAP_ACK_BLKS / 2) { pr_info_ratelimited("Gacks on %s: %d, ql: %d!\n", l->name, n, skb_queue_len(&l->deferdq)); return n; } } else if (unlikely(less(seqno, expect))) { pr_warn("Unexpected skb in deferdq!\n"); continue; } expect = seqno + 1; } /* last block */ gacks[n].ack = htons(seqno); gacks[n].gap = 0; n++; return n; } /* tipc_build_gap_ack_blks - build Gap ACK blocks * @l: tipc unicast link * @hdr: the tipc message buffer to store the Gap ACK blocks after built * * The function builds Gap ACK blocks for both the unicast & broadcast receiver * links of a certain peer, the buffer after built has the network data format * as found at the struct tipc_gap_ack_blks definition. 
* * returns the actual allocated memory size */ static u16 tipc_build_gap_ack_blks(struct tipc_link *l, struct tipc_msg *hdr) { struct tipc_link *bcl = l->bc_rcvlink; struct tipc_gap_ack_blks *ga; u16 len; ga = (struct tipc_gap_ack_blks *)msg_data(hdr); /* Start with broadcast link first */ tipc_bcast_lock(bcl->net); msg_set_bcast_ack(hdr, bcl->rcv_nxt - 1); msg_set_bc_gap(hdr, link_bc_rcv_gap(bcl)); ga->bgack_cnt = __tipc_build_gap_ack_blks(ga, bcl, 0); tipc_bcast_unlock(bcl->net); /* Now for unicast link, but an explicit NACK only (???) */ ga->ugack_cnt = (msg_seq_gap(hdr)) ? __tipc_build_gap_ack_blks(ga, l, ga->bgack_cnt) : 0; /* Total len */ len = struct_size(ga, gacks, size_add(ga->bgack_cnt, ga->ugack_cnt)); ga->len = htons(len); return len; } /* tipc_link_advance_transmq - advance TIPC link transmq queue by releasing * acked packets, also doing retransmissions if * gaps found * @l: tipc link with transmq queue to be advanced * @r: tipc link "receiver" i.e. in case of broadcast (= "l" if unicast) * @acked: seqno of last packet acked by peer without any gaps before * @gap: # of gap packets * @ga: buffer pointer to Gap ACK blocks from peer * @xmitq: queue for accumulating the retransmitted packets if any * @retransmitted: returned boolean value if a retransmission is really issued * @rc: returned code e.g. TIPC_LINK_DOWN_EVT if a repeated retransmit failures * happens (- unlikely case) * * Return: the number of packets released from the link transmq */ static int tipc_link_advance_transmq(struct tipc_link *l, struct tipc_link *r, u16 acked, u16 gap, struct tipc_gap_ack_blks *ga, struct sk_buff_head *xmitq, bool *retransmitted, int *rc) { struct tipc_gap_ack_blks *last_ga = r->last_ga, *this_ga = NULL; struct tipc_gap_ack *gacks = NULL; struct sk_buff *skb, *_skb, *tmp; struct tipc_msg *hdr; u32 qlen = skb_queue_len(&l->transmq); u16 nacked = acked, ngap = gap, gack_cnt = 0; u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1; u16 ack = l->rcv_nxt - 1; u16 seqno, n = 0; u16 end = r->acked, start = end, offset = r->last_gap; u16 si = (last_ga) ? 
last_ga->start_index : 0; bool is_uc = !link_is_bc_sndlink(l); bool bc_has_acked = false; trace_tipc_link_retrans(r, acked + 1, acked + gap, &l->transmq); /* Determine Gap ACK blocks if any for the particular link */ if (ga && is_uc) { /* Get the Gap ACKs, uc part */ gack_cnt = ga->ugack_cnt; gacks = &ga->gacks[ga->bgack_cnt]; } else if (ga) { /* Copy the Gap ACKs, bc part, for later renewal if needed */ this_ga = kmemdup(ga, struct_size(ga, gacks, ga->bgack_cnt), GFP_ATOMIC); if (likely(this_ga)) { this_ga->start_index = 0; /* Start with the bc Gap ACKs */ gack_cnt = this_ga->bgack_cnt; gacks = &this_ga->gacks[0]; } else { /* Hmm, we can get in trouble..., simply ignore it */ pr_warn_ratelimited("Ignoring bc Gap ACKs, no memory\n"); } } /* Advance the link transmq */ skb_queue_walk_safe(&l->transmq, skb, tmp) { seqno = buf_seqno(skb); next_gap_ack: if (less_eq(seqno, nacked)) { if (is_uc) goto release; /* Skip packets peer has already acked */ if (!more(seqno, r->acked)) continue; /* Get the next of last Gap ACK blocks */ while (more(seqno, end)) { if (!last_ga || si >= last_ga->bgack_cnt) break; start = end + offset + 1; end = ntohs(last_ga->gacks[si].ack); offset = ntohs(last_ga->gacks[si].gap); si++; WARN_ONCE(more(start, end) || (!offset && si < last_ga->bgack_cnt) || si > MAX_GAP_ACK_BLKS, "Corrupted Gap ACK: %d %d %d %d %d\n", start, end, offset, si, last_ga->bgack_cnt); } /* Check against the last Gap ACK block */ if (tipc_in_range(seqno, start, end)) continue; /* Update/release the packet peer is acking */ bc_has_acked = true; if (--TIPC_SKB_CB(skb)->ackers) continue; release: /* release skb */ __skb_unlink(skb, &l->transmq); kfree_skb(skb); } else if (less_eq(seqno, nacked + ngap)) { /* First gap: check if repeated retrans failures? */ if (unlikely(seqno == acked + 1 && link_retransmit_failure(l, r, rc))) { /* Ignore this bc Gap ACKs if any */ kfree(this_ga); this_ga = NULL; break; } /* retransmit skb if unrestricted*/ if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr)) continue; tipc_link_set_skb_retransmit_time(skb, l); _skb = pskb_copy(skb, GFP_ATOMIC); if (!_skb) continue; hdr = buf_msg(_skb); msg_set_ack(hdr, ack); msg_set_bcast_ack(hdr, bc_ack); _skb->priority = TC_PRIO_CONTROL; __skb_queue_tail(xmitq, _skb); l->stats.retransmitted++; if (!is_uc) r->stats.retransmitted++; *retransmitted = true; /* Increase actual retrans counter & mark first time */ if (!TIPC_SKB_CB(skb)->retr_cnt++) TIPC_SKB_CB(skb)->retr_stamp = jiffies; } else { /* retry with Gap ACK blocks if any */ if (n >= gack_cnt) break; nacked = ntohs(gacks[n].ack); ngap = ntohs(gacks[n].gap); n++; goto next_gap_ack; } } /* Renew last Gap ACK blocks for bc if needed */ if (bc_has_acked) { if (this_ga) { kfree(last_ga); r->last_ga = this_ga; r->last_gap = gap; } else if (last_ga) { if (less(acked, start)) { si--; offset = start - acked - 1; } else if (less(acked, end)) { acked = end; } if (si < last_ga->bgack_cnt) { last_ga->start_index = si; r->last_gap = offset; } else { kfree(last_ga); r->last_ga = NULL; r->last_gap = 0; } } else { r->last_gap = 0; } r->acked = acked; } else { kfree(this_ga); } return qlen - skb_queue_len(&l->transmq); } /* tipc_link_build_state_msg: prepare link state message for transmission * * Note that sending of broadcast ack is coordinated among nodes, to reduce * risk of ack storms towards the sender */ int tipc_link_build_state_msg(struct tipc_link *l, struct sk_buff_head *xmitq) { if (!l) return 0; /* Broadcast ACK must be sent via a unicast link => defer to caller */ if 
(link_is_bc_rcvlink(l)) { if (((l->rcv_nxt ^ tipc_own_addr(l->net)) & 0xf) != 0xf) return 0; l->rcv_unacked = 0; /* Use snd_nxt to store peer's snd_nxt in broadcast rcv link */ l->snd_nxt = l->rcv_nxt; return TIPC_LINK_SND_STATE; } /* Unicast ACK */ l->rcv_unacked = 0; l->stats.sent_acks++; tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, 0, xmitq); return 0; } /* tipc_link_build_reset_msg: prepare link RESET or ACTIVATE message */ void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq) { int mtyp = RESET_MSG; struct sk_buff *skb; if (l->state == LINK_ESTABLISHING) mtyp = ACTIVATE_MSG; tipc_link_build_proto_msg(l, mtyp, 0, 0, 0, 0, 0, xmitq); /* Inform peer that this endpoint is going down if applicable */ skb = skb_peek_tail(xmitq); if (skb && (l->state == LINK_RESET)) msg_set_peer_stopping(buf_msg(skb), 1); } /* tipc_link_build_nack_msg: prepare link nack message for transmission * Note that sending of broadcast NACK is coordinated among nodes, to * reduce the risk of NACK storms towards the sender */ static int tipc_link_build_nack_msg(struct tipc_link *l, struct sk_buff_head *xmitq) { u32 def_cnt = ++l->stats.deferred_recv; struct sk_buff_head *dfq = &l->deferdq; u32 defq_len = skb_queue_len(dfq); int match1, match2; if (link_is_bc_rcvlink(l)) { match1 = def_cnt & 0xf; match2 = tipc_own_addr(l->net) & 0xf; if (match1 == match2) return TIPC_LINK_SND_STATE; return 0; } if (defq_len >= 3 && !((defq_len - 3) % 16)) { u16 rcvgap = buf_seqno(skb_peek(dfq)) - l->rcv_nxt; tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, rcvgap, 0, 0, xmitq); } return 0; } /* tipc_link_rcv - process TIPC packets/messages arriving from off-node * @l: the link that should handle the message * @skb: TIPC packet * @xmitq: queue to place packets to be sent after this call */ int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb, struct sk_buff_head *xmitq) { struct sk_buff_head *defq = &l->deferdq; struct tipc_msg *hdr = buf_msg(skb); u16 seqno, rcv_nxt, win_lim; int released = 0; int rc = 0; /* Verify and update link state */ if (unlikely(msg_user(hdr) == LINK_PROTOCOL)) return tipc_link_proto_rcv(l, skb, xmitq); /* Don't send probe at next timeout expiration */ l->silent_intv_cnt = 0; do { hdr = buf_msg(skb); seqno = msg_seqno(hdr); rcv_nxt = l->rcv_nxt; win_lim = rcv_nxt + TIPC_MAX_LINK_WIN; if (unlikely(!tipc_link_is_up(l))) { if (l->state == LINK_ESTABLISHING) rc = TIPC_LINK_UP_EVT; kfree_skb(skb); break; } /* Drop if outside receive window */ if (unlikely(less(seqno, rcv_nxt) || more(seqno, win_lim))) { l->stats.duplicates++; kfree_skb(skb); break; } released += tipc_link_advance_transmq(l, l, msg_ack(hdr), 0, NULL, NULL, NULL, NULL); /* Defer delivery if sequence gap */ if (unlikely(seqno != rcv_nxt)) { if (!__tipc_skb_queue_sorted(defq, seqno, skb)) l->stats.duplicates++; rc |= tipc_link_build_nack_msg(l, xmitq); break; } /* Deliver packet */ l->rcv_nxt++; l->stats.recv_pkts++; if (unlikely(msg_user(hdr) == TUNNEL_PROTOCOL)) rc |= tipc_link_tnl_rcv(l, skb, l->inputq); else if (!tipc_data_input(l, skb, l->inputq)) rc |= tipc_link_input(l, skb, l->inputq, &l->reasm_buf); if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN)) rc |= tipc_link_build_state_msg(l, xmitq); if (unlikely(rc & ~TIPC_LINK_SND_STATE)) break; } while ((skb = __tipc_skb_dequeue(defq, l->rcv_nxt))); /* Forward queues and wake up waiting users */ if (released) { tipc_link_update_cwin(l, released, 0); tipc_link_advance_backlog(l, xmitq); if (unlikely(!skb_queue_empty(&l->wakeupq))) link_prepare_wakeup(l); } return 
rc; } static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe, bool probe_reply, u16 rcvgap, int tolerance, int priority, struct sk_buff_head *xmitq) { struct tipc_mon_state *mstate = &l->mon_state; struct sk_buff_head *dfq = &l->deferdq; struct tipc_link *bcl = l->bc_rcvlink; struct tipc_msg *hdr; struct sk_buff *skb; bool node_up = tipc_link_is_up(bcl); u16 glen = 0, bc_rcvgap = 0; int dlen = 0; void *data; /* Don't send protocol message during reset or link failover */ if (tipc_link_is_blocked(l)) return; if (!tipc_link_is_up(l) && (mtyp == STATE_MSG)) return; if ((probe || probe_reply) && !skb_queue_empty(dfq)) rcvgap = buf_seqno(skb_peek(dfq)) - l->rcv_nxt; skb = tipc_msg_create(LINK_PROTOCOL, mtyp, INT_H_SIZE, tipc_max_domain_size + MAX_GAP_ACK_BLKS_SZ, l->addr, tipc_own_addr(l->net), 0, 0, 0); if (!skb) return; hdr = buf_msg(skb); data = msg_data(hdr); msg_set_session(hdr, l->session); msg_set_bearer_id(hdr, l->bearer_id); msg_set_net_plane(hdr, l->net_plane); msg_set_next_sent(hdr, l->snd_nxt); msg_set_ack(hdr, l->rcv_nxt - 1); msg_set_bcast_ack(hdr, bcl->rcv_nxt - 1); msg_set_bc_ack_invalid(hdr, !node_up); msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1); msg_set_link_tolerance(hdr, tolerance); msg_set_linkprio(hdr, priority); msg_set_redundant_link(hdr, node_up); msg_set_seq_gap(hdr, 0); msg_set_seqno(hdr, l->snd_nxt + U16_MAX / 2); if (mtyp == STATE_MSG) { if (l->peer_caps & TIPC_LINK_PROTO_SEQNO) msg_set_seqno(hdr, l->snd_nxt_state++); msg_set_seq_gap(hdr, rcvgap); bc_rcvgap = link_bc_rcv_gap(bcl); msg_set_bc_gap(hdr, bc_rcvgap); msg_set_probe(hdr, probe); msg_set_is_keepalive(hdr, probe || probe_reply); if (l->peer_caps & TIPC_GAP_ACK_BLOCK) glen = tipc_build_gap_ack_blks(l, hdr); tipc_mon_prep(l->net, data + glen, &dlen, mstate, l->bearer_id); msg_set_size(hdr, INT_H_SIZE + glen + dlen); skb_trim(skb, INT_H_SIZE + glen + dlen); l->stats.sent_states++; l->rcv_unacked = 0; } else { /* RESET_MSG or ACTIVATE_MSG */ if (mtyp == ACTIVATE_MSG) { msg_set_dest_session_valid(hdr, 1); msg_set_dest_session(hdr, l->peer_session); } msg_set_max_pkt(hdr, l->advertised_mtu); strcpy(data, l->if_name); msg_set_size(hdr, INT_H_SIZE + TIPC_MAX_IF_NAME); skb_trim(skb, INT_H_SIZE + TIPC_MAX_IF_NAME); } if (probe) l->stats.sent_probes++; if (rcvgap) l->stats.sent_nacks++; if (bc_rcvgap) bcl->stats.sent_nacks++; skb->priority = TC_PRIO_CONTROL; __skb_queue_tail(xmitq, skb); trace_tipc_proto_build(skb, false, l->name); } void tipc_link_create_dummy_tnl_msg(struct tipc_link *l, struct sk_buff_head *xmitq) { u32 onode = tipc_own_addr(l->net); struct tipc_msg *hdr, *ihdr; struct sk_buff_head tnlq; struct sk_buff *skb; u32 dnode = l->addr; __skb_queue_head_init(&tnlq); skb = tipc_msg_create(TUNNEL_PROTOCOL, FAILOVER_MSG, INT_H_SIZE, BASIC_H_SIZE, dnode, onode, 0, 0, 0); if (!skb) { pr_warn("%sunable to create tunnel packet\n", link_co_err); return; } hdr = buf_msg(skb); msg_set_msgcnt(hdr, 1); msg_set_bearer_id(hdr, l->peer_bearer_id); ihdr = (struct tipc_msg *)msg_data(hdr); tipc_msg_init(onode, ihdr, TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG, BASIC_H_SIZE, dnode); msg_set_errcode(ihdr, TIPC_ERR_NO_PORT); __skb_queue_tail(&tnlq, skb); tipc_link_xmit(l, &tnlq, xmitq); } /* tipc_link_tnl_prepare(): prepare and return a list of tunnel packets * with contents of the link's transmit and backlog queues. 
*/ void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl, int mtyp, struct sk_buff_head *xmitq) { struct sk_buff *skb, *tnlskb; struct tipc_msg *hdr, tnlhdr; struct sk_buff_head *queue = &l->transmq; struct sk_buff_head tmpxq, tnlq, frags; u16 pktlen, pktcnt, seqno = l->snd_nxt; bool pktcnt_need_update = false; u16 syncpt; int rc; if (!tnl) return; __skb_queue_head_init(&tnlq); /* Link Synching: * From now on, send only one single ("dummy") SYNCH message * to peer. The SYNCH message does not contain any data, just * a header conveying the synch point to the peer. */ if (mtyp == SYNCH_MSG && (tnl->peer_caps & TIPC_TUNNEL_ENHANCED)) { tnlskb = tipc_msg_create(TUNNEL_PROTOCOL, SYNCH_MSG, INT_H_SIZE, 0, l->addr, tipc_own_addr(l->net), 0, 0, 0); if (!tnlskb) { pr_warn("%sunable to create dummy SYNCH_MSG\n", link_co_err); return; } hdr = buf_msg(tnlskb); syncpt = l->snd_nxt + skb_queue_len(&l->backlogq) - 1; msg_set_syncpt(hdr, syncpt); msg_set_bearer_id(hdr, l->peer_bearer_id); __skb_queue_tail(&tnlq, tnlskb); tipc_link_xmit(tnl, &tnlq, xmitq); return; } __skb_queue_head_init(&tmpxq); __skb_queue_head_init(&frags); /* At least one packet required for safe algorithm => add dummy */ skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG, BASIC_H_SIZE, 0, l->addr, tipc_own_addr(l->net), 0, 0, TIPC_ERR_NO_PORT); if (!skb) { pr_warn("%sunable to create tunnel packet\n", link_co_err); return; } __skb_queue_tail(&tnlq, skb); tipc_link_xmit(l, &tnlq, &tmpxq); __skb_queue_purge(&tmpxq); /* Initialize reusable tunnel packet header */ tipc_msg_init(tipc_own_addr(l->net), &tnlhdr, TUNNEL_PROTOCOL, mtyp, INT_H_SIZE, l->addr); if (mtyp == SYNCH_MSG) pktcnt = l->snd_nxt - buf_seqno(skb_peek(&l->transmq)); else pktcnt = skb_queue_len(&l->transmq); pktcnt += skb_queue_len(&l->backlogq); msg_set_msgcnt(&tnlhdr, pktcnt); msg_set_bearer_id(&tnlhdr, l->peer_bearer_id); tnl: /* Wrap each packet into a tunnel packet */ skb_queue_walk(queue, skb) { hdr = buf_msg(skb); if (queue == &l->backlogq) msg_set_seqno(hdr, seqno++); pktlen = msg_size(hdr); /* Tunnel link MTU is not large enough? This could be * due to: * 1) Link MTU has just changed or set differently; * 2) Or FAILOVER on the top of a SYNCH message * * The 2nd case should not happen if peer supports * TIPC_TUNNEL_ENHANCED */ if (pktlen > tnl->mtu - INT_H_SIZE) { if (mtyp == FAILOVER_MSG && (tnl->peer_caps & TIPC_TUNNEL_ENHANCED)) { rc = tipc_msg_fragment(skb, &tnlhdr, tnl->mtu, &frags); if (rc) { pr_warn("%sunable to frag msg: rc %d\n", link_co_err, rc); return; } pktcnt += skb_queue_len(&frags) - 1; pktcnt_need_update = true; skb_queue_splice_tail_init(&frags, &tnlq); continue; } /* Unluckily, peer doesn't have TIPC_TUNNEL_ENHANCED * => Just warn it and return! 
*/ pr_warn_ratelimited("%stoo large msg <%d, %d>: %d!\n", link_co_err, msg_user(hdr), msg_type(hdr), msg_size(hdr)); return; } msg_set_size(&tnlhdr, pktlen + INT_H_SIZE); tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE, GFP_ATOMIC); if (!tnlskb) { pr_warn("%sunable to send packet\n", link_co_err); return; } skb_copy_to_linear_data(tnlskb, &tnlhdr, INT_H_SIZE); skb_copy_to_linear_data_offset(tnlskb, INT_H_SIZE, hdr, pktlen); __skb_queue_tail(&tnlq, tnlskb); } if (queue != &l->backlogq) { queue = &l->backlogq; goto tnl; } if (pktcnt_need_update) skb_queue_walk(&tnlq, skb) { hdr = buf_msg(skb); msg_set_msgcnt(hdr, pktcnt); } tipc_link_xmit(tnl, &tnlq, xmitq); if (mtyp == FAILOVER_MSG) { struct sk_buff_head *fdefq = &tnl->failover_deferdq; tnl->drop_point = l->rcv_nxt; tnl->failover_reasm_skb = l->reasm_buf; l->reasm_buf = NULL; /* Failover the link's deferdq */ if (unlikely(!skb_queue_empty(fdefq))) { pr_warn("Link failover deferdq not empty: %d!\n", skb_queue_len(fdefq)); __skb_queue_purge(fdefq); } skb_queue_splice_init(&l->deferdq, fdefq); } } /** * tipc_link_failover_prepare() - prepare tnl for link failover * * This is a special version of the precursor - tipc_link_tnl_prepare(), * see the tipc_node_link_failover() for details * * @l: failover link * @tnl: tunnel link * @xmitq: queue for messages to be xmited */ void tipc_link_failover_prepare(struct tipc_link *l, struct tipc_link *tnl, struct sk_buff_head *xmitq) { struct sk_buff_head *fdefq = &tnl->failover_deferdq; tipc_link_create_dummy_tnl_msg(tnl, xmitq); /* This failover link endpoint was never established before, * so it has not received anything from peer. * Otherwise, it must be a normal failover situation or the * node has entered SELF_DOWN_PEER_LEAVING and both peer nodes * would have to start over from scratch instead. */ tnl->drop_point = 1; tnl->failover_reasm_skb = NULL; /* Initiate the link's failover deferdq */ if (unlikely(!skb_queue_empty(fdefq))) { pr_warn("Link failover deferdq not empty: %d!\n", skb_queue_len(fdefq)); __skb_queue_purge(fdefq); } } /* tipc_link_validate_msg(): validate message against current link state * Returns true if message should be accepted, otherwise false */ bool tipc_link_validate_msg(struct tipc_link *l, struct tipc_msg *hdr) { u16 curr_session = l->peer_session; u16 session = msg_session(hdr); int mtyp = msg_type(hdr); if (msg_user(hdr) != LINK_PROTOCOL) return true; switch (mtyp) { case RESET_MSG: if (!l->in_session) return true; /* Accept only RESET with new session number */ return more(session, curr_session); case ACTIVATE_MSG: if (!l->in_session) return true; /* Accept only ACTIVATE with new or current session number */ return !less(session, curr_session); case STATE_MSG: /* Accept only STATE with current session number */ if (!l->in_session) return false; if (session != curr_session) return false; /* Extra sanity check */ if (!tipc_link_is_up(l) && msg_ack(hdr)) return false; if (!(l->peer_caps & TIPC_LINK_PROTO_SEQNO)) return true; /* Accept only STATE with new sequence number */ return !less(msg_seqno(hdr), l->rcv_nxt_state); default: return false; } } /* tipc_link_proto_rcv(): receive link level protocol message : * Note that network plane id propagates through the network, and may * change at any time. 
The node with lowest numerical id determines * network plane */ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb, struct sk_buff_head *xmitq) { struct tipc_msg *hdr = buf_msg(skb); struct tipc_gap_ack_blks *ga = NULL; bool reply = msg_probe(hdr), retransmitted = false; u32 dlen = msg_data_sz(hdr), glen = 0, msg_max; u16 peers_snd_nxt = msg_next_sent(hdr); u16 peers_tol = msg_link_tolerance(hdr); u16 peers_prio = msg_linkprio(hdr); u16 gap = msg_seq_gap(hdr); u16 ack = msg_ack(hdr); u16 rcv_nxt = l->rcv_nxt; u16 rcvgap = 0; int mtyp = msg_type(hdr); int rc = 0, released; char *if_name; void *data; trace_tipc_proto_rcv(skb, false, l->name); if (dlen > U16_MAX) goto exit; if (tipc_link_is_blocked(l) || !xmitq) goto exit; if (tipc_own_addr(l->net) > msg_prevnode(hdr)) l->net_plane = msg_net_plane(hdr); if (skb_linearize(skb)) goto exit; hdr = buf_msg(skb); data = msg_data(hdr); if (!tipc_link_validate_msg(l, hdr)) { trace_tipc_skb_dump(skb, false, "PROTO invalid (1)!"); trace_tipc_link_dump(l, TIPC_DUMP_NONE, "PROTO invalid (1)!"); goto exit; } switch (mtyp) { case RESET_MSG: case ACTIVATE_MSG: msg_max = msg_max_pkt(hdr); if (msg_max < tipc_bearer_min_mtu(l->net, l->bearer_id)) break; /* Complete own link name with peer's interface name */ if_name = strrchr(l->name, ':') + 1; if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME) break; if (msg_data_sz(hdr) < TIPC_MAX_IF_NAME) break; strscpy(if_name, data, TIPC_MAX_IF_NAME); /* Update own tolerance if peer indicates a non-zero value */ if (tipc_in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) { l->tolerance = peers_tol; l->bc_rcvlink->tolerance = peers_tol; } /* Update own priority if peer's priority is higher */ if (tipc_in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI)) l->priority = peers_prio; /* If peer is going down we want full re-establish cycle */ if (msg_peer_stopping(hdr)) { rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT); break; } /* If this endpoint was re-created while peer was ESTABLISHING * it doesn't know current session number. Force re-synch. 
*/ if (mtyp == ACTIVATE_MSG && msg_dest_session_valid(hdr) && l->session != msg_dest_session(hdr)) { if (less(l->session, msg_dest_session(hdr))) l->session = msg_dest_session(hdr) + 1; break; } /* ACTIVATE_MSG serves as PEER_RESET if link is already down */ if (mtyp == RESET_MSG || !tipc_link_is_up(l)) rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT); /* ACTIVATE_MSG takes up link if it was already locally reset */ if (mtyp == ACTIVATE_MSG && l->state == LINK_ESTABLISHING) rc = TIPC_LINK_UP_EVT; l->peer_session = msg_session(hdr); l->in_session = true; l->peer_bearer_id = msg_bearer_id(hdr); if (l->mtu > msg_max) l->mtu = msg_max; break; case STATE_MSG: /* Validate Gap ACK blocks, drop if invalid */ glen = tipc_get_gap_ack_blks(&ga, l, hdr, true); if (glen > dlen) break; l->rcv_nxt_state = msg_seqno(hdr) + 1; /* Update own tolerance if peer indicates a non-zero value */ if (tipc_in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) { l->tolerance = peers_tol; l->bc_rcvlink->tolerance = peers_tol; } /* Update own prio if peer indicates a different value */ if ((peers_prio != l->priority) && tipc_in_range(peers_prio, 1, TIPC_MAX_LINK_PRI)) { l->priority = peers_prio; rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT); } l->silent_intv_cnt = 0; l->stats.recv_states++; if (msg_probe(hdr)) l->stats.recv_probes++; if (!tipc_link_is_up(l)) { if (l->state == LINK_ESTABLISHING) rc = TIPC_LINK_UP_EVT; break; } tipc_mon_rcv(l->net, data + glen, dlen - glen, l->addr, &l->mon_state, l->bearer_id); /* Send NACK if peer has sent pkts we haven't received yet */ if ((reply || msg_is_keepalive(hdr)) && more(peers_snd_nxt, rcv_nxt) && !tipc_link_is_synching(l) && skb_queue_empty(&l->deferdq)) rcvgap = peers_snd_nxt - l->rcv_nxt; if (rcvgap || reply) tipc_link_build_proto_msg(l, STATE_MSG, 0, reply, rcvgap, 0, 0, xmitq); released = tipc_link_advance_transmq(l, l, ack, gap, ga, xmitq, &retransmitted, &rc); if (gap) l->stats.recv_nacks++; if (released || retransmitted) tipc_link_update_cwin(l, released, retransmitted); if (released) tipc_link_advance_backlog(l, xmitq); if (unlikely(!skb_queue_empty(&l->wakeupq))) link_prepare_wakeup(l); } exit: kfree_skb(skb); return rc; } /* tipc_link_build_bc_proto_msg() - create broadcast protocol message */ static bool tipc_link_build_bc_proto_msg(struct tipc_link *l, bool bcast, u16 peers_snd_nxt, struct sk_buff_head *xmitq) { struct sk_buff *skb; struct tipc_msg *hdr; struct sk_buff *dfrd_skb = skb_peek(&l->deferdq); u16 ack = l->rcv_nxt - 1; u16 gap_to = peers_snd_nxt - 1; skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE, 0, l->addr, tipc_own_addr(l->net), 0, 0, 0); if (!skb) return false; hdr = buf_msg(skb); msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1); msg_set_bcast_ack(hdr, ack); msg_set_bcgap_after(hdr, ack); if (dfrd_skb) gap_to = buf_seqno(dfrd_skb) - 1; msg_set_bcgap_to(hdr, gap_to); msg_set_non_seq(hdr, bcast); __skb_queue_tail(xmitq, skb); return true; } /* tipc_link_build_bc_init_msg() - synchronize broadcast link endpoints. * * Give a newly added peer node the sequence number where it should * start receiving and acking broadcast packets. 
*/ static void tipc_link_build_bc_init_msg(struct tipc_link *l, struct sk_buff_head *xmitq) { struct sk_buff_head list; __skb_queue_head_init(&list); if (!tipc_link_build_bc_proto_msg(l->bc_rcvlink, false, 0, &list)) return; msg_set_bc_ack_invalid(buf_msg(skb_peek(&list)), true); tipc_link_xmit(l, &list, xmitq); } /* tipc_link_bc_init_rcv - receive initial broadcast synch data from peer */ void tipc_link_bc_init_rcv(struct tipc_link *l, struct tipc_msg *hdr) { int mtyp = msg_type(hdr); u16 peers_snd_nxt = msg_bc_snd_nxt(hdr); if (tipc_link_is_up(l)) return; if (msg_user(hdr) == BCAST_PROTOCOL) { l->rcv_nxt = peers_snd_nxt; l->state = LINK_ESTABLISHED; return; } if (l->peer_caps & TIPC_BCAST_SYNCH) return; if (msg_peer_node_is_up(hdr)) return; /* Compatibility: accept older, less safe initial synch data */ if ((mtyp == RESET_MSG) || (mtyp == ACTIVATE_MSG)) l->rcv_nxt = peers_snd_nxt; } /* tipc_link_bc_sync_rcv - update rcv link according to peer's send state */ int tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr, struct sk_buff_head *xmitq) { u16 peers_snd_nxt = msg_bc_snd_nxt(hdr); int rc = 0; if (!tipc_link_is_up(l)) return rc; if (!msg_peer_node_is_up(hdr)) return rc; /* Open when peer acknowledges our bcast init msg (pkt #1) */ if (msg_ack(hdr)) l->bc_peer_is_up = true; if (!l->bc_peer_is_up) return rc; /* Ignore if peers_snd_nxt goes beyond receive window */ if (more(peers_snd_nxt, l->rcv_nxt + l->window)) return rc; l->snd_nxt = peers_snd_nxt; if (link_bc_rcv_gap(l)) rc |= TIPC_LINK_SND_STATE; /* Return now if sender supports nack via STATE messages */ if (l->peer_caps & TIPC_BCAST_STATE_NACK) return rc; /* Otherwise, be backwards compatible */ if (!more(peers_snd_nxt, l->rcv_nxt)) { l->nack_state = BC_NACK_SND_CONDITIONAL; return 0; } /* Don't NACK if one was recently sent or peeked */ if (l->nack_state == BC_NACK_SND_SUPPRESS) { l->nack_state = BC_NACK_SND_UNCONDITIONAL; return 0; } /* Conditionally delay NACK sending until next synch rcv */ if (l->nack_state == BC_NACK_SND_CONDITIONAL) { l->nack_state = BC_NACK_SND_UNCONDITIONAL; if ((peers_snd_nxt - l->rcv_nxt) < TIPC_MIN_LINK_WIN) return 0; } /* Send NACK now but suppress next one */ tipc_link_build_bc_proto_msg(l, true, peers_snd_nxt, xmitq); l->nack_state = BC_NACK_SND_SUPPRESS; return 0; } int tipc_link_bc_ack_rcv(struct tipc_link *r, u16 acked, u16 gap, struct tipc_gap_ack_blks *ga, struct sk_buff_head *xmitq, struct sk_buff_head *retrq) { struct tipc_link *l = r->bc_sndlink; bool unused = false; int rc = 0; if (!tipc_link_is_up(r) || !r->bc_peer_is_up) return 0; if (gap) { l->stats.recv_nacks++; r->stats.recv_nacks++; } if (less(acked, r->acked) || (acked == r->acked && !gap && !ga)) return 0; trace_tipc_link_bc_ack(r, acked, gap, &l->transmq); tipc_link_advance_transmq(l, r, acked, gap, ga, retrq, &unused, &rc); tipc_link_advance_backlog(l, xmitq); if (unlikely(!skb_queue_empty(&l->wakeupq))) link_prepare_wakeup(l); return rc; } /* tipc_link_bc_nack_rcv(): receive broadcast nack message * This function is here for backwards compatibility, since * no BCAST_PROTOCOL/STATE messages occur from TIPC v2.5. 
*/ int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb, struct sk_buff_head *xmitq) { struct tipc_msg *hdr = buf_msg(skb); u32 dnode = msg_destnode(hdr); int mtyp = msg_type(hdr); u16 acked = msg_bcast_ack(hdr); u16 from = acked + 1; u16 to = msg_bcgap_to(hdr); u16 peers_snd_nxt = to + 1; int rc = 0; kfree_skb(skb); if (!tipc_link_is_up(l) || !l->bc_peer_is_up) return 0; if (mtyp != STATE_MSG) return 0; if (dnode == tipc_own_addr(l->net)) { rc = tipc_link_bc_ack_rcv(l, acked, to - acked, NULL, xmitq, xmitq); l->stats.recv_nacks++; return rc; } /* Msg for other node => suppress own NACK at next sync if applicable */ if (more(peers_snd_nxt, l->rcv_nxt) && !less(l->rcv_nxt, from)) l->nack_state = BC_NACK_SND_SUPPRESS; return 0; } void tipc_link_set_queue_limits(struct tipc_link *l, u32 min_win, u32 max_win) { int max_bulk = TIPC_MAX_PUBL / (l->mtu / ITEM_SIZE); l->min_win = min_win; l->ssthresh = max_win; l->max_win = max_win; l->window = min_win; l->backlog[TIPC_LOW_IMPORTANCE].limit = min_win * 2; l->backlog[TIPC_MEDIUM_IMPORTANCE].limit = min_win * 4; l->backlog[TIPC_HIGH_IMPORTANCE].limit = min_win * 6; l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = min_win * 8; l->backlog[TIPC_SYSTEM_IMPORTANCE].limit = max_bulk; } /** * tipc_link_reset_stats - reset link statistics * @l: pointer to link */ void tipc_link_reset_stats(struct tipc_link *l) { memset(&l->stats, 0, sizeof(l->stats)); } static void link_print(struct tipc_link *l, const char *str) { struct sk_buff *hskb = skb_peek(&l->transmq); u16 head = hskb ? msg_seqno(buf_msg(hskb)) : l->snd_nxt - 1; u16 tail = l->snd_nxt - 1; pr_info("%s Link <%s> state %x\n", str, l->name, l->state); pr_info("XMTQ: %u [%u-%u], BKLGQ: %u, SNDNX: %u, RCVNX: %u\n", skb_queue_len(&l->transmq), head, tail, skb_queue_len(&l->backlogq), l->snd_nxt, l->rcv_nxt); } /* Parse and validate nested (link) properties valid for media, bearer and link */ int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[]) { int err; err = nla_parse_nested_deprecated(props, TIPC_NLA_PROP_MAX, prop, tipc_nl_prop_policy, NULL); if (err) return err; if (props[TIPC_NLA_PROP_PRIO]) { u32 prio; prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]); if (prio > TIPC_MAX_LINK_PRI) return -EINVAL; } if (props[TIPC_NLA_PROP_TOL]) { u32 tol; tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]); if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL)) return -EINVAL; } if (props[TIPC_NLA_PROP_WIN]) { u32 max_win; max_win = nla_get_u32(props[TIPC_NLA_PROP_WIN]); if (max_win < TIPC_DEF_LINK_WIN || max_win > TIPC_MAX_LINK_WIN) return -EINVAL; } return 0; } static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s) { int i; struct nlattr *stats; struct nla_map { u32 key; u32 val; }; struct nla_map map[] = { {TIPC_NLA_STATS_RX_INFO, 0}, {TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments}, {TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented}, {TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles}, {TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled}, {TIPC_NLA_STATS_TX_INFO, 0}, {TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments}, {TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented}, {TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles}, {TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled}, {TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ? 
s->msg_length_counts : 1}, {TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts}, {TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total}, {TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]}, {TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]}, {TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]}, {TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]}, {TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]}, {TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]}, {TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]}, {TIPC_NLA_STATS_RX_STATES, s->recv_states}, {TIPC_NLA_STATS_RX_PROBES, s->recv_probes}, {TIPC_NLA_STATS_RX_NACKS, s->recv_nacks}, {TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv}, {TIPC_NLA_STATS_TX_STATES, s->sent_states}, {TIPC_NLA_STATS_TX_PROBES, s->sent_probes}, {TIPC_NLA_STATS_TX_NACKS, s->sent_nacks}, {TIPC_NLA_STATS_TX_ACKS, s->sent_acks}, {TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted}, {TIPC_NLA_STATS_DUPLICATES, s->duplicates}, {TIPC_NLA_STATS_LINK_CONGS, s->link_congs}, {TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz}, {TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ? (s->accu_queue_sz / s->queue_sz_counts) : 0} }; stats = nla_nest_start_noflag(skb, TIPC_NLA_LINK_STATS); if (!stats) return -EMSGSIZE; for (i = 0; i < ARRAY_SIZE(map); i++) if (nla_put_u32(skb, map[i].key, map[i].val)) goto msg_full; nla_nest_end(skb, stats); return 0; msg_full: nla_nest_cancel(skb, stats); return -EMSGSIZE; } /* Caller should hold appropriate locks to protect the link */ int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg, struct tipc_link *link, int nlflags) { u32 self = tipc_own_addr(net); struct nlattr *attrs; struct nlattr *prop; void *hdr; int err; hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family, nlflags, TIPC_NL_LINK_GET); if (!hdr) return -EMSGSIZE; attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK); if (!attrs) goto msg_full; if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name)) goto attr_msg_full; if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST, tipc_cluster_mask(self))) goto attr_msg_full; if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu)) goto attr_msg_full; if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->stats.recv_pkts)) goto attr_msg_full; if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->stats.sent_pkts)) goto attr_msg_full; if (tipc_link_is_up(link)) if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP)) goto attr_msg_full; if (link->active) if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE)) goto attr_msg_full; prop = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK_PROP); if (!prop) goto attr_msg_full; if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority)) goto prop_msg_full; if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance)) goto prop_msg_full; if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, link->window)) goto prop_msg_full; if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority)) goto prop_msg_full; nla_nest_end(msg->skb, prop); err = __tipc_nl_add_stats(msg->skb, &link->stats); if (err) goto attr_msg_full; nla_nest_end(msg->skb, attrs); genlmsg_end(msg->skb, hdr); return 0; prop_msg_full: nla_nest_cancel(msg->skb, prop); attr_msg_full: nla_nest_cancel(msg->skb, attrs); msg_full: genlmsg_cancel(msg->skb, hdr); return -EMSGSIZE; } static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb, struct tipc_stats *stats) { int i; struct nlattr *nest; struct nla_map { __u32 key; __u32 val; }; struct nla_map map[] = { {TIPC_NLA_STATS_RX_INFO, stats->recv_pkts}, {TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments}, 
{TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented}, {TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles}, {TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled}, {TIPC_NLA_STATS_TX_INFO, stats->sent_pkts}, {TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments}, {TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented}, {TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles}, {TIPC_NLA_STATS_TX_BUNDLED, stats->sent_bundled}, {TIPC_NLA_STATS_RX_NACKS, stats->recv_nacks}, {TIPC_NLA_STATS_RX_DEFERRED, stats->deferred_recv}, {TIPC_NLA_STATS_TX_NACKS, stats->sent_nacks}, {TIPC_NLA_STATS_TX_ACKS, stats->sent_acks}, {TIPC_NLA_STATS_RETRANSMITTED, stats->retransmitted}, {TIPC_NLA_STATS_DUPLICATES, stats->duplicates}, {TIPC_NLA_STATS_LINK_CONGS, stats->link_congs}, {TIPC_NLA_STATS_MAX_QUEUE, stats->max_queue_sz}, {TIPC_NLA_STATS_AVG_QUEUE, stats->queue_sz_counts ? (stats->accu_queue_sz / stats->queue_sz_counts) : 0} }; nest = nla_nest_start_noflag(skb, TIPC_NLA_LINK_STATS); if (!nest) return -EMSGSIZE; for (i = 0; i < ARRAY_SIZE(map); i++) if (nla_put_u32(skb, map[i].key, map[i].val)) goto msg_full; nla_nest_end(skb, nest); return 0; msg_full: nla_nest_cancel(skb, nest); return -EMSGSIZE; } int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg, struct tipc_link *bcl) { int err; void *hdr; struct nlattr *attrs; struct nlattr *prop; u32 bc_mode = tipc_bcast_get_mode(net); u32 bc_ratio = tipc_bcast_get_broadcast_ratio(net); if (!bcl) return 0; tipc_bcast_lock(net); hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family, NLM_F_MULTI, TIPC_NL_LINK_GET); if (!hdr) { tipc_bcast_unlock(net); return -EMSGSIZE; } attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK); if (!attrs) goto msg_full; /* The broadcast link is always up */ if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP)) goto attr_msg_full; if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST)) goto attr_msg_full; if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name)) goto attr_msg_full; if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, 0)) goto attr_msg_full; if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, 0)) goto attr_msg_full; prop = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK_PROP); if (!prop) goto attr_msg_full; if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->max_win)) goto prop_msg_full; if (nla_put_u32(msg->skb, TIPC_NLA_PROP_BROADCAST, bc_mode)) goto prop_msg_full; if (bc_mode & BCLINK_MODE_SEL) if (nla_put_u32(msg->skb, TIPC_NLA_PROP_BROADCAST_RATIO, bc_ratio)) goto prop_msg_full; nla_nest_end(msg->skb, prop); err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats); if (err) goto attr_msg_full; tipc_bcast_unlock(net); nla_nest_end(msg->skb, attrs); genlmsg_end(msg->skb, hdr); return 0; prop_msg_full: nla_nest_cancel(msg->skb, prop); attr_msg_full: nla_nest_cancel(msg->skb, attrs); msg_full: tipc_bcast_unlock(net); genlmsg_cancel(msg->skb, hdr); return -EMSGSIZE; } void tipc_link_set_tolerance(struct tipc_link *l, u32 tol, struct sk_buff_head *xmitq) { l->tolerance = tol; if (l->bc_rcvlink) l->bc_rcvlink->tolerance = tol; if (tipc_link_is_up(l)) tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, tol, 0, xmitq); } void tipc_link_set_prio(struct tipc_link *l, u32 prio, struct sk_buff_head *xmitq) { l->priority = prio; tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, prio, xmitq); } void tipc_link_set_abort_limit(struct tipc_link *l, u32 limit) { l->abort_limit = limit; } /** * tipc_link_dump - dump TIPC link data * @l: tipc link to be dumped * @dqueues: bitmask to decide if any link queue to be dumped? 
* - TIPC_DUMP_NONE: don't dump link queues * - TIPC_DUMP_TRANSMQ: dump link transmq queue * - TIPC_DUMP_BACKLOGQ: dump link backlog queue * - TIPC_DUMP_DEFERDQ: dump link deferd queue * - TIPC_DUMP_INPUTQ: dump link input queue * - TIPC_DUMP_WAKEUP: dump link wakeup queue * - TIPC_DUMP_ALL: dump all the link queues above * @buf: returned buffer of dump data in format */ int tipc_link_dump(struct tipc_link *l, u16 dqueues, char *buf) { int i = 0; size_t sz = (dqueues) ? LINK_LMAX : LINK_LMIN; struct sk_buff_head *list; struct sk_buff *hskb, *tskb; u32 len; if (!l) { i += scnprintf(buf, sz, "link data: (null)\n"); return i; } i += scnprintf(buf, sz, "link data: %x", l->addr); i += scnprintf(buf + i, sz - i, " %x", l->state); i += scnprintf(buf + i, sz - i, " %u", l->in_session); i += scnprintf(buf + i, sz - i, " %u", l->session); i += scnprintf(buf + i, sz - i, " %u", l->peer_session); i += scnprintf(buf + i, sz - i, " %u", l->snd_nxt); i += scnprintf(buf + i, sz - i, " %u", l->rcv_nxt); i += scnprintf(buf + i, sz - i, " %u", l->snd_nxt_state); i += scnprintf(buf + i, sz - i, " %u", l->rcv_nxt_state); i += scnprintf(buf + i, sz - i, " %x", l->peer_caps); i += scnprintf(buf + i, sz - i, " %u", l->silent_intv_cnt); i += scnprintf(buf + i, sz - i, " %u", l->rst_cnt); i += scnprintf(buf + i, sz - i, " %u", 0); i += scnprintf(buf + i, sz - i, " %u", 0); i += scnprintf(buf + i, sz - i, " %u", l->acked); list = &l->transmq; len = skb_queue_len(list); hskb = skb_peek(list); tskb = skb_peek_tail(list); i += scnprintf(buf + i, sz - i, " | %u %u %u", len, (hskb) ? msg_seqno(buf_msg(hskb)) : 0, (tskb) ? msg_seqno(buf_msg(tskb)) : 0); list = &l->deferdq; len = skb_queue_len(list); hskb = skb_peek(list); tskb = skb_peek_tail(list); i += scnprintf(buf + i, sz - i, " | %u %u %u", len, (hskb) ? msg_seqno(buf_msg(hskb)) : 0, (tskb) ? msg_seqno(buf_msg(tskb)) : 0); list = &l->backlogq; len = skb_queue_len(list); hskb = skb_peek(list); tskb = skb_peek_tail(list); i += scnprintf(buf + i, sz - i, " | %u %u %u", len, (hskb) ? msg_seqno(buf_msg(hskb)) : 0, (tskb) ? msg_seqno(buf_msg(tskb)) : 0); list = l->inputq; len = skb_queue_len(list); hskb = skb_peek(list); tskb = skb_peek_tail(list); i += scnprintf(buf + i, sz - i, " | %u %u %u\n", len, (hskb) ? msg_seqno(buf_msg(hskb)) : 0, (tskb) ? msg_seqno(buf_msg(tskb)) : 0); if (dqueues & TIPC_DUMP_TRANSMQ) { i += scnprintf(buf + i, sz - i, "transmq: "); i += tipc_list_dump(&l->transmq, false, buf + i); } if (dqueues & TIPC_DUMP_BACKLOGQ) { i += scnprintf(buf + i, sz - i, "backlogq: <%u %u %u %u %u>, ", l->backlog[TIPC_LOW_IMPORTANCE].len, l->backlog[TIPC_MEDIUM_IMPORTANCE].len, l->backlog[TIPC_HIGH_IMPORTANCE].len, l->backlog[TIPC_CRITICAL_IMPORTANCE].len, l->backlog[TIPC_SYSTEM_IMPORTANCE].len); i += tipc_list_dump(&l->backlogq, false, buf + i); } if (dqueues & TIPC_DUMP_DEFERDQ) { i += scnprintf(buf + i, sz - i, "deferdq: "); i += tipc_list_dump(&l->deferdq, false, buf + i); } if (dqueues & TIPC_DUMP_INPUTQ) { i += scnprintf(buf + i, sz - i, "inputq: "); i += tipc_list_dump(l->inputq, false, buf + i); } if (dqueues & TIPC_DUMP_WAKEUP) { i += scnprintf(buf + i, sz - i, "wakeup: "); i += tipc_list_dump(&l->wakeupq, false, buf + i); } return i; }
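The Gap ACK block builder above (__tipc_build_gap_ack_blks) walks the sorted deferred queue and emits one (ack, gap) pair per hole it finds, plus a terminating pair with gap = 0. The following is a minimal, self-contained user-space sketch of that encoding, offered only as an illustration: the struct gap_blk type, the build_gap_blocks() helper and the sample sequence numbers are hypothetical and not part of the kernel source, and 16-bit wraparound handling is deliberately omitted.

#include <stdint.h>
#include <stdio.h>

struct gap_blk {
	uint16_t ack;	/* last seqno received in sequence before a hole */
	uint16_t gap;	/* number of missing packets after 'ack' (0 = last block) */
};

/* Encode sorted, duplicate-free seqnos into (ack, gap) blocks; returns block count. */
static unsigned int build_gap_blocks(const uint16_t *deferred, unsigned int cnt,
				     struct gap_blk *blk, unsigned int max_blk)
{
	unsigned int n = 0;
	uint16_t expect, seqno = 0;

	if (!cnt)
		return 0;
	expect = deferred[0];
	for (unsigned int i = 0; i < cnt && n < max_blk - 1; i++) {
		seqno = deferred[i];
		if (seqno != expect) {
			/* Hole between 'expect' and 'seqno': report it */
			blk[n].ack = expect - 1;
			blk[n].gap = seqno - expect;
			n++;
		}
		expect = seqno + 1;
	}
	/* Terminating block covering everything up to the last deferred packet */
	blk[n].ack = seqno;
	blk[n].gap = 0;
	return n + 1;
}

int main(void)
{
	/* Receiver is missing 105 and already holds 103, 104 and 106 out of order */
	uint16_t deferred[] = { 103, 104, 106 };
	struct gap_blk blk[8];
	unsigned int n = build_gap_blocks(deferred, 3, blk, 8);

	for (unsigned int i = 0; i < n; i++)
		printf("block %u: ack=%u gap=%u\n", i,
		       (unsigned int)blk[i].ack, (unsigned int)blk[i].gap);
	return 0;
}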
/* * llc_c_ac.c - actions performed during connection state transition. * * Description: * Functions in this module are implementation of connection component actions * Details of actions can be found in IEEE-802.2 standard document. * All functions have one connection and one event as input argument. All of * them return 0 On success and 1 otherwise. * * Copyright (c) 1997 by Procom Technology, Inc. * 2001-2003 by Arnaldo Carvalho de Melo <acme@conectiva.com.br> * * This program can be redistributed or modified under the terms of the * GNU General Public License as published by the Free Software Foundation. * This program is distributed without any warranty or implied warranty * of merchantability or fitness for a particular purpose. * * See the GNU General Public License for more details.
*/ #include <linux/netdevice.h> #include <linux/slab.h> #include <net/llc_conn.h> #include <net/llc_sap.h> #include <net/sock.h> #include <net/llc_c_ev.h> #include <net/llc_c_ac.h> #include <net/llc_c_st.h> #include <net/llc_pdu.h> #include <net/llc.h> static int llc_conn_ac_inc_vs_by_1(struct sock *sk, struct sk_buff *skb); static void llc_process_tmr_ev(struct sock *sk, struct sk_buff *skb); static int llc_conn_ac_data_confirm(struct sock *sk, struct sk_buff *ev); static int llc_conn_ac_inc_npta_value(struct sock *sk, struct sk_buff *skb); static int llc_conn_ac_send_rr_rsp_f_set_ackpf(struct sock *sk, struct sk_buff *skb); static int llc_conn_ac_set_p_flag_1(struct sock *sk, struct sk_buff *skb); #define INCORRECT 0 int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb) { struct llc_sock *llc = llc_sk(sk); if (llc->remote_busy_flag) { u8 nr; struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); llc->remote_busy_flag = 0; timer_delete(&llc->busy_state_timer.timer); nr = LLC_I_GET_NR(pdu); llc_conn_resend_i_pdu_as_cmd(sk, nr, 0); } return 0; } int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb) { struct llc_conn_state_ev *ev = llc_conn_ev(skb); ev->ind_prim = LLC_CONN_PRIM; return 0; } int llc_conn_ac_conn_confirm(struct sock *sk, struct sk_buff *skb) { struct llc_conn_state_ev *ev = llc_conn_ev(skb); ev->cfm_prim = LLC_CONN_PRIM; return 0; } static int llc_conn_ac_data_confirm(struct sock *sk, struct sk_buff *skb) { struct llc_conn_state_ev *ev = llc_conn_ev(skb); ev->cfm_prim = LLC_DATA_PRIM; return 0; } int llc_conn_ac_data_ind(struct sock *sk, struct sk_buff *skb) { llc_conn_rtn_pdu(sk, skb); return 0; } int llc_conn_ac_disc_ind(struct sock *sk, struct sk_buff *skb) { struct llc_conn_state_ev *ev = llc_conn_ev(skb); u8 reason = 0; int rc = 0; if (ev->type == LLC_CONN_EV_TYPE_PDU) { struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); if (LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_U(pdu) && LLC_U_PDU_RSP(pdu) == LLC_2_PDU_RSP_DM) reason = LLC_DISC_REASON_RX_DM_RSP_PDU; else if (LLC_PDU_IS_CMD(pdu) && LLC_PDU_TYPE_IS_U(pdu) && LLC_U_PDU_CMD(pdu) == LLC_2_PDU_CMD_DISC) reason = LLC_DISC_REASON_RX_DISC_CMD_PDU; } else if (ev->type == LLC_CONN_EV_TYPE_ACK_TMR) reason = LLC_DISC_REASON_ACK_TMR_EXP; else rc = -EINVAL; if (!rc) { ev->reason = reason; ev->ind_prim = LLC_DISC_PRIM; } return rc; } int llc_conn_ac_disc_confirm(struct sock *sk, struct sk_buff *skb) { struct llc_conn_state_ev *ev = llc_conn_ev(skb); ev->reason = ev->status; ev->cfm_prim = LLC_DISC_PRIM; return 0; } int llc_conn_ac_rst_ind(struct sock *sk, struct sk_buff *skb) { u8 reason = 0; int rc = 1; struct llc_conn_state_ev *ev = llc_conn_ev(skb); struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); struct llc_sock *llc = llc_sk(sk); switch (ev->type) { case LLC_CONN_EV_TYPE_PDU: if (LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_U(pdu) && LLC_U_PDU_RSP(pdu) == LLC_2_PDU_RSP_FRMR) { reason = LLC_RESET_REASON_LOCAL; rc = 0; } else if (LLC_PDU_IS_CMD(pdu) && LLC_PDU_TYPE_IS_U(pdu) && LLC_U_PDU_CMD(pdu) == LLC_2_PDU_CMD_SABME) { reason = LLC_RESET_REASON_REMOTE; rc = 0; } break; case LLC_CONN_EV_TYPE_ACK_TMR: case LLC_CONN_EV_TYPE_P_TMR: case LLC_CONN_EV_TYPE_REJ_TMR: case LLC_CONN_EV_TYPE_BUSY_TMR: if (llc->retry_count > llc->n2) { reason = LLC_RESET_REASON_LOCAL; rc = 0; } break; } if (!rc) { ev->reason = reason; ev->ind_prim = LLC_RESET_PRIM; } return rc; } int llc_conn_ac_rst_confirm(struct sock *sk, struct sk_buff *skb) { struct llc_conn_state_ev *ev = llc_conn_ev(skb); ev->reason = 0; ev->cfm_prim = LLC_RESET_PRIM; return 0; } 
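/*
 * Illustrative sketch, not part of the original file: every action in this
 * module follows the same contract -- it takes the connection's sock and the
 * event skb and returns 0 on success, non-zero otherwise.  The connection
 * state machine (in llc_conn.c, not shown here) walks a NULL-terminated
 * array of such actions for each transition.  The hypothetical helper below
 * restates that dispatch loop, assuming only the llc_conn_action_t typedef
 * from <net/llc_c_ac.h>; the real dispatcher may differ in detail.
 */
static int __maybe_unused llc_example_run_actions(struct sock *sk,
						  struct sk_buff *skb,
						  const llc_conn_action_t *actions)
{
	int rc = 0;

	/* Stop at the first failing action; a NULL entry ends the array. */
	while (!rc && *actions)
		rc = (*actions++)(sk, skb);
	return rc;
}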
int llc_conn_ac_clear_remote_busy_if_f_eq_1(struct sock *sk, struct sk_buff *skb) { struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); if (LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_I(pdu) && LLC_I_PF_IS_1(pdu) && llc_sk(sk)->ack_pf) llc_conn_ac_clear_remote_busy(sk, skb); return 0; } int llc_conn_ac_stop_rej_tmr_if_data_flag_eq_2(struct sock *sk, struct sk_buff *skb) { struct llc_sock *llc = llc_sk(sk); if (llc->data_flag == 2) timer_delete(&llc->rej_sent_timer.timer); return 0; } int llc_conn_ac_send_disc_cmd_p_set_x(struct sock *sk, struct sk_buff *skb) { int rc = -ENOBUFS; struct llc_sock *llc = llc_sk(sk); struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_U, 0); if (nskb) { struct llc_sap *sap = llc->sap; llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_CMD); llc_pdu_init_as_disc_cmd(nskb, 1); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); llc_conn_ac_set_p_flag_1(sk, skb); } out: return rc; free: kfree_skb(nskb); goto out; } int llc_conn_ac_send_dm_rsp_f_set_p(struct sock *sk, struct sk_buff *skb) { int rc = -ENOBUFS; struct llc_sock *llc = llc_sk(sk); struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_U, 0); if (nskb) { struct llc_sap *sap = llc->sap; u8 f_bit; llc_pdu_decode_pf_bit(skb, &f_bit); llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_RSP); llc_pdu_init_as_dm_rsp(nskb, f_bit); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); } out: return rc; free: kfree_skb(nskb); goto out; } int llc_conn_ac_send_dm_rsp_f_set_1(struct sock *sk, struct sk_buff *skb) { int rc = -ENOBUFS; struct llc_sock *llc = llc_sk(sk); struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_U, 0); if (nskb) { struct llc_sap *sap = llc->sap; llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_RSP); llc_pdu_init_as_dm_rsp(nskb, 1); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); } out: return rc; free: kfree_skb(nskb); goto out; } int llc_conn_ac_send_frmr_rsp_f_set_x(struct sock *sk, struct sk_buff *skb) { u8 f_bit; int rc = -ENOBUFS; struct sk_buff *nskb; struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); struct llc_sock *llc = llc_sk(sk); llc->rx_pdu_hdr = *((u32 *)pdu); if (LLC_PDU_IS_CMD(pdu)) llc_pdu_decode_pf_bit(skb, &f_bit); else f_bit = 0; nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_U, sizeof(struct llc_frmr_info)); if (nskb) { struct llc_sap *sap = llc->sap; llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_RSP); llc_pdu_init_as_frmr_rsp(nskb, pdu, f_bit, llc->vS, llc->vR, INCORRECT); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); } out: return rc; free: kfree_skb(nskb); goto out; } int llc_conn_ac_resend_frmr_rsp_f_set_0(struct sock *sk, struct sk_buff *skb) { int rc = -ENOBUFS; struct llc_sock *llc = llc_sk(sk); struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_U, sizeof(struct llc_frmr_info)); if (nskb) { struct llc_sap *sap = llc->sap; struct llc_pdu_sn *pdu = (struct llc_pdu_sn *)&llc->rx_pdu_hdr; llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_RSP); llc_pdu_init_as_frmr_rsp(nskb, pdu, 0, llc->vS, llc->vR, INCORRECT); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, 
llc->daddr.mac); if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); } out: return rc; free: kfree_skb(nskb); goto out; } int llc_conn_ac_resend_frmr_rsp_f_set_p(struct sock *sk, struct sk_buff *skb) { u8 f_bit; int rc = -ENOBUFS; struct sk_buff *nskb; struct llc_sock *llc = llc_sk(sk); llc_pdu_decode_pf_bit(skb, &f_bit); nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_U, sizeof(struct llc_frmr_info)); if (nskb) { struct llc_sap *sap = llc->sap; struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_RSP); llc_pdu_init_as_frmr_rsp(nskb, pdu, f_bit, llc->vS, llc->vR, INCORRECT); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); } out: return rc; free: kfree_skb(nskb); goto out; } int llc_conn_ac_send_i_cmd_p_set_1(struct sock *sk, struct sk_buff *skb) { int rc; struct llc_sock *llc = llc_sk(sk); struct llc_sap *sap = llc->sap; llc_pdu_header_init(skb, LLC_PDU_TYPE_I, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_CMD); llc_pdu_init_as_i_cmd(skb, 1, llc->vS, llc->vR); rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac); if (likely(!rc)) { skb_get(skb); llc_conn_send_pdu(sk, skb); llc_conn_ac_inc_vs_by_1(sk, skb); } return rc; } static int llc_conn_ac_send_i_cmd_p_set_0(struct sock *sk, struct sk_buff *skb) { int rc; struct llc_sock *llc = llc_sk(sk); struct llc_sap *sap = llc->sap; llc_pdu_header_init(skb, LLC_PDU_TYPE_I, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_CMD); llc_pdu_init_as_i_cmd(skb, 0, llc->vS, llc->vR); rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac); if (likely(!rc)) { skb_get(skb); llc_conn_send_pdu(sk, skb); llc_conn_ac_inc_vs_by_1(sk, skb); } return rc; } int llc_conn_ac_send_i_xxx_x_set_0(struct sock *sk, struct sk_buff *skb) { int rc; struct llc_sock *llc = llc_sk(sk); struct llc_sap *sap = llc->sap; llc_pdu_header_init(skb, LLC_PDU_TYPE_I, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_CMD); llc_pdu_init_as_i_cmd(skb, 0, llc->vS, llc->vR); rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac); if (likely(!rc)) { skb_get(skb); llc_conn_send_pdu(sk, skb); llc_conn_ac_inc_vs_by_1(sk, skb); } return 0; } int llc_conn_ac_resend_i_xxx_x_set_0(struct sock *sk, struct sk_buff *skb) { struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); u8 nr = LLC_I_GET_NR(pdu); llc_conn_resend_i_pdu_as_cmd(sk, nr, 0); return 0; } int llc_conn_ac_resend_i_xxx_x_set_0_or_send_rr(struct sock *sk, struct sk_buff *skb) { u8 nr; struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); int rc = -ENOBUFS; struct llc_sock *llc = llc_sk(sk); struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_U, 0); if (nskb) { struct llc_sap *sap = llc->sap; llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_RSP); llc_pdu_init_as_rr_rsp(nskb, 0, llc->vR); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); if (likely(!rc)) llc_conn_send_pdu(sk, nskb); else kfree_skb(skb); } if (rc) { nr = LLC_I_GET_NR(pdu); rc = 0; llc_conn_resend_i_pdu_as_cmd(sk, nr, 0); } return rc; } int llc_conn_ac_resend_i_rsp_f_set_1(struct sock *sk, struct sk_buff *skb) { struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); u8 nr = LLC_I_GET_NR(pdu); llc_conn_resend_i_pdu_as_rsp(sk, nr, 1); return 0; } int llc_conn_ac_send_rej_cmd_p_set_1(struct sock *sk, struct sk_buff *skb) { int rc = -ENOBUFS; struct llc_sock *llc = llc_sk(sk); struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0); if (nskb) { struct llc_sap *sap = 
llc->sap; llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_CMD); llc_pdu_init_as_rej_cmd(nskb, 1, llc->vR); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); } out: return rc; free: kfree_skb(nskb); goto out; } int llc_conn_ac_send_rej_rsp_f_set_1(struct sock *sk, struct sk_buff *skb) { int rc = -ENOBUFS; struct llc_sock *llc = llc_sk(sk); struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0); if (nskb) { struct llc_sap *sap = llc->sap; llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_RSP); llc_pdu_init_as_rej_rsp(nskb, 1, llc->vR); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); } out: return rc; free: kfree_skb(nskb); goto out; } int llc_conn_ac_send_rej_xxx_x_set_0(struct sock *sk, struct sk_buff *skb) { int rc = -ENOBUFS; struct llc_sock *llc = llc_sk(sk); struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0); if (nskb) { struct llc_sap *sap = llc->sap; llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_RSP); llc_pdu_init_as_rej_rsp(nskb, 0, llc->vR); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); } out: return rc; free: kfree_skb(nskb); goto out; } int llc_conn_ac_send_rnr_cmd_p_set_1(struct sock *sk, struct sk_buff *skb) { int rc = -ENOBUFS; struct llc_sock *llc = llc_sk(sk); struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0); if (nskb) { struct llc_sap *sap = llc->sap; llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_CMD); llc_pdu_init_as_rnr_cmd(nskb, 1, llc->vR); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); } out: return rc; free: kfree_skb(nskb); goto out; } int llc_conn_ac_send_rnr_rsp_f_set_1(struct sock *sk, struct sk_buff *skb) { int rc = -ENOBUFS; struct llc_sock *llc = llc_sk(sk); struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0); if (nskb) { struct llc_sap *sap = llc->sap; llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_RSP); llc_pdu_init_as_rnr_rsp(nskb, 1, llc->vR); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); } out: return rc; free: kfree_skb(nskb); goto out; } int llc_conn_ac_send_rnr_xxx_x_set_0(struct sock *sk, struct sk_buff *skb) { int rc = -ENOBUFS; struct llc_sock *llc = llc_sk(sk); struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0); if (nskb) { struct llc_sap *sap = llc->sap; llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_RSP); llc_pdu_init_as_rnr_rsp(nskb, 0, llc->vR); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); } out: return rc; free: kfree_skb(nskb); goto out; } int llc_conn_ac_set_remote_busy(struct sock *sk, struct sk_buff *skb) { struct llc_sock *llc = llc_sk(sk); if (!llc->remote_busy_flag) { llc->remote_busy_flag = 1; mod_timer(&llc->busy_state_timer.timer, jiffies + llc->busy_state_timer.expire); } return 0; } int llc_conn_ac_opt_send_rnr_xxx_x_set_0(struct sock *sk, struct sk_buff *skb) { int rc = -ENOBUFS; struct llc_sock *llc = llc_sk(sk); struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, 
LLC_PDU_TYPE_S, 0); if (nskb) { struct llc_sap *sap = llc->sap; llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_RSP); llc_pdu_init_as_rnr_rsp(nskb, 0, llc->vR); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); } out: return rc; free: kfree_skb(nskb); goto out; } int llc_conn_ac_send_rr_cmd_p_set_1(struct sock *sk, struct sk_buff *skb) { int rc = -ENOBUFS; struct llc_sock *llc = llc_sk(sk); struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0); if (nskb) { struct llc_sap *sap = llc->sap; llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_CMD); llc_pdu_init_as_rr_cmd(nskb, 1, llc->vR); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); } out: return rc; free: kfree_skb(nskb); goto out; } int llc_conn_ac_send_rr_rsp_f_set_1(struct sock *sk, struct sk_buff *skb) { int rc = -ENOBUFS; struct llc_sock *llc = llc_sk(sk); struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0); if (nskb) { struct llc_sap *sap = llc->sap; u8 f_bit = 1; llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_RSP); llc_pdu_init_as_rr_rsp(nskb, f_bit, llc->vR); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); } out: return rc; free: kfree_skb(nskb); goto out; } int llc_conn_ac_send_ack_rsp_f_set_1(struct sock *sk, struct sk_buff *skb) { int rc = -ENOBUFS; struct llc_sock *llc = llc_sk(sk); struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0); if (nskb) { struct llc_sap *sap = llc->sap; llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_RSP); llc_pdu_init_as_rr_rsp(nskb, 1, llc->vR); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); } out: return rc; free: kfree_skb(nskb); goto out; } int llc_conn_ac_send_rr_xxx_x_set_0(struct sock *sk, struct sk_buff *skb) { int rc = -ENOBUFS; struct llc_sock *llc = llc_sk(sk); struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0); if (nskb) { struct llc_sap *sap = llc->sap; llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_RSP); llc_pdu_init_as_rr_rsp(nskb, 0, llc->vR); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); } out: return rc; free: kfree_skb(nskb); goto out; } int llc_conn_ac_send_ack_xxx_x_set_0(struct sock *sk, struct sk_buff *skb) { int rc = -ENOBUFS; struct llc_sock *llc = llc_sk(sk); struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0); if (nskb) { struct llc_sap *sap = llc->sap; llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_RSP); llc_pdu_init_as_rr_rsp(nskb, 0, llc->vR); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); } out: return rc; free: kfree_skb(nskb); goto out; } void llc_conn_set_p_flag(struct sock *sk, u8 value) { int state_changed = llc_sk(sk)->p_flag && !value; llc_sk(sk)->p_flag = value; if (state_changed) sk->sk_state_change(sk); } int llc_conn_ac_send_sabme_cmd_p_set_x(struct sock *sk, struct sk_buff *skb) { int rc = -ENOBUFS; struct llc_sock *llc = llc_sk(sk); struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_U, 0); if 
(nskb) { struct llc_sap *sap = llc->sap; const u8 *dmac = llc->daddr.mac; if (llc->dev->flags & IFF_LOOPBACK) dmac = llc->dev->dev_addr; llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_CMD); llc_pdu_init_as_sabme_cmd(nskb, 1); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, dmac); if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); llc_conn_set_p_flag(sk, 1); } out: return rc; free: kfree_skb(nskb); goto out; } int llc_conn_ac_send_ua_rsp_f_set_p(struct sock *sk, struct sk_buff *skb) { u8 f_bit; int rc = -ENOBUFS; struct llc_sock *llc = llc_sk(sk); struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_U, 0); llc_pdu_decode_pf_bit(skb, &f_bit); if (nskb) { struct llc_sap *sap = llc->sap; nskb->dev = llc->dev; llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_RSP); llc_pdu_init_as_ua_rsp(nskb, f_bit); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); } out: return rc; free: kfree_skb(nskb); goto out; } int llc_conn_ac_set_s_flag_0(struct sock *sk, struct sk_buff *skb) { llc_sk(sk)->s_flag = 0; return 0; } int llc_conn_ac_set_s_flag_1(struct sock *sk, struct sk_buff *skb) { llc_sk(sk)->s_flag = 1; return 0; } int llc_conn_ac_start_p_timer(struct sock *sk, struct sk_buff *skb) { struct llc_sock *llc = llc_sk(sk); llc_conn_set_p_flag(sk, 1); mod_timer(&llc->pf_cycle_timer.timer, jiffies + llc->pf_cycle_timer.expire); return 0; } /** * llc_conn_ac_send_ack_if_needed - check if ack is needed * @sk: current connection structure * @skb: current event * * Checks number of received PDUs which have not been acknowledged, yet, * If number of them reaches to "npta"(Number of PDUs To Acknowledge) then * sends an RR response as acknowledgement for them. Returns 0 for * success, 1 otherwise. */ int llc_conn_ac_send_ack_if_needed(struct sock *sk, struct sk_buff *skb) { u8 pf_bit; struct llc_sock *llc = llc_sk(sk); llc_pdu_decode_pf_bit(skb, &pf_bit); llc->ack_pf |= pf_bit & 1; if (!llc->ack_must_be_send) { llc->first_pdu_Ns = llc->vR; llc->ack_must_be_send = 1; llc->ack_pf = pf_bit & 1; } if (((llc->vR - llc->first_pdu_Ns + 1 + LLC_2_SEQ_NBR_MODULO) % LLC_2_SEQ_NBR_MODULO) >= llc->npta) { llc_conn_ac_send_rr_rsp_f_set_ackpf(sk, skb); llc->ack_must_be_send = 0; llc->ack_pf = 0; llc_conn_ac_inc_npta_value(sk, skb); } return 0; } /** * llc_conn_ac_rst_sendack_flag - resets ack_must_be_send flag * @sk: current connection structure * @skb: current event * * This action resets ack_must_be_send flag of given connection, this flag * indicates if there is any PDU which has not been acknowledged yet. * Returns 0 for success, 1 otherwise. */ int llc_conn_ac_rst_sendack_flag(struct sock *sk, struct sk_buff *skb) { llc_sk(sk)->ack_must_be_send = llc_sk(sk)->ack_pf = 0; return 0; } /** * llc_conn_ac_send_i_rsp_f_set_ackpf - acknowledge received PDUs * @sk: current connection structure * @skb: current event * * Sends an I response PDU with f-bit set to ack_pf flag as acknowledge to * all received PDUs which have not been acknowledged, yet. ack_pf flag is * set to one if one PDU with p-bit set to one is received. Returns 0 for * success, 1 otherwise. 
*/ static int llc_conn_ac_send_i_rsp_f_set_ackpf(struct sock *sk, struct sk_buff *skb) { int rc; struct llc_sock *llc = llc_sk(sk); struct llc_sap *sap = llc->sap; llc_pdu_header_init(skb, LLC_PDU_TYPE_I, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_RSP); llc_pdu_init_as_i_cmd(skb, llc->ack_pf, llc->vS, llc->vR); rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac); if (likely(!rc)) { skb_get(skb); llc_conn_send_pdu(sk, skb); llc_conn_ac_inc_vs_by_1(sk, skb); } return rc; } /** * llc_conn_ac_send_i_as_ack - sends an I-format PDU to acknowledge rx PDUs * @sk: current connection structure. * @skb: current event. * * This action sends an I-format PDU as acknowledgement for received PDUs which * have not been acknowledged yet, if there are any. Using this action reduces * the number of separate acknowledgements; this technique is called * piggybacking. Returns 0 for success, 1 otherwise. */ int llc_conn_ac_send_i_as_ack(struct sock *sk, struct sk_buff *skb) { struct llc_sock *llc = llc_sk(sk); int ret; if (llc->ack_must_be_send) { ret = llc_conn_ac_send_i_rsp_f_set_ackpf(sk, skb); llc->ack_must_be_send = 0; llc->ack_pf = 0; } else { ret = llc_conn_ac_send_i_cmd_p_set_0(sk, skb); } return ret; } /** * llc_conn_ac_send_rr_rsp_f_set_ackpf - ack all rx PDUs not yet acked * @sk: current connection structure. * @skb: current event. * * This action sends an RR response with the f-bit set to the ack_pf flag as * acknowledgement for all received PDUs which have not been acknowledged yet, * if there are any. The ack_pf flag indicates if a PDU has been received with * the p-bit set to one. Returns 0 for success, 1 otherwise. */ static int llc_conn_ac_send_rr_rsp_f_set_ackpf(struct sock *sk, struct sk_buff *skb) { int rc = -ENOBUFS; struct llc_sock *llc = llc_sk(sk); struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0); if (nskb) { struct llc_sap *sap = llc->sap; llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_RSP); llc_pdu_init_as_rr_rsp(nskb, llc->ack_pf, llc->vR); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); } out: return rc; free: kfree_skb(nskb); goto out; } /** * llc_conn_ac_inc_npta_value - tries to make value of npta greater * @sk: current connection structure. * @skb: current event. * * After "inc_cntr" calls of this action, "npta" increases by one. This action * tries to make the value of "npta" as great as possible; the number of * acknowledgements decreases as "npta" increases. Returns 0 for * success, 1 otherwise. */ static int llc_conn_ac_inc_npta_value(struct sock *sk, struct sk_buff *skb) { struct llc_sock *llc = llc_sk(sk); if (!llc->inc_cntr) { llc->dec_step = 0; llc->dec_cntr = llc->inc_cntr = 2; ++llc->npta; if (llc->npta > (u8) ~LLC_2_SEQ_NBR_MODULO) llc->npta = (u8) ~LLC_2_SEQ_NBR_MODULO; } else --llc->inc_cntr; return 0; } /** * llc_conn_ac_adjust_npta_by_rr - decreases "npta" by one * @sk: current connection structure. * @skb: current event. * * After receiving an RR command "dec_cntr" times, this action decreases * "npta" by one. Returns 0 for success, 1 otherwise. 
*/ int llc_conn_ac_adjust_npta_by_rr(struct sock *sk, struct sk_buff *skb) { struct llc_sock *llc = llc_sk(sk); if (!llc->connect_step && !llc->remote_busy_flag) { if (!llc->dec_step) { if (!llc->dec_cntr) { llc->inc_cntr = llc->dec_cntr = 2; if (llc->npta > 0) llc->npta = llc->npta - 1; } else llc->dec_cntr -=1; } } else llc->connect_step = 0 ; return 0; } /** * llc_conn_ac_adjust_npta_by_rnr - decreases "npta" by one * @sk: current connection structure. * @skb: current event. * * After receiving "dec_cntr" times RNR command, this action decreases * "npta" by one. Returns 0 for success, 1 otherwise. */ int llc_conn_ac_adjust_npta_by_rnr(struct sock *sk, struct sk_buff *skb) { struct llc_sock *llc = llc_sk(sk); if (llc->remote_busy_flag) if (!llc->dec_step) { if (!llc->dec_cntr) { llc->inc_cntr = llc->dec_cntr = 2; if (llc->npta > 0) --llc->npta; } else --llc->dec_cntr; } return 0; } /** * llc_conn_ac_dec_tx_win_size - decreases tx window size * @sk: current connection structure. * @skb: current event. * * After receiving of a REJ command or response, transmit window size is * decreased by number of PDUs which are outstanding yet. Returns 0 for * success, 1 otherwise. */ int llc_conn_ac_dec_tx_win_size(struct sock *sk, struct sk_buff *skb) { struct llc_sock *llc = llc_sk(sk); u8 unacked_pdu = skb_queue_len(&llc->pdu_unack_q); if (llc->k - unacked_pdu < 1) llc->k = 1; else llc->k -= unacked_pdu; return 0; } /** * llc_conn_ac_inc_tx_win_size - tx window size is inc by 1 * @sk: current connection structure. * @skb: current event. * * After receiving an RR response with f-bit set to one, transmit window * size is increased by one. Returns 0 for success, 1 otherwise. */ int llc_conn_ac_inc_tx_win_size(struct sock *sk, struct sk_buff *skb) { struct llc_sock *llc = llc_sk(sk); llc->k += 1; if (llc->k > (u8) ~LLC_2_SEQ_NBR_MODULO) llc->k = (u8) ~LLC_2_SEQ_NBR_MODULO; return 0; } int llc_conn_ac_stop_all_timers(struct sock *sk, struct sk_buff *skb) { llc_sk_stop_all_timers(sk, false); return 0; } int llc_conn_ac_stop_other_timers(struct sock *sk, struct sk_buff *skb) { struct llc_sock *llc = llc_sk(sk); timer_delete(&llc->rej_sent_timer.timer); timer_delete(&llc->pf_cycle_timer.timer); timer_delete(&llc->busy_state_timer.timer); llc->ack_must_be_send = 0; llc->ack_pf = 0; return 0; } int llc_conn_ac_start_ack_timer(struct sock *sk, struct sk_buff *skb) { struct llc_sock *llc = llc_sk(sk); mod_timer(&llc->ack_timer.timer, jiffies + llc->ack_timer.expire); return 0; } int llc_conn_ac_start_rej_timer(struct sock *sk, struct sk_buff *skb) { struct llc_sock *llc = llc_sk(sk); mod_timer(&llc->rej_sent_timer.timer, jiffies + llc->rej_sent_timer.expire); return 0; } int llc_conn_ac_start_ack_tmr_if_not_running(struct sock *sk, struct sk_buff *skb) { struct llc_sock *llc = llc_sk(sk); if (!timer_pending(&llc->ack_timer.timer)) mod_timer(&llc->ack_timer.timer, jiffies + llc->ack_timer.expire); return 0; } int llc_conn_ac_stop_ack_timer(struct sock *sk, struct sk_buff *skb) { timer_delete(&llc_sk(sk)->ack_timer.timer); return 0; } int llc_conn_ac_stop_p_timer(struct sock *sk, struct sk_buff *skb) { struct llc_sock *llc = llc_sk(sk); timer_delete(&llc->pf_cycle_timer.timer); llc_conn_set_p_flag(sk, 0); return 0; } int llc_conn_ac_stop_rej_timer(struct sock *sk, struct sk_buff *skb) { timer_delete(&llc_sk(sk)->rej_sent_timer.timer); return 0; } int llc_conn_ac_upd_nr_received(struct sock *sk, struct sk_buff *skb) { int acked; u16 unacked = 0; struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); struct llc_sock *llc = 
llc_sk(sk); llc->last_nr = PDU_SUPV_GET_Nr(pdu); acked = llc_conn_remove_acked_pdus(sk, llc->last_nr, &unacked); /* On loopback we don't queue I frames in unack_pdu_q queue. */ if (acked > 0 || (llc->dev->flags & IFF_LOOPBACK)) { llc->retry_count = 0; timer_delete(&llc->ack_timer.timer); if (llc->failed_data_req) { /* already, we did not accept data from upper layer * (tx_window full or unacceptable state). Now, we * can send data and must inform to upper layer. */ llc->failed_data_req = 0; llc_conn_ac_data_confirm(sk, skb); } if (unacked) mod_timer(&llc->ack_timer.timer, jiffies + llc->ack_timer.expire); } else if (llc->failed_data_req) { u8 f_bit; llc_pdu_decode_pf_bit(skb, &f_bit); if (f_bit == 1) { llc->failed_data_req = 0; llc_conn_ac_data_confirm(sk, skb); } } return 0; } int llc_conn_ac_upd_p_flag(struct sock *sk, struct sk_buff *skb) { struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); if (LLC_PDU_IS_RSP(pdu)) { u8 f_bit; llc_pdu_decode_pf_bit(skb, &f_bit); if (f_bit) { llc_conn_set_p_flag(sk, 0); llc_conn_ac_stop_p_timer(sk, skb); } } return 0; } int llc_conn_ac_set_data_flag_2(struct sock *sk, struct sk_buff *skb) { llc_sk(sk)->data_flag = 2; return 0; } int llc_conn_ac_set_data_flag_0(struct sock *sk, struct sk_buff *skb) { llc_sk(sk)->data_flag = 0; return 0; } int llc_conn_ac_set_data_flag_1(struct sock *sk, struct sk_buff *skb) { llc_sk(sk)->data_flag = 1; return 0; } int llc_conn_ac_set_data_flag_1_if_data_flag_eq_0(struct sock *sk, struct sk_buff *skb) { if (!llc_sk(sk)->data_flag) llc_sk(sk)->data_flag = 1; return 0; } int llc_conn_ac_set_p_flag_0(struct sock *sk, struct sk_buff *skb) { llc_conn_set_p_flag(sk, 0); return 0; } static int llc_conn_ac_set_p_flag_1(struct sock *sk, struct sk_buff *skb) { llc_conn_set_p_flag(sk, 1); return 0; } int llc_conn_ac_set_remote_busy_0(struct sock *sk, struct sk_buff *skb) { llc_sk(sk)->remote_busy_flag = 0; return 0; } int llc_conn_ac_set_cause_flag_0(struct sock *sk, struct sk_buff *skb) { llc_sk(sk)->cause_flag = 0; return 0; } int llc_conn_ac_set_cause_flag_1(struct sock *sk, struct sk_buff *skb) { llc_sk(sk)->cause_flag = 1; return 0; } int llc_conn_ac_set_retry_cnt_0(struct sock *sk, struct sk_buff *skb) { llc_sk(sk)->retry_count = 0; return 0; } int llc_conn_ac_inc_retry_cnt_by_1(struct sock *sk, struct sk_buff *skb) { llc_sk(sk)->retry_count++; return 0; } int llc_conn_ac_set_vr_0(struct sock *sk, struct sk_buff *skb) { llc_sk(sk)->vR = 0; return 0; } int llc_conn_ac_inc_vr_by_1(struct sock *sk, struct sk_buff *skb) { llc_sk(sk)->vR = PDU_GET_NEXT_Vr(llc_sk(sk)->vR); return 0; } int llc_conn_ac_set_vs_0(struct sock *sk, struct sk_buff *skb) { llc_sk(sk)->vS = 0; return 0; } int llc_conn_ac_set_vs_nr(struct sock *sk, struct sk_buff *skb) { llc_sk(sk)->vS = llc_sk(sk)->last_nr; return 0; } static int llc_conn_ac_inc_vs_by_1(struct sock *sk, struct sk_buff *skb) { llc_sk(sk)->vS = (llc_sk(sk)->vS + 1) % LLC_2_SEQ_NBR_MODULO; return 0; } static void llc_conn_tmr_common_cb(struct sock *sk, u8 type) { struct sk_buff *skb = alloc_skb(0, GFP_ATOMIC); bh_lock_sock(sk); if (skb) { struct llc_conn_state_ev *ev = llc_conn_ev(skb); skb_set_owner_r(skb, sk); ev->type = type; llc_process_tmr_ev(sk, skb); } bh_unlock_sock(sk); } void llc_conn_pf_cycle_tmr_cb(struct timer_list *t) { struct llc_sock *llc = timer_container_of(llc, t, pf_cycle_timer.timer); llc_conn_tmr_common_cb(&llc->sk, LLC_CONN_EV_TYPE_P_TMR); } void llc_conn_busy_tmr_cb(struct timer_list *t) { struct llc_sock *llc = timer_container_of(llc, t, busy_state_timer.timer); 
llc_conn_tmr_common_cb(&llc->sk, LLC_CONN_EV_TYPE_BUSY_TMR); } void llc_conn_ack_tmr_cb(struct timer_list *t) { struct llc_sock *llc = timer_container_of(llc, t, ack_timer.timer); llc_conn_tmr_common_cb(&llc->sk, LLC_CONN_EV_TYPE_ACK_TMR); } void llc_conn_rej_tmr_cb(struct timer_list *t) { struct llc_sock *llc = timer_container_of(llc, t, rej_sent_timer.timer); llc_conn_tmr_common_cb(&llc->sk, LLC_CONN_EV_TYPE_REJ_TMR); } int llc_conn_ac_rst_vs(struct sock *sk, struct sk_buff *skb) { llc_sk(sk)->X = llc_sk(sk)->vS; llc_conn_ac_set_vs_nr(sk, skb); return 0; } int llc_conn_ac_upd_vs(struct sock *sk, struct sk_buff *skb) { struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); u8 nr = PDU_SUPV_GET_Nr(pdu); if (llc_circular_between(llc_sk(sk)->vS, nr, llc_sk(sk)->X)) llc_conn_ac_set_vs_nr(sk, skb); return 0; } /* * Non-standard actions; these are not contained in the IEEE specification; * they exist for our own usage. */ /** * llc_conn_disc - removes connection from SAP list and frees it * @sk: closed connection * @skb: occurred event */ int llc_conn_disc(struct sock *sk, struct sk_buff *skb) { /* FIXME: this thing seems to want to die */ return 0; } /** * llc_conn_reset - resets connection * @sk: connection being reset. * @skb: occurred event. * * Stop all timers, empty all queues and reset all flags. */ int llc_conn_reset(struct sock *sk, struct sk_buff *skb) { llc_sk_reset(sk); return 0; } /** * llc_circular_between - checks whether b is between a and c * @a: lower bound * @b: element to check whether it is between a and c * @c: upper bound * * This function checks whether b is between a and c (for example, * 0 is between 127 and 1). Returns 1 if b is between a and c, 0 * otherwise. */ u8 llc_circular_between(u8 a, u8 b, u8 c) { b = b - a; c = c - a; return b <= c; } /** * llc_process_tmr_ev - timer backend * @sk: active connection * @skb: occurred event * * This function is called from timer callback functions. When the connection * is busy (sending a data frame), the timer expiration event must be * queued. Otherwise this event can be sent to the connection state machine. * Queued events will be processed by the llc_backlog_rcv function after the * data frame has been sent. */ static void llc_process_tmr_ev(struct sock *sk, struct sk_buff *skb) { if (llc_sk(sk)->state == LLC_CONN_OUT_OF_SVC) { printk(KERN_WARNING "%s: timer called on closed connection\n", __func__); kfree_skb(skb); } else { if (!sock_owned_by_user(sk)) llc_conn_state_process(sk, skb); else { llc_set_backlog_type(skb, LLC_EVENT); __sk_add_backlog(sk, skb); } } }
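/*
 * Worked example, not part of the original file: llc_circular_between()
 * above does its comparison modulo 256 on u8 operands, which is why 0 counts
 * as lying between 127 and 1.  For a = 127, b = 0, c = 1: b - a wraps to 129
 * and c - a wraps to 130, so b <= c holds and the function returns 1; for
 * b = 2 the difference wraps to 131, which exceeds 130, so it returns 0.
 * This is how llc_conn_ac_upd_vs() decides whether a received N(R) lies
 * between the current V(S) and the value saved in X.  A small, hypothetical
 * self-check using only the function defined above:
 */
static void __maybe_unused llc_example_circular_between_check(void)
{
	/* 0 lies on the wrapped path from 127 up to 1 ... */
	WARN_ON(llc_circular_between(127, 0, 1) != 1);
	/* ... while 2 falls outside that window. */
	WARN_ON(llc_circular_between(127, 2, 1) != 0);
}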
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Framework and drivers for configuring and reading different PHYs * Based on code in sungem_phy.c and (long-removed) gianfar_phy.c * * Author: Andy Fleming * * Copyright (c) 2004 Freescale Semiconductor, Inc. */ #ifndef __PHY_H #define __PHY_H #include <linux/compiler.h> #include <linux/spinlock.h> #include <linux/ethtool.h> #include <linux/leds.h> #include <linux/linkmode.h> #include <linux/netlink.h> #include <linux/mdio.h> #include <linux/mii.h> #include <linux/mii_timestamper.h> #include <linux/module.h> #include <linux/timer.h> #include <linux/workqueue.h> #include <linux/mod_devicetable.h> #include <linux/u64_stats_sync.h> #include <linux/irqreturn.h> #include <linux/iopoll.h> #include <linux/refcount.h> #include <linux/atomic.h> #include <net/eee.h> extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_basic_features) __ro_after_init; extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_basic_t1_features) __ro_after_init; extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_basic_t1s_p2mp_features) __ro_after_init; extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_features) __ro_after_init; extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_fibre_features) __ro_after_init; extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_features) __ro_after_init; extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_eee_cap1_features) __ro_after_init; extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_eee_cap2_features) __ro_after_init; #define PHY_BASIC_FEATURES ((unsigned long *)&phy_basic_features) #define PHY_BASIC_T1_FEATURES ((unsigned long *)&phy_basic_t1_features) #define PHY_BASIC_T1S_P2MP_FEATURES ((unsigned long *)&phy_basic_t1s_p2mp_features) #define PHY_GBIT_FEATURES ((unsigned long *)&phy_gbit_features) #define PHY_GBIT_FIBRE_FEATURES ((unsigned long *)&phy_gbit_fibre_features) #define PHY_10GBIT_FEATURES ((unsigned long *)&phy_10gbit_features) #define PHY_EEE_CAP1_FEATURES ((unsigned long *)&phy_eee_cap1_features) #define PHY_EEE_CAP2_FEATURES ((unsigned long *)&phy_eee_cap2_features) extern const int phy_basic_ports_array[3]; /* * Set phydev->irq to PHY_POLL if interrupts are not supported, * or not desired for this PHY. 
Set to PHY_MAC_INTERRUPT if * the attached MAC driver handles the interrupt */ #define PHY_POLL -1 #define PHY_MAC_INTERRUPT -2 #define PHY_IS_INTERNAL 0x00000001 #define PHY_RST_AFTER_CLK_EN 0x00000002 #define PHY_POLL_CABLE_TEST 0x00000004 #define PHY_ALWAYS_CALL_SUSPEND 0x00000008 #define MDIO_DEVICE_IS_PHY 0x80000000 /** * enum phy_interface_t - Interface Mode definitions * * @PHY_INTERFACE_MODE_NA: Not Applicable - don't touch * @PHY_INTERFACE_MODE_INTERNAL: No interface, MAC and PHY combined * @PHY_INTERFACE_MODE_MII: Media-independent interface * @PHY_INTERFACE_MODE_GMII: Gigabit media-independent interface * @PHY_INTERFACE_MODE_SGMII: Serial gigabit media-independent interface * @PHY_INTERFACE_MODE_TBI: Ten Bit Interface * @PHY_INTERFACE_MODE_REVMII: Reverse Media Independent Interface * @PHY_INTERFACE_MODE_RMII: Reduced Media Independent Interface * @PHY_INTERFACE_MODE_REVRMII: Reduced Media Independent Interface in PHY role * @PHY_INTERFACE_MODE_RGMII: Reduced gigabit media-independent interface * @PHY_INTERFACE_MODE_RGMII_ID: RGMII with Internal RX+TX delay * @PHY_INTERFACE_MODE_RGMII_RXID: RGMII with Internal RX delay * @PHY_INTERFACE_MODE_RGMII_TXID: RGMII with Internal TX delay * @PHY_INTERFACE_MODE_RTBI: Reduced TBI * @PHY_INTERFACE_MODE_SMII: Serial MII * @PHY_INTERFACE_MODE_XGMII: 10 gigabit media-independent interface * @PHY_INTERFACE_MODE_XLGMII:40 gigabit media-independent interface * @PHY_INTERFACE_MODE_MOCA: Multimedia over Coax * @PHY_INTERFACE_MODE_PSGMII: Penta SGMII * @PHY_INTERFACE_MODE_QSGMII: Quad SGMII * @PHY_INTERFACE_MODE_TRGMII: Turbo RGMII * @PHY_INTERFACE_MODE_100BASEX: 100 BaseX * @PHY_INTERFACE_MODE_1000BASEX: 1000 BaseX * @PHY_INTERFACE_MODE_2500BASEX: 2500 BaseX * @PHY_INTERFACE_MODE_5GBASER: 5G BaseR * @PHY_INTERFACE_MODE_RXAUI: Reduced XAUI * @PHY_INTERFACE_MODE_XAUI: 10 Gigabit Attachment Unit Interface * @PHY_INTERFACE_MODE_10GBASER: 10G BaseR * @PHY_INTERFACE_MODE_25GBASER: 25G BaseR * @PHY_INTERFACE_MODE_USXGMII: Universal Serial 10GE MII * @PHY_INTERFACE_MODE_10GKR: 10GBASE-KR - with Clause 73 AN * @PHY_INTERFACE_MODE_QUSGMII: Quad Universal SGMII * @PHY_INTERFACE_MODE_1000BASEKX: 1000Base-KX - with Clause 73 AN * @PHY_INTERFACE_MODE_10G_QXGMII: 10G-QXGMII - 4 ports over 10G USXGMII * @PHY_INTERFACE_MODE_50GBASER: 50GBase-R - with Clause 134 FEC * @PHY_INTERFACE_MODE_LAUI: 50 Gigabit Attachment Unit Interface * @PHY_INTERFACE_MODE_100GBASEP: 100GBase-P - with Clause 134 FEC * @PHY_INTERFACE_MODE_MIILITE: MII-Lite - MII without RXER TXER CRS COL * @PHY_INTERFACE_MODE_MAX: Book keeping * * Describes the interface between the MAC and PHY. 
*/ typedef enum { PHY_INTERFACE_MODE_NA, PHY_INTERFACE_MODE_INTERNAL, PHY_INTERFACE_MODE_MII, PHY_INTERFACE_MODE_GMII, PHY_INTERFACE_MODE_SGMII, PHY_INTERFACE_MODE_TBI, PHY_INTERFACE_MODE_REVMII, PHY_INTERFACE_MODE_RMII, PHY_INTERFACE_MODE_REVRMII, PHY_INTERFACE_MODE_RGMII, PHY_INTERFACE_MODE_RGMII_ID, PHY_INTERFACE_MODE_RGMII_RXID, PHY_INTERFACE_MODE_RGMII_TXID, PHY_INTERFACE_MODE_RTBI, PHY_INTERFACE_MODE_SMII, PHY_INTERFACE_MODE_XGMII, PHY_INTERFACE_MODE_XLGMII, PHY_INTERFACE_MODE_MOCA, PHY_INTERFACE_MODE_PSGMII, PHY_INTERFACE_MODE_QSGMII, PHY_INTERFACE_MODE_TRGMII, PHY_INTERFACE_MODE_100BASEX, PHY_INTERFACE_MODE_1000BASEX, PHY_INTERFACE_MODE_2500BASEX, PHY_INTERFACE_MODE_5GBASER, PHY_INTERFACE_MODE_RXAUI, PHY_INTERFACE_MODE_XAUI, /* 10GBASE-R, XFI, SFI - single lane 10G Serdes */ PHY_INTERFACE_MODE_10GBASER, PHY_INTERFACE_MODE_25GBASER, PHY_INTERFACE_MODE_USXGMII, /* 10GBASE-KR - with Clause 73 AN */ PHY_INTERFACE_MODE_10GKR, PHY_INTERFACE_MODE_QUSGMII, PHY_INTERFACE_MODE_1000BASEKX, PHY_INTERFACE_MODE_10G_QXGMII, PHY_INTERFACE_MODE_50GBASER, PHY_INTERFACE_MODE_LAUI, PHY_INTERFACE_MODE_100GBASEP, PHY_INTERFACE_MODE_MIILITE, PHY_INTERFACE_MODE_MAX, } phy_interface_t; /* PHY interface mode bitmap handling */ #define DECLARE_PHY_INTERFACE_MASK(name) \ DECLARE_BITMAP(name, PHY_INTERFACE_MODE_MAX) static inline void phy_interface_zero(unsigned long *intf) { bitmap_zero(intf, PHY_INTERFACE_MODE_MAX); } static inline bool phy_interface_empty(const unsigned long *intf) { return bitmap_empty(intf, PHY_INTERFACE_MODE_MAX); } static inline void phy_interface_copy(unsigned long *d, const unsigned long *s) { bitmap_copy(d, s, PHY_INTERFACE_MODE_MAX); } static inline unsigned int phy_interface_weight(const unsigned long *intf) { return bitmap_weight(intf, PHY_INTERFACE_MODE_MAX); } static inline void phy_interface_and(unsigned long *dst, const unsigned long *a, const unsigned long *b) { bitmap_and(dst, a, b, PHY_INTERFACE_MODE_MAX); } static inline void phy_interface_or(unsigned long *dst, const unsigned long *a, const unsigned long *b) { bitmap_or(dst, a, b, PHY_INTERFACE_MODE_MAX); } static inline void phy_interface_set_rgmii(unsigned long *intf) { __set_bit(PHY_INTERFACE_MODE_RGMII, intf); __set_bit(PHY_INTERFACE_MODE_RGMII_ID, intf); __set_bit(PHY_INTERFACE_MODE_RGMII_RXID, intf); __set_bit(PHY_INTERFACE_MODE_RGMII_TXID, intf); } /** * phy_modes - map phy_interface_t enum to device tree binding of phy-mode * @interface: enum phy_interface_t value * * Description: maps enum &phy_interface_t defined in this file * into the device tree binding of 'phy-mode', so that Ethernet * device driver can get PHY interface from device tree. 
*/ static inline const char *phy_modes(phy_interface_t interface) { switch (interface) { case PHY_INTERFACE_MODE_NA: return ""; case PHY_INTERFACE_MODE_INTERNAL: return "internal"; case PHY_INTERFACE_MODE_MII: return "mii"; case PHY_INTERFACE_MODE_GMII: return "gmii"; case PHY_INTERFACE_MODE_SGMII: return "sgmii"; case PHY_INTERFACE_MODE_TBI: return "tbi"; case PHY_INTERFACE_MODE_REVMII: return "rev-mii"; case PHY_INTERFACE_MODE_RMII: return "rmii"; case PHY_INTERFACE_MODE_REVRMII: return "rev-rmii"; case PHY_INTERFACE_MODE_RGMII: return "rgmii"; case PHY_INTERFACE_MODE_RGMII_ID: return "rgmii-id"; case PHY_INTERFACE_MODE_RGMII_RXID: return "rgmii-rxid"; case PHY_INTERFACE_MODE_RGMII_TXID: return "rgmii-txid"; case PHY_INTERFACE_MODE_RTBI: return "rtbi"; case PHY_INTERFACE_MODE_SMII: return "smii"; case PHY_INTERFACE_MODE_XGMII: return "xgmii"; case PHY_INTERFACE_MODE_XLGMII: return "xlgmii"; case PHY_INTERFACE_MODE_MOCA: return "moca"; case PHY_INTERFACE_MODE_PSGMII: return "psgmii"; case PHY_INTERFACE_MODE_QSGMII: return "qsgmii"; case PHY_INTERFACE_MODE_TRGMII: return "trgmii"; case PHY_INTERFACE_MODE_1000BASEX: return "1000base-x"; case PHY_INTERFACE_MODE_1000BASEKX: return "1000base-kx"; case PHY_INTERFACE_MODE_2500BASEX: return "2500base-x"; case PHY_INTERFACE_MODE_5GBASER: return "5gbase-r"; case PHY_INTERFACE_MODE_RXAUI: return "rxaui"; case PHY_INTERFACE_MODE_XAUI: return "xaui"; case PHY_INTERFACE_MODE_10GBASER: return "10gbase-r"; case PHY_INTERFACE_MODE_25GBASER: return "25gbase-r"; case PHY_INTERFACE_MODE_USXGMII: return "usxgmii"; case PHY_INTERFACE_MODE_10GKR: return "10gbase-kr"; case PHY_INTERFACE_MODE_100BASEX: return "100base-x"; case PHY_INTERFACE_MODE_QUSGMII: return "qusgmii"; case PHY_INTERFACE_MODE_10G_QXGMII: return "10g-qxgmii"; case PHY_INTERFACE_MODE_50GBASER: return "50gbase-r"; case PHY_INTERFACE_MODE_LAUI: return "laui"; case PHY_INTERFACE_MODE_100GBASEP: return "100gbase-p"; case PHY_INTERFACE_MODE_MIILITE: return "mii-lite"; default: return "unknown"; } } /** * rgmii_clock - map link speed to the clock rate * @speed: link speed value * * Description: maps RGMII supported link speeds into the clock rates. * This can also be used for MII, GMII, and RMII interface modes as the * clock rates are identical, but the caller must be aware that errors * for unsupported clock rates will not be signalled. * * Returns: clock rate or negative errno */ static inline long rgmii_clock(int speed) { switch (speed) { case SPEED_10: return 2500000; case SPEED_100: return 25000000; case SPEED_1000: return 125000000; default: return -EINVAL; } } #define PHY_MAX_ADDR 32 /* Used when trying to connect to a specific phy (mii bus id:phy device id) */ #define PHY_ID_FMT "%s:%02x" #define PHY_ID_SIZE (MII_BUS_ID_SIZE + 3) #define MII_BUS_ID_SIZE 61 struct device; struct kernel_hwtstamp_config; struct phylink; struct sfp_bus; struct sfp_upstream_ops; struct sk_buff; /** * struct mdio_bus_stats - Statistics counters for MDIO busses * @transfers: Total number of transfers, i.e. 
@writes + @reads * @errors: Number of MDIO transfers that returned an error * @writes: Number of write transfers * @reads: Number of read transfers * @syncp: Synchronisation for incrementing statistics */ struct mdio_bus_stats { u64_stats_t transfers; u64_stats_t errors; u64_stats_t writes; u64_stats_t reads; /* Must be last, add new statistics above */ struct u64_stats_sync syncp; }; /** * struct mii_bus - Represents an MDIO bus * * @owner: Who owns this device * @name: User friendly name for this MDIO device, or driver name * @id: Unique identifier for this bus, typical from bus hierarchy * @priv: Driver private data * * The Bus class for PHYs. Devices which provide access to * PHYs should register using this structure */ struct mii_bus { struct module *owner; const char *name; char id[MII_BUS_ID_SIZE]; void *priv; /** @read: Perform a read transfer on the bus */ int (*read)(struct mii_bus *bus, int addr, int regnum); /** @write: Perform a write transfer on the bus */ int (*write)(struct mii_bus *bus, int addr, int regnum, u16 val); /** @read_c45: Perform a C45 read transfer on the bus */ int (*read_c45)(struct mii_bus *bus, int addr, int devnum, int regnum); /** @write_c45: Perform a C45 write transfer on the bus */ int (*write_c45)(struct mii_bus *bus, int addr, int devnum, int regnum, u16 val); /** @reset: Perform a reset of the bus */ int (*reset)(struct mii_bus *bus); /** @stats: Statistic counters per device on the bus */ struct mdio_bus_stats stats[PHY_MAX_ADDR]; /** * @mdio_lock: A lock to ensure that only one thing can read/write * the MDIO bus at a time */ struct mutex mdio_lock; /** @parent: Parent device of this bus */ struct device *parent; /** @state: State of bus structure */ enum { MDIOBUS_ALLOCATED = 1, MDIOBUS_REGISTERED, MDIOBUS_UNREGISTERED, MDIOBUS_RELEASED, } state; /** @dev: Kernel device representation */ struct device dev; /** @mdio_map: list of all MDIO devices on bus */ struct mdio_device *mdio_map[PHY_MAX_ADDR]; /** @phy_mask: PHY addresses to be ignored when probing */ u32 phy_mask; /** @phy_ignore_ta_mask: PHY addresses to ignore the TA/read failure */ u32 phy_ignore_ta_mask; /** * @irq: An array of interrupts, each PHY's interrupt at the index * matching its address */ int irq[PHY_MAX_ADDR]; /** @reset_delay_us: GPIO reset pulse width in microseconds */ int reset_delay_us; /** @reset_post_delay_us: GPIO reset deassert delay in microseconds */ int reset_post_delay_us; /** @reset_gpiod: Reset GPIO descriptor pointer */ struct gpio_desc *reset_gpiod; /** @shared_lock: protect access to the shared element */ struct mutex shared_lock; #if IS_ENABLED(CONFIG_PHY_PACKAGE) /** @shared: shared state across different PHYs */ struct phy_package_shared *shared[PHY_MAX_ADDR]; #endif }; #define to_mii_bus(d) container_of(d, struct mii_bus, dev) struct mii_bus *mdiobus_alloc_size(size_t size); /** * mdiobus_alloc - Allocate an MDIO bus structure * * The internal state of the MDIO bus will be set of MDIOBUS_ALLOCATED ready * for the driver to register the bus. 
*/ static inline struct mii_bus *mdiobus_alloc(void) { return mdiobus_alloc_size(0); } int __mdiobus_register(struct mii_bus *bus, struct module *owner); int __devm_mdiobus_register(struct device *dev, struct mii_bus *bus, struct module *owner); #define mdiobus_register(bus) __mdiobus_register(bus, THIS_MODULE) #define devm_mdiobus_register(dev, bus) \ __devm_mdiobus_register(dev, bus, THIS_MODULE) void mdiobus_unregister(struct mii_bus *bus); void mdiobus_free(struct mii_bus *bus); struct mii_bus *devm_mdiobus_alloc_size(struct device *dev, int sizeof_priv); static inline struct mii_bus *devm_mdiobus_alloc(struct device *dev) { return devm_mdiobus_alloc_size(dev, 0); } struct mii_bus *mdio_find_bus(const char *mdio_name); struct phy_device *mdiobus_scan_c22(struct mii_bus *bus, int addr); #define PHY_INTERRUPT_DISABLED false #define PHY_INTERRUPT_ENABLED true /** * enum phy_state - PHY state machine states: * * @PHY_DOWN: PHY device and driver are not ready for anything. probe * should be called if and only if the PHY is in this state, * given that the PHY device exists. * - PHY driver probe function will set the state to @PHY_READY * * @PHY_READY: PHY is ready to send and receive packets, but the * controller is not. By default, PHYs which do not implement * probe will be set to this state by phy_probe(). * - start will set the state to UP * * @PHY_UP: The PHY and attached device are ready to do work. * Interrupts should be started here. * - timer moves to @PHY_NOLINK or @PHY_RUNNING * * @PHY_NOLINK: PHY is up, but not currently plugged in. * - irq or timer will set @PHY_RUNNING if link comes back * - phy_stop moves to @PHY_HALTED * * @PHY_RUNNING: PHY is currently up, running, and possibly sending * and/or receiving packets * - irq or timer will set @PHY_NOLINK if link goes down * - phy_stop moves to @PHY_HALTED * * @PHY_CABLETEST: PHY is performing a cable test. Packet reception/sending * is not expected to work, carrier will be indicated as down. PHY will be * polled once per second, or on interrupt, for its current state. * Once complete, move to UP to restart the PHY. * - phy_stop aborts the running test and moves to @PHY_HALTED * * @PHY_HALTED: PHY is up, but no polling or interrupts are done. * - phy_start moves to @PHY_UP * * @PHY_ERROR: PHY is up, but is in an error state. * - phy_stop moves to @PHY_HALTED */ enum phy_state { PHY_DOWN = 0, PHY_READY, PHY_HALTED, PHY_ERROR, PHY_UP, PHY_RUNNING, PHY_NOLINK, PHY_CABLETEST, }; #define MDIO_MMD_NUM 32 /** * struct phy_c45_device_ids - 802.3-c45 Device Identifiers * @devices_in_package: IEEE 802.3 devices in package register value. * @mmds_present: bit vector of MMDs present. * @device_ids: The device identifier for each present device. */ struct phy_c45_device_ids { u32 devices_in_package; u32 mmds_present; u32 device_ids[MDIO_MMD_NUM]; }; struct macsec_context; struct macsec_ops; /** * struct phy_oatc14_sqi_capability - SQI capability information for OATC14 * 10Base-T1S PHY * @updated: Indicates whether the SQI capability fields have been updated. * @sqi_max: Maximum supported Signal Quality Indicator (SQI) level reported by * the PHY. * @sqiplus_bits: Bits for SQI+ levels supported by the PHY.
* 0 - SQI+ is not supported * 3 - SQI+ is supported, using 3 bits (8 levels) * 4 - SQI+ is supported, using 4 bits (16 levels) * 5 - SQI+ is supported, using 5 bits (32 levels) * 6 - SQI+ is supported, using 6 bits (64 levels) * 7 - SQI+ is supported, using 7 bits (128 levels) * 8 - SQI+ is supported, using 8 bits (256 levels) * * This structure is used by the OATC14 10Base-T1S PHY driver to store the SQI * and SQI+ capability information retrieved from the PHY. */ struct phy_oatc14_sqi_capability { bool updated; int sqi_max; u8 sqiplus_bits; }; /** * struct phy_device - An instance of a PHY * * @mdio: MDIO bus this PHY is on * @drv: Pointer to the driver for this PHY instance * @devlink: Create a link between phy dev and mac dev, if the external phy * used by current mac interface is managed by another mac interface. * @phyindex: Unique id across the phy's parent tree of phys to address the PHY * from userspace, similar to ifindex. A zero index means the PHY * wasn't assigned an id yet. * @phy_id: UID for this device found during discovery * @c45_ids: 802.3-c45 Device Identifiers if is_c45. * @is_c45: Set to true if this PHY uses clause 45 addressing. * @is_internal: Set to true if this PHY is internal to a MAC. * @is_pseudo_fixed_link: Set to true if this PHY is an Ethernet switch, etc. * @is_gigabit_capable: Set to true if PHY supports 1000Mbps * @has_fixups: Set to true if this PHY has fixups/quirks. * @suspended: Set to true if this PHY has been suspended successfully. * @suspended_by_mdio_bus: Set to true if this PHY was suspended by MDIO bus. * @sysfs_links: Internal boolean tracking sysfs symbolic links setup/removal. * @loopback_enabled: Set true if this PHY has been loopbacked successfully. * @downshifted_rate: Set true if link speed has been downshifted. * @is_on_sfp_module: Set true if PHY is located on an SFP module. * @mac_managed_pm: Set true if MAC driver takes care of suspending/resuming PHY * @wol_enabled: Set to true if the PHY or the attached MAC have Wake-on-LAN * enabled. * @is_genphy_driven: PHY is driven by one of the generic PHY drivers * @state: State of the PHY for management purposes * @dev_flags: Device-specific flags used by the PHY driver. * * - Bits [15:0] are free to use by the PHY driver to communicate * driver specific behavior. * - Bits [23:16] are currently reserved for future use. * - Bits [31:24] are reserved for defining generic * PHY driver behavior. * @irq: IRQ number of the PHY's interrupt (-1 if none) * @phylink: Pointer to phylink instance for this PHY * @sfp_bus_attached: Flag indicating whether the SFP bus has been attached * @sfp_bus: SFP bus attached to this PHY's fiber port * @attached_dev: The attached enet driver's device instance ptr * @adjust_link: Callback for the enet controller to respond to changes in the * link state. * @phy_link_change: Callback for phylink for notification of link change * @macsec_ops: MACsec offloading ops.
* * @speed: Current link speed * @duplex: Current duplex * @port: Current port * @pause: Current pause * @asym_pause: Current asymmetric pause * @supported: Combined MAC/PHY supported linkmodes * @advertising: Currently advertised linkmodes * @adv_old: Saved advertised while power saving for WoL * @supported_eee: supported PHY EEE linkmodes * @advertising_eee: Currently advertised EEE linkmodes * @enable_tx_lpi: When True, MAC should transmit LPI to PHY * @eee_active: phylib private state, indicating that EEE has been negotiated * @eee_cfg: User configuration of EEE * @lp_advertising: Current link partner advertised linkmodes * @host_interfaces: PHY interface modes supported by host * @eee_disabled_modes: Energy efficient ethernet modes not to be advertised * @autoneg: Flag autoneg being used * @rate_matching: Current rate matching mode * @link: Current link state * @autoneg_complete: Flag auto negotiation of the link has completed * @mdix: Current crossover * @mdix_ctrl: User setting of crossover * @pma_extable: Cached value of PMA/PMD Extended Abilities Register * @interrupts: Flag interrupts have been enabled * @irq_suspended: Flag indicating PHY is suspended and therefore interrupt * handling shall be postponed until PHY has resumed * @irq_rerun: Flag indicating interrupts occurred while PHY was suspended, * requiring a rerun of the interrupt handler after resume * @default_timestamp: Flag indicating whether we are using the phy * timestamp as the default one * @interface: enum phy_interface_t value * @possible_interfaces: bitmap of interface modes that the attached PHY * will switch between depending on media speed. * @skb: Netlink message for cable diagnostics * @nest: Netlink nest used for cable diagnostics * @ehdr: Netlink header for cable diagnostics * @phy_led_triggers: Array of LED triggers * @phy_num_led_triggers: Number of triggers in @phy_led_triggers * @led_link_trigger: LED trigger for link up/down * @last_triggered: last LED trigger for link speed * @leds: list of PHY LED structures * @master_slave_set: User requested master/slave configuration * @master_slave_get: Current master/slave advertisement * @master_slave_state: Current master/slave configuration * @mii_ts: Pointer to time stamper callbacks * @psec: Pointer to Power Sourcing Equipment control struct * @lock: Mutex for serializing access to the PHY * @state_queue: Work queue for state machine * @link_down_events: Number of times link was lost * @shared: Pointer to private data shared by phys in one package * @priv: Pointer to driver private data * @oatc14_sqi_capability: SQI capability information for OATC14 10Base-T1S PHY * * interrupts currently only supports enabled or disabled, * but could be changed in the future to support enabling * and disabling specific interrupts * * Contains some infrastructure for polling and interrupt * handling, as well as handling shifts in PHY hardware state */ struct phy_device { struct mdio_device mdio; /* Information about the PHY type */ /* And management functions */ const struct phy_driver *drv; struct device_link *devlink; u32 phyindex; u32 phy_id; struct phy_c45_device_ids c45_ids; unsigned is_c45:1; unsigned is_internal:1; unsigned is_pseudo_fixed_link:1; unsigned is_gigabit_capable:1; unsigned has_fixups:1; unsigned suspended:1; unsigned suspended_by_mdio_bus:1; unsigned sysfs_links:1; unsigned loopback_enabled:1; unsigned downshifted_rate:1; unsigned is_on_sfp_module:1; unsigned mac_managed_pm:1; unsigned wol_enabled:1; unsigned is_genphy_driven:1; unsigned autoneg:1; /*
The most recently read link state */ unsigned link:1; unsigned autoneg_complete:1; bool pause:1; bool asym_pause:1; /* Interrupts are enabled */ unsigned interrupts:1; unsigned irq_suspended:1; unsigned irq_rerun:1; unsigned default_timestamp:1; int rate_matching; enum phy_state state; u32 dev_flags; phy_interface_t interface; DECLARE_PHY_INTERFACE_MASK(possible_interfaces); /* * forced speed & duplex (no autoneg) * partner speed & duplex & pause (autoneg) */ int speed; int duplex; int port; u8 master_slave_get; u8 master_slave_set; u8 master_slave_state; /* Union of PHY and Attached devices' supported link modes */ /* See ethtool.h for more info */ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported); __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising); __ETHTOOL_DECLARE_LINK_MODE_MASK(lp_advertising); /* used with phy_speed_down */ __ETHTOOL_DECLARE_LINK_MODE_MASK(adv_old); /* used for eee validation and configuration*/ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported_eee); __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising_eee); /* Energy efficient ethernet modes which should be prohibited */ __ETHTOOL_DECLARE_LINK_MODE_MASK(eee_disabled_modes); bool enable_tx_lpi; bool eee_active; struct eee_config eee_cfg; /* Host supported PHY interface types. Should be ignored if empty. */ DECLARE_PHY_INTERFACE_MASK(host_interfaces); #ifdef CONFIG_LED_TRIGGER_PHY struct phy_led_trigger *phy_led_triggers; unsigned int phy_num_led_triggers; struct phy_led_trigger *last_triggered; struct phy_led_trigger *led_link_trigger; #endif struct list_head leds; /* * Interrupt number for this PHY * -1 means no interrupt */ int irq; /* private data pointer */ /* For use by PHYs to maintain extra state */ void *priv; #if IS_ENABLED(CONFIG_PHY_PACKAGE) /* shared data pointer */ /* For use by PHYs inside the same package that need a shared state. */ struct phy_package_shared *shared; #endif /* Reporting cable test results */ struct sk_buff *skb; void *ehdr; struct nlattr *nest; /* Interrupt and Polling infrastructure */ struct delayed_work state_queue; struct mutex lock; /* This may be modified under the rtnl lock */ bool sfp_bus_attached; struct sfp_bus *sfp_bus; struct phylink *phylink; struct net_device *attached_dev; struct mii_timestamper *mii_ts; struct pse_control *psec; u8 mdix; u8 mdix_ctrl; int pma_extable; unsigned int link_down_events; void (*phy_link_change)(struct phy_device *phydev, bool up); void (*adjust_link)(struct net_device *dev); #if IS_ENABLED(CONFIG_MACSEC) /* MACsec management functions */ const struct macsec_ops *macsec_ops; #endif struct phy_oatc14_sqi_capability oatc14_sqi_capability; }; /* Generic phy_device::dev_flags */ #define PHY_F_NO_IRQ 0x80000000 #define PHY_F_RXC_ALWAYS_ON 0x40000000 #define to_phy_device(__dev) container_of_const(to_mdio_device(__dev), struct phy_device, mdio) /** * struct phy_tdr_config - Configuration of a TDR raw test * * @first: Distance for first data collection point * @last: Distance for last data collection point * @step: Step between data collection points * @pair: Bitmap of cable pairs to collect data for * * A structure containing possible configuration parameters * for a TDR cable test. The driver does not need to implement * all the parameters, but should report what is actually used. * All distances are in centimeters. 
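 *
 * An illustrative configuration that a caller could pass to
 * phy_start_cable_test_tdr() (values are examples only; distances in cm):
 *
 *	struct phy_tdr_config cfg = {
 *		.first	= 100,
 *		.last	= 10000,
 *		.step	= 100,
 *		.pair	= PHY_PAIR_ALL,
 *	};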
*/ struct phy_tdr_config { u32 first; u32 last; u32 step; s8 pair; }; #define PHY_PAIR_ALL -1 /** * enum link_inband_signalling - in-band signalling modes that are supported * * @LINK_INBAND_DISABLE: in-band signalling can be disabled * @LINK_INBAND_ENABLE: in-band signalling can be enabled without bypass * @LINK_INBAND_BYPASS: in-band signalling can be enabled with bypass * * The possible and required bits can only be used if the valid bit is set. * If possible is clear, that means inband signalling can not be used. * Required is only valid when possible is set, and means that inband * signalling must be used. */ enum link_inband_signalling { LINK_INBAND_DISABLE = BIT(0), LINK_INBAND_ENABLE = BIT(1), LINK_INBAND_BYPASS = BIT(2), }; /** * struct phy_plca_cfg - Configuration of the PLCA (Physical Layer Collision * Avoidance) Reconciliation Sublayer. * * @version: read-only PLCA register map version. -1 = not available. Ignored * when setting the configuration. Format is the same as reported by the PLCA * IDVER register (31.CA00). -1 = not available. * @enabled: PLCA configured mode (enabled/disabled). -1 = not available / don't * set. 0 = disabled, anything else = enabled. * @node_id: the PLCA local node identifier. -1 = not available / don't set. * Allowed values [0 .. 254]. 255 = node disabled. * @node_cnt: the PLCA node count (maximum number of nodes having a TO). Only * meaningful for the coordinator (node_id = 0). -1 = not available / don't * set. Allowed values [1 .. 255]. * @to_tmr: The value of the PLCA to_timer in bit-times, which determines the * PLCA transmit opportunity window opening. See IEEE802.3 Clause 148 for * more details. The to_timer shall be set equal over all nodes. * -1 = not available / don't set. Allowed values [0 .. 255]. * @burst_cnt: controls how many additional frames a node is allowed to send in * single transmit opportunity (TO). The default value of 0 means that the * node is allowed exactly one frame per TO. A value of 1 allows two frames * per TO, and so on. -1 = not available / don't set. * Allowed values [0 .. 255]. * @burst_tmr: controls how many bit times to wait for the MAC to send a new * frame before interrupting the burst. This value should be set to a value * greater than the MAC inter-packet gap (which is typically 96 bits). * -1 = not available / don't set. Allowed values [0 .. 255]. * * A structure containing configuration parameters for setting/getting the PLCA * RS configuration. The driver does not need to implement all the parameters, * but should report what is actually used. */ struct phy_plca_cfg { int version; int enabled; int node_id; int node_cnt; int to_tmr; int burst_cnt; int burst_tmr; }; /** * struct phy_plca_status - Status of the PLCA (Physical Layer Collision * Avoidance) Reconciliation Sublayer. * * @pst: The PLCA status as reported by the PST bit in the PLCA STATUS * register(31.CA03), indicating BEACON activity. * * A structure containing status information of the PLCA RS configuration. * The driver does not need to implement all the parameters, but should report * what is actually used. 
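 *
 * PHYs that follow the OPEN Alliance PLCA register map can usually point
 * their ops at the generic Clause 45 helpers declared later in this header
 * instead of open-coding the register access, e.g. (sketch of a driver's
 * phy_driver initialiser):
 *
 *	.get_plca_cfg		= genphy_c45_plca_get_cfg,
 *	.set_plca_cfg		= genphy_c45_plca_set_cfg,
 *	.get_plca_status	= genphy_c45_plca_get_status,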
*/ struct phy_plca_status { bool pst; }; /* Modes for PHY LED configuration */ enum phy_led_modes { PHY_LED_ACTIVE_HIGH = 0, PHY_LED_ACTIVE_LOW = 1, PHY_LED_INACTIVE_HIGH_IMPEDANCE = 2, /* keep it last */ __PHY_LED_MODES_NUM, }; /** * struct phy_led: An LED driven by the PHY * * @list: List of LEDs * @phydev: PHY this LED is attached to * @led_cdev: Standard LED class structure * @index: Number of the LED */ struct phy_led { struct list_head list; struct phy_device *phydev; struct led_classdev led_cdev; u8 index; }; #define to_phy_led(d) container_of(d, struct phy_led, led_cdev) /* * PHY_MSE_CAP_* - Bitmask flags for Mean Square Error (MSE) capabilities * * These flags describe which MSE metrics and selectors are implemented * by the PHY for the current link mode. They are used in * struct phy_mse_capability.supported_caps. * * Standardization: * The OPEN Alliance (OA) defines the presence of MSE/SQI/pMSE but not their * numeric scaling, update intervals, or aggregation windows. See: * OA 100BASE-T1 TC1 v1.0, sections 6.1.1-6.1.3 * OA 1000BASE-T1 TC12 v2.2, sections 6.1.1-6.1.2 * * Description of flags: * * PHY_MSE_CAP_CHANNEL_A * Per-pair diagnostics for Channel A are supported. Mapping to the * physical wire pair may depend on MDI/MDI-X polarity. * * PHY_MSE_CAP_CHANNEL_B, _C, _D * Same as above for channels B-D. * * PHY_MSE_CAP_WORST_CHANNEL * The PHY or driver can identify and report the single worst-performing * channel without querying each one individually. * * PHY_MSE_CAP_LINK * The PHY provides only a link-wide aggregate measurement or cannot map * results to a specific pair (for example 100BASE-TX with unknown * MDI/MDI-X). * * PHY_MSE_CAP_AVG * Average MSE (mean DCQ metric) is supported. For 100/1000BASE-T1 the OA * recommends 2^16 symbols, scaled 0..511, but the exact scaling is * vendor-specific. * * PHY_MSE_CAP_PEAK * Peak MSE (current peak within the measurement window) is supported. * Defined as pMSE for 100BASE-T1; vendor-specific for others. * * PHY_MSE_CAP_WORST_PEAK * Latched worst-case peak MSE since the last read (read-to-clear if * implemented). Optional in OA 100BASE-T1 TC1 6.1.3. */ #define PHY_MSE_CAP_CHANNEL_A BIT(0) #define PHY_MSE_CAP_CHANNEL_B BIT(1) #define PHY_MSE_CAP_CHANNEL_C BIT(2) #define PHY_MSE_CAP_CHANNEL_D BIT(3) #define PHY_MSE_CAP_WORST_CHANNEL BIT(4) #define PHY_MSE_CAP_LINK BIT(5) #define PHY_MSE_CAP_AVG BIT(6) #define PHY_MSE_CAP_PEAK BIT(7) #define PHY_MSE_CAP_WORST_PEAK BIT(8) /* * enum phy_mse_channel - Identifiers for selecting MSE measurement channels * * PHY_MSE_CHANNEL_A - PHY_MSE_CHANNEL_D * Select per-pair measurement for the corresponding channel. * * PHY_MSE_CHANNEL_WORST * Select the single worst-performing channel reported by hardware. * * PHY_MSE_CHANNEL_LINK * Select link-wide aggregate data (used when per-pair results are * unavailable). */ enum phy_mse_channel { PHY_MSE_CHANNEL_A, PHY_MSE_CHANNEL_B, PHY_MSE_CHANNEL_C, PHY_MSE_CHANNEL_D, PHY_MSE_CHANNEL_WORST, PHY_MSE_CHANNEL_LINK, }; /** * struct phy_mse_capability - Capabilities of Mean Square Error (MSE) * measurement interface * * Standardization notes: * * - Presence of MSE/SQI/pMSE is defined by OPEN Alliance specs, but numeric * scaling, refresh/update rate and aggregation windows are not fixed and * are vendor-/product-specific. (OA 100BASE-T1 TC1 v1.0 6.1.*; * OA 1000BASE-T1 TC12 v2.2 6.1.*) * * - Typical recommendations: 2^16 symbols and 0..511 scaling for MSE; pMSE only * defined for 100BASE-T1 (sliding window example), others are vendor * extensions. 
Drivers must report actual scale/limits here. * * Describes the MSE measurement capabilities for the current link mode. These * properties are dynamic and may change when link settings are modified. * Callers should re-query this capability after any link state change to * ensure they have the most up-to-date information. * * Callers should only request measurements for channels and types that are * indicated as supported by the @supported_caps bitmask. If @supported_caps * is 0, the device provides no MSE diagnostics, and driver operations should * typically return -EOPNOTSUPP. * * Snapshot values for average and peak MSE can be normalized to a 0..1 ratio * by dividing the raw snapshot by the corresponding @max_average_mse or * @max_peak_mse value. * * @max_average_mse: The maximum value for an average MSE snapshot. This * defines the scale for the measurement. If the PHY_MSE_CAP_AVG capability is * supported, this value MUST be greater than 0. (vendor-specific units). * @max_peak_mse: The maximum value for a peak MSE snapshot. If either * PHY_MSE_CAP_PEAK or PHY_MSE_CAP_WORST_PEAK is supported, this value MUST * be greater than 0. (vendor-specific units). * @refresh_rate_ps: The typical interval, in picoseconds, between hardware * updates of the MSE values. This is an estimate, and callers should not * assume synchronous sampling. (vendor-specific units). * @num_symbols: The number of symbols aggregated per hardware sample to * calculate the MSE. (vendor-specific units). * @supported_caps: A bitmask of PHY_MSE_CAP_* values indicating which * measurement types (e.g., average, peak) and channels * (e.g., per-pair or link-wide) are supported. */ struct phy_mse_capability { u64 max_average_mse; u64 max_peak_mse; u64 refresh_rate_ps; u64 num_symbols; u32 supported_caps; }; /** * struct phy_mse_snapshot - A snapshot of Mean Square Error (MSE) diagnostics * * Holds a set of MSE diagnostic values that were all captured from a single * measurement window. * * Values are raw, device-scaled and not normalized. Use struct * phy_mse_capability to interpret the scale and sampling window. * * @average_mse: The average MSE value over the measurement window. * OPEN Alliance references MSE as a DCQ metric; recommends 2^16 symbols and * 0..511 scaling. Exact scale and refresh are vendor-specific. * (100BASE-T1 TC1 v1.0 6.1.1; 1000BASE-T1 TC12 v2.2 6.1.1). * * @peak_mse: The peak MSE value observed within the measurement window. * For 100BASE-T1, "pMSE" is optional and may be implemented via a sliding * 128-symbol window with periodic capture; not standardized for 1000BASE-T1. * (100BASE-T1 TC1 v1.0 6.1.3, Table "DCQ.peakMSE"). * * @worst_peak_mse: A latched high-water mark of the peak MSE since last read * (read-to-clear if implemented). OPEN Alliance shows a latched "worst case * peak MSE" for 100BASE-T1 pMSE; availability/semantics outside that are * vendor-specific. (100BASE-T1 TC1 v1.0 6.1.3, DCQ.peakMSE high byte; * 1000BASE-T1 TC12 v2.2 treats DCQ details as vendor-specific.) */ struct phy_mse_snapshot { u64 average_mse; u64 peak_mse; u64 worst_peak_mse; }; /** * struct phy_driver - Driver structure for a particular PHY type * * @mdiodrv: Data common to all MDIO devices * @phy_id: The result of reading the UID registers of this PHY * type, and ANDing them with the phy_id_mask. 
This driver * only works for PHYs with IDs which match this field * @name: The friendly name of this PHY type * @phy_id_mask: Defines the important bits of the phy_id * @features: A mandatory list of features (speed, duplex, etc) * supported by this PHY * @flags: A bitfield defining certain other features this PHY * supports (like interrupts) * @driver_data: Static driver data * * All functions are optional. If config_aneg or read_status * are not implemented, the phy core uses the genphy versions. * Note that none of these functions should be called from * interrupt time. The goal is for the bus read/write functions * to be able to block when the bus transaction is happening, * and be freed up by an interrupt (The MPC85xx has this ability, * though it is not currently supported in the driver). */ struct phy_driver { struct mdio_driver_common mdiodrv; u32 phy_id; char *name; u32 phy_id_mask; const unsigned long * const features; u32 flags; const void *driver_data; /** * @soft_reset: Called to issue a PHY software reset */ int (*soft_reset)(struct phy_device *phydev); /** * @config_init: Called to initialize the PHY, * including after a reset */ int (*config_init)(struct phy_device *phydev); /** * @probe: Called during discovery. Used to set * up device-specific structures, if any */ int (*probe)(struct phy_device *phydev); /** * @get_features: Probe the hardware to determine what * abilities it has. Should only set phydev->supported. */ int (*get_features)(struct phy_device *phydev); /** * @inband_caps: query whether in-band is supported for the given PHY * interface mode. Returns a bitmask of bits defined by enum * link_inband_signalling. */ unsigned int (*inband_caps)(struct phy_device *phydev, phy_interface_t interface); /** * @config_inband: configure in-band mode for the PHY */ int (*config_inband)(struct phy_device *phydev, unsigned int modes); /** * @get_rate_matching: Get the supported type of rate matching for a * particular phy interface. This is used by phy consumers to determine * whether to advertise lower-speed modes for that interface. It is * assumed that if a rate matching mode is supported on an interface, * then that interface's rate can be adapted to all slower link speeds * supported by the phy. If the interface is not supported, this should * return %RATE_MATCH_NONE. */ int (*get_rate_matching)(struct phy_device *phydev, phy_interface_t iface); /* PHY Power Management */ /** @suspend: Suspend the hardware, saving state if needed */ int (*suspend)(struct phy_device *phydev); /** @resume: Resume the hardware, restoring state if needed */ int (*resume)(struct phy_device *phydev); /** * @config_aneg: Configures the advertisement and resets * autonegotiation if phydev->autoneg is on, * forces the speed to the current settings in phydev * if phydev->autoneg is off */ int (*config_aneg)(struct phy_device *phydev); /** @aneg_done: Determines the auto negotiation result */ int (*aneg_done)(struct phy_device *phydev); /** @read_status: Determines the negotiated speed and duplex */ int (*read_status)(struct phy_device *phydev); /** * @config_intr: Enables or disables interrupts. * It should also clear any pending interrupts prior to enabling the * IRQs and after disabling them. 
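	 *
	 * A rough sketch of the expected ordering (MY_INTR_STATUS,
	 * MY_INTR_MASK and MY_INTR_ALL are hypothetical vendor-specific
	 * registers/values, not part of this header):
	 *
	 *	if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
	 *		err = phy_read(phydev, MY_INTR_STATUS);
	 *		if (err < 0)
	 *			return err;
	 *		return phy_write(phydev, MY_INTR_MASK, MY_INTR_ALL);
	 *	}
	 *
	 *	err = phy_write(phydev, MY_INTR_MASK, 0);
	 *	if (err)
	 *		return err;
	 *	err = phy_read(phydev, MY_INTR_STATUS);
	 *	return err < 0 ? err : 0;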
*/ int (*config_intr)(struct phy_device *phydev); /** @handle_interrupt: Override default interrupt handling */ irqreturn_t (*handle_interrupt)(struct phy_device *phydev); /** @remove: Clears up any memory if needed */ void (*remove)(struct phy_device *phydev); /** * @match_phy_device: Returns true if this is a suitable * driver for the given phydev. If NULL, matching is based on * phy_id and phy_id_mask. */ int (*match_phy_device)(struct phy_device *phydev, const struct phy_driver *phydrv); /** * @set_wol: Some devices (e.g. qnap TS-119P II) require PHY * register changes to enable Wake on LAN, so set_wol is * provided to be called in the ethernet driver's set_wol * function. */ int (*set_wol)(struct phy_device *dev, struct ethtool_wolinfo *wol); /** * @get_wol: See set_wol, but for checking whether Wake on LAN * is enabled. */ void (*get_wol)(struct phy_device *dev, struct ethtool_wolinfo *wol); /** * @link_change_notify: Called to inform a PHY device driver * when the core is about to change the link state. This * callback is supposed to be used as fixup hook for drivers * that need to take action when the link state * changes. Drivers are by no means allowed to mess with the * PHY device structure in their implementations. */ void (*link_change_notify)(struct phy_device *dev); /** * @read_mmd: PHY specific driver override for reading a MMD * register. This function is optional for PHY specific * drivers. When not provided, the default MMD read function * will be used by phy_read_mmd(), which will use either a * direct read for Clause 45 PHYs or an indirect read for * Clause 22 PHYs. devnum is the MMD device number within the * PHY device, regnum is the register within the selected MMD * device. */ int (*read_mmd)(struct phy_device *dev, int devnum, u16 regnum); /** * @write_mmd: PHY specific driver override for writing a MMD * register. This function is optional for PHY specific * drivers. When not provided, the default MMD write function * will be used by phy_write_mmd(), which will use either a * direct write for Clause 45 PHYs, or an indirect write for * Clause 22 PHYs. devnum is the MMD device number within the * PHY device, regnum is the register within the selected MMD * device. val is the value to be written. */ int (*write_mmd)(struct phy_device *dev, int devnum, u16 regnum, u16 val); /** @read_page: Return the current PHY register page number */ int (*read_page)(struct phy_device *dev); /** @write_page: Set the current PHY register page number */ int (*write_page)(struct phy_device *dev, int page); /** * @module_info: Get the size and type of the eeprom contained * within a plug-in module */ int (*module_info)(struct phy_device *dev, struct ethtool_modinfo *modinfo); /** * @module_eeprom: Get the eeprom information from the plug-in * module */ int (*module_eeprom)(struct phy_device *dev, struct ethtool_eeprom *ee, u8 *data); /** @cable_test_start: Start a cable test */ int (*cable_test_start)(struct phy_device *dev); /** @cable_test_tdr_start: Start a raw TDR cable test */ int (*cable_test_tdr_start)(struct phy_device *dev, const struct phy_tdr_config *config); /** * @cable_test_get_status: Once per second, or on interrupt, * request the status of the test. */ int (*cable_test_get_status)(struct phy_device *dev, bool *finished); /* Get statistics from the PHY using ethtool */ /** * @get_phy_stats: Retrieve PHY statistics. * @dev: The PHY device for which the statistics are retrieved. * @eth_stats: structure where Ethernet PHY stats will be stored. 
* @stats: structure where additional PHY-specific stats will be stored. * * Retrieves the supported PHY statistics and populates the provided * structures. The input structures are pre-initialized with * `ETHTOOL_STAT_NOT_SET`, and the driver must only modify members * corresponding to supported statistics. Unmodified members will remain * set to `ETHTOOL_STAT_NOT_SET` and will not be returned to userspace. */ void (*get_phy_stats)(struct phy_device *dev, struct ethtool_eth_phy_stats *eth_stats, struct ethtool_phy_stats *stats); /** * @get_link_stats: Retrieve link statistics. * @dev: The PHY device for which the statistics are retrieved. * @link_stats: structure where link-specific stats will be stored. * * Retrieves link-related statistics for the given PHY device. The input * structure is pre-initialized with `ETHTOOL_STAT_NOT_SET`, and the * driver must only modify members corresponding to supported * statistics. Unmodified members will remain set to * `ETHTOOL_STAT_NOT_SET` and will not be returned to userspace. */ void (*get_link_stats)(struct phy_device *dev, struct ethtool_link_ext_stats *link_stats); /** * @update_stats: Trigger periodic statistics updates. * @dev: The PHY device for which statistics updates are triggered. * * Periodically gathers statistics from the PHY device to update locally * maintained 64-bit counters. This is necessary for PHYs that implement * reduced-width counters (e.g., 16-bit or 32-bit) which can overflow * more frequently compared to 64-bit counters. By invoking this * callback, drivers can fetch the current counter values, handle * overflow detection, and accumulate the results into local 64-bit * counters for accurate reporting through the `get_phy_stats` and * `get_link_stats` interfaces. * * Return: 0 on success or a negative error code on failure. */ int (*update_stats)(struct phy_device *dev); /** @get_sset_count: Number of statistic counters */ int (*get_sset_count)(struct phy_device *dev); /** @get_strings: Names of the statistic counters */ void (*get_strings)(struct phy_device *dev, u8 *data); /** @get_stats: Return the statistic counter values */ void (*get_stats)(struct phy_device *dev, struct ethtool_stats *stats, u64 *data); /* Get and Set PHY tunables */ /** @get_tunable: Return the value of a tunable */ int (*get_tunable)(struct phy_device *dev, struct ethtool_tunable *tuna, void *data); /** @set_tunable: Set the value of a tunable */ int (*set_tunable)(struct phy_device *dev, struct ethtool_tunable *tuna, const void *data); /** * @set_loopback: Set the loopback mode of the PHY * enable selects if the loopback mode is enabled or disabled. If the * loopback mode is enabled, then the speed of the loopback mode can be * requested with the speed argument. If the speed argument is zero, * then any speed can be selected. If the speed argument is > 0, then * this speed shall be selected for the loopback mode or EOPNOTSUPP * shall be returned if speed selection is not supported. 
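	 *
	 * MAC drivers normally reach this callback through phy_loopback()
	 * rather than calling it directly; an illustrative sequence:
	 *
	 *	err = phy_loopback(phydev, true, SPEED_1000);
	 *
	 * and later disable it again with phy_loopback(phydev, false, 0).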
*/ int (*set_loopback)(struct phy_device *dev, bool enable, int speed); /** @get_sqi: Get the signal quality indication */ int (*get_sqi)(struct phy_device *dev); /** @get_sqi_max: Get the maximum signal quality indication */ int (*get_sqi_max)(struct phy_device *dev); /** * @get_mse_capability: Get capabilities and scale of MSE measurement * @dev: PHY device * @cap: Output (filled on success) * * Fill @cap with the PHY's MSE capability for the current * link mode: scale limits (max_average_mse, max_peak_mse), update * interval (refresh_rate_ps), sample length (num_symbols) and the * capability bitmask (supported_caps). * * Implementations may defer capability report until hardware has * converged; in that case they should return -EAGAIN and allow the * caller to retry later. * * Return: 0 on success. On failure, returns a negative errno code, such * as -EOPNOTSUPP if MSE measurement is not supported by the PHY or in * the current link mode, or -EAGAIN if the capability information is * not yet available. */ int (*get_mse_capability)(struct phy_device *dev, struct phy_mse_capability *cap); /** * @get_mse_snapshot: Retrieve a snapshot of MSE diagnostic values * @dev: PHY device * @channel: Channel identifier (PHY_MSE_CHANNEL_*) * @snapshot: Output (filled on success) * * Fill @snapshot with a correlated set of MSE values from the most * recent measurement window. * * Callers must validate @channel against supported_caps returned by * get_mse_capability(). Drivers must not coerce @channel; if the * requested selector is not implemented by the device or current link * mode, the operation must fail. * * worst_peak_mse is latched and must be treated as read-to-clear. * * Return: 0 on success. On failure, returns a negative errno code, such * as -EOPNOTSUPP if MSE measurement is not supported by the PHY or in * the current link mode, or -EAGAIN if measurements are not yet * available. */ int (*get_mse_snapshot)(struct phy_device *dev, enum phy_mse_channel channel, struct phy_mse_snapshot *snapshot); /* PLCA RS interface */ /** @get_plca_cfg: Return the current PLCA configuration */ int (*get_plca_cfg)(struct phy_device *dev, struct phy_plca_cfg *plca_cfg); /** @set_plca_cfg: Set the PLCA configuration */ int (*set_plca_cfg)(struct phy_device *dev, const struct phy_plca_cfg *plca_cfg); /** @get_plca_status: Return the current PLCA status info */ int (*get_plca_status)(struct phy_device *dev, struct phy_plca_status *plca_st); /** * @led_brightness_set: Set a PHY LED brightness. Index * indicates which of the PHYs led should be set. Value * follows the standard LED class meaning, e.g. LED_OFF, * LED_HALF, LED_FULL. */ int (*led_brightness_set)(struct phy_device *dev, u8 index, enum led_brightness value); /** * @led_blink_set: Set a PHY LED blinking. Index indicates * which of the PHYs led should be configured to blink. Delays * are in milliseconds and if both are zero then a sensible * default should be chosen. The call should adjust the * timings in that case and if it can't match the values * specified exactly. */ int (*led_blink_set)(struct phy_device *dev, u8 index, unsigned long *delay_on, unsigned long *delay_off); /** * @led_hw_is_supported: Can the HW support the given rules. * @dev: PHY device which has the LED * @index: Which LED of the PHY device * @rules The core is interested in these rules * * Return 0 if yes, -EOPNOTSUPP if not, or an error code. 
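	 *
	 * A minimal sketch of the usual pattern (the supported set is purely
	 * illustrative; the TRIGGER_NETDEV_* flags come from linux/leds.h):
	 *
	 *	static const unsigned long supported = BIT(TRIGGER_NETDEV_LINK) |
	 *					       BIT(TRIGGER_NETDEV_RX) |
	 *					       BIT(TRIGGER_NETDEV_TX);
	 *
	 *	if (rules & ~supported)
	 *		return -EOPNOTSUPP;
	 *	return 0;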
*/ int (*led_hw_is_supported)(struct phy_device *dev, u8 index, unsigned long rules); /** * @led_hw_control_set: Set the HW to control the LED * @dev: PHY device which has the LED * @index: Which LED of the PHY device * @rules: The rules used to control the LED * * Returns 0, or an error code. */ int (*led_hw_control_set)(struct phy_device *dev, u8 index, unsigned long rules); /** * @led_hw_control_get: Get how the HW is controlling the LED * @dev: PHY device which has the LED * @index: Which LED of the PHY device * @rules: Pointer to the rules used to control the LED * * Set *@rules to how the HW is currently blinking. Returns 0 * on success, or an error code if the current blinking cannot * be represented in rules, or some other error happens. */ int (*led_hw_control_get)(struct phy_device *dev, u8 index, unsigned long *rules); /** * @led_polarity_set: Set the LED polarity modes * @dev: PHY device which has the LED * @index: Which LED of the PHY device * @modes: bitmap of LED polarity modes * * Configure LED with all the required polarity modes in @modes * to make it correctly turn ON or OFF. * * Returns 0, or an error code. */ int (*led_polarity_set)(struct phy_device *dev, int index, unsigned long modes); /** * @get_next_update_time: Get the time until the next update event * @dev: PHY device * * Callback to determine the time (in jiffies) until the next * update event for the PHY state machine. Allows PHY drivers to * dynamically adjust polling intervals based on link state or other * conditions. * * Returns the time in jiffies until the next update event. */ unsigned int (*get_next_update_time)(struct phy_device *dev); }; #define to_phy_driver(d) container_of_const(to_mdio_common_driver(d), \ struct phy_driver, mdiodrv) #define PHY_ID_MATCH_EXACT_MASK GENMASK(31, 0) #define PHY_ID_MATCH_MODEL_MASK GENMASK(31, 4) #define PHY_ID_MATCH_VENDOR_MASK GENMASK(31, 10) #define PHY_ID_MATCH_EXACT(id) .phy_id = (id), .phy_id_mask = PHY_ID_MATCH_EXACT_MASK #define PHY_ID_MATCH_MODEL(id) .phy_id = (id), .phy_id_mask = PHY_ID_MATCH_MODEL_MASK #define PHY_ID_MATCH_VENDOR(id) .phy_id = (id), .phy_id_mask = PHY_ID_MATCH_VENDOR_MASK /** * phy_id_compare - compare @id1 with @id2 taking account of @mask * @id1: first PHY ID * @id2: second PHY ID * @mask: the PHY ID mask, set bits are significant in matching * * Return true if the bits from @id1 and @id2 specified by @mask match. * This uses an equivalent test to (@id1 & @mask) == (@id2 & @mask). */ static inline bool phy_id_compare(u32 id1, u32 id2, u32 mask) { return !((id1 ^ id2) & mask); } /** * phy_id_compare_vendor - compare @id with @vendor_mask * @id: PHY ID * @vendor_mask: PHY Vendor mask * * Return: true if the bits from @id match @vendor_mask using the * generic PHY Vendor mask. */ static inline bool phy_id_compare_vendor(u32 id, u32 vendor_mask) { return phy_id_compare(id, vendor_mask, PHY_ID_MATCH_VENDOR_MASK); } /** * phy_id_compare_model - compare @id with @model_mask * @id: PHY ID * @model_mask: PHY Model mask * * Return: true if the bits from @id match @model_mask using the * generic PHY Model mask. */ static inline bool phy_id_compare_model(u32 id, u32 model_mask) { return phy_id_compare(id, model_mask, PHY_ID_MATCH_MODEL_MASK); } /** * phydev_id_compare - compare @id with the PHY's Clause 22 ID * @phydev: the PHY device * @id: the PHY ID to be matched * * Compare the @phydev clause 22 ID with the provided @id and return true or * false depending on whether it matches, using the bound driver mask. The * @phydev must be bound to a driver.
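 *
 * Typical use in a driver that handles several closely related PHYs
 * (0x00112233 is a made-up ID and my_variant_config() a hypothetical
 * helper, both used purely for illustration):
 *
 *	if (phydev_id_compare(phydev, 0x00112233))
 *		return my_variant_config(phydev);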
*/ static inline bool phydev_id_compare(struct phy_device *phydev, u32 id) { return phy_id_compare(id, phydev->phy_id, phydev->drv->phy_id_mask); } const char *phy_speed_to_str(int speed); const char *phy_duplex_to_str(unsigned int duplex); const char *phy_rate_matching_to_str(int rate_matching); int phy_interface_num_ports(phy_interface_t interface); /** * phy_is_started - Convenience function to check whether PHY is started * @phydev: The phy_device struct */ static inline bool phy_is_started(struct phy_device *phydev) { return phydev->state >= PHY_UP; } /** * phy_driver_is_genphy - Convenience function to check whether PHY is driven * by one of the generic PHY drivers * @phydev: The phy_device struct * Return: true if PHY is driven by one of the genphy drivers */ static inline bool phy_driver_is_genphy(struct phy_device *phydev) { return phydev->is_genphy_driven; } /** * phy_disable_eee_mode - Don't advertise an EEE mode. * @phydev: The phy_device struct * @link_mode: The EEE mode to be disabled */ static inline void phy_disable_eee_mode(struct phy_device *phydev, u32 link_mode) { WARN_ON(phy_is_started(phydev)); linkmode_set_bit(link_mode, phydev->eee_disabled_modes); linkmode_clear_bit(link_mode, phydev->advertising_eee); } /** * phy_can_wakeup() - indicate whether PHY has driver model wakeup capabilities * @phydev: The phy_device struct * * Returns: true/false depending on the PHY driver's device_set_wakeup_capable() * setting. */ static inline bool phy_can_wakeup(struct phy_device *phydev) { return device_can_wakeup(&phydev->mdio.dev); } /** * phy_may_wakeup() - indicate whether PHY has wakeup enabled * @phydev: The phy_device struct * * Returns: true/false depending on the PHY driver's device_set_wakeup_enabled() * setting if using the driver model, otherwise the legacy determination. */ bool phy_may_wakeup(struct phy_device *phydev); void phy_resolve_aneg_pause(struct phy_device *phydev); void phy_resolve_aneg_linkmode(struct phy_device *phydev); /** * phy_read - Convenience function for reading a given PHY register * @phydev: the phy_device struct * @regnum: register number to read * * NOTE: MUST NOT be called from interrupt context, * because the bus read/write functions may wait for an interrupt * to conclude the operation. */ static inline int phy_read(struct phy_device *phydev, u32 regnum) { return mdiobus_read(phydev->mdio.bus, phydev->mdio.addr, regnum); } #define phy_read_poll_timeout(phydev, regnum, val, cond, sleep_us, \ timeout_us, sleep_before_read) \ ({ \ int __ret, __val; \ __ret = read_poll_timeout(__val = phy_read, val, \ __val < 0 || (cond), \ sleep_us, timeout_us, sleep_before_read, phydev, regnum); \ if (__val < 0) \ __ret = __val; \ if (__ret) \ phydev_err(phydev, "%s failed: %d\n", __func__, __ret); \ __ret; \ }) /** * __phy_read - convenience function for reading a given PHY register * @phydev: the phy_device struct * @regnum: register number to read * * The caller must have taken the MDIO bus lock. */ static inline int __phy_read(struct phy_device *phydev, u32 regnum) { return __mdiobus_read(phydev->mdio.bus, phydev->mdio.addr, regnum); } /** * phy_write - Convenience function for writing a given PHY register * @phydev: the phy_device struct * @regnum: register number to write * @val: value to write to @regnum * * NOTE: MUST NOT be called from interrupt context, * because the bus read/write functions may wait for an interrupt * to conclude the operation. 
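 *
 * For example, kicking off a PHY soft reset could be written as below
 * (MII_BMCR and BMCR_RESET come from linux/mii.h). Note that a plain write
 * replaces the whole register, so phy_modify() or phy_set_bits() are often
 * the better choice, and genphy_soft_reset() already provides this reset
 * for drivers:
 *
 *	err = phy_write(phydev, MII_BMCR, BMCR_RESET);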
*/ static inline int phy_write(struct phy_device *phydev, u32 regnum, u16 val) { return mdiobus_write(phydev->mdio.bus, phydev->mdio.addr, regnum, val); } /** * __phy_write - Convenience function for writing a given PHY register * @phydev: the phy_device struct * @regnum: register number to write * @val: value to write to @regnum * * The caller must have taken the MDIO bus lock. */ static inline int __phy_write(struct phy_device *phydev, u32 regnum, u16 val) { return __mdiobus_write(phydev->mdio.bus, phydev->mdio.addr, regnum, val); } /** * __phy_modify_changed() - Convenience function for modifying a PHY register * @phydev: a pointer to a &struct phy_device * @regnum: register number * @mask: bit mask of bits to clear * @set: bit mask of bits to set * * Unlocked helper function which allows a PHY register to be modified as * new register value = (old register value & ~mask) | set * * Returns negative errno, 0 if there was no change, and 1 in case of change */ static inline int __phy_modify_changed(struct phy_device *phydev, u32 regnum, u16 mask, u16 set) { return __mdiobus_modify_changed(phydev->mdio.bus, phydev->mdio.addr, regnum, mask, set); } /* * phy_read_mmd - Convenience function for reading a register * from an MMD on a given PHY. */ int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum); /** * phy_read_mmd_poll_timeout - Periodically poll a PHY register until a * condition is met or a timeout occurs * * @phydev: The phy_device struct * @devaddr: The MMD to read from * @regnum: The register on the MMD to read * @val: Variable to read the register into * @cond: Break condition (usually involving @val) * @sleep_us: Maximum time to sleep between reads in us (0 tight-loops). Please * read usleep_range() function description for details and * limitations. * @timeout_us: Timeout in us, 0 means never timeout * @sleep_before_read: if it is true, sleep @sleep_us before read. * * Returns: 0 on success and -ETIMEDOUT upon a timeout. In either * case, the last read value at @args is stored in @val. Must not * be called from atomic context if sleep_us or timeout_us are used. */ #define phy_read_mmd_poll_timeout(phydev, devaddr, regnum, val, cond, \ sleep_us, timeout_us, sleep_before_read) \ ({ \ int __ret, __val; \ __ret = read_poll_timeout(__val = phy_read_mmd, val, \ __val < 0 || (cond), \ sleep_us, timeout_us, sleep_before_read, \ phydev, devaddr, regnum); \ if (__val < 0) \ __ret = __val; \ if (__ret) \ phydev_err(phydev, "%s failed: %d\n", __func__, __ret); \ __ret; \ }) /* * __phy_read_mmd - Convenience function for reading a register * from an MMD on a given PHY. */ int __phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum); /* * phy_write_mmd - Convenience function for writing a register * on an MMD on a given PHY. */ int phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val); /* * __phy_write_mmd - Convenience function for writing a register * on an MMD on a given PHY. 
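 *
 * Like the other double-underscore accessors, the caller must hold the MDIO
 * bus lock, e.g. (sketch; MDIO_MMD_AN comes from linux/mdio.h):
 *
 *	phy_lock_mdio_bus(phydev);
 *	ret = __phy_write_mmd(phydev, MDIO_MMD_AN, regnum, val);
 *	phy_unlock_mdio_bus(phydev);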
*/ int __phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val); int __phy_modify_changed(struct phy_device *phydev, u32 regnum, u16 mask, u16 set); int phy_modify_changed(struct phy_device *phydev, u32 regnum, u16 mask, u16 set); int __phy_modify(struct phy_device *phydev, u32 regnum, u16 mask, u16 set); int phy_modify(struct phy_device *phydev, u32 regnum, u16 mask, u16 set); int __phy_modify_mmd_changed(struct phy_device *phydev, int devad, u32 regnum, u16 mask, u16 set); int phy_modify_mmd_changed(struct phy_device *phydev, int devad, u32 regnum, u16 mask, u16 set); int __phy_modify_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 mask, u16 set); int phy_modify_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 mask, u16 set); /** * __phy_set_bits - Convenience function for setting bits in a PHY register * @phydev: the phy_device struct * @regnum: register number to write * @val: bits to set * * The caller must have taken the MDIO bus lock. */ static inline int __phy_set_bits(struct phy_device *phydev, u32 regnum, u16 val) { return __phy_modify(phydev, regnum, 0, val); } /** * __phy_clear_bits - Convenience function for clearing bits in a PHY register * @phydev: the phy_device struct * @regnum: register number to write * @val: bits to clear * * The caller must have taken the MDIO bus lock. */ static inline int __phy_clear_bits(struct phy_device *phydev, u32 regnum, u16 val) { return __phy_modify(phydev, regnum, val, 0); } /** * phy_set_bits - Convenience function for setting bits in a PHY register * @phydev: the phy_device struct * @regnum: register number to write * @val: bits to set */ static inline int phy_set_bits(struct phy_device *phydev, u32 regnum, u16 val) { return phy_modify(phydev, regnum, 0, val); } /** * phy_clear_bits - Convenience function for clearing bits in a PHY register * @phydev: the phy_device struct * @regnum: register number to write * @val: bits to clear */ static inline int phy_clear_bits(struct phy_device *phydev, u32 regnum, u16 val) { return phy_modify(phydev, regnum, val, 0); } /** * __phy_set_bits_mmd - Convenience function for setting bits in a register * on MMD * @phydev: the phy_device struct * @devad: the MMD containing register to modify * @regnum: register number to modify * @val: bits to set * * The caller must have taken the MDIO bus lock. */ static inline int __phy_set_bits_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val) { return __phy_modify_mmd(phydev, devad, regnum, 0, val); } /** * __phy_clear_bits_mmd - Convenience function for clearing bits in a register * on MMD * @phydev: the phy_device struct * @devad: the MMD containing register to modify * @regnum: register number to modify * @val: bits to clear * * The caller must have taken the MDIO bus lock. 
*/ static inline int __phy_clear_bits_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val) { return __phy_modify_mmd(phydev, devad, regnum, val, 0); } /** * phy_set_bits_mmd - Convenience function for setting bits in a register * on MMD * @phydev: the phy_device struct * @devad: the MMD containing register to modify * @regnum: register number to modify * @val: bits to set */ static inline int phy_set_bits_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val) { return phy_modify_mmd(phydev, devad, regnum, 0, val); } /** * phy_clear_bits_mmd - Convenience function for clearing bits in a register * on MMD * @phydev: the phy_device struct * @devad: the MMD containing register to modify * @regnum: register number to modify * @val: bits to clear */ static inline int phy_clear_bits_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val) { return phy_modify_mmd(phydev, devad, regnum, val, 0); } /** * phy_interrupt_is_valid - Convenience function for testing a given PHY irq * @phydev: the phy_device struct * * NOTE: must be kept in sync with addition/removal of PHY_POLL and * PHY_MAC_INTERRUPT */ static inline bool phy_interrupt_is_valid(struct phy_device *phydev) { return phydev->irq != PHY_POLL && phydev->irq != PHY_MAC_INTERRUPT; } /** * phy_polling_mode - Convenience function for testing whether polling is * used to detect PHY status changes * @phydev: the phy_device struct */ static inline bool phy_polling_mode(struct phy_device *phydev) { if (phydev->state == PHY_CABLETEST) if (phydev->drv->flags & PHY_POLL_CABLE_TEST) return true; if (phydev->drv->update_stats) return true; return phydev->irq == PHY_POLL; } /** * phy_has_hwtstamp - Tests whether a PHY supports time stamp configuration. * @phydev: the phy_device struct */ static inline bool phy_has_hwtstamp(struct phy_device *phydev) { return phydev && phydev->mii_ts && phydev->mii_ts->hwtstamp_set; } /** * phy_has_rxtstamp - Tests whether a PHY supports receive time stamping. * @phydev: the phy_device struct */ static inline bool phy_has_rxtstamp(struct phy_device *phydev) { return phydev && phydev->mii_ts && phydev->mii_ts->rxtstamp; } /** * phy_has_tsinfo - Tests whether a PHY reports time stamping and/or * PTP hardware clock capabilities. * @phydev: the phy_device struct */ static inline bool phy_has_tsinfo(struct phy_device *phydev) { return phydev && phydev->mii_ts && phydev->mii_ts->ts_info; } /** * phy_has_txtstamp - Tests whether a PHY supports transmit time stamping.
* @phydev: the phy_device struct */ static inline bool phy_has_txtstamp(struct phy_device *phydev) { return phydev && phydev->mii_ts && phydev->mii_ts->txtstamp; } static inline int phy_hwtstamp(struct phy_device *phydev, struct kernel_hwtstamp_config *cfg, struct netlink_ext_ack *extack) { return phydev->mii_ts->hwtstamp_set(phydev->mii_ts, cfg, extack); } static inline bool phy_rxtstamp(struct phy_device *phydev, struct sk_buff *skb, int type) { return phydev->mii_ts->rxtstamp(phydev->mii_ts, skb, type); } static inline int phy_ts_info(struct phy_device *phydev, struct kernel_ethtool_ts_info *tsinfo) { return phydev->mii_ts->ts_info(phydev->mii_ts, tsinfo); } static inline void phy_txtstamp(struct phy_device *phydev, struct sk_buff *skb, int type) { phydev->mii_ts->txtstamp(phydev->mii_ts, skb, type); } /** * phy_is_default_hwtstamp - Is the PHY hwtstamp the default timestamp * @phydev: Pointer to phy_device * * This is used to get default timestamping device taking into account * the new API choice, which is selecting the timestamping from MAC by * default if the phydev does not have default_timestamp flag enabled. * * Return: True if phy is the default hw timestamp, false otherwise. */ static inline bool phy_is_default_hwtstamp(struct phy_device *phydev) { return phy_has_hwtstamp(phydev) && phydev->default_timestamp; } /** * phy_on_sfp - Convenience function for testing if a PHY is on an SFP module * @phydev: the phy_device struct */ static inline bool phy_on_sfp(struct phy_device *phydev) { return phydev->is_on_sfp_module; } /** * phy_interface_mode_is_rgmii - Convenience function for testing if a * PHY interface mode is RGMII (all variants) * @mode: the &phy_interface_t enum */ static inline bool phy_interface_mode_is_rgmii(phy_interface_t mode) { return mode >= PHY_INTERFACE_MODE_RGMII && mode <= PHY_INTERFACE_MODE_RGMII_TXID; }; /** * phy_interface_mode_is_8023z() - does the PHY interface mode use 802.3z * negotiation * @mode: one of &enum phy_interface_t * * Returns true if the PHY interface mode uses the 16-bit negotiation * word as defined in 802.3z. (See 802.3-2015 37.2.1 Config_Reg encoding) */ static inline bool phy_interface_mode_is_8023z(phy_interface_t mode) { return mode == PHY_INTERFACE_MODE_1000BASEX || mode == PHY_INTERFACE_MODE_2500BASEX; } /** * phy_interface_is_rgmii - Convenience function for testing if a PHY interface * is RGMII (all variants) * @phydev: the phy_device struct */ static inline bool phy_interface_is_rgmii(struct phy_device *phydev) { return phy_interface_mode_is_rgmii(phydev->interface); }; /** * phy_is_pseudo_fixed_link - Convenience function for testing if this * PHY is the CPU port facing side of an Ethernet switch, or similar. 
* @phydev: the phy_device struct */ static inline bool phy_is_pseudo_fixed_link(struct phy_device *phydev) { return phydev->is_pseudo_fixed_link; } phy_interface_t phy_fix_phy_mode_for_mac_delays(phy_interface_t interface, bool mac_txid, bool mac_rxid); int phy_save_page(struct phy_device *phydev); int phy_select_page(struct phy_device *phydev, int page); int phy_restore_page(struct phy_device *phydev, int oldpage, int ret); int phy_read_paged(struct phy_device *phydev, int page, u32 regnum); int phy_write_paged(struct phy_device *phydev, int page, u32 regnum, u16 val); int phy_modify_paged_changed(struct phy_device *phydev, int page, u32 regnum, u16 mask, u16 set); int phy_modify_paged(struct phy_device *phydev, int page, u32 regnum, u16 mask, u16 set); struct phy_device *phy_device_create(struct mii_bus *bus, int addr, u32 phy_id, bool is_c45, struct phy_c45_device_ids *c45_ids); int fwnode_get_phy_id(struct fwnode_handle *fwnode, u32 *phy_id); struct mdio_device *fwnode_mdio_find_device(struct fwnode_handle *fwnode); struct phy_device *fwnode_phy_find_device(struct fwnode_handle *phy_fwnode); struct fwnode_handle *fwnode_get_phy_node(const struct fwnode_handle *fwnode); struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45); int phy_device_register(struct phy_device *phy); void phy_device_free(struct phy_device *phydev); void phy_device_remove(struct phy_device *phydev); int phy_get_c45_ids(struct phy_device *phydev); int phy_init_hw(struct phy_device *phydev); int phy_suspend(struct phy_device *phydev); int phy_resume(struct phy_device *phydev); int __phy_resume(struct phy_device *phydev); int phy_loopback(struct phy_device *phydev, bool enable, int speed); int phy_sfp_connect_phy(void *upstream, struct phy_device *phy); void phy_sfp_disconnect_phy(void *upstream, struct phy_device *phy); void phy_sfp_attach(void *upstream, struct sfp_bus *bus); void phy_sfp_detach(void *upstream, struct sfp_bus *bus); int phy_sfp_probe(struct phy_device *phydev, const struct sfp_upstream_ops *ops); struct phy_device *phy_attach(struct net_device *dev, const char *bus_id, phy_interface_t interface); struct phy_device *phy_find_next(struct mii_bus *bus, struct phy_device *pos); int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, u32 flags, phy_interface_t interface); int phy_connect_direct(struct net_device *dev, struct phy_device *phydev, void (*handler)(struct net_device *), phy_interface_t interface); struct phy_device *phy_connect(struct net_device *dev, const char *bus_id, void (*handler)(struct net_device *), phy_interface_t interface); void phy_disconnect(struct phy_device *phydev); void phy_detach(struct phy_device *phydev); void phy_start(struct phy_device *phydev); void phy_stop(struct phy_device *phydev); int phy_config_aneg(struct phy_device *phydev); int _phy_start_aneg(struct phy_device *phydev); int phy_start_aneg(struct phy_device *phydev); int phy_aneg_done(struct phy_device *phydev); unsigned int phy_inband_caps(struct phy_device *phydev, phy_interface_t interface); int phy_config_inband(struct phy_device *phydev, unsigned int modes); int phy_speed_down(struct phy_device *phydev, bool sync); int phy_speed_up(struct phy_device *phydev); bool phy_check_valid(int speed, int duplex, unsigned long *features); int phy_restart_aneg(struct phy_device *phydev); int phy_reset_after_clk_enable(struct phy_device *phydev); static inline struct phy_device *phy_find_first(struct mii_bus *bus) { return phy_find_next(bus, NULL); } #define 
mdiobus_for_each_phy(_bus, _phydev) \ for (_phydev = phy_find_first(_bus); _phydev; \ _phydev = phy_find_next(_bus, _phydev)) #if IS_ENABLED(CONFIG_PHYLIB) int phy_start_cable_test(struct phy_device *phydev, struct netlink_ext_ack *extack); int phy_start_cable_test_tdr(struct phy_device *phydev, struct netlink_ext_ack *extack, const struct phy_tdr_config *config); #else static inline int phy_start_cable_test(struct phy_device *phydev, struct netlink_ext_ack *extack) { NL_SET_ERR_MSG(extack, "Kernel not compiled with PHYLIB support"); return -EOPNOTSUPP; } static inline int phy_start_cable_test_tdr(struct phy_device *phydev, struct netlink_ext_ack *extack, const struct phy_tdr_config *config) { NL_SET_ERR_MSG(extack, "Kernel not compiled with PHYLIB support"); return -EOPNOTSUPP; } #endif static inline void phy_device_reset(struct phy_device *phydev, int value) { mdio_device_reset(&phydev->mdio, value); } #define phydev_err(_phydev, format, args...) \ dev_err(&_phydev->mdio.dev, format, ##args) #define phydev_err_probe(_phydev, err, format, args...) \ dev_err_probe(&_phydev->mdio.dev, err, format, ##args) #define phydev_info(_phydev, format, args...) \ dev_info(&_phydev->mdio.dev, format, ##args) #define phydev_warn(_phydev, format, args...) \ dev_warn(&_phydev->mdio.dev, format, ##args) #define phydev_dbg(_phydev, format, args...) \ dev_dbg(&_phydev->mdio.dev, format, ##args) static inline const char *phydev_name(const struct phy_device *phydev) { return dev_name(&phydev->mdio.dev); } static inline void phy_lock_mdio_bus(struct phy_device *phydev) { mutex_lock(&phydev->mdio.bus->mdio_lock); } static inline void phy_unlock_mdio_bus(struct phy_device *phydev) { mutex_unlock(&phydev->mdio.bus->mdio_lock); } void phy_attached_print(struct phy_device *phydev, const char *fmt, ...) 
__printf(2, 3); char *phy_attached_info_irq(struct phy_device *phydev) __malloc; void phy_attached_info(struct phy_device *phydev); int genphy_match_phy_device(struct phy_device *phydev, const struct phy_driver *phydrv); /* Clause 22 PHY */ int genphy_read_abilities(struct phy_device *phydev); int genphy_setup_forced(struct phy_device *phydev); int genphy_restart_aneg(struct phy_device *phydev); int genphy_check_and_restart_aneg(struct phy_device *phydev, bool restart); int __genphy_config_aneg(struct phy_device *phydev, bool changed); int genphy_aneg_done(struct phy_device *phydev); int genphy_update_link(struct phy_device *phydev); int genphy_read_lpa(struct phy_device *phydev); int genphy_read_status_fixed(struct phy_device *phydev); int genphy_read_status(struct phy_device *phydev); int genphy_read_master_slave(struct phy_device *phydev); int genphy_suspend(struct phy_device *phydev); int genphy_resume(struct phy_device *phydev); int genphy_loopback(struct phy_device *phydev, bool enable, int speed); int genphy_soft_reset(struct phy_device *phydev); irqreturn_t genphy_handle_interrupt_no_ack(struct phy_device *phydev); static inline int genphy_config_aneg(struct phy_device *phydev) { return __genphy_config_aneg(phydev, false); } static inline int genphy_no_config_intr(struct phy_device *phydev) { return 0; } int genphy_read_mmd_unsupported(struct phy_device *phdev, int devad, u16 regnum); int genphy_write_mmd_unsupported(struct phy_device *phdev, int devnum, u16 regnum, u16 val); /* Clause 37 */ int genphy_c37_config_aneg(struct phy_device *phydev); int genphy_c37_read_status(struct phy_device *phydev, bool *changed); /* Clause 45 PHY */ int genphy_c45_restart_aneg(struct phy_device *phydev); int genphy_c45_check_and_restart_aneg(struct phy_device *phydev, bool restart); int genphy_c45_aneg_done(struct phy_device *phydev); int genphy_c45_read_link(struct phy_device *phydev); int genphy_c45_read_lpa(struct phy_device *phydev); int genphy_c45_read_pma(struct phy_device *phydev); int genphy_c45_pma_setup_forced(struct phy_device *phydev); int genphy_c45_pma_baset1_setup_master_slave(struct phy_device *phydev); int genphy_c45_an_config_aneg(struct phy_device *phydev); int genphy_c45_an_disable_aneg(struct phy_device *phydev); int genphy_c45_read_mdix(struct phy_device *phydev); int genphy_c45_pma_read_abilities(struct phy_device *phydev); int genphy_c45_pma_read_ext_abilities(struct phy_device *phydev); int genphy_c45_pma_baset1_read_abilities(struct phy_device *phydev); int genphy_c45_read_eee_abilities(struct phy_device *phydev); int genphy_c45_pma_baset1_read_master_slave(struct phy_device *phydev); int genphy_c45_read_status(struct phy_device *phydev); int genphy_c45_baset1_read_status(struct phy_device *phydev); int genphy_c45_config_aneg(struct phy_device *phydev); int genphy_c45_loopback(struct phy_device *phydev, bool enable, int speed); int genphy_c45_pma_resume(struct phy_device *phydev); int genphy_c45_pma_suspend(struct phy_device *phydev); int genphy_c45_fast_retrain(struct phy_device *phydev, bool enable); int genphy_c45_plca_get_cfg(struct phy_device *phydev, struct phy_plca_cfg *plca_cfg); int genphy_c45_plca_set_cfg(struct phy_device *phydev, const struct phy_plca_cfg *plca_cfg); int genphy_c45_plca_get_status(struct phy_device *phydev, struct phy_plca_status *plca_st); int genphy_c45_eee_is_active(struct phy_device *phydev, unsigned long *lp); int genphy_c45_ethtool_get_eee(struct phy_device *phydev, struct ethtool_keee *data); int genphy_c45_ethtool_set_eee(struct 
phy_device *phydev, struct ethtool_keee *data); int genphy_c45_an_config_eee_aneg(struct phy_device *phydev); int genphy_c45_oatc14_cable_test_start(struct phy_device *phydev); int genphy_c45_oatc14_cable_test_get_status(struct phy_device *phydev, bool *finished); int genphy_c45_oatc14_get_sqi_max(struct phy_device *phydev); int genphy_c45_oatc14_get_sqi(struct phy_device *phydev); /* The gen10g_* functions are the old Clause 45 stub */ int gen10g_config_aneg(struct phy_device *phydev); static inline int phy_read_status(struct phy_device *phydev) { if (!phydev->drv) return -EIO; if (phydev->drv->read_status) return phydev->drv->read_status(phydev); else return genphy_read_status(phydev); } void phy_drivers_unregister(struct phy_driver *drv, int n); int phy_drivers_register(struct phy_driver *new_driver, int n, struct module *owner); void phy_error(struct phy_device *phydev); void phy_state_machine(struct work_struct *work); void phy_trigger_machine(struct phy_device *phydev); void phy_mac_interrupt(struct phy_device *phydev); void phy_start_machine(struct phy_device *phydev); void phy_stop_machine(struct phy_device *phydev); void phy_ethtool_ksettings_get(struct phy_device *phydev, struct ethtool_link_ksettings *cmd); int phy_ethtool_ksettings_set(struct phy_device *phydev, const struct ethtool_link_ksettings *cmd); int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd); int phy_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); int phy_do_ioctl_running(struct net_device *dev, struct ifreq *ifr, int cmd); int phy_disable_interrupts(struct phy_device *phydev); void phy_request_interrupt(struct phy_device *phydev); void phy_free_interrupt(struct phy_device *phydev); void phy_print_status(struct phy_device *phydev); int phy_get_rate_matching(struct phy_device *phydev, phy_interface_t iface); void phy_set_max_speed(struct phy_device *phydev, u32 max_speed); void phy_remove_link_mode(struct phy_device *phydev, u32 link_mode); void phy_advertise_supported(struct phy_device *phydev); void phy_advertise_eee_all(struct phy_device *phydev); void phy_support_sym_pause(struct phy_device *phydev); void phy_support_asym_pause(struct phy_device *phydev); void phy_support_eee(struct phy_device *phydev); void phy_disable_eee(struct phy_device *phydev); void phy_set_sym_pause(struct phy_device *phydev, bool rx, bool tx, bool autoneg); void phy_set_asym_pause(struct phy_device *phydev, bool rx, bool tx); bool phy_validate_pause(struct phy_device *phydev, struct ethtool_pauseparam *pp); void phy_get_pause(struct phy_device *phydev, bool *tx_pause, bool *rx_pause); s32 phy_get_internal_delay(struct phy_device *phydev, const int *delay_values, int size, bool is_rx); int phy_get_tx_amplitude_gain(struct phy_device *phydev, struct device *dev, enum ethtool_link_mode_bit_indices linkmode, u32 *val); int phy_get_mac_termination(struct phy_device *phydev, struct device *dev, u32 *val); void phy_resolve_pause(unsigned long *local_adv, unsigned long *partner_adv, bool *tx_pause, bool *rx_pause); int phy_register_fixup_for_id(const char *bus_id, int (*run)(struct phy_device *)); int phy_register_fixup_for_uid(u32 phy_uid, u32 phy_uid_mask, int (*run)(struct phy_device *)); int phy_unregister_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask); int phy_unregister_fixup_for_id(const char *bus_id); int phy_unregister_fixup_for_uid(u32 phy_uid, u32 phy_uid_mask); int phy_eee_tx_clock_stop_capable(struct phy_device *phydev); int phy_eee_rx_clock_stop(struct phy_device *phydev, bool 
clk_stop_enable); int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable); int phy_get_eee_err(struct phy_device *phydev); int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_keee *data); int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_keee *data); int phy_ethtool_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol); void phy_ethtool_get_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol); int phy_ethtool_get_link_ksettings(struct net_device *ndev, struct ethtool_link_ksettings *cmd); int phy_ethtool_set_link_ksettings(struct net_device *ndev, const struct ethtool_link_ksettings *cmd); int phy_ethtool_nway_reset(struct net_device *ndev); int phy_ethtool_get_strings(struct phy_device *phydev, u8 *data); int phy_ethtool_get_sset_count(struct phy_device *phydev); int phy_ethtool_get_stats(struct phy_device *phydev, struct ethtool_stats *stats, u64 *data); void __phy_ethtool_get_phy_stats(struct phy_device *phydev, struct ethtool_eth_phy_stats *phy_stats, struct ethtool_phy_stats *phydev_stats); void __phy_ethtool_get_link_ext_stats(struct phy_device *phydev, struct ethtool_link_ext_stats *link_stats); int phy_ethtool_get_plca_cfg(struct phy_device *phydev, struct phy_plca_cfg *plca_cfg); int phy_ethtool_set_plca_cfg(struct phy_device *phydev, const struct phy_plca_cfg *plca_cfg, struct netlink_ext_ack *extack); int phy_ethtool_get_plca_status(struct phy_device *phydev, struct phy_plca_status *plca_st); int __phy_hwtstamp_get(struct phy_device *phydev, struct kernel_hwtstamp_config *config); int __phy_hwtstamp_set(struct phy_device *phydev, struct kernel_hwtstamp_config *config, struct netlink_ext_ack *extack); extern const struct bus_type mdio_bus_type; extern const struct class mdio_bus_class; /** * phy_module_driver() - Helper macro for registering PHY drivers * @__phy_drivers: array of PHY drivers to register * @__count: Numbers of members in array * * Helper macro for PHY drivers which do not do anything special in module * init/exit. Each module may only use this macro once, and calling it * replaces module_init() and module_exit(). */ #define phy_module_driver(__phy_drivers, __count) \ static int __init phy_module_init(void) \ { \ return phy_drivers_register(__phy_drivers, __count, THIS_MODULE); \ } \ module_init(phy_module_init); \ static void __exit phy_module_exit(void) \ { \ phy_drivers_unregister(__phy_drivers, __count); \ } \ module_exit(phy_module_exit) #define module_phy_driver(__phy_drivers) \ phy_module_driver(__phy_drivers, ARRAY_SIZE(__phy_drivers)) #endif /* __PHY_H */
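As a usage sketch (not part of the header above): a minimal PHY driver module built entirely from the genphy_* helpers declared here and registered with module_phy_driver(). The PHY ID, mask and driver name are hypothetical placeholders, not a real device.

/* Hypothetical example: a minimal clause-22 PHY driver that relies on the
 * generic genphy_* implementations declared in this header. The ID below
 * is a placeholder.
 */
#include <linux/module.h>
#include <linux/phy.h>

static struct phy_driver example_phy_drivers[] = {
	{
		.phy_id		= 0x00112233,	/* hypothetical OUI/model */
		.phy_id_mask	= 0xfffffff0,
		.name		= "Example PHY",
		.config_aneg	= genphy_config_aneg,
		.read_status	= genphy_read_status,
		.suspend	= genphy_suspend,
		.resume		= genphy_resume,
	},
};

/* Expands to module_init()/module_exit() wrappers around
 * phy_drivers_register()/phy_drivers_unregister(), as documented for
 * phy_module_driver() above.
 */
module_phy_driver(example_phy_drivers);

MODULE_DESCRIPTION("Example PHY driver skeleton");
MODULE_LICENSE("GPL");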
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */ #include <linux/types.h> #include <linux/bpf.h> #include <linux/bpf_local_storage.h> #include <uapi/linux/btf.h> #include <linux/btf_ids.h> DEFINE_BPF_STORAGE_CACHE(cgroup_cache); static DEFINE_PER_CPU(int, bpf_cgrp_storage_busy); static void bpf_cgrp_storage_lock(void) { cant_migrate(); this_cpu_inc(bpf_cgrp_storage_busy); } static void bpf_cgrp_storage_unlock(void) { this_cpu_dec(bpf_cgrp_storage_busy); } static bool bpf_cgrp_storage_trylock(void) { cant_migrate(); if (unlikely(this_cpu_inc_return(bpf_cgrp_storage_busy) != 1)) { this_cpu_dec(bpf_cgrp_storage_busy); return false; } return true; } static struct bpf_local_storage __rcu **cgroup_storage_ptr(void *owner) { struct cgroup *cg = owner; return &cg->bpf_cgrp_storage; } void bpf_cgrp_storage_free(struct cgroup *cgroup) { struct bpf_local_storage *local_storage; rcu_read_lock_dont_migrate(); local_storage = rcu_dereference(cgroup->bpf_cgrp_storage); if (!local_storage) goto out; bpf_cgrp_storage_lock(); bpf_local_storage_destroy(local_storage); bpf_cgrp_storage_unlock(); out: rcu_read_unlock_migrate(); } static struct bpf_local_storage_data * cgroup_storage_lookup(struct cgroup *cgroup, struct bpf_map *map, bool cacheit_lockit) { struct bpf_local_storage *cgroup_storage; struct bpf_local_storage_map *smap; cgroup_storage = rcu_dereference_check(cgroup->bpf_cgrp_storage, bpf_rcu_lock_held()); if (!cgroup_storage) return NULL; smap = (struct bpf_local_storage_map *)map; return bpf_local_storage_lookup(cgroup_storage, smap, cacheit_lockit); } static void *bpf_cgrp_storage_lookup_elem(struct bpf_map *map, void *key) { struct bpf_local_storage_data *sdata; struct cgroup *cgroup; int fd; fd = *(int *)key; cgroup = cgroup_v1v2_get_from_fd(fd); if (IS_ERR(cgroup)) return ERR_CAST(cgroup); bpf_cgrp_storage_lock(); sdata = cgroup_storage_lookup(cgroup, map, true); bpf_cgrp_storage_unlock(); cgroup_put(cgroup); return sdata ?
sdata->data : NULL; } static long bpf_cgrp_storage_update_elem(struct bpf_map *map, void *key, void *value, u64 map_flags) { struct bpf_local_storage_data *sdata; struct cgroup *cgroup; int fd; fd = *(int *)key; cgroup = cgroup_v1v2_get_from_fd(fd); if (IS_ERR(cgroup)) return PTR_ERR(cgroup); bpf_cgrp_storage_lock(); sdata = bpf_local_storage_update(cgroup, (struct bpf_local_storage_map *)map, value, map_flags, false, GFP_ATOMIC); bpf_cgrp_storage_unlock(); cgroup_put(cgroup); return PTR_ERR_OR_ZERO(sdata); } static int cgroup_storage_delete(struct cgroup *cgroup, struct bpf_map *map) { struct bpf_local_storage_data *sdata; sdata = cgroup_storage_lookup(cgroup, map, false); if (!sdata) return -ENOENT; bpf_selem_unlink(SELEM(sdata), false); return 0; } static long bpf_cgrp_storage_delete_elem(struct bpf_map *map, void *key) { struct cgroup *cgroup; int err, fd; fd = *(int *)key; cgroup = cgroup_v1v2_get_from_fd(fd); if (IS_ERR(cgroup)) return PTR_ERR(cgroup); bpf_cgrp_storage_lock(); err = cgroup_storage_delete(cgroup, map); bpf_cgrp_storage_unlock(); cgroup_put(cgroup); return err; } static int notsupp_get_next_key(struct bpf_map *map, void *key, void *next_key) { return -ENOTSUPP; } static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr) { return bpf_local_storage_map_alloc(attr, &cgroup_cache, true); } static void cgroup_storage_map_free(struct bpf_map *map) { bpf_local_storage_map_free(map, &cgroup_cache, &bpf_cgrp_storage_busy); } /* *gfp_flags* is a hidden argument provided by the verifier */ BPF_CALL_5(bpf_cgrp_storage_get, struct bpf_map *, map, struct cgroup *, cgroup, void *, value, u64, flags, gfp_t, gfp_flags) { struct bpf_local_storage_data *sdata; bool nobusy; WARN_ON_ONCE(!bpf_rcu_lock_held()); if (flags & ~(BPF_LOCAL_STORAGE_GET_F_CREATE)) return (unsigned long)NULL; if (!cgroup) return (unsigned long)NULL; nobusy = bpf_cgrp_storage_trylock(); sdata = cgroup_storage_lookup(cgroup, map, nobusy); if (sdata) goto unlock; /* only allocate new storage, when the cgroup is refcounted */ if (!percpu_ref_is_dying(&cgroup->self.refcnt) && (flags & BPF_LOCAL_STORAGE_GET_F_CREATE) && nobusy) sdata = bpf_local_storage_update(cgroup, (struct bpf_local_storage_map *)map, value, BPF_NOEXIST, false, gfp_flags); unlock: if (nobusy) bpf_cgrp_storage_unlock(); return IS_ERR_OR_NULL(sdata) ? 
(unsigned long)NULL : (unsigned long)sdata->data; } BPF_CALL_2(bpf_cgrp_storage_delete, struct bpf_map *, map, struct cgroup *, cgroup) { int ret; WARN_ON_ONCE(!bpf_rcu_lock_held()); if (!cgroup) return -EINVAL; if (!bpf_cgrp_storage_trylock()) return -EBUSY; ret = cgroup_storage_delete(cgroup, map); bpf_cgrp_storage_unlock(); return ret; } const struct bpf_map_ops cgrp_storage_map_ops = { .map_meta_equal = bpf_map_meta_equal, .map_alloc_check = bpf_local_storage_map_alloc_check, .map_alloc = cgroup_storage_map_alloc, .map_free = cgroup_storage_map_free, .map_get_next_key = notsupp_get_next_key, .map_lookup_elem = bpf_cgrp_storage_lookup_elem, .map_update_elem = bpf_cgrp_storage_update_elem, .map_delete_elem = bpf_cgrp_storage_delete_elem, .map_check_btf = bpf_local_storage_map_check_btf, .map_mem_usage = bpf_local_storage_map_mem_usage, .map_btf_id = &bpf_local_storage_map_btf_id[0], .map_owner_storage_ptr = cgroup_storage_ptr, }; const struct bpf_func_proto bpf_cgrp_storage_get_proto = { .func = bpf_cgrp_storage_get, .gpl_only = false, .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL, .arg1_type = ARG_CONST_MAP_PTR, .arg2_type = ARG_PTR_TO_BTF_ID_OR_NULL, .arg2_btf_id = &bpf_cgroup_btf_id[0], .arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL, .arg4_type = ARG_ANYTHING, }; const struct bpf_func_proto bpf_cgrp_storage_delete_proto = { .func = bpf_cgrp_storage_delete, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_CONST_MAP_PTR, .arg2_type = ARG_PTR_TO_BTF_ID_OR_NULL, .arg2_btf_id = &bpf_cgroup_btf_id[0], };
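For context, a sketch of the BPF-program side that these map ops and helper protos serve. It follows the common libbpf selftest pattern for BPF_MAP_TYPE_CGRP_STORAGE; the hook point, map name and value layout are illustrative assumptions.

/* Sketch of a BPF program using cgroup local storage, matching the
 * bpf_cgrp_storage_get()/bpf_cgrp_storage_delete() protos above.
 * The tracepoint and counter layout are illustrative assumptions.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

struct {
	__uint(type, BPF_MAP_TYPE_CGRP_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, long);
} cgrp_counter SEC(".maps");

SEC("tp_btf/sys_enter")
int BPF_PROG(count_syscalls)
{
	struct task_struct *task = bpf_get_current_task_btf();
	long *val;

	/* Create-on-miss lookup keyed by the task's default cgroup */
	val = bpf_cgrp_storage_get(&cgrp_counter, task->cgroups->dfl_cgrp, 0,
				   BPF_LOCAL_STORAGE_GET_F_CREATE);
	if (val)
		__sync_fetch_and_add(val, 1);
	return 0;
}

char _license[] SEC("license") = "GPL";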
// SPDX-License-Identifier: GPL-2.0-or-later /* * * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk) */ #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/kernel.h> #include <linux/timer.h> #include <linux/string.h> #include <linux/sockios.h> #include <linux/net.h> #include <linux/slab.h> #include <net/ax25.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <net/sock.h> #include <net/tcp_states.h> #include <linux/fcntl.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <net/rose.h> static int rose_create_facilities(unsigned char *buffer, struct rose_sock *rose); /* * This routine purges all of the queues of frames. */ void rose_clear_queues(struct sock *sk) { skb_queue_purge(&sk->sk_write_queue); skb_queue_purge(&rose_sk(sk)->ack_queue); } /* * This routine purges the input queue of those frames that have been * acknowledged. This replaces the boxes labelled "V(a) <- N(r)" on the * SDL diagram. */ void rose_frames_acked(struct sock *sk, unsigned short nr) { struct sk_buff *skb; struct rose_sock *rose = rose_sk(sk); /* * Remove all the ack-ed frames from the ack queue.
*/ if (rose->va != nr) { while (skb_peek(&rose->ack_queue) != NULL && rose->va != nr) { skb = skb_dequeue(&rose->ack_queue); kfree_skb(skb); rose->va = (rose->va + 1) % ROSE_MODULUS; } } } void rose_requeue_frames(struct sock *sk) { struct sk_buff *skb, *skb_prev = NULL; /* * Requeue all the un-ack-ed frames on the output queue to be picked * up by rose_kick. This arrangement handles the possibility of an * empty output queue. */ while ((skb = skb_dequeue(&rose_sk(sk)->ack_queue)) != NULL) { if (skb_prev == NULL) skb_queue_head(&sk->sk_write_queue, skb); else skb_append(skb_prev, skb, &sk->sk_write_queue); skb_prev = skb; } } /* * Validate that the value of nr is between va and vs. Return true or * false for testing. */ int rose_validate_nr(struct sock *sk, unsigned short nr) { struct rose_sock *rose = rose_sk(sk); unsigned short vc = rose->va; while (vc != rose->vs) { if (nr == vc) return 1; vc = (vc + 1) % ROSE_MODULUS; } return nr == rose->vs; } /* * This routine is called when the packet layer internally generates a * control frame. */ void rose_write_internal(struct sock *sk, int frametype) { struct rose_sock *rose = rose_sk(sk); struct sk_buff *skb; unsigned char *dptr; unsigned char lci1, lci2; int maxfaclen = 0; int len, faclen; int reserve; reserve = AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + 1; len = ROSE_MIN_LEN; switch (frametype) { case ROSE_CALL_REQUEST: len += 1 + ROSE_ADDR_LEN + ROSE_ADDR_LEN; maxfaclen = 256; break; case ROSE_CALL_ACCEPTED: case ROSE_CLEAR_REQUEST: case ROSE_RESET_REQUEST: len += 2; break; } skb = alloc_skb(reserve + len + maxfaclen, GFP_ATOMIC); if (!skb) return; /* * Space for AX.25 header and PID. */ skb_reserve(skb, reserve); dptr = skb_put(skb, len); lci1 = (rose->lci >> 8) & 0x0F; lci2 = (rose->lci >> 0) & 0xFF; switch (frametype) { case ROSE_CALL_REQUEST: *dptr++ = ROSE_GFI | lci1; *dptr++ = lci2; *dptr++ = frametype; *dptr++ = ROSE_CALL_REQ_ADDR_LEN_VAL; memcpy(dptr, &rose->dest_addr, ROSE_ADDR_LEN); dptr += ROSE_ADDR_LEN; memcpy(dptr, &rose->source_addr, ROSE_ADDR_LEN); dptr += ROSE_ADDR_LEN; faclen = rose_create_facilities(dptr, rose); skb_put(skb, faclen); dptr += faclen; break; case ROSE_CALL_ACCEPTED: *dptr++ = ROSE_GFI | lci1; *dptr++ = lci2; *dptr++ = frametype; *dptr++ = 0x00; /* Address length */ *dptr++ = 0; /* Facilities length */ break; case ROSE_CLEAR_REQUEST: *dptr++ = ROSE_GFI | lci1; *dptr++ = lci2; *dptr++ = frametype; *dptr++ = rose->cause; *dptr++ = rose->diagnostic; break; case ROSE_RESET_REQUEST: *dptr++ = ROSE_GFI | lci1; *dptr++ = lci2; *dptr++ = frametype; *dptr++ = ROSE_DTE_ORIGINATED; *dptr++ = 0; break; case ROSE_RR: case ROSE_RNR: *dptr++ = ROSE_GFI | lci1; *dptr++ = lci2; *dptr = frametype; *dptr++ |= (rose->vr << 5) & 0xE0; break; case ROSE_CLEAR_CONFIRMATION: case ROSE_RESET_CONFIRMATION: *dptr++ = ROSE_GFI | lci1; *dptr++ = lci2; *dptr++ = frametype; break; default: printk(KERN_ERR "ROSE: rose_write_internal - invalid frametype %02X\n", frametype); kfree_skb(skb); return; } rose_transmit_link(skb, rose->neighbour); } int rose_decode(struct sk_buff *skb, int *ns, int *nr, int *q, int *d, int *m) { unsigned char *frame; frame = skb->data; *ns = *nr = *q = *d = *m = 0; switch (frame[2]) { case ROSE_CALL_REQUEST: case ROSE_CALL_ACCEPTED: case ROSE_CLEAR_REQUEST: case ROSE_CLEAR_CONFIRMATION: case ROSE_RESET_REQUEST: case ROSE_RESET_CONFIRMATION: return frame[2]; default: break; } if ((frame[2] & 0x1F) == ROSE_RR || (frame[2] & 0x1F) == ROSE_RNR) { *nr = (frame[2] >> 5) & 0x07; return frame[2] & 0x1F; } if ((frame[2] & 
0x01) == ROSE_DATA) { *q = (frame[0] & ROSE_Q_BIT) == ROSE_Q_BIT; *d = (frame[0] & ROSE_D_BIT) == ROSE_D_BIT; *m = (frame[2] & ROSE_M_BIT) == ROSE_M_BIT; *nr = (frame[2] >> 5) & 0x07; *ns = (frame[2] >> 1) & 0x07; return ROSE_DATA; } return ROSE_ILLEGAL; } static int rose_parse_national(unsigned char *p, struct rose_facilities_struct *facilities, int len) { unsigned char *pt; unsigned char l, lg, n = 0; int fac_national_digis_received = 0; do { switch (*p & 0xC0) { case 0x00: if (len < 2) return -1; p += 2; n += 2; len -= 2; break; case 0x40: if (len < 3) return -1; if (*p == FAC_NATIONAL_RAND) facilities->rand = ((p[1] << 8) & 0xFF00) + ((p[2] << 0) & 0x00FF); p += 3; n += 3; len -= 3; break; case 0x80: if (len < 4) return -1; p += 4; n += 4; len -= 4; break; case 0xC0: if (len < 2) return -1; l = p[1]; if (len < 2 + l) return -1; if (*p == FAC_NATIONAL_DEST_DIGI) { if (!fac_national_digis_received) { if (l < AX25_ADDR_LEN) return -1; memcpy(&facilities->source_digis[0], p + 2, AX25_ADDR_LEN); facilities->source_ndigis = 1; } } else if (*p == FAC_NATIONAL_SRC_DIGI) { if (!fac_national_digis_received) { if (l < AX25_ADDR_LEN) return -1; memcpy(&facilities->dest_digis[0], p + 2, AX25_ADDR_LEN); facilities->dest_ndigis = 1; } } else if (*p == FAC_NATIONAL_FAIL_CALL) { if (l < AX25_ADDR_LEN) return -1; memcpy(&facilities->fail_call, p + 2, AX25_ADDR_LEN); } else if (*p == FAC_NATIONAL_FAIL_ADD) { if (l < 1 + ROSE_ADDR_LEN) return -1; memcpy(&facilities->fail_addr, p + 3, ROSE_ADDR_LEN); } else if (*p == FAC_NATIONAL_DIGIS) { if (l % AX25_ADDR_LEN) return -1; fac_national_digis_received = 1; facilities->source_ndigis = 0; facilities->dest_ndigis = 0; for (pt = p + 2, lg = 0 ; lg < l ; pt += AX25_ADDR_LEN, lg += AX25_ADDR_LEN) { if (pt[6] & AX25_HBIT) { if (facilities->dest_ndigis >= ROSE_MAX_DIGIS) return -1; memcpy(&facilities->dest_digis[facilities->dest_ndigis++], pt, AX25_ADDR_LEN); } else { if (facilities->source_ndigis >= ROSE_MAX_DIGIS) return -1; memcpy(&facilities->source_digis[facilities->source_ndigis++], pt, AX25_ADDR_LEN); } } } p += l + 2; n += l + 2; len -= l + 2; break; } } while (*p != 0x00 && len > 0); return n; } static int rose_parse_ccitt(unsigned char *p, struct rose_facilities_struct *facilities, int len) { unsigned char l, n = 0; char callsign[11]; do { switch (*p & 0xC0) { case 0x00: if (len < 2) return -1; p += 2; n += 2; len -= 2; break; case 0x40: if (len < 3) return -1; p += 3; n += 3; len -= 3; break; case 0x80: if (len < 4) return -1; p += 4; n += 4; len -= 4; break; case 0xC0: if (len < 2) return -1; l = p[1]; /* Prevent overflows*/ if (l < 10 || l > 20) return -1; if (*p == FAC_CCITT_DEST_NSAP) { memcpy(&facilities->source_addr, p + 7, ROSE_ADDR_LEN); memcpy(callsign, p + 12, l - 10); callsign[l - 10] = '\0'; asc2ax(&facilities->source_call, callsign); } if (*p == FAC_CCITT_SRC_NSAP) { memcpy(&facilities->dest_addr, p + 7, ROSE_ADDR_LEN); memcpy(callsign, p + 12, l - 10); callsign[l - 10] = '\0'; asc2ax(&facilities->dest_call, callsign); } p += l + 2; n += l + 2; len -= l + 2; break; } } while (*p != 0x00 && len > 0); return n; } int rose_parse_facilities(unsigned char *p, unsigned packet_len, struct rose_facilities_struct *facilities) { int facilities_len, len; facilities_len = *p++; if (facilities_len == 0 || (unsigned int)facilities_len > packet_len) return 0; while (facilities_len >= 3 && *p == 0x00) { facilities_len--; p++; switch (*p) { case FAC_NATIONAL: /* National */ len = rose_parse_national(p + 1, facilities, facilities_len - 1); break; case 
FAC_CCITT: /* CCITT */ len = rose_parse_ccitt(p + 1, facilities, facilities_len - 1); break; default: printk(KERN_DEBUG "ROSE: rose_parse_facilities - unknown facilities family %02X\n", *p); len = 1; break; } if (len < 0) return 0; if (WARN_ON(len >= facilities_len)) return 0; facilities_len -= len + 1; p += len + 1; } return facilities_len == 0; } static int rose_create_facilities(unsigned char *buffer, struct rose_sock *rose) { unsigned char *p = buffer + 1; char *callsign; char buf[11]; int len, nb; /* National Facilities */ if (rose->rand != 0 || rose->source_ndigis == 1 || rose->dest_ndigis == 1) { *p++ = 0x00; *p++ = FAC_NATIONAL; if (rose->rand != 0) { *p++ = FAC_NATIONAL_RAND; *p++ = (rose->rand >> 8) & 0xFF; *p++ = (rose->rand >> 0) & 0xFF; } /* Sent before older facilities */ if ((rose->source_ndigis > 0) || (rose->dest_ndigis > 0)) { int maxdigi = 0; *p++ = FAC_NATIONAL_DIGIS; *p++ = AX25_ADDR_LEN * (rose->source_ndigis + rose->dest_ndigis); for (nb = 0 ; nb < rose->source_ndigis ; nb++) { if (++maxdigi >= ROSE_MAX_DIGIS) break; memcpy(p, &rose->source_digis[nb], AX25_ADDR_LEN); p[6] |= AX25_HBIT; p += AX25_ADDR_LEN; } for (nb = 0 ; nb < rose->dest_ndigis ; nb++) { if (++maxdigi >= ROSE_MAX_DIGIS) break; memcpy(p, &rose->dest_digis[nb], AX25_ADDR_LEN); p[6] &= ~AX25_HBIT; p += AX25_ADDR_LEN; } } /* For compatibility */ if (rose->source_ndigis > 0) { *p++ = FAC_NATIONAL_SRC_DIGI; *p++ = AX25_ADDR_LEN; memcpy(p, &rose->source_digis[0], AX25_ADDR_LEN); p += AX25_ADDR_LEN; } /* For compatibility */ if (rose->dest_ndigis > 0) { *p++ = FAC_NATIONAL_DEST_DIGI; *p++ = AX25_ADDR_LEN; memcpy(p, &rose->dest_digis[0], AX25_ADDR_LEN); p += AX25_ADDR_LEN; } } *p++ = 0x00; *p++ = FAC_CCITT; *p++ = FAC_CCITT_DEST_NSAP; callsign = ax2asc(buf, &rose->dest_call); *p++ = strlen(callsign) + 10; *p++ = (strlen(callsign) + 9) * 2; /* ??? */ *p++ = 0x47; *p++ = 0x00; *p++ = 0x11; *p++ = ROSE_ADDR_LEN * 2; memcpy(p, &rose->dest_addr, ROSE_ADDR_LEN); p += ROSE_ADDR_LEN; memcpy(p, callsign, strlen(callsign)); p += strlen(callsign); *p++ = FAC_CCITT_SRC_NSAP; callsign = ax2asc(buf, &rose->source_call); *p++ = strlen(callsign) + 10; *p++ = (strlen(callsign) + 9) * 2; /* ??? */ *p++ = 0x47; *p++ = 0x00; *p++ = 0x11; *p++ = ROSE_ADDR_LEN * 2; memcpy(p, &rose->source_addr, ROSE_ADDR_LEN); p += ROSE_ADDR_LEN; memcpy(p, callsign, strlen(callsign)); p += strlen(callsign); len = p - buffer; buffer[0] = len - 1; return len; } void rose_disconnect(struct sock *sk, int reason, int cause, int diagnostic) { struct rose_sock *rose = rose_sk(sk); rose_stop_timer(sk); rose_stop_idletimer(sk); rose_clear_queues(sk); rose->lci = 0; rose->state = ROSE_STATE_0; if (cause != -1) rose->cause = cause; if (diagnostic != -1) rose->diagnostic = diagnostic; sk->sk_state = TCP_CLOSE; sk->sk_err = reason; sk->sk_shutdown |= SEND_SHUTDOWN; if (!sock_flag(sk, SOCK_DEAD)) { sk->sk_state_change(sk); sock_set_flag(sk, SOCK_DEAD); } }
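The header layout that rose_decode() picks apart above can be illustrated with a small standalone decoder. The numeric masks below mirror the shifts used in rose_decode() and are meant as an illustration of the X.25-style bit positions; treat the concrete values as assumptions rather than authoritative kernel constants.

/* Standalone illustration of the data-packet header fields extracted by
 * rose_decode():
 *   frame[0] = GFI | (LCI >> 8), with the Q and D bits in the top bits
 *   frame[1] = LCI & 0xff
 *   frame[2] = N(R) << 5 | M << 4 | N(S) << 1 | 0   (bit 0 clear => DATA)
 * User-space sketch, not kernel code.
 */
#include <stdio.h>

struct rose_hdr_fields {
	int q, d, m, nr, ns;
};

static struct rose_hdr_fields decode_data_hdr(const unsigned char frame[3])
{
	struct rose_hdr_fields f;

	f.q  = (frame[0] & 0x80) != 0;	/* Q bit (assumed ROSE_Q_BIT) */
	f.d  = (frame[0] & 0x40) != 0;	/* D bit (assumed ROSE_D_BIT) */
	f.nr = (frame[2] >> 5) & 0x07;	/* receive sequence number N(R) */
	f.m  = (frame[2] & 0x10) != 0;	/* M bit: more data follows */
	f.ns = (frame[2] >> 1) & 0x07;	/* send sequence number N(S) */
	return f;
}

int main(void)
{
	/* LCI 0x123, N(R)=2, N(S)=5, M set: frame[2] = 2<<5 | 1<<4 | 5<<1 */
	unsigned char frame[3] = { 0x11, 0x23, 0x5a };
	struct rose_hdr_fields f = decode_data_hdr(frame);

	printf("q=%d d=%d nr=%d m=%d ns=%d\n", f.q, f.d, f.nr, f.m, f.ns);
	return 0;
}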
// SPDX-License-Identifier: GPL-2.0-or-later /* SCTP kernel implementation * (C) Copyright Red Hat Inc. 2022 * * This file is part of the SCTP kernel implementation * * These functions manipulate sctp stream queue/scheduling. * * Please send any bug reports or fixes you make to the * email address(es): * lksctp developers <linux-sctp@vger.kernel.org> * * Written or modified by: * Xin Long <lucien.xin@gmail.com> */ #include <linux/list.h> #include <net/sctp/sctp.h> #include <net/sctp/sm.h> #include <net/sctp/stream_sched.h> /* Fair Capacity and Weighted Fair Queueing handling * RFC 8260 section 3.5 and 3.6 */ static void sctp_sched_fc_unsched_all(struct sctp_stream *stream); static int sctp_sched_wfq_set(struct sctp_stream *stream, __u16 sid, __u16 weight, gfp_t gfp) { struct sctp_stream_out_ext *soute = SCTP_SO(stream, sid)->ext; if (!weight) return -EINVAL; soute->fc_weight = weight; return 0; } static int sctp_sched_wfq_get(struct sctp_stream *stream, __u16 sid, __u16 *value) { struct sctp_stream_out_ext *soute = SCTP_SO(stream, sid)->ext; *value = soute->fc_weight; return 0; } static int sctp_sched_fc_set(struct sctp_stream *stream, __u16 sid, __u16 weight, gfp_t gfp) { return 0; } static int sctp_sched_fc_get(struct sctp_stream *stream, __u16 sid, __u16 *value) { return 0; } static int sctp_sched_fc_init(struct sctp_stream *stream) { INIT_LIST_HEAD(&stream->fc_list); return 0; } static int sctp_sched_fc_init_sid(struct sctp_stream *stream, __u16 sid, gfp_t gfp) { struct sctp_stream_out_ext *soute = SCTP_SO(stream, sid)->ext; INIT_LIST_HEAD(&soute->fc_list); soute->fc_length = 0; soute->fc_weight = 1; return 0; } static void sctp_sched_fc_free_sid(struct sctp_stream *stream, __u16 sid) { } static void sctp_sched_fc_sched(struct sctp_stream *stream, struct sctp_stream_out_ext *soute) { struct sctp_stream_out_ext *pos; if (!list_empty(&soute->fc_list)) return; list_for_each_entry(pos, &stream->fc_list, fc_list) if ((__u64)pos->fc_length * soute->fc_weight >= (__u64)soute->fc_length * pos->fc_weight) break; list_add_tail(&soute->fc_list, &pos->fc_list); } static void sctp_sched_fc_enqueue(struct sctp_outq *q, struct sctp_datamsg *msg) { struct sctp_stream *stream; struct sctp_chunk *ch; __u16 sid; ch = list_first_entry(&msg->chunks, struct sctp_chunk, frag_list); sid = sctp_chunk_stream_no(ch); stream = &q->asoc->stream; sctp_sched_fc_sched(stream, SCTP_SO(stream, sid)->ext); } static struct sctp_chunk *sctp_sched_fc_dequeue(struct sctp_outq *q) { struct sctp_stream *stream = &q->asoc->stream; struct sctp_stream_out_ext *soute; struct sctp_chunk *ch; /* Bail out quickly if queue is empty */ if (list_empty(&q->out_chunk_list)) return NULL; /* Find which
chunk is next */ if (stream->out_curr) soute = stream->out_curr->ext; else soute = list_entry(stream->fc_list.next, struct sctp_stream_out_ext, fc_list); ch = list_entry(soute->outq.next, struct sctp_chunk, stream_list); sctp_sched_dequeue_common(q, ch); return ch; } static void sctp_sched_fc_dequeue_done(struct sctp_outq *q, struct sctp_chunk *ch) { struct sctp_stream *stream = &q->asoc->stream; struct sctp_stream_out_ext *soute, *pos; __u16 sid, i; sid = sctp_chunk_stream_no(ch); soute = SCTP_SO(stream, sid)->ext; /* reduce all fc_lengths by U32_MAX / 4 if the current fc_length overflows. */ if (soute->fc_length > U32_MAX - ch->skb->len) { for (i = 0; i < stream->outcnt; i++) { pos = SCTP_SO(stream, i)->ext; if (!pos) continue; if (pos->fc_length <= (U32_MAX >> 2)) { pos->fc_length = 0; continue; } pos->fc_length -= (U32_MAX >> 2); } } soute->fc_length += ch->skb->len; if (list_empty(&soute->outq)) { list_del_init(&soute->fc_list); return; } pos = soute; list_for_each_entry_continue(pos, &stream->fc_list, fc_list) if ((__u64)pos->fc_length * soute->fc_weight >= (__u64)soute->fc_length * pos->fc_weight) break; list_move_tail(&soute->fc_list, &pos->fc_list); } static void sctp_sched_fc_sched_all(struct sctp_stream *stream) { struct sctp_association *asoc; struct sctp_chunk *ch; asoc = container_of(stream, struct sctp_association, stream); list_for_each_entry(ch, &asoc->outqueue.out_chunk_list, list) { __u16 sid = sctp_chunk_stream_no(ch); if (SCTP_SO(stream, sid)->ext) sctp_sched_fc_sched(stream, SCTP_SO(stream, sid)->ext); } } static void sctp_sched_fc_unsched_all(struct sctp_stream *stream) { struct sctp_stream_out_ext *soute, *tmp; list_for_each_entry_safe(soute, tmp, &stream->fc_list, fc_list) list_del_init(&soute->fc_list); } static const struct sctp_sched_ops sctp_sched_fc = { .set = sctp_sched_fc_set, .get = sctp_sched_fc_get, .init = sctp_sched_fc_init, .init_sid = sctp_sched_fc_init_sid, .free_sid = sctp_sched_fc_free_sid, .enqueue = sctp_sched_fc_enqueue, .dequeue = sctp_sched_fc_dequeue, .dequeue_done = sctp_sched_fc_dequeue_done, .sched_all = sctp_sched_fc_sched_all, .unsched_all = sctp_sched_fc_unsched_all, }; void sctp_sched_ops_fc_init(void) { sctp_sched_ops_register(SCTP_SS_FC, &sctp_sched_fc); } static const struct sctp_sched_ops sctp_sched_wfq = { .set = sctp_sched_wfq_set, .get = sctp_sched_wfq_get, .init = sctp_sched_fc_init, .init_sid = sctp_sched_fc_init_sid, .free_sid = sctp_sched_fc_free_sid, .enqueue = sctp_sched_fc_enqueue, .dequeue = sctp_sched_fc_dequeue, .dequeue_done = sctp_sched_fc_dequeue_done, .sched_all = sctp_sched_fc_sched_all, .unsched_all = sctp_sched_fc_unsched_all, }; void sctp_sched_ops_wfq_init(void) { sctp_sched_ops_register(SCTP_SS_WFQ, &sctp_sched_wfq); }
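A user-space sketch of how the schedulers registered above would be selected by an application. It assumes the SCTP_STREAM_SCHEDULER and SCTP_STREAM_SCHEDULER_VALUE socket options and struct sctp_stream_value from the SCTP uapi; whether the installed libc/lksctp headers expose SCTP_SS_WFQ depends on their version.

/* Select the weighted fair queueing scheduler and give one stream a
 * larger weight (which becomes soute->fc_weight in the code above).
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <netinet/in.h>
#include <netinet/sctp.h>
#include <sys/socket.h>

static int enable_wfq(int sd, uint16_t stream_id, uint16_t weight)
{
	struct sctp_assoc_value av;
	struct sctp_stream_value sv;

	memset(&av, 0, sizeof(av));
	av.assoc_value = SCTP_SS_WFQ;	/* scheduler registered above */
	if (setsockopt(sd, IPPROTO_SCTP, SCTP_STREAM_SCHEDULER,
		       &av, sizeof(av)) < 0) {
		perror("SCTP_STREAM_SCHEDULER");
		return -1;
	}

	memset(&sv, 0, sizeof(sv));
	sv.stream_id = stream_id;
	sv.stream_value = weight;	/* per-stream WFQ weight */
	if (setsockopt(sd, IPPROTO_SCTP, SCTP_STREAM_SCHEDULER_VALUE,
		       &sv, sizeof(sv)) < 0) {
		perror("SCTP_STREAM_SCHEDULER_VALUE");
		return -1;
	}
	return 0;
}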
/* * Copyright (c) 2006, 2017 Oracle and/or its affiliates. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE.
* */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/in.h> #include <linux/ipv6.h> #include "rds.h" #include "loop.h" static char * const rds_trans_modules[] = { [RDS_TRANS_IB] = "rds_rdma", [RDS_TRANS_GAP] = NULL, [RDS_TRANS_TCP] = "rds_tcp", }; static struct rds_transport *transports[RDS_TRANS_COUNT]; static DECLARE_RWSEM(rds_trans_sem); void rds_trans_register(struct rds_transport *trans) { BUG_ON(strlen(trans->t_name) + 1 > TRANSNAMSIZ); down_write(&rds_trans_sem); if (transports[trans->t_type]) printk(KERN_ERR "RDS Transport type %d already registered\n", trans->t_type); else { transports[trans->t_type] = trans; printk(KERN_INFO "Registered RDS/%s transport\n", trans->t_name); } up_write(&rds_trans_sem); } EXPORT_SYMBOL_GPL(rds_trans_register); void rds_trans_unregister(struct rds_transport *trans) { down_write(&rds_trans_sem); transports[trans->t_type] = NULL; printk(KERN_INFO "Unregistered RDS/%s transport\n", trans->t_name); up_write(&rds_trans_sem); } EXPORT_SYMBOL_GPL(rds_trans_unregister); void rds_trans_put(struct rds_transport *trans) { if (trans) module_put(trans->t_owner); } struct rds_transport *rds_trans_get_preferred(struct net *net, const struct in6_addr *addr, __u32 scope_id) { struct rds_transport *ret = NULL; struct rds_transport *trans; unsigned int i; if (ipv6_addr_v4mapped(addr)) { if (*(u_int8_t *)&addr->s6_addr32[3] == IN_LOOPBACKNET) return &rds_loop_transport; } else if (ipv6_addr_loopback(addr)) { return &rds_loop_transport; } down_read(&rds_trans_sem); for (i = 0; i < RDS_TRANS_COUNT; i++) { trans = transports[i]; if (trans && (trans->laddr_check(net, addr, scope_id) == 0) && (!trans->t_owner || try_module_get(trans->t_owner))) { ret = trans; break; } } up_read(&rds_trans_sem); return ret; } struct rds_transport *rds_trans_get(int t_type) { struct rds_transport *ret = NULL; struct rds_transport *trans; down_read(&rds_trans_sem); trans = transports[t_type]; if (!trans) { up_read(&rds_trans_sem); if (rds_trans_modules[t_type]) request_module(rds_trans_modules[t_type]); down_read(&rds_trans_sem); trans = transports[t_type]; } if (trans && trans->t_type == t_type && (!trans->t_owner || try_module_get(trans->t_owner))) ret = trans; up_read(&rds_trans_sem); return ret; } /* * This returns the number of stats entries in the snapshot and only * copies them using the iter if there is enough space for them. The * caller passes in the global stats so that we can size and copy while * holding the lock. */ unsigned int rds_trans_stats_info_copy(struct rds_info_iterator *iter, unsigned int avail) { struct rds_transport *trans; unsigned int total = 0; unsigned int part; int i; rds_info_iter_unmap(iter); down_read(&rds_trans_sem); for (i = 0; i < RDS_TRANS_COUNT; i++) { trans = transports[i]; if (!trans || !trans->stats_info_copy) continue; part = trans->stats_info_copy(iter, avail); avail -= min(avail, part); total += part; } up_read(&rds_trans_sem); return total; }
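A sketch of how a hypothetical transport module would plug into the registration API above. Only the struct rds_transport fields referenced in this file (t_name, t_type, t_owner, laddr_check) are filled in; the transport name, slot and laddr_check body are placeholders, and a real transport would claim its own RDS_TRANS_* index and many more operations.

/* Hypothetical RDS transport module using rds_trans_register() and
 * rds_trans_unregister() from the code above.
 */
#include <linux/module.h>
#include "rds.h"

static int example_laddr_check(struct net *net, const struct in6_addr *addr,
			       __u32 scope_id)
{
	/* Report whether this transport can carry traffic for @addr;
	 * this placeholder never claims an address.
	 */
	return -EADDRNOTAVAIL;
}

static struct rds_transport example_transport = {
	.t_name		= "example",
	.t_type		= RDS_TRANS_TCP,	/* placeholder slot for illustration */
	.t_owner	= THIS_MODULE,
	.laddr_check	= example_laddr_check,
};

static int __init example_rds_init(void)
{
	rds_trans_register(&example_transport);
	return 0;
}

static void __exit example_rds_exit(void)
{
	rds_trans_unregister(&example_transport);
}

module_init(example_rds_init);
module_exit(example_rds_exit);
MODULE_LICENSE("GPL");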
// SPDX-License-Identifier: GPL-2.0-only /* * linux/fs/hpfs/super.c * * Mikulas Patocka (mikulas@artax.karlin.mff.cuni.cz), 1998-1999 * * mounting, unmounting, error handling */ #include "hpfs_fn.h" #include <linux/module.h> #include <linux/fs_struct.h> #include <linux/fs_context.h> #include <linux/fs_parser.h> #include <linux/init.h> #include <linux/statfs.h> #include <linux/magic.h> #include <linux/sched.h> #include <linux/bitmap.h> #include <linux/slab.h>
#include <linux/seq_file.h> /* Mark the filesystem dirty, so that chkdsk checks it when os/2 booted */ static void mark_dirty(struct super_block *s, int remount) { if (hpfs_sb(s)->sb_chkdsk && (remount || !sb_rdonly(s))) { struct buffer_head *bh; struct hpfs_spare_block *sb; if ((sb = hpfs_map_sector(s, 17, &bh, 0))) { sb->dirty = 1; sb->old_wrote = 0; mark_buffer_dirty(bh); sync_dirty_buffer(bh); brelse(bh); } } } /* Mark the filesystem clean (mark it dirty for chkdsk if chkdsk==2 or if there were errors) */ static void unmark_dirty(struct super_block *s) { struct buffer_head *bh; struct hpfs_spare_block *sb; if (sb_rdonly(s)) return; sync_blockdev(s->s_bdev); if ((sb = hpfs_map_sector(s, 17, &bh, 0))) { sb->dirty = hpfs_sb(s)->sb_chkdsk > 1 - hpfs_sb(s)->sb_was_error; sb->old_wrote = hpfs_sb(s)->sb_chkdsk >= 2 && !hpfs_sb(s)->sb_was_error; mark_buffer_dirty(bh); sync_dirty_buffer(bh); brelse(bh); } } /* Filesystem error... */ void hpfs_error(struct super_block *s, const char *fmt, ...) { struct va_format vaf; va_list args; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; pr_err("filesystem error: %pV", &vaf); va_end(args); if (!hpfs_sb(s)->sb_was_error) { if (hpfs_sb(s)->sb_err == 2) { pr_cont("; crashing the system because you wanted it\n"); mark_dirty(s, 0); panic("HPFS panic"); } else if (hpfs_sb(s)->sb_err == 1) { if (sb_rdonly(s)) pr_cont("; already mounted read-only\n"); else { pr_cont("; remounting read-only\n"); mark_dirty(s, 0); s->s_flags |= SB_RDONLY; } } else if (sb_rdonly(s)) pr_cont("; going on - but anything won't be destroyed because it's read-only\n"); else pr_cont("; corrupted filesystem mounted read/write - your computer will explode within 20 seconds ... but you wanted it so!\n"); } else pr_cont("\n"); hpfs_sb(s)->sb_was_error = 1; } /* * A little trick to detect cycles in many hpfs structures and don't let the * kernel crash on corrupted filesystem. When first called, set c2 to 0. * * BTW. chkdsk doesn't detect cycles correctly. When I had 2 lost directories * nested each in other, chkdsk locked up happilly. 
*/ int hpfs_stop_cycles(struct super_block *s, int key, int *c1, int *c2, char *msg) { if (*c2 && *c1 == key) { hpfs_error(s, "cycle detected on key %08x in %s", key, msg); return 1; } (*c2)++; if (!((*c2 - 1) & *c2)) *c1 = key; return 0; } static void free_sbi(struct hpfs_sb_info *sbi) { kfree(sbi->sb_cp_table); kfree(sbi->sb_bmp_dir); kfree(sbi); } static void lazy_free_sbi(struct rcu_head *rcu) { free_sbi(container_of(rcu, struct hpfs_sb_info, rcu)); } static void hpfs_put_super(struct super_block *s) { hpfs_lock(s); unmark_dirty(s); hpfs_unlock(s); call_rcu(&hpfs_sb(s)->rcu, lazy_free_sbi); } static unsigned hpfs_count_one_bitmap(struct super_block *s, secno secno) { struct quad_buffer_head qbh; unsigned long *bits; unsigned count; bits = hpfs_map_4sectors(s, secno, &qbh, 0); if (!bits) return (unsigned)-1; count = bitmap_weight(bits, 2048 * BITS_PER_BYTE); hpfs_brelse4(&qbh); return count; } static unsigned count_bitmaps(struct super_block *s) { unsigned n, count, n_bands; n_bands = (hpfs_sb(s)->sb_fs_size + 0x3fff) >> 14; count = 0; for (n = 0; n < COUNT_RD_AHEAD; n++) { hpfs_prefetch_bitmap(s, n); } for (n = 0; n < n_bands; n++) { unsigned c; hpfs_prefetch_bitmap(s, n + COUNT_RD_AHEAD); c = hpfs_count_one_bitmap(s, le32_to_cpu(hpfs_sb(s)->sb_bmp_dir[n])); if (c != (unsigned)-1) count += c; } return count; } unsigned hpfs_get_free_dnodes(struct super_block *s) { struct hpfs_sb_info *sbi = hpfs_sb(s); if (sbi->sb_n_free_dnodes == (unsigned)-1) { unsigned c = hpfs_count_one_bitmap(s, sbi->sb_dmap); if (c == (unsigned)-1) return 0; sbi->sb_n_free_dnodes = c; } return sbi->sb_n_free_dnodes; } static int hpfs_statfs(struct dentry *dentry, struct kstatfs *buf) { struct super_block *s = dentry->d_sb; struct hpfs_sb_info *sbi = hpfs_sb(s); u64 id = huge_encode_dev(s->s_bdev->bd_dev); hpfs_lock(s); if (sbi->sb_n_free == (unsigned)-1) sbi->sb_n_free = count_bitmaps(s); buf->f_type = s->s_magic; buf->f_bsize = 512; buf->f_blocks = sbi->sb_fs_size; buf->f_bfree = sbi->sb_n_free; buf->f_bavail = sbi->sb_n_free; buf->f_files = sbi->sb_dirband_size / 4; buf->f_ffree = hpfs_get_free_dnodes(s); buf->f_fsid = u64_to_fsid(id); buf->f_namelen = 254; hpfs_unlock(s); return 0; } long hpfs_ioctl(struct file *file, unsigned cmd, unsigned long arg) { switch (cmd) { case FITRIM: { struct fstrim_range range; secno n_trimmed; int r; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (copy_from_user(&range, (struct fstrim_range __user *)arg, sizeof(range))) return -EFAULT; r = hpfs_trim_fs(file_inode(file)->i_sb, range.start >> 9, (range.start + range.len) >> 9, (range.minlen + 511) >> 9, &n_trimmed); if (r) return r; range.len = (u64)n_trimmed << 9; if (copy_to_user((struct fstrim_range __user *)arg, &range, sizeof(range))) return -EFAULT; return 0; } default: { return -ENOIOCTLCMD; } } } static struct kmem_cache * hpfs_inode_cachep; static struct inode *hpfs_alloc_inode(struct super_block *sb) { struct hpfs_inode_info *ei; ei = alloc_inode_sb(sb, hpfs_inode_cachep, GFP_NOFS); if (!ei) return NULL; return &ei->vfs_inode; } static void hpfs_free_inode(struct inode *inode) { kmem_cache_free(hpfs_inode_cachep, hpfs_i(inode)); } static void init_once(void *foo) { struct hpfs_inode_info *ei = (struct hpfs_inode_info *) foo; inode_init_once(&ei->vfs_inode); } static int init_inodecache(void) { hpfs_inode_cachep = kmem_cache_create("hpfs_inode_cache", sizeof(struct hpfs_inode_info), 0, (SLAB_RECLAIM_ACCOUNT| SLAB_ACCOUNT), init_once); if (hpfs_inode_cachep == NULL) return -ENOMEM; return 0; } static void 
destroy_inodecache(void) { /* * Make sure all delayed rcu free inodes are flushed before we * destroy cache. */ rcu_barrier(); kmem_cache_destroy(hpfs_inode_cachep); } enum { Opt_help, Opt_uid, Opt_gid, Opt_umask, Opt_case, Opt_check, Opt_err, Opt_eas, Opt_chkdsk, Opt_timeshift, }; static const struct constant_table hpfs_param_case[] = { {"asis", 0}, {"lower", 1}, {} }; static const struct constant_table hpfs_param_check[] = { {"none", 0}, {"normal", 1}, {"strict", 2}, {} }; static const struct constant_table hpfs_param_err[] = { {"continue", 0}, {"remount-ro", 1}, {"panic", 2}, {} }; static const struct constant_table hpfs_param_eas[] = { {"no", 0}, {"ro", 1}, {"rw", 2}, {} }; static const struct constant_table hpfs_param_chkdsk[] = { {"no", 0}, {"errors", 1}, {"always", 2}, {} }; static const struct fs_parameter_spec hpfs_param_spec[] = { fsparam_flag ("help", Opt_help), fsparam_uid ("uid", Opt_uid), fsparam_gid ("gid", Opt_gid), fsparam_u32oct ("umask", Opt_umask), fsparam_enum ("case", Opt_case, hpfs_param_case), fsparam_enum ("check", Opt_check, hpfs_param_check), fsparam_enum ("errors", Opt_err, hpfs_param_err), fsparam_enum ("eas", Opt_eas, hpfs_param_eas), fsparam_enum ("chkdsk", Opt_chkdsk, hpfs_param_chkdsk), fsparam_s32 ("timeshift", Opt_timeshift), {} }; struct hpfs_fc_context { kuid_t uid; kgid_t gid; umode_t umask; int lowercase; int eas; int chk; int errs; int chkdsk; int timeshift; }; static inline void hpfs_help(void) { pr_info("\n\ HPFS filesystem options:\n\ help do not mount and display this text\n\ uid=xxx set uid of files that don't have uid specified in eas\n\ gid=xxx set gid of files that don't have gid specified in eas\n\ umask=xxx set mode of files that don't have mode specified in eas\n\ case=lower lowercase all files\n\ case=asis do not lowercase files (default)\n\ check=none no fs checks - kernel may crash on corrupted filesystem\n\ check=normal do some checks - it should not crash (default)\n\ check=strict do extra time-consuming checks, used for debugging\n\ errors=continue continue on errors\n\ errors=remount-ro remount read-only if errors found (default)\n\ errors=panic panic on errors\n\ chkdsk=no do not mark fs for chkdsking even if there were errors\n\ chkdsk=errors mark fs dirty if errors found (default)\n\ chkdsk=always always mark fs dirty - used for debugging\n\ eas=no ignore extended attributes\n\ eas=ro read but do not write extended attributes\n\ eas=rw r/w eas => enables chmod, chown, mknod, ln -s (default)\n\ timeshift=nnn add nnn seconds to file times\n\ \n"); } static int hpfs_parse_param(struct fs_context *fc, struct fs_parameter *param) { struct hpfs_fc_context *ctx = fc->fs_private; struct fs_parse_result result; int opt; opt = fs_parse(fc, hpfs_param_spec, param, &result); if (opt < 0) return opt; switch (opt) { case Opt_help: hpfs_help(); return -EINVAL; case Opt_uid: ctx->uid = result.uid; break; case Opt_gid: ctx->gid = result.gid; break; case Opt_umask: ctx->umask = result.uint_32; break; case Opt_case: ctx->lowercase = result.uint_32; break; case Opt_check: ctx->chk = result.uint_32; break; case Opt_err: ctx->errs = result.uint_32; break; case Opt_eas: ctx->eas = result.uint_32; break; case Opt_chkdsk: ctx->chkdsk = result.uint_32; break; case Opt_timeshift: { char *rhs = param->string; int timeshift; if (kstrtoint(rhs, 0, &timeshift)) return -EINVAL; ctx->timeshift = timeshift; break; } default: return -EINVAL; } return 0; } static int hpfs_reconfigure(struct fs_context *fc) { struct hpfs_fc_context *ctx = fc->fs_private; struct 
super_block *s = fc->root->d_sb; struct hpfs_sb_info *sbi = hpfs_sb(s); sync_filesystem(s); fc->sb_flags |= SB_NOATIME; hpfs_lock(s); if (ctx->timeshift != sbi->sb_timeshift) { pr_err("timeshift can't be changed using remount.\n"); goto out_err; } unmark_dirty(s); sbi->sb_uid = ctx->uid; sbi->sb_gid = ctx->gid; sbi->sb_mode = 0777 & ~ctx->umask; sbi->sb_lowercase = ctx->lowercase; sbi->sb_eas = ctx->eas; sbi->sb_chk = ctx->chk; sbi->sb_chkdsk = ctx->chkdsk; sbi->sb_err = ctx->errs; sbi->sb_timeshift = ctx->timeshift; if (!(fc->sb_flags & SB_RDONLY)) mark_dirty(s, 1); hpfs_unlock(s); return 0; out_err: hpfs_unlock(s); return -EINVAL; } static int hpfs_show_options(struct seq_file *seq, struct dentry *root) { struct hpfs_sb_info *sbi = hpfs_sb(root->d_sb); seq_printf(seq, ",uid=%u", from_kuid_munged(&init_user_ns, sbi->sb_uid)); seq_printf(seq, ",gid=%u", from_kgid_munged(&init_user_ns, sbi->sb_gid)); seq_printf(seq, ",umask=%03o", (~sbi->sb_mode & 0777)); if (sbi->sb_lowercase) seq_printf(seq, ",case=lower"); if (!sbi->sb_chk) seq_printf(seq, ",check=none"); if (sbi->sb_chk == 2) seq_printf(seq, ",check=strict"); if (!sbi->sb_err) seq_printf(seq, ",errors=continue"); if (sbi->sb_err == 2) seq_printf(seq, ",errors=panic"); if (!sbi->sb_chkdsk) seq_printf(seq, ",chkdsk=no"); if (sbi->sb_chkdsk == 2) seq_printf(seq, ",chkdsk=always"); if (!sbi->sb_eas) seq_printf(seq, ",eas=no"); if (sbi->sb_eas == 1) seq_printf(seq, ",eas=ro"); if (sbi->sb_timeshift) seq_printf(seq, ",timeshift=%d", sbi->sb_timeshift); return 0; } /* Super operations */ static const struct super_operations hpfs_sops = { .alloc_inode = hpfs_alloc_inode, .free_inode = hpfs_free_inode, .evict_inode = hpfs_evict_inode, .put_super = hpfs_put_super, .statfs = hpfs_statfs, .show_options = hpfs_show_options, }; static int hpfs_fill_super(struct super_block *s, struct fs_context *fc) { struct hpfs_fc_context *ctx = fc->fs_private; struct buffer_head *bh0, *bh1, *bh2; struct hpfs_boot_block *bootblock; struct hpfs_super_block *superblock; struct hpfs_spare_block *spareblock; struct hpfs_sb_info *sbi; struct inode *root; int silent = fc->sb_flags & SB_SILENT; dnode_secno root_dno; struct hpfs_dirent *de = NULL; struct quad_buffer_head qbh; sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); if (!sbi) { return -ENOMEM; } s->s_fs_info = sbi; mutex_init(&sbi->hpfs_mutex); hpfs_lock(s); /*sbi->sb_mounting = 1;*/ sb_set_blocksize(s, 512); sbi->sb_fs_size = -1; if (!(bootblock = hpfs_map_sector(s, 0, &bh0, 0))) goto bail1; if (!(superblock = hpfs_map_sector(s, 16, &bh1, 1))) goto bail2; if (!(spareblock = hpfs_map_sector(s, 17, &bh2, 0))) goto bail3; /* Check magics */ if (/*le16_to_cpu(bootblock->magic) != BB_MAGIC ||*/ le32_to_cpu(superblock->magic) != SB_MAGIC || le32_to_cpu(spareblock->magic) != SP_MAGIC) { if (!silent) pr_err("Bad magic ... probably not HPFS\n"); goto bail4; } /* Check version */ if (!sb_rdonly(s) && superblock->funcversion != 2 && superblock->funcversion != 3) { pr_err("Bad version %d,%d. 
Mount readonly to go around\n", (int)superblock->version, (int)superblock->funcversion); pr_err("please try recent version of HPFS driver at http://artax.karlin.mff.cuni.cz/~mikulas/vyplody/hpfs/index-e.cgi and if it still can't understand this format, contact author - mikulas@artax.karlin.mff.cuni.cz\n"); goto bail4; } s->s_flags |= SB_NOATIME; /* Fill superblock stuff */ s->s_magic = HPFS_SUPER_MAGIC; s->s_op = &hpfs_sops; set_default_d_op(s, &hpfs_dentry_operations); s->s_time_min = local_to_gmt(s, 0); s->s_time_max = local_to_gmt(s, U32_MAX); sbi->sb_root = le32_to_cpu(superblock->root); sbi->sb_fs_size = le32_to_cpu(superblock->n_sectors); sbi->sb_bitmaps = le32_to_cpu(superblock->bitmaps); sbi->sb_dirband_start = le32_to_cpu(superblock->dir_band_start); sbi->sb_dirband_size = le32_to_cpu(superblock->n_dir_band); sbi->sb_dmap = le32_to_cpu(superblock->dir_band_bitmap); sbi->sb_uid = ctx->uid; sbi->sb_gid = ctx->gid; sbi->sb_mode = 0777 & ~ctx->umask; sbi->sb_n_free = -1; sbi->sb_n_free_dnodes = -1; sbi->sb_lowercase = ctx->lowercase; sbi->sb_eas = ctx->eas; sbi->sb_chk = ctx->chk; sbi->sb_chkdsk = ctx->chkdsk; sbi->sb_err = ctx->errs; sbi->sb_timeshift = ctx->timeshift; sbi->sb_was_error = 0; sbi->sb_cp_table = NULL; sbi->sb_c_bitmap = -1; sbi->sb_max_fwd_alloc = 0xffffff; if (sbi->sb_fs_size >= 0x80000000) { hpfs_error(s, "invalid size in superblock: %08x", (unsigned)sbi->sb_fs_size); goto bail4; } if (spareblock->n_spares_used) hpfs_load_hotfix_map(s, spareblock); /* Load bitmap directory */ if (!(sbi->sb_bmp_dir = hpfs_load_bitmap_directory(s, le32_to_cpu(superblock->bitmaps)))) goto bail4; /* Check for general fs errors*/ if (spareblock->dirty && !spareblock->old_wrote) { if (sbi->sb_err == 2) { pr_err("Improperly stopped, not mounted\n"); goto bail4; } hpfs_error(s, "improperly stopped"); } if (!sb_rdonly(s)) { spareblock->dirty = 1; spareblock->old_wrote = 0; mark_buffer_dirty(bh2); } if (le32_to_cpu(spareblock->n_dnode_spares) != le32_to_cpu(spareblock->n_dnode_spares_free)) { if (sbi->sb_err >= 2) { pr_err("Spare dnodes used, try chkdsk\n"); mark_dirty(s, 0); goto bail4; } hpfs_error(s, "warning: spare dnodes used, try chkdsk"); if (sbi->sb_err == 0) pr_err("Proceeding, but your filesystem could be corrupted if you delete files or directories\n"); } if (sbi->sb_chk) { unsigned a; if (le32_to_cpu(superblock->dir_band_end) - le32_to_cpu(superblock->dir_band_start) + 1 != le32_to_cpu(superblock->n_dir_band) || le32_to_cpu(superblock->dir_band_end) < le32_to_cpu(superblock->dir_band_start) || le32_to_cpu(superblock->n_dir_band) > 0x4000) { hpfs_error(s, "dir band size mismatch: dir_band_start==%08x, dir_band_end==%08x, n_dir_band==%08x", le32_to_cpu(superblock->dir_band_start), le32_to_cpu(superblock->dir_band_end), le32_to_cpu(superblock->n_dir_band)); goto bail4; } a = sbi->sb_dirband_size; sbi->sb_dirband_size = 0; if (hpfs_chk_sectors(s, le32_to_cpu(superblock->dir_band_start), le32_to_cpu(superblock->n_dir_band), "dir_band") || hpfs_chk_sectors(s, le32_to_cpu(superblock->dir_band_bitmap), 4, "dir_band_bitmap") || hpfs_chk_sectors(s, le32_to_cpu(superblock->bitmaps), 4, "bitmaps")) { mark_dirty(s, 0); goto bail4; } sbi->sb_dirband_size = a; } else pr_err("You really don't want any checks? 
You are crazy...\n"); /* Load code page table */ if (le32_to_cpu(spareblock->n_code_pages)) if (!(sbi->sb_cp_table = hpfs_load_code_page(s, le32_to_cpu(spareblock->code_page_dir)))) pr_err("code page support is disabled\n"); brelse(bh2); brelse(bh1); brelse(bh0); root = iget_locked(s, sbi->sb_root); if (!root) goto bail0; hpfs_init_inode(root); hpfs_read_inode(root); unlock_new_inode(root); s->s_root = d_make_root(root); if (!s->s_root) goto bail0; /* * find the root directory's . pointer & finish filling in the inode */ root_dno = hpfs_fnode_dno(s, sbi->sb_root); if (root_dno) de = map_dirent(root, root_dno, "\001\001", 2, NULL, &qbh); if (!de) hpfs_error(s, "unable to find root dir"); else { inode_set_atime(root, local_to_gmt(s, le32_to_cpu(de->read_date)), 0); inode_set_mtime(root, local_to_gmt(s, le32_to_cpu(de->write_date)), 0); inode_set_ctime(root, local_to_gmt(s, le32_to_cpu(de->creation_date)), 0); hpfs_i(root)->i_ea_size = le32_to_cpu(de->ea_size); hpfs_i(root)->i_parent_dir = root->i_ino; if (root->i_size == -1) root->i_size = 2048; if (root->i_blocks == -1) root->i_blocks = 5; hpfs_brelse4(&qbh); } hpfs_unlock(s); return 0; bail4: brelse(bh2); bail3: brelse(bh1); bail2: brelse(bh0); bail1: bail0: hpfs_unlock(s); free_sbi(sbi); return -EINVAL; } static int hpfs_get_tree(struct fs_context *fc) { return get_tree_bdev(fc, hpfs_fill_super); } static void hpfs_free_fc(struct fs_context *fc) { kfree(fc->fs_private); } static const struct fs_context_operations hpfs_fc_context_ops = { .parse_param = hpfs_parse_param, .get_tree = hpfs_get_tree, .reconfigure = hpfs_reconfigure, .free = hpfs_free_fc, }; static int hpfs_init_fs_context(struct fs_context *fc) { struct hpfs_fc_context *ctx; ctx = kzalloc(sizeof(struct hpfs_fc_context), GFP_KERNEL); if (!ctx) return -ENOMEM; if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE) { struct super_block *sb = fc->root->d_sb; struct hpfs_sb_info *sbi = hpfs_sb(sb); ctx->uid = sbi->sb_uid; ctx->gid = sbi->sb_gid; ctx->umask = 0777 & ~sbi->sb_mode; ctx->lowercase = sbi->sb_lowercase; ctx->eas = sbi->sb_eas; ctx->chk = sbi->sb_chk; ctx->chkdsk = sbi->sb_chkdsk; ctx->errs = sbi->sb_err; ctx->timeshift = sbi->sb_timeshift; } else { ctx->uid = current_uid(); ctx->gid = current_gid(); ctx->umask = current_umask(); ctx->lowercase = 0; ctx->eas = 2; ctx->chk = 1; ctx->errs = 1; ctx->chkdsk = 1; ctx->timeshift = 0; } fc->fs_private = ctx; fc->ops = &hpfs_fc_context_ops; return 0; }; static struct file_system_type hpfs_fs_type = { .owner = THIS_MODULE, .name = "hpfs", .kill_sb = kill_block_super, .fs_flags = FS_REQUIRES_DEV, .init_fs_context = hpfs_init_fs_context, .parameters = hpfs_param_spec, }; MODULE_ALIAS_FS("hpfs"); static int __init init_hpfs_fs(void) { int err = init_inodecache(); if (err) goto out1; err = register_filesystem(&hpfs_fs_type); if (err) goto out; return 0; out: destroy_inodecache(); out1: return err; } static void __exit exit_hpfs_fs(void) { unregister_filesystem(&hpfs_fs_type); destroy_inodecache(); } module_init(init_hpfs_fs) module_exit(exit_hpfs_fs) MODULE_DESCRIPTION("OS/2 HPFS file system support"); MODULE_LICENSE("GPL");
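The umask= option above feeds directly into sbi->sb_mode via 0777 & ~ctx->umask, which is the mode applied to files that carry no mode in their extended attributes. A minimal, hedged userspace sketch of that arithmetic (main() and the sample value are illustrative only, not part of the HPFS driver):

#include <stdio.h>

int main(void)
{
	unsigned int umask = 0022;          /* as set by mount -o umask=022 (assumed example value) */
	unsigned int mode  = 0777 & ~umask; /* -> 0755, the default mode for files without EAs */

	printf("umask=%03o => default mode %03o\n", umask, mode);
	return 0;
}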
68 3 3 1 1 1 1 1 1 1 3 68 20 17 15 15 14 15 15 10 10 9 9 20 9 3 20 9 2 11 11 11 11 11 1 1 1 1 11 8 9 8 9 9 9 2 20 20 20 19 3 17 17 11 7 16 16 8 2 11 11 4 11 19 20 11 11 10 11 22 12 12 12 12 12 12 12 12 10 10 2 2 51 51 4 2 4 2 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 // SPDX-License-Identifier: GPL-2.0-only /* * Kernel-based Virtual Machine driver for Linux * * This module enables kernel and guest-mode vCPU access to guest physical * memory with suitable invalidation mechanisms. * * Copyright © 2021 Amazon.com, Inc. or its affiliates. * * Authors: * David Woodhouse <dwmw2@infradead.org> */ #include <linux/kvm_host.h> #include <linux/kvm.h> #include <linux/highmem.h> #include <linux/module.h> #include <linux/errno.h> #include "kvm_mm.h" /* * MMU notifier 'invalidate_range_start' hook. */ void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start, unsigned long end) { struct gfn_to_pfn_cache *gpc; spin_lock(&kvm->gpc_lock); list_for_each_entry(gpc, &kvm->gpc_list, list) { read_lock_irq(&gpc->lock); /* Only a single page so no need to care about length */ if (gpc->valid && !is_error_noslot_pfn(gpc->pfn) && gpc->uhva >= start && gpc->uhva < end) { read_unlock_irq(&gpc->lock); /* * There is a small window here where the cache could * be modified, and invalidation would no longer be * necessary. Hence check again whether invalidation * is still necessary once the write lock has been * acquired. 
*/ write_lock_irq(&gpc->lock); if (gpc->valid && !is_error_noslot_pfn(gpc->pfn) && gpc->uhva >= start && gpc->uhva < end) gpc->valid = false; write_unlock_irq(&gpc->lock); continue; } read_unlock_irq(&gpc->lock); } spin_unlock(&kvm->gpc_lock); } static bool kvm_gpc_is_valid_len(gpa_t gpa, unsigned long uhva, unsigned long len) { unsigned long offset = kvm_is_error_gpa(gpa) ? offset_in_page(uhva) : offset_in_page(gpa); /* * The cached access must fit within a single page. The 'len' argument * to activate() and refresh() exists only to enforce that. */ return offset + len <= PAGE_SIZE; } bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len) { struct kvm_memslots *slots = kvm_memslots(gpc->kvm); if (!gpc->active) return false; /* * If the page was cached from a memslot, make sure the memslots have * not been re-configured. */ if (!kvm_is_error_gpa(gpc->gpa) && gpc->generation != slots->generation) return false; if (kvm_is_error_hva(gpc->uhva)) return false; if (!kvm_gpc_is_valid_len(gpc->gpa, gpc->uhva, len)) return false; if (!gpc->valid) return false; return true; } static void *gpc_map(kvm_pfn_t pfn) { if (pfn_valid(pfn)) return kmap(pfn_to_page(pfn)); #ifdef CONFIG_HAS_IOMEM return memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB); #else return NULL; #endif } static void gpc_unmap(kvm_pfn_t pfn, void *khva) { /* Unmap the old pfn/page if it was mapped before. */ if (is_error_noslot_pfn(pfn) || !khva) return; if (pfn_valid(pfn)) { kunmap(pfn_to_page(pfn)); return; } #ifdef CONFIG_HAS_IOMEM memunmap(khva); #endif } static inline bool mmu_notifier_retry_cache(struct kvm *kvm, unsigned long mmu_seq) { /* * mn_active_invalidate_count acts for all intents and purposes * like mmu_invalidate_in_progress here; but the latter cannot * be used here because the invalidation of caches in the * mmu_notifier event occurs _before_ mmu_invalidate_in_progress * is elevated. * * Note, it does not matter that mn_active_invalidate_count * is not protected by gpc->lock. It is guaranteed to * be elevated before the mmu_notifier acquires gpc->lock, and * isn't dropped until after mmu_invalidate_seq is updated. */ if (kvm->mn_active_invalidate_count) return true; /* * Ensure mn_active_invalidate_count is read before * mmu_invalidate_seq. This pairs with the smp_wmb() in * mmu_notifier_invalidate_range_end() to guarantee either the * old (non-zero) value of mn_active_invalidate_count or the * new (incremented) value of mmu_invalidate_seq is observed. */ smp_rmb(); return kvm->mmu_invalidate_seq != mmu_seq; } static kvm_pfn_t hva_to_pfn_retry(struct gfn_to_pfn_cache *gpc) { /* Note, the new page offset may be different than the old! */ void *old_khva = (void *)PAGE_ALIGN_DOWN((uintptr_t)gpc->khva); kvm_pfn_t new_pfn = KVM_PFN_ERR_FAULT; void *new_khva = NULL; unsigned long mmu_seq; struct page *page; struct kvm_follow_pfn kfp = { .slot = gpc->memslot, .gfn = gpa_to_gfn(gpc->gpa), .flags = FOLL_WRITE, .hva = gpc->uhva, .refcounted_page = &page, }; lockdep_assert_held(&gpc->refresh_lock); lockdep_assert_held_write(&gpc->lock); /* * Invalidate the cache prior to dropping gpc->lock, the gpa=>uhva * assets have already been updated and so a concurrent check() from a * different task may not fail the gpa/uhva/generation checks. */ gpc->valid = false; do { mmu_seq = gpc->kvm->mmu_invalidate_seq; smp_rmb(); write_unlock_irq(&gpc->lock); /* * If the previous iteration "failed" due to an mmu_notifier * event, release the pfn and unmap the kernel virtual address * from the previous attempt. 
Unmapping might sleep, so this * needs to be done after dropping the lock. Opportunistically * check for resched while the lock isn't held. */ if (new_pfn != KVM_PFN_ERR_FAULT) { /* * Keep the mapping if the previous iteration reused * the existing mapping and didn't create a new one. */ if (new_khva != old_khva) gpc_unmap(new_pfn, new_khva); kvm_release_page_unused(page); cond_resched(); } new_pfn = hva_to_pfn(&kfp); if (is_error_noslot_pfn(new_pfn)) goto out_error; /* * Obtain a new kernel mapping if KVM itself will access the * pfn. Note, kmap() and memremap() can both sleep, so this * too must be done outside of gpc->lock! */ if (new_pfn == gpc->pfn) new_khva = old_khva; else new_khva = gpc_map(new_pfn); if (!new_khva) { kvm_release_page_unused(page); goto out_error; } write_lock_irq(&gpc->lock); /* * Other tasks must wait for _this_ refresh to complete before * attempting to refresh. */ WARN_ON_ONCE(gpc->valid); } while (mmu_notifier_retry_cache(gpc->kvm, mmu_seq)); gpc->valid = true; gpc->pfn = new_pfn; gpc->khva = new_khva + offset_in_page(gpc->uhva); /* * Put the reference to the _new_ page. The page is now tracked by the * cache and can be safely migrated, swapped, etc... as the cache will * invalidate any mappings in response to relevant mmu_notifier events. */ kvm_release_page_clean(page); return 0; out_error: write_lock_irq(&gpc->lock); return -EFAULT; } static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long uhva) { unsigned long page_offset; bool unmap_old = false; unsigned long old_uhva; kvm_pfn_t old_pfn; bool hva_change = false; void *old_khva; int ret; /* Either gpa or uhva must be valid, but not both */ if (WARN_ON_ONCE(kvm_is_error_gpa(gpa) == kvm_is_error_hva(uhva))) return -EINVAL; lockdep_assert_held(&gpc->refresh_lock); write_lock_irq(&gpc->lock); if (!gpc->active) { ret = -EINVAL; goto out_unlock; } old_pfn = gpc->pfn; old_khva = (void *)PAGE_ALIGN_DOWN((uintptr_t)gpc->khva); old_uhva = PAGE_ALIGN_DOWN(gpc->uhva); if (kvm_is_error_gpa(gpa)) { page_offset = offset_in_page(uhva); gpc->gpa = INVALID_GPA; gpc->memslot = NULL; gpc->uhva = PAGE_ALIGN_DOWN(uhva); if (gpc->uhva != old_uhva) hva_change = true; } else { struct kvm_memslots *slots = kvm_memslots(gpc->kvm); page_offset = offset_in_page(gpa); if (gpc->gpa != gpa || gpc->generation != slots->generation || kvm_is_error_hva(gpc->uhva)) { gfn_t gfn = gpa_to_gfn(gpa); gpc->gpa = gpa; gpc->generation = slots->generation; gpc->memslot = __gfn_to_memslot(slots, gfn); gpc->uhva = gfn_to_hva_memslot(gpc->memslot, gfn); if (kvm_is_error_hva(gpc->uhva)) { ret = -EFAULT; goto out; } /* * Even if the GPA and/or the memslot generation changed, the * HVA may still be the same. */ if (gpc->uhva != old_uhva) hva_change = true; } else { gpc->uhva = old_uhva; } } /* Note: the offset must be correct before calling hva_to_pfn_retry() */ gpc->uhva += page_offset; /* * If the userspace HVA changed or the PFN was already invalid, * drop the lock and do the HVA to PFN lookup again. */ if (!gpc->valid || hva_change) { ret = hva_to_pfn_retry(gpc); } else { /* * If the HVA→PFN mapping was already valid, don't unmap it. * But do update gpc->khva because the offset within the page * may have changed. */ gpc->khva = old_khva + page_offset; ret = 0; goto out_unlock; } out: /* * Invalidate the cache and purge the pfn/khva if the refresh failed. * Some/all of the uhva, gpa, and memslot generation info may still be * valid, leave it as is. 
*/ if (ret) { gpc->valid = false; gpc->pfn = KVM_PFN_ERR_FAULT; gpc->khva = NULL; } /* Detect a pfn change before dropping the lock! */ unmap_old = (old_pfn != gpc->pfn); out_unlock: write_unlock_irq(&gpc->lock); if (unmap_old) gpc_unmap(old_pfn, old_khva); return ret; } int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, unsigned long len) { unsigned long uhva; guard(mutex)(&gpc->refresh_lock); if (!kvm_gpc_is_valid_len(gpc->gpa, gpc->uhva, len)) return -EINVAL; /* * If the GPA is valid then ignore the HVA, as a cache can be GPA-based * or HVA-based, not both. For GPA-based caches, the HVA will be * recomputed during refresh if necessary. */ uhva = kvm_is_error_gpa(gpc->gpa) ? gpc->uhva : KVM_HVA_ERR_BAD; return __kvm_gpc_refresh(gpc, gpc->gpa, uhva); } void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm) { rwlock_init(&gpc->lock); mutex_init(&gpc->refresh_lock); gpc->kvm = kvm; gpc->pfn = KVM_PFN_ERR_FAULT; gpc->gpa = INVALID_GPA; gpc->uhva = KVM_HVA_ERR_BAD; gpc->active = gpc->valid = false; } static int __kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long uhva, unsigned long len) { struct kvm *kvm = gpc->kvm; if (!kvm_gpc_is_valid_len(gpa, uhva, len)) return -EINVAL; guard(mutex)(&gpc->refresh_lock); if (!gpc->active) { if (KVM_BUG_ON(gpc->valid, kvm)) return -EIO; spin_lock(&kvm->gpc_lock); list_add(&gpc->list, &kvm->gpc_list); spin_unlock(&kvm->gpc_lock); /* * Activate the cache after adding it to the list, a concurrent * refresh must not establish a mapping until the cache is * reachable by mmu_notifier events. */ write_lock_irq(&gpc->lock); gpc->active = true; write_unlock_irq(&gpc->lock); } return __kvm_gpc_refresh(gpc, gpa, uhva); } int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len) { /* * Explicitly disallow INVALID_GPA so that the magic value can be used * by KVM to differentiate between GPA-based and HVA-based caches. */ if (WARN_ON_ONCE(kvm_is_error_gpa(gpa))) return -EINVAL; return __kvm_gpc_activate(gpc, gpa, KVM_HVA_ERR_BAD, len); } int kvm_gpc_activate_hva(struct gfn_to_pfn_cache *gpc, unsigned long uhva, unsigned long len) { if (!access_ok((void __user *)uhva, len)) return -EINVAL; return __kvm_gpc_activate(gpc, INVALID_GPA, uhva, len); } void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc) { struct kvm *kvm = gpc->kvm; kvm_pfn_t old_pfn; void *old_khva; guard(mutex)(&gpc->refresh_lock); if (gpc->active) { /* * Deactivate the cache before removing it from the list, KVM * must stall mmu_notifier events until all users go away, i.e. * until gpc->lock is dropped and refresh is guaranteed to fail. */ write_lock_irq(&gpc->lock); gpc->active = false; gpc->valid = false; /* * Leave the GPA => uHVA cache intact, it's protected by the * memslot generation. The PFN lookup needs to be redone every * time as mmu_notifier protection is lost when the cache is * removed from the VM's gpc_list. */ old_khva = gpc->khva - offset_in_page(gpc->khva); gpc->khva = NULL; old_pfn = gpc->pfn; gpc->pfn = KVM_PFN_ERR_FAULT; write_unlock_irq(&gpc->lock); spin_lock(&kvm->gpc_lock); list_del(&gpc->list); spin_unlock(&kvm->gpc_lock); gpc_unmap(old_pfn, old_khva); } }
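The functions above (kvm_gpc_init(), kvm_gpc_activate(), kvm_gpc_check(), kvm_gpc_refresh(), kvm_gpc_deactivate()) form the lifecycle of a gfn_to_pfn_cache. Below is a hedged sketch of how a consumer might drive them; read_guest_u32() and my_gpa are hypothetical, and real callers normally keep the cache activated across many accesses rather than activating it per read:

#include <linux/kvm_host.h>

/* Hypothetical caller, a minimal sketch of the check/refresh pattern. */
static int read_guest_u32(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
			  gpa_t my_gpa, u32 *out)
{
	int ret;

	/* One-time setup, e.g. at vCPU or VM init. */
	kvm_gpc_init(gpc, kvm);
	ret = kvm_gpc_activate(gpc, my_gpa, sizeof(*out));
	if (ret)
		return ret;

	/* Fast path: validate under the read lock, refresh on failure. */
	read_lock_irq(&gpc->lock);
	while (!kvm_gpc_check(gpc, sizeof(*out))) {
		read_unlock_irq(&gpc->lock);

		ret = kvm_gpc_refresh(gpc, sizeof(*out));
		if (ret)
			goto out;

		read_lock_irq(&gpc->lock);
	}

	*out = *(u32 *)gpc->khva;	/* khva points at the cached kernel mapping */
	read_unlock_irq(&gpc->lock);
	ret = 0;
out:
	kvm_gpc_deactivate(gpc);
	return ret;
}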
32 42 611 610 609 606 662 7 40 34 42 115 563 622 6 617 615 613 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 /* SPDX-License-Identifier: GPL-2.0 */ /* * A security context is a set of security attributes * associated with each subject and object controlled * by the security policy. Security contexts are * externally represented as variable-length strings * that can be interpreted by a user or application * with an understanding of the security policy. * Internally, the security server uses a simple * structure. This structure is private to the * security server and can be changed without affecting * clients of the security server. * * Author : Stephen Smalley, <stephen.smalley.work@gmail.com> */ #ifndef _SS_CONTEXT_H_ #define _SS_CONTEXT_H_ #include "ebitmap.h" #include "mls_types.h" #include "security.h" /* * A security context consists of an authenticated user * identity, a role, a type and a MLS range. */ struct context { u32 user; u32 role; u32 type; u32 len; /* length of string in bytes */ struct mls_range range; char *str; /* string representation if context cannot be mapped. */ }; static inline void mls_context_init(struct context *c) { memset(&c->range, 0, sizeof(c->range)); } static inline int mls_context_cpy(struct context *dst, const struct context *src) { int rc; dst->range.level[0].sens = src->range.level[0].sens; rc = ebitmap_cpy(&dst->range.level[0].cat, &src->range.level[0].cat); if (rc) goto out; dst->range.level[1].sens = src->range.level[1].sens; rc = ebitmap_cpy(&dst->range.level[1].cat, &src->range.level[1].cat); if (rc) ebitmap_destroy(&dst->range.level[0].cat); out: return rc; } /* * Sets both levels in the MLS range of 'dst' to the low level of 'src'. */ static inline int mls_context_cpy_low(struct context *dst, const struct context *src) { int rc; dst->range.level[0].sens = src->range.level[0].sens; rc = ebitmap_cpy(&dst->range.level[0].cat, &src->range.level[0].cat); if (rc) goto out; dst->range.level[1].sens = src->range.level[0].sens; rc = ebitmap_cpy(&dst->range.level[1].cat, &src->range.level[0].cat); if (rc) ebitmap_destroy(&dst->range.level[0].cat); out: return rc; } /* * Sets both levels in the MLS range of 'dst' to the high level of 'src'. 
*/ static inline int mls_context_cpy_high(struct context *dst, const struct context *src) { int rc; dst->range.level[0].sens = src->range.level[1].sens; rc = ebitmap_cpy(&dst->range.level[0].cat, &src->range.level[1].cat); if (rc) goto out; dst->range.level[1].sens = src->range.level[1].sens; rc = ebitmap_cpy(&dst->range.level[1].cat, &src->range.level[1].cat); if (rc) ebitmap_destroy(&dst->range.level[0].cat); out: return rc; } static inline int mls_context_glblub(struct context *dst, const struct context *c1, const struct context *c2) { struct mls_range *dr = &dst->range; const struct mls_range *r1 = &c1->range, *r2 = &c2->range; int rc = 0; if (r1->level[1].sens < r2->level[0].sens || r2->level[1].sens < r1->level[0].sens) /* These ranges have no common sensitivities */ return -EINVAL; /* Take the greatest of the low */ dr->level[0].sens = max(r1->level[0].sens, r2->level[0].sens); /* Take the least of the high */ dr->level[1].sens = min(r1->level[1].sens, r2->level[1].sens); rc = ebitmap_and(&dr->level[0].cat, &r1->level[0].cat, &r2->level[0].cat); if (rc) goto out; rc = ebitmap_and(&dr->level[1].cat, &r1->level[1].cat, &r2->level[1].cat); if (rc) goto out; out: return rc; } static inline bool mls_context_equal(const struct context *c1, const struct context *c2) { return ((c1->range.level[0].sens == c2->range.level[0].sens) && ebitmap_equal(&c1->range.level[0].cat, &c2->range.level[0].cat) && (c1->range.level[1].sens == c2->range.level[1].sens) && ebitmap_equal(&c1->range.level[1].cat, &c2->range.level[1].cat)); } static inline void mls_context_destroy(struct context *c) { ebitmap_destroy(&c->range.level[0].cat); ebitmap_destroy(&c->range.level[1].cat); mls_context_init(c); } static inline void context_init(struct context *c) { memset(c, 0, sizeof(*c)); } static inline int context_cpy(struct context *dst, const struct context *src) { int rc; dst->user = src->user; dst->role = src->role; dst->type = src->type; if (src->str) { dst->str = kstrdup(src->str, GFP_ATOMIC); if (!dst->str) return -ENOMEM; dst->len = src->len; } else { dst->str = NULL; dst->len = 0; } rc = mls_context_cpy(dst, src); if (rc) { kfree(dst->str); dst->str = NULL; dst->len = 0; return rc; } return 0; } static inline void context_destroy(struct context *c) { c->user = c->role = c->type = 0; kfree(c->str); c->str = NULL; c->len = 0; mls_context_destroy(c); } static inline bool context_equal(const struct context *c1, const struct context *c2) { if (c1->len && c2->len) return (c1->len == c2->len && !strcmp(c1->str, c2->str)); if (c1->len || c2->len) return 0; return ((c1->user == c2->user) && (c1->role == c2->role) && (c1->type == c2->type) && mls_context_equal(c1, c2)); } u32 context_compute_hash(const struct context *c); #endif /* _SS_CONTEXT_H_ */
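The inline helpers above cover the whole lifecycle of a struct context. A hedged sketch of using them together (clone_and_compare() is a hypothetical caller, not part of the security server):

#include "context.h"

/* Hypothetical: duplicate a context, sanity-check the copy, then free it. */
static int clone_and_compare(const struct context *src)
{
	struct context copy;
	int rc;

	context_init(&copy);			/* zero all fields */

	rc = context_cpy(&copy, src);		/* deep-copies str and the MLS range */
	if (rc)
		return rc;			/* -ENOMEM: nothing to clean up */

	WARN_ON(!context_equal(&copy, src));	/* a fresh copy must compare equal */

	context_destroy(&copy);			/* frees str and the category bitmaps */
	return 0;
}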
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 /* SPDX-License-Identifier: GPL-2.0-only */ #ifndef _LINUX_CRC32_H #define _LINUX_CRC32_H #include <linux/types.h> #include <linux/bitrev.h> /** * crc32_le() - Compute least-significant-bit-first IEEE CRC-32 * @crc: Initial CRC value. ~0 (recommended) or 0 for a new CRC computation, or * the previous CRC value if computing incrementally. * @p: Pointer to the data buffer * @len: Length of data in bytes * * This implements the CRC variant that is often known as the IEEE CRC-32, or * simply CRC-32, and is widely used in Ethernet and other applications: * * - Polynomial: x^32 + x^26 + x^23 + x^22 + x^16 + x^12 + x^11 + x^10 + x^8 + * x^7 + x^5 + x^4 + x^2 + x^1 + x^0 * - Bit order: Least-significant-bit-first * - Polynomial in integer form: 0xedb88320 * * This does *not* invert the CRC at the beginning or end. The caller is * expected to do that if it needs to. Inverting at both ends is recommended. * * For new applications, prefer to use CRC-32C instead. See crc32c(). * * Context: Any context * Return: The new CRC value */ u32 crc32_le(u32 crc, const void *p, size_t len); /* This is just an alias for crc32_le(). */ static inline u32 crc32(u32 crc, const void *p, size_t len) { return crc32_le(crc, p, len); } /** * crc32_be() - Compute most-significant-bit-first IEEE CRC-32 * @crc: Initial CRC value. ~0 (recommended) or 0 for a new CRC computation, or * the previous CRC value if computing incrementally. * @p: Pointer to the data buffer * @len: Length of data in bytes * * crc32_be() is the same as crc32_le() except that crc32_be() computes the * *most-significant-bit-first* variant of the CRC. I.e., within each byte, the * most significant bit is processed first (treated as highest order polynomial * coefficient). The same bit order is also used for the CRC value itself: * * - Polynomial: x^32 + x^26 + x^23 + x^22 + x^16 + x^12 + x^11 + x^10 + x^8 + * x^7 + x^5 + x^4 + x^2 + x^1 + x^0 * - Bit order: Most-significant-bit-first * - Polynomial in integer form: 0x04c11db7 * * Context: Any context * Return: The new CRC value */ u32 crc32_be(u32 crc, const void *p, size_t len); /** * crc32c() - Compute CRC-32C * @crc: Initial CRC value. ~0 (recommended) or 0 for a new CRC computation, or * the previous CRC value if computing incrementally. * @p: Pointer to the data buffer * @len: Length of data in bytes * * This implements CRC-32C, i.e. the Castagnoli CRC. This is the recommended * CRC variant to use in new applications that want a 32-bit CRC. * * - Polynomial: x^32 + x^28 + x^27 + x^26 + x^25 + x^23 + x^22 + x^20 + x^19 + * x^18 + x^14 + x^13 + x^11 + x^10 + x^9 + x^8 + x^6 + x^0 * - Bit order: Least-significant-bit-first * - Polynomial in integer form: 0x82f63b78 * * This does *not* invert the CRC at the beginning or end. The caller is * expected to do that if it needs to. Inverting at both ends is recommended. * * Context: Any context * Return: The new CRC value */ u32 crc32c(u32 crc, const void *p, size_t len); /* * crc32_optimizations() returns flags that indicate which CRC32 library * functions are using architecture-specific optimizations. 
Unlike * IS_ENABLED(CONFIG_CRC32_ARCH) it takes into account the different CRC32 * variants and also whether any needed CPU features are available at runtime. */ #define CRC32_LE_OPTIMIZATION BIT(0) /* crc32_le() is optimized */ #define CRC32_BE_OPTIMIZATION BIT(1) /* crc32_be() is optimized */ #define CRC32C_OPTIMIZATION BIT(2) /* crc32c() is optimized */ #if IS_ENABLED(CONFIG_CRC32_ARCH) u32 crc32_optimizations(void); #else static inline u32 crc32_optimizations(void) { return 0; } #endif /* * Helpers for hash table generation of ethernet nics: * * Ethernet sends the least significant bit of a byte first, thus crc32_le * is used. The output of crc32_le is bit reversed [most significant bit * is in bit nr 0], thus it must be reversed before use. Except for * nics that bit swap the result internally... */ #define ether_crc(length, data) bitrev32(crc32_le(~0, data, length)) #define ether_crc_le(length, data) crc32_le(~0, data, length) #endif /* _LINUX_CRC32_H */
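Since neither crc32_le() nor crc32c() inverts the CRC for you, callers that want the conventional result seed with ~0 and invert the return value, and incremental callers feed each return value back in as @crc. A short hedged sketch (the helper names are illustrative, not kernel API):

#include <linux/crc32.h>

/* Conventional IEEE CRC-32: invert at both ends, as the kerneldoc recommends. */
static u32 ieee_crc32_of(const void *buf, size_t len)
{
	return ~crc32(~0, buf, len);
}

/* Incremental CRC-32C over two chunks: pass the running value back as @crc. */
static u32 crc32c_of_two_chunks(const void *a, size_t alen,
				const void *b, size_t blen)
{
	u32 crc = ~0;

	crc = crc32c(crc, a, alen);
	crc = crc32c(crc, b, blen);
	return ~crc;
}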
3 1 1 2 2 1 1 1 1 1 1 1 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 // SPDX-License-Identifier: GPL-2.0 #include <linux/kernel.h> #include <linux/errno.h> #include <linux/fs.h> #include <linux/file.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/namei.h> #include <linux/io_uring.h> #include <linux/fsnotify.h> #include <uapi/linux/io_uring.h> #include "io_uring.h" #include "sync.h" struct io_sync { struct file *file; loff_t len; loff_t off; int flags; int mode; }; int io_sfr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { struct io_sync *sync = io_kiocb_to_cmd(req, struct io_sync); if (unlikely(sqe->addr || sqe->buf_index || sqe->splice_fd_in)) return -EINVAL; sync->off = READ_ONCE(sqe->off); sync->len = READ_ONCE(sqe->len); sync->flags = READ_ONCE(sqe->sync_range_flags); req->flags |= REQ_F_FORCE_ASYNC; return 0; } int io_sync_file_range(struct io_kiocb *req, unsigned int issue_flags) { struct io_sync *sync = io_kiocb_to_cmd(req, struct io_sync); int ret; /* sync_file_range always requires a blocking context */ WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK); ret = sync_file_range(req->file, sync->off, sync->len, sync->flags); io_req_set_res(req, ret, 0); return IOU_COMPLETE; } int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { struct io_sync *sync = io_kiocb_to_cmd(req, struct io_sync); if (unlikely(sqe->addr || sqe->buf_index || sqe->splice_fd_in)) return -EINVAL; sync->flags = READ_ONCE(sqe->fsync_flags); if (unlikely(sync->flags & ~IORING_FSYNC_DATASYNC)) return -EINVAL; sync->off = READ_ONCE(sqe->off); sync->len = READ_ONCE(sqe->len); req->flags |= REQ_F_FORCE_ASYNC; return 0; } int io_fsync(struct io_kiocb *req, unsigned int issue_flags) { struct io_sync *sync = io_kiocb_to_cmd(req, struct io_sync); loff_t end = sync->off + sync->len; int ret; /* fsync always requires a blocking context */ WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK); ret = vfs_fsync_range(req->file, sync->off, end > 0 ? end : LLONG_MAX, sync->flags & IORING_FSYNC_DATASYNC); io_req_set_res(req, ret, 0); return IOU_COMPLETE; } int io_fallocate_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { struct io_sync *sync = io_kiocb_to_cmd(req, struct io_sync); if (sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in) return -EINVAL; sync->off = READ_ONCE(sqe->off); sync->len = READ_ONCE(sqe->addr); sync->mode = READ_ONCE(sqe->len); req->flags |= REQ_F_FORCE_ASYNC; return 0; } int io_fallocate(struct io_kiocb *req, unsigned int issue_flags) { struct io_sync *sync = io_kiocb_to_cmd(req, struct io_sync); int ret; /* fallocate always requiring blocking context */ WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK); ret = vfs_fallocate(req->file, sync->mode, sync->off, sync->len); if (ret >= 0) fsnotify_modify(req->file); io_req_set_res(req, ret, 0); return IOU_COMPLETE; }
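From userspace, the io_fsync_prep()/io_fsync() pair above services IORING_OP_FSYNC submissions. A hedged liburing sketch of issuing one with IORING_FSYNC_DATASYNC (the file name and queue depth are arbitrary example values):

#include <liburing.h>
#include <fcntl.h>
#include <stdio.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int fd = open("data.bin", O_WRONLY | O_CREAT, 0644);

	if (fd < 0 || io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_fsync(sqe, fd, IORING_FSYNC_DATASYNC);	/* datasync variant */
	io_uring_submit(&ring);

	if (io_uring_wait_cqe(&ring, &cqe) == 0) {
		printf("fsync result: %d\n", cqe->res);		/* 0 on success, -errno on error */
		io_uring_cqe_seen(&ring, cqe);
	}

	io_uring_queue_exit(&ring);
	return 0;
}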
8 8 1 8 7 2 1 5 1 1 1 5 5 4 1 3 1 1 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 // SPDX-License-Identifier: GPL-2.0-only /* * VMware VMCI Driver * * Copyright (C) 2012 VMware, Inc. All rights reserved. */ #include <linux/vmw_vmci_defs.h> #include <linux/vmw_vmci_api.h> #include "vmci_context.h" #include "vmci_driver.h" #include "vmci_route.h" /* * Make a routing decision for the given source and destination handles. * This will try to determine the route using the handles and the available * devices. Will set the source context if it is invalid. */ int vmci_route(struct vmci_handle *src, const struct vmci_handle *dst, bool from_guest, enum vmci_route *route) { bool has_host_device = vmci_host_code_active(); bool has_guest_device = vmci_guest_code_active(); *route = VMCI_ROUTE_NONE; /* * "from_guest" is only ever set to true by * IOCTL_VMCI_DATAGRAM_SEND (or by the vmkernel equivalent), * which comes from the VMX, so we know it is coming from a * guest. * * To avoid inconsistencies, test these once. We will test * them again when we do the actual send to ensure that we do * not touch a non-existent device. */ /* Must have a valid destination context. */ if (VMCI_INVALID_ID == dst->context) return VMCI_ERROR_INVALID_ARGS; /* Anywhere to hypervisor. */ if (VMCI_HYPERVISOR_CONTEXT_ID == dst->context) { /* * If this message already came from a guest then we * cannot send it to the hypervisor. It must come * from a local client. */ if (from_guest) return VMCI_ERROR_DST_UNREACHABLE; /* * We must be acting as a guest in order to send to * the hypervisor. */ if (!has_guest_device) return VMCI_ERROR_DEVICE_NOT_FOUND; /* And we cannot send if the source is the host context. */ if (VMCI_HOST_CONTEXT_ID == src->context) return VMCI_ERROR_INVALID_ARGS; /* * If the client passed the ANON source handle then * respect it (both context and resource are invalid). * However, if they passed only an invalid context, * then they probably mean ANY, in which case we * should set the real context here before passing it * down. */ if (VMCI_INVALID_ID == src->context && VMCI_INVALID_ID != src->resource) src->context = vmci_get_context_id(); /* Send from local client down to the hypervisor. */ *route = VMCI_ROUTE_AS_GUEST; return VMCI_SUCCESS; } /* Anywhere to local client on host. */ if (VMCI_HOST_CONTEXT_ID == dst->context) { /* * If it is not from a guest but we are acting as a * guest, then we need to send it down to the host. * Note that if we are also acting as a host then this * will prevent us from sending from local client to * local client, but we accept that restriction as a * way to remove any ambiguity from the host context. */ if (src->context == VMCI_HYPERVISOR_CONTEXT_ID) { /* * If the hypervisor is the source, this is * host local communication. 
The hypervisor * may send vmci event datagrams to the host * itself, but it will never send datagrams to * an "outer host" through the guest device. */ if (has_host_device) { *route = VMCI_ROUTE_AS_HOST; return VMCI_SUCCESS; } else { return VMCI_ERROR_DEVICE_NOT_FOUND; } } if (!from_guest && has_guest_device) { /* If no source context then use the current. */ if (VMCI_INVALID_ID == src->context) src->context = vmci_get_context_id(); /* Send it from local client down to the host. */ *route = VMCI_ROUTE_AS_GUEST; return VMCI_SUCCESS; } /* * Otherwise we already received it from a guest and * it is destined for a local client on this host, or * it is from another local client on this host. We * must be acting as a host to service it. */ if (!has_host_device) return VMCI_ERROR_DEVICE_NOT_FOUND; if (VMCI_INVALID_ID == src->context) { /* * If it came from a guest then it must have a * valid context. Otherwise we can use the * host context. */ if (from_guest) return VMCI_ERROR_INVALID_ARGS; src->context = VMCI_HOST_CONTEXT_ID; } /* Route to local client. */ *route = VMCI_ROUTE_AS_HOST; return VMCI_SUCCESS; } /* * If we are acting as a host then this might be destined for * a guest. */ if (has_host_device) { /* It will have a context if it is meant for a guest. */ if (vmci_ctx_exists(dst->context)) { if (VMCI_INVALID_ID == src->context) { /* * If it came from a guest then it * must have a valid context. * Otherwise we can use the host * context. */ if (from_guest) return VMCI_ERROR_INVALID_ARGS; src->context = VMCI_HOST_CONTEXT_ID; } else if (VMCI_CONTEXT_IS_VM(src->context) && src->context != dst->context) { /* * VM to VM communication is not * allowed. Since we catch all * communication destined for the host * above, this must be destined for a * VM since there is a valid context. */ return VMCI_ERROR_DST_UNREACHABLE; } /* Pass it up to the guest. */ *route = VMCI_ROUTE_AS_HOST; return VMCI_SUCCESS; } else if (!has_guest_device) { /* * The host is attempting to reach a CID * without an active context, and we can't * send it down, since we have no guest * device. */ return VMCI_ERROR_DST_UNREACHABLE; } } /* * We must be a guest trying to send to another guest, which means * we need to send it down to the host. We do not filter out VM to * VM communication here, since we want to be able to use the guest * driver on older versions that do support VM to VM communication. */ if (!has_guest_device) { /* * Ending up here means we have neither guest nor host * device. */ return VMCI_ERROR_DEVICE_NOT_FOUND; } /* If no source context then use the current context. */ if (VMCI_INVALID_ID == src->context) src->context = vmci_get_context_id(); /* * Send it from local client down to the host, which will * route it to the other guest for us. */ *route = VMCI_ROUTE_AS_GUEST; return VMCI_SUCCESS; }
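vmci_route() only makes the routing decision; the caller is expected to act on *route. A hedged sketch of such a caller (send_datagram(), dispatch_to_guest_device() and dispatch_to_host_context() are hypothetical helpers, not part of the VMCI driver):

/* Hypothetical dispatch helpers, assumed to exist only for this sketch. */
int dispatch_to_guest_device(const struct vmci_handle *src,
			     const struct vmci_handle *dst);
int dispatch_to_host_context(const struct vmci_handle *src,
			     const struct vmci_handle *dst);

static int send_datagram(struct vmci_handle *src, const struct vmci_handle *dst,
			 bool from_guest)
{
	enum vmci_route route;
	int result;

	result = vmci_route(src, dst, from_guest, &route);
	if (result < VMCI_SUCCESS)
		return result;			/* e.g. VMCI_ERROR_DST_UNREACHABLE */

	switch (route) {
	case VMCI_ROUTE_AS_GUEST:		/* send down through the guest device */
		return dispatch_to_guest_device(src, dst);
	case VMCI_ROUTE_AS_HOST:		/* deliver to a local context on the host */
		return dispatch_to_host_context(src, dst);
	default:
		return VMCI_ERROR_DST_UNREACHABLE;
	}
}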
871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net> * * Development of this code funded by Astaro AG (http://www.astaro.com/) */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <linux/list.h> #include <linux/log2.h> #include <linux/jhash.h> #include <linux/netlink.h> #include <linux/workqueue.h> #include <linux/rhashtable.h> #include <linux/netfilter.h> #include <linux/netfilter/nf_tables.h> #include <net/netfilter/nf_tables_core.h> /* We target a hash table size of 4, element hint is 75% of final size */ #define NFT_RHASH_ELEMENT_HINT 3 struct nft_rhash { struct rhashtable ht; struct delayed_work gc_work; u32 wq_gc_seq; }; struct nft_rhash_elem { struct nft_elem_priv priv; struct rhash_head node; struct llist_node walk_node; u32 wq_gc_seq; struct nft_set_ext ext; }; struct nft_rhash_cmp_arg { const struct nft_set *set; const u32 *key; u8 genmask; u64 tstamp; }; static inline u32 nft_rhash_key(const void *data, u32 len, u32 seed) { const struct nft_rhash_cmp_arg *arg = data; return jhash(arg->key, len, seed); } static inline u32 nft_rhash_obj(const void *data, u32 len, u32 seed) { const struct nft_rhash_elem *he = data; return jhash(nft_set_ext_key(&he->ext), len, seed); } static inline int nft_rhash_cmp(struct rhashtable_compare_arg *arg, const void *ptr) { const struct nft_rhash_cmp_arg *x = arg->key; const struct nft_rhash_elem *he = ptr; if (memcmp(nft_set_ext_key(&he->ext), x->key, x->set->klen)) return 1; if (nft_set_elem_is_dead(&he->ext)) return 1; if (__nft_set_elem_expired(&he->ext, x->tstamp)) return 1; if (!nft_set_elem_active(&he->ext, x->genmask)) return 1; return 0; } static const struct rhashtable_params nft_rhash_params = { .head_offset = offsetof(struct nft_rhash_elem, node), .hashfn = nft_rhash_key, .obj_hashfn = nft_rhash_obj, .obj_cmpfn = nft_rhash_cmp, .automatic_shrinking = true, }; INDIRECT_CALLABLE_SCOPE const struct nft_set_ext * nft_rhash_lookup(const struct net *net, const struct nft_set *set, const u32 *key) { struct nft_rhash *priv = nft_set_priv(set); const struct nft_rhash_elem *he; struct nft_rhash_cmp_arg arg = { .genmask = nft_genmask_cur(net), .set = set, .key = key, .tstamp = get_jiffies_64(), }; he = rhashtable_lookup(&priv->ht, &arg, nft_rhash_params); if (he != NULL) return &he->ext; return NULL; } static struct nft_elem_priv * nft_rhash_get(const struct net *net, const struct nft_set *set, const struct nft_set_elem *elem, unsigned int flags) { struct nft_rhash *priv = nft_set_priv(set); struct nft_rhash_elem *he; struct nft_rhash_cmp_arg arg = { .genmask = nft_genmask_cur(net), .set = set, .key = elem->key.val.data, .tstamp = get_jiffies_64(), }; he = rhashtable_lookup(&priv->ht, &arg, nft_rhash_params); if (he != NULL) return &he->priv; return ERR_PTR(-ENOENT); } static const struct nft_set_ext * nft_rhash_update(struct nft_set *set, const u32 *key, const struct nft_expr *expr, struct nft_regs *regs) { struct nft_rhash *priv = nft_set_priv(set); struct nft_rhash_elem *he, *prev; struct nft_elem_priv *elem_priv; struct nft_rhash_cmp_arg arg = { .genmask = NFT_GENMASK_ANY, .set = set, .key = key, .tstamp = get_jiffies_64(), }; he = rhashtable_lookup(&priv->ht, &arg, nft_rhash_params); if (he != NULL) goto out; elem_priv = nft_dynset_new(set, expr, regs); if (!elem_priv) goto err1; he = nft_elem_priv_cast(elem_priv); init_llist_node(&he->walk_node); prev = 
rhashtable_lookup_get_insert_key(&priv->ht, &arg, &he->node, nft_rhash_params); if (IS_ERR(prev)) goto err2; /* Another cpu may race to insert the element with the same key */ if (prev) { nft_set_elem_destroy(set, &he->priv, true); atomic_dec(&set->nelems); he = prev; } out: return &he->ext; err2: nft_set_elem_destroy(set, &he->priv, true); atomic_dec(&set->nelems); err1: return NULL; } static int nft_rhash_insert(const struct net *net, const struct nft_set *set, const struct nft_set_elem *elem, struct nft_elem_priv **elem_priv) { struct nft_rhash_elem *he = nft_elem_priv_cast(elem->priv); struct nft_rhash *priv = nft_set_priv(set); struct nft_rhash_cmp_arg arg = { .genmask = nft_genmask_next(net), .set = set, .key = elem->key.val.data, .tstamp = nft_net_tstamp(net), }; struct nft_rhash_elem *prev; init_llist_node(&he->walk_node); prev = rhashtable_lookup_get_insert_key(&priv->ht, &arg, &he->node, nft_rhash_params); if (IS_ERR(prev)) return PTR_ERR(prev); if (prev) { *elem_priv = &prev->priv; return -EEXIST; } return 0; } static void nft_rhash_activate(const struct net *net, const struct nft_set *set, struct nft_elem_priv *elem_priv) { struct nft_rhash_elem *he = nft_elem_priv_cast(elem_priv); nft_clear(net, &he->ext); } static void nft_rhash_flush(const struct net *net, const struct nft_set *set, struct nft_elem_priv *elem_priv) { struct nft_rhash_elem *he = nft_elem_priv_cast(elem_priv); nft_set_elem_change_active(net, set, &he->ext); } static struct nft_elem_priv * nft_rhash_deactivate(const struct net *net, const struct nft_set *set, const struct nft_set_elem *elem) { struct nft_rhash *priv = nft_set_priv(set); struct nft_rhash_elem *he; struct nft_rhash_cmp_arg arg = { .genmask = nft_genmask_next(net), .set = set, .key = elem->key.val.data, .tstamp = nft_net_tstamp(net), }; rcu_read_lock(); he = rhashtable_lookup(&priv->ht, &arg, nft_rhash_params); if (he) nft_set_elem_change_active(net, set, &he->ext); rcu_read_unlock(); return &he->priv; } static void nft_rhash_remove(const struct net *net, const struct nft_set *set, struct nft_elem_priv *elem_priv) { struct nft_rhash_elem *he = nft_elem_priv_cast(elem_priv); struct nft_rhash *priv = nft_set_priv(set); rhashtable_remove_fast(&priv->ht, &he->node, nft_rhash_params); } static bool nft_rhash_delete(const struct nft_set *set, const u32 *key) { struct nft_rhash *priv = nft_set_priv(set); struct nft_rhash_cmp_arg arg = { .genmask = NFT_GENMASK_ANY, .set = set, .key = key, }; struct nft_rhash_elem *he; he = rhashtable_lookup(&priv->ht, &arg, nft_rhash_params); if (he == NULL) return false; nft_set_elem_dead(&he->ext); return true; } static void nft_rhash_walk_ro(const struct nft_ctx *ctx, struct nft_set *set, struct nft_set_iter *iter) { struct nft_rhash *priv = nft_set_priv(set); struct rhashtable_iter hti; struct nft_rhash_elem *he; rhashtable_walk_enter(&priv->ht, &hti); rhashtable_walk_start(&hti); while ((he = rhashtable_walk_next(&hti))) { if (IS_ERR(he)) { if (PTR_ERR(he) != -EAGAIN) { iter->err = PTR_ERR(he); break; } continue; } if (iter->count < iter->skip) goto cont; iter->err = iter->fn(ctx, set, iter, &he->priv); if (iter->err < 0) break; cont: iter->count++; } rhashtable_walk_stop(&hti); rhashtable_walk_exit(&hti); } static void nft_rhash_walk_update(const struct nft_ctx *ctx, struct nft_set *set, struct nft_set_iter *iter) { struct nft_rhash *priv = nft_set_priv(set); struct nft_rhash_elem *he, *tmp; struct llist_node *first_node; struct rhashtable_iter hti; LLIST_HEAD(walk_list); 
lockdep_assert_held(&nft_pernet(ctx->net)->commit_mutex); if (set->in_update_walk) { /* This can happen with bogus rulesets during ruleset validation * when a verdict map causes a jump back to the same map. * * Without this extra check the walk_next loop below will see * elems on the callers walk_list and skip (not validate) them. */ iter->err = -EMLINK; return; } /* walk happens under RCU. * * We create a snapshot list so ->iter callback can sleep. * commit_mutex is held, elements can ... * .. be added in parallel from dataplane (dynset) * .. be marked as dead in parallel from dataplane (dynset). * .. be queued for removal in parallel (gc timeout). * .. not be freed: transaction mutex is held. */ rhashtable_walk_enter(&priv->ht, &hti); rhashtable_walk_start(&hti); while ((he = rhashtable_walk_next(&hti))) { if (IS_ERR(he)) { if (PTR_ERR(he) != -EAGAIN) { iter->err = PTR_ERR(he); break; } continue; } /* rhashtable resized during walk, skip */ if (llist_on_list(&he->walk_node)) continue; llist_add(&he->walk_node, &walk_list); } rhashtable_walk_stop(&hti); rhashtable_walk_exit(&hti); first_node = __llist_del_all(&walk_list); set->in_update_walk = true; llist_for_each_entry_safe(he, tmp, first_node, walk_node) { if (iter->err == 0) { iter->err = iter->fn(ctx, set, iter, &he->priv); if (iter->err == 0) iter->count++; } /* all entries must be cleared again, else next ->walk iteration * will skip entries. */ init_llist_node(&he->walk_node); } set->in_update_walk = false; } static void nft_rhash_walk(const struct nft_ctx *ctx, struct nft_set *set, struct nft_set_iter *iter) { switch (iter->type) { case NFT_ITER_UPDATE: /* only relevant for netlink dumps which use READ type */ WARN_ON_ONCE(iter->skip != 0); nft_rhash_walk_update(ctx, set, iter); break; case NFT_ITER_READ: nft_rhash_walk_ro(ctx, set, iter); break; default: iter->err = -EINVAL; WARN_ON_ONCE(1); break; } } static bool nft_rhash_expr_needs_gc_run(const struct nft_set *set, struct nft_set_ext *ext) { struct nft_set_elem_expr *elem_expr = nft_set_ext_expr(ext); struct nft_expr *expr; u32 size; nft_setelem_expr_foreach(expr, elem_expr, size) { if (expr->ops->gc && expr->ops->gc(read_pnet(&set->net), expr) && set->flags & NFT_SET_EVAL) return true; } return false; } static void nft_rhash_gc(struct work_struct *work) { struct nftables_pernet *nft_net; struct nft_set *set; struct nft_rhash_elem *he; struct nft_rhash *priv; struct rhashtable_iter hti; struct nft_trans_gc *gc; struct net *net; u32 gc_seq; priv = container_of(work, struct nft_rhash, gc_work.work); set = nft_set_container_of(priv); net = read_pnet(&set->net); nft_net = nft_pernet(net); gc_seq = READ_ONCE(nft_net->gc_seq); if (nft_set_gc_is_pending(set)) goto done; gc = nft_trans_gc_alloc(set, gc_seq, GFP_KERNEL); if (!gc) goto done; /* Elements never collected use a zero gc worker sequence number. */ if (unlikely(++priv->wq_gc_seq == 0)) priv->wq_gc_seq++; rhashtable_walk_enter(&priv->ht, &hti); rhashtable_walk_start(&hti); while ((he = rhashtable_walk_next(&hti))) { if (IS_ERR(he)) { nft_trans_gc_destroy(gc); gc = NULL; goto try_later; } /* Ruleset has been updated, try later. */ if (READ_ONCE(nft_net->gc_seq) != gc_seq) { nft_trans_gc_destroy(gc); gc = NULL; goto try_later; } /* rhashtable walk is unstable, already seen in this gc run? * Then, skip this element. In case of (unlikely) sequence * wraparound and stale element wq_gc_seq, next gc run will * just find this expired element. 
*/ if (he->wq_gc_seq == priv->wq_gc_seq) continue; if (nft_set_elem_is_dead(&he->ext)) goto dead_elem; if (nft_set_ext_exists(&he->ext, NFT_SET_EXT_EXPRESSIONS) && nft_rhash_expr_needs_gc_run(set, &he->ext)) goto needs_gc_run; if (!nft_set_elem_expired(&he->ext)) continue; needs_gc_run: nft_set_elem_dead(&he->ext); dead_elem: gc = nft_trans_gc_queue_async(gc, gc_seq, GFP_ATOMIC); if (!gc) goto try_later; /* annotate gc sequence for this attempt. */ he->wq_gc_seq = priv->wq_gc_seq; nft_trans_gc_elem_add(gc, he); } gc = nft_trans_gc_catchall_async(gc, gc_seq); try_later: /* catchall list iteration requires rcu read side lock. */ rhashtable_walk_stop(&hti); rhashtable_walk_exit(&hti); if (gc) nft_trans_gc_queue_async_done(gc); done: queue_delayed_work(system_power_efficient_wq, &priv->gc_work, nft_set_gc_interval(set)); } static u64 nft_rhash_privsize(const struct nlattr * const nla[], const struct nft_set_desc *desc) { return sizeof(struct nft_rhash); } static void nft_rhash_gc_init(const struct nft_set *set) { struct nft_rhash *priv = nft_set_priv(set); queue_delayed_work(system_power_efficient_wq, &priv->gc_work, nft_set_gc_interval(set)); } static int nft_rhash_init(const struct nft_set *set, const struct nft_set_desc *desc, const struct nlattr * const tb[]) { struct nft_rhash *priv = nft_set_priv(set); struct rhashtable_params params = nft_rhash_params; int err; BUILD_BUG_ON(offsetof(struct nft_rhash_elem, priv) != 0); params.nelem_hint = desc->size ?: NFT_RHASH_ELEMENT_HINT; params.key_len = set->klen; err = rhashtable_init(&priv->ht, &params); if (err < 0) return err; INIT_DEFERRABLE_WORK(&priv->gc_work, nft_rhash_gc); if (set->flags & (NFT_SET_TIMEOUT | NFT_SET_EVAL)) nft_rhash_gc_init(set); return 0; } struct nft_rhash_ctx { const struct nft_ctx ctx; const struct nft_set *set; }; static void nft_rhash_elem_destroy(void *ptr, void *arg) { struct nft_rhash_ctx *rhash_ctx = arg; struct nft_rhash_elem *he = ptr; nf_tables_set_elem_destroy(&rhash_ctx->ctx, rhash_ctx->set, &he->priv); } static void nft_rhash_destroy(const struct nft_ctx *ctx, const struct nft_set *set) { struct nft_rhash *priv = nft_set_priv(set); struct nft_rhash_ctx rhash_ctx = { .ctx = *ctx, .set = set, }; cancel_delayed_work_sync(&priv->gc_work); rhashtable_free_and_destroy(&priv->ht, nft_rhash_elem_destroy, (void *)&rhash_ctx); } /* Number of buckets is stored in u32, so cap our result to 1U<<31 */ #define NFT_MAX_BUCKETS (1U << 31) static u32 nft_hash_buckets(u32 size) { u64 val = div_u64((u64)size * 4, 3); if (val >= NFT_MAX_BUCKETS) return NFT_MAX_BUCKETS; return roundup_pow_of_two(val); } static bool nft_rhash_estimate(const struct nft_set_desc *desc, u32 features, struct nft_set_estimate *est) { est->size = ~0; est->lookup = NFT_SET_CLASS_O_1; est->space = NFT_SET_CLASS_O_N; return true; } struct nft_hash { u32 seed; u32 buckets; struct hlist_head table[]; }; struct nft_hash_elem { struct nft_elem_priv priv; struct hlist_node node; struct nft_set_ext ext; }; INDIRECT_CALLABLE_SCOPE const struct nft_set_ext * nft_hash_lookup(const struct net *net, const struct nft_set *set, const u32 *key) { struct nft_hash *priv = nft_set_priv(set); u8 genmask = nft_genmask_cur(net); const struct nft_hash_elem *he; u32 hash; hash = jhash(key, set->klen, priv->seed); hash = reciprocal_scale(hash, priv->buckets); hlist_for_each_entry_rcu(he, &priv->table[hash], node) { if (!memcmp(nft_set_ext_key(&he->ext), key, set->klen) && nft_set_elem_active(&he->ext, genmask)) return &he->ext; } return NULL; } static struct nft_elem_priv * 
nft_hash_get(const struct net *net, const struct nft_set *set, const struct nft_set_elem *elem, unsigned int flags) { struct nft_hash *priv = nft_set_priv(set); u8 genmask = nft_genmask_cur(net); struct nft_hash_elem *he; u32 hash; hash = jhash(elem->key.val.data, set->klen, priv->seed); hash = reciprocal_scale(hash, priv->buckets); hlist_for_each_entry_rcu(he, &priv->table[hash], node) { if (!memcmp(nft_set_ext_key(&he->ext), elem->key.val.data, set->klen) && nft_set_elem_active(&he->ext, genmask)) return &he->priv; } return ERR_PTR(-ENOENT); } INDIRECT_CALLABLE_SCOPE const struct nft_set_ext * nft_hash_lookup_fast(const struct net *net, const struct nft_set *set, const u32 *key) { struct nft_hash *priv = nft_set_priv(set); u8 genmask = nft_genmask_cur(net); const struct nft_hash_elem *he; u32 hash, k1, k2; k1 = *key; hash = jhash_1word(k1, priv->seed); hash = reciprocal_scale(hash, priv->buckets); hlist_for_each_entry_rcu(he, &priv->table[hash], node) { k2 = *(u32 *)nft_set_ext_key(&he->ext)->data; if (k1 == k2 && nft_set_elem_active(&he->ext, genmask)) return &he->ext; } return NULL; } static u32 nft_jhash(const struct nft_set *set, const struct nft_hash *priv, const struct nft_set_ext *ext) { const struct nft_data *key = nft_set_ext_key(ext); u32 hash, k1; if (set->klen == 4) { k1 = *(u32 *)key; hash = jhash_1word(k1, priv->seed); } else { hash = jhash(key, set->klen, priv->seed); } hash = reciprocal_scale(hash, priv->buckets); return hash; } static int nft_hash_insert(const struct net *net, const struct nft_set *set, const struct nft_set_elem *elem, struct nft_elem_priv **elem_priv) { struct nft_hash_elem *this = nft_elem_priv_cast(elem->priv), *he; struct nft_hash *priv = nft_set_priv(set); u8 genmask = nft_genmask_next(net); u32 hash; hash = nft_jhash(set, priv, &this->ext); hlist_for_each_entry(he, &priv->table[hash], node) { if (!memcmp(nft_set_ext_key(&this->ext), nft_set_ext_key(&he->ext), set->klen) && nft_set_elem_active(&he->ext, genmask)) { *elem_priv = &he->priv; return -EEXIST; } } hlist_add_head_rcu(&this->node, &priv->table[hash]); return 0; } static void nft_hash_activate(const struct net *net, const struct nft_set *set, struct nft_elem_priv *elem_priv) { struct nft_hash_elem *he = nft_elem_priv_cast(elem_priv); nft_clear(net, &he->ext); } static void nft_hash_flush(const struct net *net, const struct nft_set *set, struct nft_elem_priv *elem_priv) { struct nft_hash_elem *he = nft_elem_priv_cast(elem_priv); nft_set_elem_change_active(net, set, &he->ext); } static struct nft_elem_priv * nft_hash_deactivate(const struct net *net, const struct nft_set *set, const struct nft_set_elem *elem) { struct nft_hash_elem *this = nft_elem_priv_cast(elem->priv), *he; struct nft_hash *priv = nft_set_priv(set); u8 genmask = nft_genmask_next(net); u32 hash; hash = nft_jhash(set, priv, &this->ext); hlist_for_each_entry(he, &priv->table[hash], node) { if (!memcmp(nft_set_ext_key(&he->ext), &elem->key.val, set->klen) && nft_set_elem_active(&he->ext, genmask)) { nft_set_elem_change_active(net, set, &he->ext); return &he->priv; } } return NULL; } static void nft_hash_remove(const struct net *net, const struct nft_set *set, struct nft_elem_priv *elem_priv) { struct nft_hash_elem *he = nft_elem_priv_cast(elem_priv); hlist_del_rcu(&he->node); } static void nft_hash_walk(const struct nft_ctx *ctx, struct nft_set *set, struct nft_set_iter *iter) { struct nft_hash *priv = nft_set_priv(set); struct nft_hash_elem *he; int i; for (i = 0; i < priv->buckets; i++) { hlist_for_each_entry_rcu(he, 
&priv->table[i], node, lockdep_is_held(&nft_pernet(ctx->net)->commit_mutex)) { if (iter->count < iter->skip) goto cont; iter->err = iter->fn(ctx, set, iter, &he->priv); if (iter->err < 0) return; cont: iter->count++; } } } static u64 nft_hash_privsize(const struct nlattr * const nla[], const struct nft_set_desc *desc) { return sizeof(struct nft_hash) + (u64)nft_hash_buckets(desc->size) * sizeof(struct hlist_head); } static int nft_hash_init(const struct nft_set *set, const struct nft_set_desc *desc, const struct nlattr * const tb[]) { struct nft_hash *priv = nft_set_priv(set); priv->buckets = nft_hash_buckets(desc->size); get_random_bytes(&priv->seed, sizeof(priv->seed)); return 0; } static void nft_hash_destroy(const struct nft_ctx *ctx, const struct nft_set *set) { struct nft_hash *priv = nft_set_priv(set); struct nft_hash_elem *he; struct hlist_node *next; int i; for (i = 0; i < priv->buckets; i++) { hlist_for_each_entry_safe(he, next, &priv->table[i], node) { hlist_del_rcu(&he->node); nf_tables_set_elem_destroy(ctx, set, &he->priv); } } } static bool nft_hash_estimate(const struct nft_set_desc *desc, u32 features, struct nft_set_estimate *est) { if (!desc->size) return false; if (desc->klen == 4) return false; est->size = sizeof(struct nft_hash) + (u64)nft_hash_buckets(desc->size) * sizeof(struct hlist_head) + (u64)desc->size * sizeof(struct nft_hash_elem); est->lookup = NFT_SET_CLASS_O_1; est->space = NFT_SET_CLASS_O_N; return true; } static bool nft_hash_fast_estimate(const struct nft_set_desc *desc, u32 features, struct nft_set_estimate *est) { if (!desc->size) return false; if (desc->klen != 4) return false; est->size = sizeof(struct nft_hash) + (u64)nft_hash_buckets(desc->size) * sizeof(struct hlist_head) + (u64)desc->size * sizeof(struct nft_hash_elem); est->lookup = NFT_SET_CLASS_O_1; est->space = NFT_SET_CLASS_O_N; return true; } const struct nft_set_type nft_set_rhash_type = { .features = NFT_SET_MAP | NFT_SET_OBJECT | NFT_SET_TIMEOUT | NFT_SET_EVAL, .ops = { .privsize = nft_rhash_privsize, .elemsize = offsetof(struct nft_rhash_elem, ext), .estimate = nft_rhash_estimate, .init = nft_rhash_init, .gc_init = nft_rhash_gc_init, .destroy = nft_rhash_destroy, .insert = nft_rhash_insert, .activate = nft_rhash_activate, .deactivate = nft_rhash_deactivate, .flush = nft_rhash_flush, .remove = nft_rhash_remove, .lookup = nft_rhash_lookup, .update = nft_rhash_update, .delete = nft_rhash_delete, .walk = nft_rhash_walk, .get = nft_rhash_get, }, }; const struct nft_set_type nft_set_hash_type = { .features = NFT_SET_MAP | NFT_SET_OBJECT, .ops = { .privsize = nft_hash_privsize, .elemsize = offsetof(struct nft_hash_elem, ext), .estimate = nft_hash_estimate, .init = nft_hash_init, .destroy = nft_hash_destroy, .insert = nft_hash_insert, .activate = nft_hash_activate, .deactivate = nft_hash_deactivate, .flush = nft_hash_flush, .remove = nft_hash_remove, .lookup = nft_hash_lookup, .walk = nft_hash_walk, .get = nft_hash_get, }, }; const struct nft_set_type nft_set_hash_fast_type = { .features = NFT_SET_MAP | NFT_SET_OBJECT, .ops = { .privsize = nft_hash_privsize, .elemsize = offsetof(struct nft_hash_elem, ext), .estimate = nft_hash_fast_estimate, .init = nft_hash_init, .destroy = nft_hash_destroy, .insert = nft_hash_insert, .activate = nft_hash_activate, .deactivate = nft_hash_deactivate, .flush = nft_hash_flush, .remove = nft_hash_remove, .lookup = nft_hash_lookup_fast, .walk = nft_hash_walk, .get = nft_hash_get, }, };
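nft_hash_buckets() above sizes the fixed hash table for roughly a 3/4 maximum load factor: the expected element count is scaled by 4/3, rounded up to the next power of two and capped at NFT_MAX_BUCKETS because the bucket count is stored in a u32. A minimal userspace sketch of the same arithmetic, assuming nothing beyond standard C (the names below are illustrative, not kernel symbols):

#include <stdint.h>
#include <stdio.h>

/* Illustrative re-implementation of the sizing rule used by
 * nft_hash_buckets(): scale by 4/3, round up to a power of two,
 * cap at 1U << 31. Not kernel code. */
#define EXAMPLE_MAX_BUCKETS (1U << 31)

static uint32_t example_hash_buckets(uint32_t size)
{
	uint64_t val = (uint64_t)size * 4 / 3;
	uint64_t buckets = 1;

	if (val >= EXAMPLE_MAX_BUCKETS)
		return EXAMPLE_MAX_BUCKETS;
	while (buckets < val)
		buckets <<= 1;	/* roundup_pow_of_two() equivalent */
	return (uint32_t)buckets;
}

int main(void)
{
	/* A set described for 1000 elements gets 2048 buckets, so the
	 * load factor stays at or below 3/4 even when full. */
	printf("%u -> %u\n", 1000u, example_hash_buckets(1000u));
	printf("%u -> %u\n", 65536u, example_hash_buckets(65536u));
	return 0;
}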
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * SR-IPv6 implementation * * Author: * David Lebrun <david.lebrun@uclouvain.be> */ #ifndef _NET_SEG6_H #define _NET_SEG6_H #include <linux/net.h> #include <linux/ipv6.h> #include <linux/seg6.h> #include <linux/rhashtable-types.h> static inline void update_csum_diff4(struct sk_buff *skb, __be32 from, __be32 to) { __be32 diff[] = { ~from, to }; skb->csum = ~csum_partial((char *)diff, sizeof(diff), ~skb->csum); } static inline void update_csum_diff16(struct sk_buff *skb, __be32 *from, __be32 *to) { __be32 diff[] = { ~from[0], ~from[1], ~from[2], ~from[3], to[0], to[1], to[2], to[3], }; skb->csum = ~csum_partial((char *)diff, sizeof(diff), ~skb->csum); } struct seg6_pernet_data { struct mutex lock; struct in6_addr __rcu *tun_src; #ifdef CONFIG_IPV6_SEG6_HMAC struct rhashtable hmac_infos; #endif }; static inline struct seg6_pernet_data *seg6_pernet(struct net *net) { #if IS_ENABLED(CONFIG_IPV6) return net->ipv6.seg6_data; #else return NULL; #endif } extern int seg6_init(void); extern void seg6_exit(void); #ifdef CONFIG_IPV6_SEG6_LWTUNNEL extern int seg6_iptunnel_init(void); extern void seg6_iptunnel_exit(void); extern int seg6_local_init(void); extern void seg6_local_exit(void); #else static inline int seg6_iptunnel_init(void) { return 0; } static inline void seg6_iptunnel_exit(void) {} static inline int seg6_local_init(void) { return 0; } static inline void seg6_local_exit(void) {} #endif extern bool seg6_validate_srh(struct ipv6_sr_hdr *srh, int len, bool reduced); extern struct ipv6_sr_hdr *seg6_get_srh(struct sk_buff *skb, int flags); extern void seg6_icmp_srh(struct sk_buff *skb, struct inet6_skb_parm *opt); extern int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto); extern int seg6_do_srh_inline(struct sk_buff *skb, struct ipv6_sr_hdr *osrh); extern int seg6_lookup_nexthop(struct sk_buff *skb, struct in6_addr *nhaddr, u32 tbl_id); /* If the packet which invoked an ICMP error contains an SRH return * the true destination address from within the SRH, otherwise use the * destination address in the IP header. */ static inline const struct in6_addr *seg6_get_daddr(struct sk_buff *skb, struct inet6_skb_parm *opt) { struct ipv6_sr_hdr *srh; if (opt->flags & IP6SKB_SEG6) { srh = (struct ipv6_sr_hdr *)(skb->data + opt->srhoff); return &srh->segments[0]; } return NULL; } #endif
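The two checksum helpers at the top of this header apply an incremental, RFC 1624-style adjustment: feeding the one's-complement of the old words plus the new words into csum_partial() over the inverted running sum updates skb->csum without re-summing the whole packet. A hedged usage sketch, assuming a caller in a file that already pulls in <linux/skbuff.h> and <linux/ipv6.h> and that rewrites the IPv6 destination address of a CHECKSUM_COMPLETE skb (the function and variable names are illustrative, not taken from the seg6 code):

/* Illustrative only: swap in a new IPv6 destination address and keep
 * skb->csum consistent via update_csum_diff16() before the bytes change. */
static void example_rewrite_daddr(struct sk_buff *skb, struct ipv6hdr *hdr,
				  struct in6_addr *new_daddr)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		update_csum_diff16(skb, (__be32 *)&hdr->daddr,
				   (__be32 *)new_daddr);
	hdr->daddr = *new_daddr;
}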
// SPDX-License-Identifier: GPL-2.0-or-later /* mpi-sub-ui.c - Subtract an unsigned integer from an MPI. * * Copyright 1991, 1993, 1994, 1996, 1999-2002, 2004, 2012, 2013, 2015 * Free Software Foundation, Inc. * * This file was based on the GNU MP Library source file: * https://gmplib.org/repo/gmp-6.2/file/510b83519d1c/mpz/aors_ui.h * * The GNU MP Library is free software; you can redistribute it and/or modify * it under the terms of either: * * * the GNU Lesser General Public License as published by the Free * Software Foundation; either version 3 of the License, or (at your * option) any later version. * * or * * * the GNU General Public License as published by the Free Software * Foundation; either version 2 of the License, or (at your option) any * later version. * * or both in parallel, as here. * * The GNU MP Library is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received copies of the GNU General Public License and the * GNU Lesser General Public License along with the GNU MP Library. If not, * see https://www.gnu.org/licenses/. */ #include <linux/export.h> #include "mpi-internal.h" int mpi_sub_ui(MPI w, MPI u, unsigned long vval) { if (u->nlimbs == 0) { if (mpi_resize(w, 1) < 0) return -ENOMEM; w->d[0] = vval; w->nlimbs = (vval != 0); w->sign = (vval != 0); return 0; } /* If not space for W (and possible carry), increase space. */ if (mpi_resize(w, u->nlimbs + 1)) return -ENOMEM; if (u->sign) { mpi_limb_t cy; cy = mpihelp_add_1(w->d, u->d, u->nlimbs, (mpi_limb_t) vval); w->d[u->nlimbs] = cy; w->nlimbs = u->nlimbs + cy; w->sign = 1; } else { /* The signs are different. Need exact comparison to determine * which operand to subtract from which. */ if (u->nlimbs == 1 && u->d[0] < vval) { w->d[0] = vval - u->d[0]; w->nlimbs = 1; w->sign = 1; } else { mpihelp_sub_1(w->d, u->d, u->nlimbs, (mpi_limb_t) vval); /* Size can decrease with at most one limb. */ w->nlimbs = (u->nlimbs - (w->d[u->nlimbs - 1] == 0)); w->sign = 0; } } mpi_normalize(w); return 0; } EXPORT_SYMBOL_GPL(mpi_sub_ui);
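The sign handling in mpi_sub_ui() above reduces to three cases for w = u - vval, with vval always taken as non-negative. A few worked values (illustrative only) showing which branch fires:

/*
 * u =  0, vval = 7   ->  w = -7  (u->nlimbs == 0: store the magnitude,
 *                                 set sign and nlimbs only if vval != 0)
 * u = -5, vval = 3   ->  w = -8  (u negative: magnitudes add via
 *                                 mpihelp_add_1(), possible carry limb)
 * u =  2, vval = 5   ->  w = -3  (single-limb u smaller than vval:
 *                                 subtract the other way round, set sign)
 * u =  9, vval = 4   ->  w =  5  (plain mpihelp_sub_1(); the top limb may
 *                                 become zero, so nlimbs can shrink by one)
 */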
/* FUSE: Filesystem in Userspace Copyright (C) 2001-2008 Miklos Szeredi <miklos@szeredi.hu> This program can be distributed under the terms of the GNU GPL. See the file COPYING. */ #include "fuse_i.h" #include "fuse_dev_i.h" #include "dev_uring_i.h" #include <linux/dax.h> #include <linux/pagemap.h> #include <linux/slab.h> #include <linux/file.h> #include <linux/seq_file.h> #include <linux/init.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/fs_context.h> #include <linux/fs_parser.h> #include <linux/statfs.h> #include <linux/random.h> #include <linux/sched.h> #include <linux/exportfs.h> #include <linux/posix_acl.h> #include <linux/pid_namespace.h> #include <uapi/linux/magic.h> MODULE_AUTHOR("Miklos Szeredi <miklos@szeredi.hu>"); MODULE_DESCRIPTION("Filesystem in Userspace"); MODULE_LICENSE("GPL"); static struct kmem_cache *fuse_inode_cachep; struct list_head fuse_conn_list; DEFINE_MUTEX(fuse_mutex); DECLARE_WAIT_QUEUE_HEAD(fuse_dev_waitq); static int set_global_limit(const char *val, const struct kernel_param *kp); unsigned int fuse_max_pages_limit = 256; /* default is no timeout */ unsigned int fuse_default_req_timeout; unsigned int fuse_max_req_timeout; unsigned int max_user_bgreq; module_param_call(max_user_bgreq, set_global_limit, param_get_uint, &max_user_bgreq, 0644); __MODULE_PARM_TYPE(max_user_bgreq, "uint"); MODULE_PARM_DESC(max_user_bgreq, "Global limit for the maximum number of backgrounded requests an " "unprivileged user can set"); unsigned int max_user_congthresh; module_param_call(max_user_congthresh, set_global_limit, param_get_uint, &max_user_congthresh, 0644); __MODULE_PARM_TYPE(max_user_congthresh, "uint"); MODULE_PARM_DESC(max_user_congthresh, "Global limit for the maximum congestion threshold an " "unprivileged user can set"); #define FUSE_DEFAULT_BLKSIZE 512 /** Maximum number of outstanding background requests */ #define FUSE_DEFAULT_MAX_BACKGROUND 12 /** Congestion starts at 75% of maximum */ #define FUSE_DEFAULT_CONGESTION_THRESHOLD (FUSE_DEFAULT_MAX_BACKGROUND * 3 / 4) #ifdef CONFIG_BLOCK static struct file_system_type fuseblk_fs_type; #endif struct fuse_forget_link *fuse_alloc_forget(void) { return kzalloc(sizeof(struct fuse_forget_link), GFP_KERNEL_ACCOUNT); } static struct fuse_submount_lookup *fuse_alloc_submount_lookup(void) { struct fuse_submount_lookup *sl; sl = kzalloc(sizeof(struct fuse_submount_lookup), GFP_KERNEL_ACCOUNT); if (!sl) return NULL; sl->forget = fuse_alloc_forget(); if (!sl->forget) goto out_free; return sl; out_free: kfree(sl); return NULL; } static struct inode *fuse_alloc_inode(struct super_block *sb) { struct fuse_inode *fi; fi = alloc_inode_sb(sb, fuse_inode_cachep, GFP_KERNEL); if (!fi) return
NULL; /* Initialize private data (i.e. everything except fi->inode) */ BUILD_BUG_ON(offsetof(struct fuse_inode, inode) != 0); memset((void *) fi + sizeof(fi->inode), 0, sizeof(*fi) - sizeof(fi->inode)); fi->inval_mask = ~0; mutex_init(&fi->mutex); spin_lock_init(&fi->lock); fi->forget = fuse_alloc_forget(); if (!fi->forget) goto out_free; if (IS_ENABLED(CONFIG_FUSE_DAX) && !fuse_dax_inode_alloc(sb, fi)) goto out_free_forget; if (IS_ENABLED(CONFIG_FUSE_PASSTHROUGH)) fuse_inode_backing_set(fi, NULL); return &fi->inode; out_free_forget: kfree(fi->forget); out_free: kmem_cache_free(fuse_inode_cachep, fi); return NULL; } static void fuse_free_inode(struct inode *inode) { struct fuse_inode *fi = get_fuse_inode(inode); mutex_destroy(&fi->mutex); kfree(fi->forget); #ifdef CONFIG_FUSE_DAX kfree(fi->dax); #endif if (IS_ENABLED(CONFIG_FUSE_PASSTHROUGH)) fuse_backing_put(fuse_inode_backing(fi)); kmem_cache_free(fuse_inode_cachep, fi); } static void fuse_cleanup_submount_lookup(struct fuse_conn *fc, struct fuse_submount_lookup *sl) { if (!refcount_dec_and_test(&sl->count)) return; fuse_queue_forget(fc, sl->forget, sl->nodeid, 1); sl->forget = NULL; kfree(sl); } static void fuse_evict_inode(struct inode *inode) { struct fuse_inode *fi = get_fuse_inode(inode); /* Will write inode on close/munmap and in all other dirtiers */ WARN_ON(inode_state_read_once(inode) & I_DIRTY_INODE); if (FUSE_IS_DAX(inode)) dax_break_layout_final(inode); truncate_inode_pages_final(&inode->i_data); clear_inode(inode); if (inode->i_sb->s_flags & SB_ACTIVE) { struct fuse_conn *fc = get_fuse_conn(inode); if (FUSE_IS_DAX(inode)) fuse_dax_inode_cleanup(inode); if (fi->nlookup) { fuse_queue_forget(fc, fi->forget, fi->nodeid, fi->nlookup); fi->forget = NULL; } if (fi->submount_lookup) { fuse_cleanup_submount_lookup(fc, fi->submount_lookup); fi->submount_lookup = NULL; } /* * Evict of non-deleted inode may race with outstanding * LOOKUP/READDIRPLUS requests and result in inconsistency when * the request finishes. Deal with that here by bumping a * counter that can be compared to the starting value. */ if (inode->i_nlink > 0) atomic64_inc(&fc->evict_ctr); } if (S_ISREG(inode->i_mode) && !fuse_is_bad(inode)) { WARN_ON(fi->iocachectr != 0); WARN_ON(!list_empty(&fi->write_files)); WARN_ON(!list_empty(&fi->queued_writes)); } } static int fuse_reconfigure(struct fs_context *fsc) { struct super_block *sb = fsc->root->d_sb; sync_filesystem(sb); if (fsc->sb_flags & SB_MANDLOCK) return -EINVAL; return 0; } /* * ino_t is 32-bits on 32-bit arch. We have to squash the 64-bit value down * so that it will fit. */ static ino_t fuse_squash_ino(u64 ino64) { ino_t ino = (ino_t) ino64; if (sizeof(ino_t) < sizeof(u64)) ino ^= ino64 >> (sizeof(u64) - sizeof(ino_t)) * 8; return ino; } void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr, struct fuse_statx *sx, u64 attr_valid, u32 cache_mask, u64 evict_ctr) { struct fuse_conn *fc = get_fuse_conn(inode); struct fuse_inode *fi = get_fuse_inode(inode); lockdep_assert_held(&fi->lock); /* * Clear basic stats from invalid mask. * * Don't do this if this is coming from a fuse_iget() call and there * might have been a racing evict which would've invalidated the result * if the attr_version would've been preserved. 
* * !evict_ctr -> this is create * fi->attr_version != 0 -> this is not a new inode * evict_ctr == fuse_get_evict_ctr() -> no evicts while during request */ if (!evict_ctr || fi->attr_version || evict_ctr == fuse_get_evict_ctr(fc)) set_mask_bits(&fi->inval_mask, STATX_BASIC_STATS, 0); fi->attr_version = atomic64_inc_return(&fc->attr_version); fi->i_time = attr_valid; inode->i_ino = fuse_squash_ino(attr->ino); inode->i_mode = (inode->i_mode & S_IFMT) | (attr->mode & 07777); set_nlink(inode, attr->nlink); inode->i_uid = make_kuid(fc->user_ns, attr->uid); inode->i_gid = make_kgid(fc->user_ns, attr->gid); inode->i_blocks = attr->blocks; /* Sanitize nsecs */ attr->atimensec = min_t(u32, attr->atimensec, NSEC_PER_SEC - 1); attr->mtimensec = min_t(u32, attr->mtimensec, NSEC_PER_SEC - 1); attr->ctimensec = min_t(u32, attr->ctimensec, NSEC_PER_SEC - 1); inode_set_atime(inode, attr->atime, attr->atimensec); /* mtime from server may be stale due to local buffered write */ if (!(cache_mask & STATX_MTIME)) { inode_set_mtime(inode, attr->mtime, attr->mtimensec); } if (!(cache_mask & STATX_CTIME)) { inode_set_ctime(inode, attr->ctime, attr->ctimensec); } if (sx) { /* Sanitize nsecs */ sx->btime.tv_nsec = min_t(u32, sx->btime.tv_nsec, NSEC_PER_SEC - 1); /* * Btime has been queried, cache is valid (whether or not btime * is available or not) so clear STATX_BTIME from inval_mask. * * Availability of the btime attribute is indicated in * FUSE_I_BTIME */ set_mask_bits(&fi->inval_mask, STATX_BTIME, 0); if (sx->mask & STATX_BTIME) { set_bit(FUSE_I_BTIME, &fi->state); fi->i_btime.tv_sec = sx->btime.tv_sec; fi->i_btime.tv_nsec = sx->btime.tv_nsec; } } if (attr->blksize) fi->cached_i_blkbits = ilog2(attr->blksize); else fi->cached_i_blkbits = inode->i_sb->s_blocksize_bits; /* * Don't set the sticky bit in i_mode, unless we want the VFS * to check permissions. This prevents failures due to the * check in may_delete(). */ fi->orig_i_mode = inode->i_mode; if (!fc->default_permissions) inode->i_mode &= ~S_ISVTX; fi->orig_ino = attr->ino; /* * We are refreshing inode data and it is possible that another * client set suid/sgid or security.capability xattr. So clear * S_NOSEC. Ideally, we could have cleared it only if suid/sgid * was set or if security.capability xattr was set. But we don't * know if security.capability has been set or not. So clear it * anyway. Its less efficient but should be safe. */ inode->i_flags &= ~S_NOSEC; } u32 fuse_get_cache_mask(struct inode *inode) { struct fuse_conn *fc = get_fuse_conn(inode); if (!fc->writeback_cache || !S_ISREG(inode->i_mode)) return 0; return STATX_MTIME | STATX_CTIME | STATX_SIZE; } static void fuse_change_attributes_i(struct inode *inode, struct fuse_attr *attr, struct fuse_statx *sx, u64 attr_valid, u64 attr_version, u64 evict_ctr) { struct fuse_conn *fc = get_fuse_conn(inode); struct fuse_inode *fi = get_fuse_inode(inode); u32 cache_mask; loff_t oldsize; struct timespec64 old_mtime; spin_lock(&fi->lock); /* * In case of writeback_cache enabled, writes update mtime, ctime and * may update i_size. In these cases trust the cached value in the * inode. 
*/ cache_mask = fuse_get_cache_mask(inode); if (cache_mask & STATX_SIZE) attr->size = i_size_read(inode); if (cache_mask & STATX_MTIME) { attr->mtime = inode_get_mtime_sec(inode); attr->mtimensec = inode_get_mtime_nsec(inode); } if (cache_mask & STATX_CTIME) { attr->ctime = inode_get_ctime_sec(inode); attr->ctimensec = inode_get_ctime_nsec(inode); } if ((attr_version != 0 && fi->attr_version > attr_version) || test_bit(FUSE_I_SIZE_UNSTABLE, &fi->state)) { spin_unlock(&fi->lock); return; } old_mtime = inode_get_mtime(inode); fuse_change_attributes_common(inode, attr, sx, attr_valid, cache_mask, evict_ctr); oldsize = inode->i_size; /* * In case of writeback_cache enabled, the cached writes beyond EOF * extend local i_size without keeping userspace server in sync. So, * attr->size coming from server can be stale. We cannot trust it. */ if (!(cache_mask & STATX_SIZE)) i_size_write(inode, attr->size); spin_unlock(&fi->lock); if (!cache_mask && S_ISREG(inode->i_mode)) { bool inval = false; if (oldsize != attr->size) { truncate_pagecache(inode, attr->size); if (!fc->explicit_inval_data) inval = true; } else if (fc->auto_inval_data) { struct timespec64 new_mtime = { .tv_sec = attr->mtime, .tv_nsec = attr->mtimensec, }; /* * Auto inval mode also checks and invalidates if mtime * has changed. */ if (!timespec64_equal(&old_mtime, &new_mtime)) inval = true; } if (inval) invalidate_inode_pages2(inode->i_mapping); } if (IS_ENABLED(CONFIG_FUSE_DAX)) fuse_dax_dontcache(inode, attr->flags); } void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr, struct fuse_statx *sx, u64 attr_valid, u64 attr_version) { fuse_change_attributes_i(inode, attr, sx, attr_valid, attr_version, 0); } static void fuse_init_submount_lookup(struct fuse_submount_lookup *sl, u64 nodeid) { sl->nodeid = nodeid; refcount_set(&sl->count, 1); } static void fuse_init_inode(struct inode *inode, struct fuse_attr *attr, struct fuse_conn *fc) { inode->i_mode = attr->mode & S_IFMT; inode->i_size = attr->size; inode_set_mtime(inode, attr->mtime, attr->mtimensec); inode_set_ctime(inode, attr->ctime, attr->ctimensec); if (S_ISREG(inode->i_mode)) { fuse_init_common(inode); fuse_init_file_inode(inode, attr->flags); } else if (S_ISDIR(inode->i_mode)) fuse_init_dir(inode); else if (S_ISLNK(inode->i_mode)) fuse_init_symlink(inode); else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) || S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) { fuse_init_common(inode); init_special_inode(inode, inode->i_mode, new_decode_dev(attr->rdev)); } else BUG(); /* * Ensure that we don't cache acls for daemons without FUSE_POSIX_ACL * so they see the exact same behavior as before. */ if (!fc->posix_acl) inode->i_acl = inode->i_default_acl = ACL_DONT_CACHE; } static int fuse_inode_eq(struct inode *inode, void *_nodeidp) { u64 nodeid = *(u64 *) _nodeidp; if (get_node_id(inode) == nodeid) return 1; else return 0; } static int fuse_inode_set(struct inode *inode, void *_nodeidp) { u64 nodeid = *(u64 *) _nodeidp; get_fuse_inode(inode)->nodeid = nodeid; return 0; } struct inode *fuse_iget(struct super_block *sb, u64 nodeid, int generation, struct fuse_attr *attr, u64 attr_valid, u64 attr_version, u64 evict_ctr) { struct inode *inode; struct fuse_inode *fi; struct fuse_conn *fc = get_fuse_conn_super(sb); /* * Auto mount points get their node id from the submount root, which is * not a unique identifier within this filesystem. * * To avoid conflicts, do not place submount points into the inode hash * table. 
*/ if (fc->auto_submounts && (attr->flags & FUSE_ATTR_SUBMOUNT) && S_ISDIR(attr->mode)) { struct fuse_inode *fi; inode = new_inode(sb); if (!inode) return NULL; fuse_init_inode(inode, attr, fc); fi = get_fuse_inode(inode); fi->nodeid = nodeid; fi->submount_lookup = fuse_alloc_submount_lookup(); if (!fi->submount_lookup) { iput(inode); return NULL; } /* Sets nlookup = 1 on fi->submount_lookup->nlookup */ fuse_init_submount_lookup(fi->submount_lookup, nodeid); inode->i_flags |= S_AUTOMOUNT; goto done; } retry: inode = iget5_locked(sb, nodeid, fuse_inode_eq, fuse_inode_set, &nodeid); if (!inode) return NULL; if ((inode_state_read_once(inode) & I_NEW)) { inode->i_flags |= S_NOATIME; if (!fc->writeback_cache || !S_ISREG(attr->mode)) inode->i_flags |= S_NOCMTIME; inode->i_generation = generation; fuse_init_inode(inode, attr, fc); unlock_new_inode(inode); } else if (fuse_stale_inode(inode, generation, attr)) { /* nodeid was reused, any I/O on the old inode should fail */ fuse_make_bad(inode); if (inode != d_inode(sb->s_root)) { remove_inode_hash(inode); iput(inode); goto retry; } } fi = get_fuse_inode(inode); spin_lock(&fi->lock); fi->nlookup++; spin_unlock(&fi->lock); done: fuse_change_attributes_i(inode, attr, NULL, attr_valid, attr_version, evict_ctr); return inode; } struct inode *fuse_ilookup(struct fuse_conn *fc, u64 nodeid, struct fuse_mount **fm) { struct fuse_mount *fm_iter; struct inode *inode; WARN_ON(!rwsem_is_locked(&fc->killsb)); list_for_each_entry(fm_iter, &fc->mounts, fc_entry) { if (!fm_iter->sb) continue; inode = ilookup5(fm_iter->sb, nodeid, fuse_inode_eq, &nodeid); if (inode) { if (fm) *fm = fm_iter; return inode; } } return NULL; } int fuse_reverse_inval_inode(struct fuse_conn *fc, u64 nodeid, loff_t offset, loff_t len) { struct fuse_inode *fi; struct inode *inode; pgoff_t pg_start; pgoff_t pg_end; inode = fuse_ilookup(fc, nodeid, NULL); if (!inode) return -ENOENT; fi = get_fuse_inode(inode); spin_lock(&fi->lock); fi->attr_version = atomic64_inc_return(&fc->attr_version); spin_unlock(&fi->lock); fuse_invalidate_attr(inode); forget_all_cached_acls(inode); if (offset >= 0) { pg_start = offset >> PAGE_SHIFT; if (len <= 0) pg_end = -1; else pg_end = (offset + len - 1) >> PAGE_SHIFT; invalidate_inode_pages2_range(inode->i_mapping, pg_start, pg_end); } iput(inode); return 0; } void fuse_try_prune_one_inode(struct fuse_conn *fc, u64 nodeid) { struct inode *inode; inode = fuse_ilookup(fc, nodeid, NULL); if (!inode) return; d_prune_aliases(inode); iput(inode); } bool fuse_lock_inode(struct inode *inode) { bool locked = false; if (!get_fuse_conn(inode)->parallel_dirops) { mutex_lock(&get_fuse_inode(inode)->mutex); locked = true; } return locked; } void fuse_unlock_inode(struct inode *inode, bool locked) { if (locked) mutex_unlock(&get_fuse_inode(inode)->mutex); } static void fuse_umount_begin(struct super_block *sb) { struct fuse_conn *fc = get_fuse_conn_super(sb); if (fc->no_force_umount) return; fuse_abort_conn(fc); // Only retire block-device-based superblocks. 
if (sb->s_bdev != NULL) retire_super(sb); } static void fuse_send_destroy(struct fuse_mount *fm) { if (fm->fc->conn_init) { FUSE_ARGS(args); args.opcode = FUSE_DESTROY; args.force = true; args.nocreds = true; fuse_simple_request(fm, &args); } } static void convert_fuse_statfs(struct kstatfs *stbuf, struct fuse_kstatfs *attr) { stbuf->f_type = FUSE_SUPER_MAGIC; stbuf->f_bsize = attr->bsize; stbuf->f_frsize = attr->frsize; stbuf->f_blocks = attr->blocks; stbuf->f_bfree = attr->bfree; stbuf->f_bavail = attr->bavail; stbuf->f_files = attr->files; stbuf->f_ffree = attr->ffree; stbuf->f_namelen = attr->namelen; /* fsid is left zero */ } static int fuse_statfs(struct dentry *dentry, struct kstatfs *buf) { struct super_block *sb = dentry->d_sb; struct fuse_mount *fm = get_fuse_mount_super(sb); FUSE_ARGS(args); struct fuse_statfs_out outarg; int err; if (!fuse_allow_current_process(fm->fc)) { buf->f_type = FUSE_SUPER_MAGIC; return 0; } memset(&outarg, 0, sizeof(outarg)); args.in_numargs = 0; args.opcode = FUSE_STATFS; args.nodeid = get_node_id(d_inode(dentry)); args.out_numargs = 1; args.out_args[0].size = sizeof(outarg); args.out_args[0].value = &outarg; err = fuse_simple_request(fm, &args); if (!err) convert_fuse_statfs(buf, &outarg.st); return err; } static struct fuse_sync_bucket *fuse_sync_bucket_alloc(void) { struct fuse_sync_bucket *bucket; bucket = kzalloc(sizeof(*bucket), GFP_KERNEL | __GFP_NOFAIL); if (bucket) { init_waitqueue_head(&bucket->waitq); /* Initial active count */ atomic_set(&bucket->count, 1); } return bucket; } static void fuse_sync_fs_writes(struct fuse_conn *fc) { struct fuse_sync_bucket *bucket, *new_bucket; int count; new_bucket = fuse_sync_bucket_alloc(); spin_lock(&fc->lock); bucket = rcu_dereference_protected(fc->curr_bucket, 1); count = atomic_read(&bucket->count); WARN_ON(count < 1); /* No outstanding writes? */ if (count == 1) { spin_unlock(&fc->lock); kfree(new_bucket); return; } /* * Completion of new bucket depends on completion of this bucket, so add * one more count. */ atomic_inc(&new_bucket->count); rcu_assign_pointer(fc->curr_bucket, new_bucket); spin_unlock(&fc->lock); /* * Drop initial active count. At this point if all writes in this and * ancestor buckets complete, the count will go to zero and this task * will be woken up. */ atomic_dec(&bucket->count); wait_event(bucket->waitq, atomic_read(&bucket->count) == 0); /* Drop temp count on descendant bucket */ fuse_sync_bucket_dec(new_bucket); kfree_rcu(bucket, rcu); } static int fuse_sync_fs(struct super_block *sb, int wait) { struct fuse_mount *fm = get_fuse_mount_super(sb); struct fuse_conn *fc = fm->fc; struct fuse_syncfs_in inarg; FUSE_ARGS(args); int err; /* * Userspace cannot handle the wait == 0 case. Avoid a * gratuitous roundtrip. */ if (!wait) return 0; /* The filesystem is being unmounted. Nothing to do. 
*/ if (!sb->s_root) return 0; if (!fc->sync_fs) return 0; fuse_sync_fs_writes(fc); memset(&inarg, 0, sizeof(inarg)); args.in_numargs = 1; args.in_args[0].size = sizeof(inarg); args.in_args[0].value = &inarg; args.opcode = FUSE_SYNCFS; args.nodeid = get_node_id(sb->s_root->d_inode); args.out_numargs = 0; err = fuse_simple_request(fm, &args); if (err == -ENOSYS) { fc->sync_fs = 0; err = 0; } return err; } enum { OPT_SOURCE, OPT_SUBTYPE, OPT_FD, OPT_ROOTMODE, OPT_USER_ID, OPT_GROUP_ID, OPT_DEFAULT_PERMISSIONS, OPT_ALLOW_OTHER, OPT_MAX_READ, OPT_BLKSIZE, OPT_ERR }; static const struct fs_parameter_spec fuse_fs_parameters[] = { fsparam_string ("source", OPT_SOURCE), fsparam_u32 ("fd", OPT_FD), fsparam_u32oct ("rootmode", OPT_ROOTMODE), fsparam_uid ("user_id", OPT_USER_ID), fsparam_gid ("group_id", OPT_GROUP_ID), fsparam_flag ("default_permissions", OPT_DEFAULT_PERMISSIONS), fsparam_flag ("allow_other", OPT_ALLOW_OTHER), fsparam_u32 ("max_read", OPT_MAX_READ), fsparam_u32 ("blksize", OPT_BLKSIZE), fsparam_string ("subtype", OPT_SUBTYPE), {} }; static int fuse_parse_param(struct fs_context *fsc, struct fs_parameter *param) { struct fs_parse_result result; struct fuse_fs_context *ctx = fsc->fs_private; int opt; kuid_t kuid; kgid_t kgid; if (fsc->purpose == FS_CONTEXT_FOR_RECONFIGURE) { /* * Ignore options coming from mount(MS_REMOUNT) for backward * compatibility. */ if (fsc->oldapi) return 0; return invalfc(fsc, "No changes allowed in reconfigure"); } opt = fs_parse(fsc, fuse_fs_parameters, param, &result); if (opt < 0) return opt; switch (opt) { case OPT_SOURCE: if (fsc->source) return invalfc(fsc, "Multiple sources specified"); fsc->source = param->string; param->string = NULL; break; case OPT_SUBTYPE: if (ctx->subtype) return invalfc(fsc, "Multiple subtypes specified"); ctx->subtype = param->string; param->string = NULL; return 0; case OPT_FD: ctx->fd = result.uint_32; ctx->fd_present = true; break; case OPT_ROOTMODE: if (!fuse_valid_type(result.uint_32)) return invalfc(fsc, "Invalid rootmode"); ctx->rootmode = result.uint_32; ctx->rootmode_present = true; break; case OPT_USER_ID: kuid = result.uid; /* * The requested uid must be representable in the * filesystem's idmapping. */ if (!kuid_has_mapping(fsc->user_ns, kuid)) return invalfc(fsc, "Invalid user_id"); ctx->user_id = kuid; ctx->user_id_present = true; break; case OPT_GROUP_ID: kgid = result.gid; /* * The requested gid must be representable in the * filesystem's idmapping. 
*/ if (!kgid_has_mapping(fsc->user_ns, kgid)) return invalfc(fsc, "Invalid group_id"); ctx->group_id = kgid; ctx->group_id_present = true; break; case OPT_DEFAULT_PERMISSIONS: ctx->default_permissions = true; break; case OPT_ALLOW_OTHER: ctx->allow_other = true; break; case OPT_MAX_READ: ctx->max_read = result.uint_32; break; case OPT_BLKSIZE: if (!ctx->is_bdev) return invalfc(fsc, "blksize only supported for fuseblk"); ctx->blksize = result.uint_32; break; default: return -EINVAL; } return 0; } static void fuse_free_fsc(struct fs_context *fsc) { struct fuse_fs_context *ctx = fsc->fs_private; if (ctx) { kfree(ctx->subtype); kfree(ctx); } } static int fuse_show_options(struct seq_file *m, struct dentry *root) { struct super_block *sb = root->d_sb; struct fuse_conn *fc = get_fuse_conn_super(sb); if (fc->legacy_opts_show) { seq_printf(m, ",user_id=%u", from_kuid_munged(fc->user_ns, fc->user_id)); seq_printf(m, ",group_id=%u", from_kgid_munged(fc->user_ns, fc->group_id)); if (fc->default_permissions) seq_puts(m, ",default_permissions"); if (fc->allow_other) seq_puts(m, ",allow_other"); if (fc->max_read != ~0) seq_printf(m, ",max_read=%u", fc->max_read); if (sb->s_bdev && sb->s_blocksize != FUSE_DEFAULT_BLKSIZE) seq_printf(m, ",blksize=%lu", sb->s_blocksize); } #ifdef CONFIG_FUSE_DAX if (fc->dax_mode == FUSE_DAX_ALWAYS) seq_puts(m, ",dax=always"); else if (fc->dax_mode == FUSE_DAX_NEVER) seq_puts(m, ",dax=never"); else if (fc->dax_mode == FUSE_DAX_INODE_USER) seq_puts(m, ",dax=inode"); #endif return 0; } static void fuse_iqueue_init(struct fuse_iqueue *fiq, const struct fuse_iqueue_ops *ops, void *priv) { memset(fiq, 0, sizeof(struct fuse_iqueue)); spin_lock_init(&fiq->lock); init_waitqueue_head(&fiq->waitq); INIT_LIST_HEAD(&fiq->pending); INIT_LIST_HEAD(&fiq->interrupts); fiq->forget_list_tail = &fiq->forget_list_head; fiq->connected = 1; fiq->ops = ops; fiq->priv = priv; } void fuse_pqueue_init(struct fuse_pqueue *fpq) { unsigned int i; spin_lock_init(&fpq->lock); for (i = 0; i < FUSE_PQ_HASH_SIZE; i++) INIT_LIST_HEAD(&fpq->processing[i]); INIT_LIST_HEAD(&fpq->io); fpq->connected = 1; } void fuse_conn_init(struct fuse_conn *fc, struct fuse_mount *fm, struct user_namespace *user_ns, const struct fuse_iqueue_ops *fiq_ops, void *fiq_priv) { memset(fc, 0, sizeof(*fc)); spin_lock_init(&fc->lock); spin_lock_init(&fc->bg_lock); init_rwsem(&fc->killsb); refcount_set(&fc->count, 1); atomic_set(&fc->dev_count, 1); atomic_set(&fc->epoch, 1); INIT_WORK(&fc->epoch_work, fuse_epoch_work); init_waitqueue_head(&fc->blocked_waitq); fuse_iqueue_init(&fc->iq, fiq_ops, fiq_priv); INIT_LIST_HEAD(&fc->bg_queue); INIT_LIST_HEAD(&fc->entry); INIT_LIST_HEAD(&fc->devices); atomic_set(&fc->num_waiting, 0); fc->max_background = FUSE_DEFAULT_MAX_BACKGROUND; fc->congestion_threshold = FUSE_DEFAULT_CONGESTION_THRESHOLD; atomic64_set(&fc->khctr, 0); fc->polled_files = RB_ROOT; fc->blocked = 0; fc->initialized = 0; fc->connected = 1; atomic64_set(&fc->attr_version, 1); atomic64_set(&fc->evict_ctr, 1); get_random_bytes(&fc->scramble_key, sizeof(fc->scramble_key)); fc->pid_ns = get_pid_ns(task_active_pid_ns(current)); fc->user_ns = get_user_ns(user_ns); fc->max_pages = FUSE_DEFAULT_MAX_PAGES_PER_REQ; fc->max_pages_limit = fuse_max_pages_limit; fc->name_max = FUSE_NAME_LOW_MAX; fc->timeout.req_timeout = 0; if (IS_ENABLED(CONFIG_FUSE_PASSTHROUGH)) fuse_backing_files_init(fc); INIT_LIST_HEAD(&fc->mounts); list_add(&fm->fc_entry, &fc->mounts); fm->fc = fc; } EXPORT_SYMBOL_GPL(fuse_conn_init); static void delayed_release(struct 
rcu_head *p) { struct fuse_conn *fc = container_of(p, struct fuse_conn, rcu); fuse_uring_destruct(fc); put_user_ns(fc->user_ns); fc->release(fc); } void fuse_conn_put(struct fuse_conn *fc) { struct fuse_iqueue *fiq = &fc->iq; struct fuse_sync_bucket *bucket; if (!refcount_dec_and_test(&fc->count)) return; if (IS_ENABLED(CONFIG_FUSE_DAX)) fuse_dax_conn_free(fc); if (fc->timeout.req_timeout) cancel_delayed_work_sync(&fc->timeout.work); cancel_work_sync(&fc->epoch_work); if (fiq->ops->release) fiq->ops->release(fiq); put_pid_ns(fc->pid_ns); bucket = rcu_dereference_protected(fc->curr_bucket, 1); if (bucket) { WARN_ON(atomic_read(&bucket->count) != 1); kfree(bucket); } if (IS_ENABLED(CONFIG_FUSE_PASSTHROUGH)) fuse_backing_files_free(fc); call_rcu(&fc->rcu, delayed_release); } EXPORT_SYMBOL_GPL(fuse_conn_put); struct fuse_conn *fuse_conn_get(struct fuse_conn *fc) { refcount_inc(&fc->count); return fc; } EXPORT_SYMBOL_GPL(fuse_conn_get); static struct inode *fuse_get_root_inode(struct super_block *sb, unsigned int mode) { struct fuse_attr attr; memset(&attr, 0, sizeof(attr)); attr.mode = mode; attr.ino = FUSE_ROOT_ID; attr.nlink = 1; return fuse_iget(sb, FUSE_ROOT_ID, 0, &attr, 0, 0, 0); } struct fuse_inode_handle { u64 nodeid; u32 generation; }; static struct dentry *fuse_get_dentry(struct super_block *sb, struct fuse_inode_handle *handle) { struct fuse_conn *fc = get_fuse_conn_super(sb); struct inode *inode; struct dentry *entry; int err = -ESTALE; if (handle->nodeid == 0) goto out_err; inode = ilookup5(sb, handle->nodeid, fuse_inode_eq, &handle->nodeid); if (!inode) { struct fuse_entry_out outarg; const struct qstr name = QSTR_INIT(".", 1); if (!fc->export_support) goto out_err; err = fuse_lookup_name(sb, handle->nodeid, &name, &outarg, &inode); if (err && err != -ENOENT) goto out_err; if (err || !inode) { err = -ESTALE; goto out_err; } err = -EIO; if (get_node_id(inode) != handle->nodeid) goto out_iput; } err = -ESTALE; if (inode->i_generation != handle->generation) goto out_iput; entry = d_obtain_alias(inode); if (!IS_ERR(entry) && get_node_id(inode) != FUSE_ROOT_ID) fuse_invalidate_entry_cache(entry); return entry; out_iput: iput(inode); out_err: return ERR_PTR(err); } static int fuse_encode_fh(struct inode *inode, u32 *fh, int *max_len, struct inode *parent) { int len = parent ? 6 : 3; u64 nodeid; u32 generation; if (*max_len < len) { *max_len = len; return FILEID_INVALID; } nodeid = get_fuse_inode(inode)->nodeid; generation = inode->i_generation; fh[0] = (u32)(nodeid >> 32); fh[1] = (u32)(nodeid & 0xffffffff); fh[2] = generation; if (parent) { nodeid = get_fuse_inode(parent)->nodeid; generation = parent->i_generation; fh[3] = (u32)(nodeid >> 32); fh[4] = (u32)(nodeid & 0xffffffff); fh[5] = generation; } *max_len = len; return parent ? 
FILEID_INO64_GEN_PARENT : FILEID_INO64_GEN; } static struct dentry *fuse_fh_to_dentry(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { struct fuse_inode_handle handle; if ((fh_type != FILEID_INO64_GEN && fh_type != FILEID_INO64_GEN_PARENT) || fh_len < 3) return NULL; handle.nodeid = (u64) fid->raw[0] << 32; handle.nodeid |= (u64) fid->raw[1]; handle.generation = fid->raw[2]; return fuse_get_dentry(sb, &handle); } static struct dentry *fuse_fh_to_parent(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { struct fuse_inode_handle parent; if (fh_type != FILEID_INO64_GEN_PARENT || fh_len < 6) return NULL; parent.nodeid = (u64) fid->raw[3] << 32; parent.nodeid |= (u64) fid->raw[4]; parent.generation = fid->raw[5]; return fuse_get_dentry(sb, &parent); } static struct dentry *fuse_get_parent(struct dentry *child) { struct inode *child_inode = d_inode(child); struct fuse_conn *fc = get_fuse_conn(child_inode); struct inode *inode; struct dentry *parent; struct fuse_entry_out outarg; int err; if (!fc->export_support) return ERR_PTR(-ESTALE); err = fuse_lookup_name(child_inode->i_sb, get_node_id(child_inode), &dotdot_name, &outarg, &inode); if (err) { if (err == -ENOENT) return ERR_PTR(-ESTALE); return ERR_PTR(err); } parent = d_obtain_alias(inode); if (!IS_ERR(parent) && get_node_id(inode) != FUSE_ROOT_ID) fuse_invalidate_entry_cache(parent); return parent; } /* only for fid encoding; no support for file handle */ static const struct export_operations fuse_export_fid_operations = { .encode_fh = fuse_encode_fh, }; static const struct export_operations fuse_export_operations = { .fh_to_dentry = fuse_fh_to_dentry, .fh_to_parent = fuse_fh_to_parent, .encode_fh = fuse_encode_fh, .get_parent = fuse_get_parent, }; static const struct super_operations fuse_super_operations = { .alloc_inode = fuse_alloc_inode, .free_inode = fuse_free_inode, .evict_inode = fuse_evict_inode, .write_inode = fuse_write_inode, .drop_inode = inode_just_drop, .umount_begin = fuse_umount_begin, .statfs = fuse_statfs, .sync_fs = fuse_sync_fs, .show_options = fuse_show_options, }; static void sanitize_global_limit(unsigned int *limit) { /* * The default maximum number of async requests is calculated to consume * 1/2^13 of the total memory, assuming 392 bytes per request. 
*/ if (*limit == 0) *limit = ((totalram_pages() << PAGE_SHIFT) >> 13) / 392; if (*limit >= 1 << 16) *limit = (1 << 16) - 1; } static int set_global_limit(const char *val, const struct kernel_param *kp) { int rv; rv = param_set_uint(val, kp); if (rv) return rv; sanitize_global_limit((unsigned int *)kp->arg); return 0; } static void process_init_limits(struct fuse_conn *fc, struct fuse_init_out *arg) { int cap_sys_admin = capable(CAP_SYS_ADMIN); if (arg->minor < 13) return; sanitize_global_limit(&max_user_bgreq); sanitize_global_limit(&max_user_congthresh); spin_lock(&fc->bg_lock); if (arg->max_background) { fc->max_background = arg->max_background; if (!cap_sys_admin && fc->max_background > max_user_bgreq) fc->max_background = max_user_bgreq; } if (arg->congestion_threshold) { fc->congestion_threshold = arg->congestion_threshold; if (!cap_sys_admin && fc->congestion_threshold > max_user_congthresh) fc->congestion_threshold = max_user_congthresh; } spin_unlock(&fc->bg_lock); } static void set_request_timeout(struct fuse_conn *fc, unsigned int timeout) { fc->timeout.req_timeout = secs_to_jiffies(timeout); INIT_DELAYED_WORK(&fc->timeout.work, fuse_check_timeout); queue_delayed_work(system_percpu_wq, &fc->timeout.work, fuse_timeout_timer_freq); } static void init_server_timeout(struct fuse_conn *fc, unsigned int timeout) { if (!timeout && !fuse_max_req_timeout && !fuse_default_req_timeout) return; if (!timeout) timeout = fuse_default_req_timeout; if (fuse_max_req_timeout) { if (timeout) timeout = min(fuse_max_req_timeout, timeout); else timeout = fuse_max_req_timeout; } timeout = max(FUSE_TIMEOUT_TIMER_FREQ, timeout); set_request_timeout(fc, timeout); } struct fuse_init_args { struct fuse_args args; struct fuse_init_in in; struct fuse_init_out out; }; static void process_init_reply(struct fuse_mount *fm, struct fuse_args *args, int error) { struct fuse_conn *fc = fm->fc; struct fuse_init_args *ia = container_of(args, typeof(*ia), args); struct fuse_init_out *arg = &ia->out; bool ok = true; if (error || arg->major != FUSE_KERNEL_VERSION) ok = false; else { unsigned long ra_pages; unsigned int timeout = 0; process_init_limits(fc, arg); if (arg->minor >= 6) { u64 flags = arg->flags; if (flags & FUSE_INIT_EXT) flags |= (u64) arg->flags2 << 32; ra_pages = arg->max_readahead / PAGE_SIZE; if (flags & FUSE_ASYNC_READ) fc->async_read = 1; if (!(flags & FUSE_POSIX_LOCKS)) fc->no_lock = 1; if (arg->minor >= 17) { if (!(flags & FUSE_FLOCK_LOCKS)) fc->no_flock = 1; } else { if (!(flags & FUSE_POSIX_LOCKS)) fc->no_flock = 1; } if (flags & FUSE_ATOMIC_O_TRUNC) fc->atomic_o_trunc = 1; if (arg->minor >= 9) { /* LOOKUP has dependency on proto version */ if (flags & FUSE_EXPORT_SUPPORT) fc->export_support = 1; } if (flags & FUSE_BIG_WRITES) fc->big_writes = 1; if (flags & FUSE_DONT_MASK) fc->dont_mask = 1; if (flags & FUSE_AUTO_INVAL_DATA) fc->auto_inval_data = 1; else if (flags & FUSE_EXPLICIT_INVAL_DATA) fc->explicit_inval_data = 1; if (flags & FUSE_DO_READDIRPLUS) { fc->do_readdirplus = 1; if (flags & FUSE_READDIRPLUS_AUTO) fc->readdirplus_auto = 1; } if (flags & FUSE_ASYNC_DIO) fc->async_dio = 1; if (flags & FUSE_WRITEBACK_CACHE) fc->writeback_cache = 1; if (flags & FUSE_PARALLEL_DIROPS) fc->parallel_dirops = 1; if (flags & FUSE_HANDLE_KILLPRIV) fc->handle_killpriv = 1; if (arg->time_gran && arg->time_gran <= 1000000000) fm->sb->s_time_gran = arg->time_gran; if ((flags & FUSE_POSIX_ACL)) { fc->default_permissions = 1; fc->posix_acl = 1; } if (flags & FUSE_CACHE_SYMLINKS) fc->cache_symlinks = 1; if (flags & 
FUSE_ABORT_ERROR) fc->abort_err = 1; if (flags & FUSE_MAX_PAGES) { fc->max_pages = min_t(unsigned int, fc->max_pages_limit, max_t(unsigned int, arg->max_pages, 1)); /* * PATH_MAX file names might need two pages for * ops like rename */ if (fc->max_pages > 1) fc->name_max = FUSE_NAME_MAX; } if (IS_ENABLED(CONFIG_FUSE_DAX)) { if (flags & FUSE_MAP_ALIGNMENT && !fuse_dax_check_alignment(fc, arg->map_alignment)) { ok = false; } if (flags & FUSE_HAS_INODE_DAX) fc->inode_dax = 1; } if (flags & FUSE_HANDLE_KILLPRIV_V2) { fc->handle_killpriv_v2 = 1; fm->sb->s_flags |= SB_NOSEC; } if (flags & FUSE_SETXATTR_EXT) fc->setxattr_ext = 1; if (flags & FUSE_SECURITY_CTX) fc->init_security = 1; if (flags & FUSE_CREATE_SUPP_GROUP) fc->create_supp_group = 1; if (flags & FUSE_DIRECT_IO_ALLOW_MMAP) fc->direct_io_allow_mmap = 1; /* * max_stack_depth is the max stack depth of FUSE fs, * so it has to be at least 1 to support passthrough * to backing files. * * with max_stack_depth > 1, the backing files can be * on a stacked fs (e.g. overlayfs) themselves and with * max_stack_depth == 1, FUSE fs can be stacked as the * underlying fs of a stacked fs (e.g. overlayfs). * * Also don't allow the combination of FUSE_PASSTHROUGH * and FUSE_WRITEBACK_CACHE, current design doesn't handle * them together. */ if (IS_ENABLED(CONFIG_FUSE_PASSTHROUGH) && (flags & FUSE_PASSTHROUGH) && arg->max_stack_depth > 0 && arg->max_stack_depth <= FILESYSTEM_MAX_STACK_DEPTH && !(flags & FUSE_WRITEBACK_CACHE)) { fc->passthrough = 1; fc->max_stack_depth = arg->max_stack_depth; fm->sb->s_stack_depth = arg->max_stack_depth; } if (flags & FUSE_NO_EXPORT_SUPPORT) fm->sb->s_export_op = &fuse_export_fid_operations; if (flags & FUSE_ALLOW_IDMAP) { if (fc->default_permissions) fm->sb->s_iflags &= ~SB_I_NOIDMAP; else ok = false; } if (flags & FUSE_OVER_IO_URING && fuse_uring_enabled()) fc->io_uring = 1; if (flags & FUSE_REQUEST_TIMEOUT) timeout = arg->request_timeout; } else { ra_pages = fc->max_read / PAGE_SIZE; fc->no_lock = 1; fc->no_flock = 1; } init_server_timeout(fc, timeout); fm->sb->s_bdi->ra_pages = min(fm->sb->s_bdi->ra_pages, ra_pages); fc->minor = arg->minor; fc->max_write = arg->minor < 5 ? 
4096 : arg->max_write; fc->max_write = max_t(unsigned, 4096, fc->max_write); fc->conn_init = 1; } kfree(ia); if (!ok) { fc->conn_init = 0; fc->conn_error = 1; } fuse_set_initialized(fc); wake_up_all(&fc->blocked_waitq); } static struct fuse_init_args *fuse_new_init(struct fuse_mount *fm) { struct fuse_init_args *ia; u64 flags; ia = kzalloc(sizeof(*ia), GFP_KERNEL | __GFP_NOFAIL); ia->in.major = FUSE_KERNEL_VERSION; ia->in.minor = FUSE_KERNEL_MINOR_VERSION; ia->in.max_readahead = fm->sb->s_bdi->ra_pages * PAGE_SIZE; flags = FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_ATOMIC_O_TRUNC | FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES | FUSE_DONT_MASK | FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ | FUSE_FLOCK_LOCKS | FUSE_HAS_IOCTL_DIR | FUSE_AUTO_INVAL_DATA | FUSE_DO_READDIRPLUS | FUSE_READDIRPLUS_AUTO | FUSE_ASYNC_DIO | FUSE_WRITEBACK_CACHE | FUSE_NO_OPEN_SUPPORT | FUSE_PARALLEL_DIROPS | FUSE_HANDLE_KILLPRIV | FUSE_POSIX_ACL | FUSE_ABORT_ERROR | FUSE_MAX_PAGES | FUSE_CACHE_SYMLINKS | FUSE_NO_OPENDIR_SUPPORT | FUSE_EXPLICIT_INVAL_DATA | FUSE_HANDLE_KILLPRIV_V2 | FUSE_SETXATTR_EXT | FUSE_INIT_EXT | FUSE_SECURITY_CTX | FUSE_CREATE_SUPP_GROUP | FUSE_HAS_EXPIRE_ONLY | FUSE_DIRECT_IO_ALLOW_MMAP | FUSE_NO_EXPORT_SUPPORT | FUSE_HAS_RESEND | FUSE_ALLOW_IDMAP | FUSE_REQUEST_TIMEOUT; #ifdef CONFIG_FUSE_DAX if (fm->fc->dax) flags |= FUSE_MAP_ALIGNMENT; if (fuse_is_inode_dax_mode(fm->fc->dax_mode)) flags |= FUSE_HAS_INODE_DAX; #endif if (fm->fc->auto_submounts) flags |= FUSE_SUBMOUNTS; if (IS_ENABLED(CONFIG_FUSE_PASSTHROUGH)) flags |= FUSE_PASSTHROUGH; /* * This is just an information flag for fuse server. No need to check * the reply - server is either sending IORING_OP_URING_CMD or not. */ if (fuse_uring_enabled()) flags |= FUSE_OVER_IO_URING; ia->in.flags = flags; ia->in.flags2 = flags >> 32; ia->args.opcode = FUSE_INIT; ia->args.in_numargs = 1; ia->args.in_args[0].size = sizeof(ia->in); ia->args.in_args[0].value = &ia->in; ia->args.out_numargs = 1; /* Variable length argument used for backward compatibility with interface version < 7.5. Rest of init_out is zeroed by do_get_request(), so a short reply is not a problem */ ia->args.out_argvar = true; ia->args.out_args[0].size = sizeof(ia->out); ia->args.out_args[0].value = &ia->out; ia->args.force = true; ia->args.nocreds = true; return ia; } int fuse_send_init(struct fuse_mount *fm) { struct fuse_init_args *ia = fuse_new_init(fm); int err; if (fm->fc->sync_init) { err = fuse_simple_request(fm, &ia->args); /* Ignore size of init reply */ if (err > 0) err = 0; } else { ia->args.end = process_init_reply; err = fuse_simple_background(fm, &ia->args, GFP_KERNEL); if (!err) return 0; } process_init_reply(fm, &ia->args, err); if (fm->fc->conn_error) return -ENOTCONN; return 0; } EXPORT_SYMBOL_GPL(fuse_send_init); void fuse_free_conn(struct fuse_conn *fc) { WARN_ON(!list_empty(&fc->devices)); kfree(fc); } EXPORT_SYMBOL_GPL(fuse_free_conn); static int fuse_bdi_init(struct fuse_conn *fc, struct super_block *sb) { int err; char *suffix = ""; if (sb->s_bdev) { suffix = "-fuseblk"; /* * sb->s_bdi points to blkdev's bdi however we want to redirect * it to our private bdi... */ bdi_put(sb->s_bdi); sb->s_bdi = &noop_backing_dev_info; } err = super_setup_bdi_name(sb, "%u:%u%s", MAJOR(fc->dev), MINOR(fc->dev), suffix); if (err) return err; sb->s_bdi->capabilities |= BDI_CAP_STRICTLIMIT; /* * For a single fuse filesystem use max 1% of dirty + * writeback threshold. 
* * This gives about 1M of write buffer for memory maps on a * machine with 1G and 10% dirty_ratio, which should be more * than enough. * * Privileged users can raise it by writing to * * /sys/class/bdi/<bdi>/max_ratio */ bdi_set_max_ratio(sb->s_bdi, 1); return 0; } struct fuse_dev *fuse_dev_alloc(void) { struct fuse_dev *fud; struct list_head *pq; fud = kzalloc(sizeof(struct fuse_dev), GFP_KERNEL); if (!fud) return NULL; pq = kcalloc(FUSE_PQ_HASH_SIZE, sizeof(struct list_head), GFP_KERNEL); if (!pq) { kfree(fud); return NULL; } fud->pq.processing = pq; fuse_pqueue_init(&fud->pq); return fud; } EXPORT_SYMBOL_GPL(fuse_dev_alloc); void fuse_dev_install(struct fuse_dev *fud, struct fuse_conn *fc) { fud->fc = fuse_conn_get(fc); spin_lock(&fc->lock); list_add_tail(&fud->entry, &fc->devices); spin_unlock(&fc->lock); } EXPORT_SYMBOL_GPL(fuse_dev_install); struct fuse_dev *fuse_dev_alloc_install(struct fuse_conn *fc) { struct fuse_dev *fud; fud = fuse_dev_alloc(); if (!fud) return NULL; fuse_dev_install(fud, fc); return fud; } EXPORT_SYMBOL_GPL(fuse_dev_alloc_install); void fuse_dev_free(struct fuse_dev *fud) { struct fuse_conn *fc = fud->fc; if (fc) { spin_lock(&fc->lock); list_del(&fud->entry); spin_unlock(&fc->lock); fuse_conn_put(fc); } kfree(fud->pq.processing); kfree(fud); } EXPORT_SYMBOL_GPL(fuse_dev_free); static void fuse_fill_attr_from_inode(struct fuse_attr *attr, const struct fuse_inode *fi) { struct timespec64 atime = inode_get_atime(&fi->inode); struct timespec64 mtime = inode_get_mtime(&fi->inode); struct timespec64 ctime = inode_get_ctime(&fi->inode); *attr = (struct fuse_attr){ .ino = fi->inode.i_ino, .size = fi->inode.i_size, .blocks = fi->inode.i_blocks, .atime = atime.tv_sec, .mtime = mtime.tv_sec, .ctime = ctime.tv_sec, .atimensec = atime.tv_nsec, .mtimensec = mtime.tv_nsec, .ctimensec = ctime.tv_nsec, .mode = fi->inode.i_mode, .nlink = fi->inode.i_nlink, .uid = __kuid_val(fi->inode.i_uid), .gid = __kgid_val(fi->inode.i_gid), .rdev = fi->inode.i_rdev, .blksize = 1u << fi->inode.i_blkbits, }; } static void fuse_sb_defaults(struct super_block *sb) { sb->s_magic = FUSE_SUPER_MAGIC; sb->s_op = &fuse_super_operations; sb->s_xattr = fuse_xattr_handlers; sb->s_maxbytes = MAX_LFS_FILESIZE; sb->s_time_gran = 1; sb->s_export_op = &fuse_export_operations; sb->s_iflags |= SB_I_IMA_UNVERIFIABLE_SIGNATURE; sb->s_iflags |= SB_I_NOIDMAP; if (sb->s_user_ns != &init_user_ns) sb->s_iflags |= SB_I_UNTRUSTED_MOUNTER; sb->s_flags &= ~(SB_NOSEC | SB_I_VERSION); } static int fuse_fill_super_submount(struct super_block *sb, struct fuse_inode *parent_fi) { struct fuse_mount *fm = get_fuse_mount_super(sb); struct super_block *parent_sb = parent_fi->inode.i_sb; struct fuse_attr root_attr; struct inode *root; struct fuse_submount_lookup *sl; struct fuse_inode *fi; fuse_sb_defaults(sb); fm->sb = sb; WARN_ON(sb->s_bdi != &noop_backing_dev_info); sb->s_bdi = bdi_get(parent_sb->s_bdi); sb->s_xattr = parent_sb->s_xattr; sb->s_export_op = parent_sb->s_export_op; sb->s_time_gran = parent_sb->s_time_gran; sb->s_blocksize = parent_sb->s_blocksize; sb->s_blocksize_bits = parent_sb->s_blocksize_bits; sb->s_subtype = kstrdup(parent_sb->s_subtype, GFP_KERNEL); if (parent_sb->s_subtype && !sb->s_subtype) return -ENOMEM; fuse_fill_attr_from_inode(&root_attr, parent_fi); root = fuse_iget(sb, parent_fi->nodeid, 0, &root_attr, 0, 0, fuse_get_evict_ctr(fm->fc)); /* * This inode is just a duplicate, so it is not looked up and * its nlookup should not be incremented. fuse_iget() does * that, though, so undo it here. 
*/ fi = get_fuse_inode(root); fi->nlookup--; set_default_d_op(sb, &fuse_dentry_operations); sb->s_root = d_make_root(root); if (!sb->s_root) return -ENOMEM; /* * Grab the parent's submount_lookup pointer and take a * reference on the shared nlookup from the parent. This is to * prevent the last forget for this nodeid from getting * triggered until all users have finished with it. */ sl = parent_fi->submount_lookup; WARN_ON(!sl); if (sl) { refcount_inc(&sl->count); fi->submount_lookup = sl; } return 0; } /* Filesystem context private data holds the FUSE inode of the mount point */ static int fuse_get_tree_submount(struct fs_context *fsc) { struct fuse_mount *fm; struct fuse_inode *mp_fi = fsc->fs_private; struct fuse_conn *fc = get_fuse_conn(&mp_fi->inode); struct super_block *sb; int err; fm = kzalloc(sizeof(struct fuse_mount), GFP_KERNEL); if (!fm) return -ENOMEM; fm->fc = fuse_conn_get(fc); fsc->s_fs_info = fm; sb = sget_fc(fsc, NULL, set_anon_super_fc); if (fsc->s_fs_info) fuse_mount_destroy(fm); if (IS_ERR(sb)) return PTR_ERR(sb); /* Initialize superblock, making @mp_fi its root */ err = fuse_fill_super_submount(sb, mp_fi); if (err) { deactivate_locked_super(sb); return err; } down_write(&fc->killsb); list_add_tail(&fm->fc_entry, &fc->mounts); up_write(&fc->killsb); sb->s_flags |= SB_ACTIVE; fsc->root = dget(sb->s_root); return 0; } static const struct fs_context_operations fuse_context_submount_ops = { .get_tree = fuse_get_tree_submount, }; int fuse_init_fs_context_submount(struct fs_context *fsc) { fsc->ops = &fuse_context_submount_ops; return 0; } EXPORT_SYMBOL_GPL(fuse_init_fs_context_submount); int fuse_fill_super_common(struct super_block *sb, struct fuse_fs_context *ctx) { struct fuse_dev *fud = NULL; struct fuse_mount *fm = get_fuse_mount_super(sb); struct fuse_conn *fc = fm->fc; struct inode *root; struct dentry *root_dentry; int err; err = -EINVAL; if (sb->s_flags & SB_MANDLOCK) goto err; rcu_assign_pointer(fc->curr_bucket, fuse_sync_bucket_alloc()); fuse_sb_defaults(sb); if (ctx->is_bdev) { #ifdef CONFIG_BLOCK err = -EINVAL; if (!sb_set_blocksize(sb, ctx->blksize)) goto err; #endif fc->sync_fs = 1; } else { sb->s_blocksize = PAGE_SIZE; sb->s_blocksize_bits = PAGE_SHIFT; } sb->s_subtype = ctx->subtype; ctx->subtype = NULL; if (IS_ENABLED(CONFIG_FUSE_DAX)) { err = fuse_dax_conn_alloc(fc, ctx->dax_mode, ctx->dax_dev); if (err) goto err; } if (ctx->fudptr) { err = -ENOMEM; fud = fuse_dev_alloc_install(fc); if (!fud) goto err_free_dax; } fc->dev = sb->s_dev; fm->sb = sb; err = fuse_bdi_init(fc, sb); if (err) goto err_dev_free; /* Handle umasking inside the fuse code */ if (sb->s_flags & SB_POSIXACL) fc->dont_mask = 1; sb->s_flags |= SB_POSIXACL; fc->default_permissions = ctx->default_permissions; fc->allow_other = ctx->allow_other; fc->user_id = ctx->user_id; fc->group_id = ctx->group_id; fc->legacy_opts_show = ctx->legacy_opts_show; fc->max_read = max_t(unsigned int, 4096, ctx->max_read); fc->destroy = ctx->destroy; fc->no_control = ctx->no_control; fc->no_force_umount = ctx->no_force_umount; err = -ENOMEM; root = fuse_get_root_inode(sb, ctx->rootmode); set_default_d_op(sb, &fuse_dentry_operations); root_dentry = d_make_root(root); if (!root_dentry) goto err_dev_free; mutex_lock(&fuse_mutex); err = -EINVAL; if (ctx->fudptr && *ctx->fudptr) { if (*ctx->fudptr == FUSE_DEV_SYNC_INIT) fc->sync_init = 1; else goto err_unlock; } err = fuse_ctl_add_conn(fc); if (err) goto err_unlock; list_add_tail(&fc->entry, &fuse_conn_list); sb->s_root = root_dentry; if (ctx->fudptr) { *ctx->fudptr = 
fud; wake_up_all(&fuse_dev_waitq); } mutex_unlock(&fuse_mutex); return 0; err_unlock: mutex_unlock(&fuse_mutex); dput(root_dentry); err_dev_free: if (fud) fuse_dev_free(fud); err_free_dax: if (IS_ENABLED(CONFIG_FUSE_DAX)) fuse_dax_conn_free(fc); err: return err; } EXPORT_SYMBOL_GPL(fuse_fill_super_common); static int fuse_fill_super(struct super_block *sb, struct fs_context *fsc) { struct fuse_fs_context *ctx = fsc->fs_private; struct fuse_mount *fm; int err; if (!ctx->file || !ctx->rootmode_present || !ctx->user_id_present || !ctx->group_id_present) return -EINVAL; /* * Require mount to happen from the same user namespace which * opened /dev/fuse to prevent potential attacks. */ if ((ctx->file->f_op != &fuse_dev_operations) || (ctx->file->f_cred->user_ns != sb->s_user_ns)) return -EINVAL; ctx->fudptr = &ctx->file->private_data; err = fuse_fill_super_common(sb, ctx); if (err) return err; /* file->private_data shall be visible on all CPUs after this */ smp_mb(); fm = get_fuse_mount_super(sb); return fuse_send_init(fm); } /* * This is the path where user supplied an already initialized fuse dev. In * this case never create a new super if the old one is gone. */ static int fuse_set_no_super(struct super_block *sb, struct fs_context *fsc) { return -ENOTCONN; } static int fuse_test_super(struct super_block *sb, struct fs_context *fsc) { return fsc->sget_key == get_fuse_conn_super(sb); } static int fuse_get_tree(struct fs_context *fsc) { struct fuse_fs_context *ctx = fsc->fs_private; struct fuse_dev *fud; struct fuse_conn *fc; struct fuse_mount *fm; struct super_block *sb; int err; fc = kmalloc(sizeof(*fc), GFP_KERNEL); if (!fc) return -ENOMEM; fm = kzalloc(sizeof(*fm), GFP_KERNEL); if (!fm) { kfree(fc); return -ENOMEM; } fuse_conn_init(fc, fm, fsc->user_ns, &fuse_dev_fiq_ops, NULL); fc->release = fuse_free_conn; fsc->s_fs_info = fm; if (ctx->fd_present) ctx->file = fget(ctx->fd); if (IS_ENABLED(CONFIG_BLOCK) && ctx->is_bdev) { err = get_tree_bdev(fsc, fuse_fill_super); goto out; } /* * While block dev mount can be initialized with a dummy device fd * (found by device name), normal fuse mounts can't */ err = -EINVAL; if (!ctx->file) goto out; /* * Allow creating a fuse mount with an already initialized fuse * connection */ fud = __fuse_get_dev(ctx->file); if (ctx->file->f_op == &fuse_dev_operations && fud) { fsc->sget_key = fud->fc; sb = sget_fc(fsc, fuse_test_super, fuse_set_no_super); err = PTR_ERR_OR_ZERO(sb); if (!IS_ERR(sb)) fsc->root = dget(sb->s_root); } else { err = get_tree_nodev(fsc, fuse_fill_super); } out: if (fsc->s_fs_info) fuse_mount_destroy(fm); if (ctx->file) fput(ctx->file); return err; } static const struct fs_context_operations fuse_context_ops = { .free = fuse_free_fsc, .parse_param = fuse_parse_param, .reconfigure = fuse_reconfigure, .get_tree = fuse_get_tree, }; /* * Set up the filesystem mount context. 
*/ static int fuse_init_fs_context(struct fs_context *fsc) { struct fuse_fs_context *ctx; ctx = kzalloc(sizeof(struct fuse_fs_context), GFP_KERNEL); if (!ctx) return -ENOMEM; ctx->max_read = ~0; ctx->blksize = FUSE_DEFAULT_BLKSIZE; ctx->legacy_opts_show = true; #ifdef CONFIG_BLOCK if (fsc->fs_type == &fuseblk_fs_type) { ctx->is_bdev = true; ctx->destroy = true; } #endif fsc->fs_private = ctx; fsc->ops = &fuse_context_ops; return 0; } bool fuse_mount_remove(struct fuse_mount *fm) { struct fuse_conn *fc = fm->fc; bool last = false; down_write(&fc->killsb); list_del_init(&fm->fc_entry); if (list_empty(&fc->mounts)) last = true; up_write(&fc->killsb); return last; } EXPORT_SYMBOL_GPL(fuse_mount_remove); void fuse_conn_destroy(struct fuse_mount *fm) { struct fuse_conn *fc = fm->fc; if (fc->destroy) fuse_send_destroy(fm); fuse_abort_conn(fc); fuse_wait_aborted(fc); if (!list_empty(&fc->entry)) { mutex_lock(&fuse_mutex); list_del(&fc->entry); fuse_ctl_remove_conn(fc); mutex_unlock(&fuse_mutex); } } EXPORT_SYMBOL_GPL(fuse_conn_destroy); static void fuse_sb_destroy(struct super_block *sb) { struct fuse_mount *fm = get_fuse_mount_super(sb); bool last; if (sb->s_root) { last = fuse_mount_remove(fm); if (last) fuse_conn_destroy(fm); } } void fuse_mount_destroy(struct fuse_mount *fm) { fuse_conn_put(fm->fc); kfree_rcu(fm, rcu); } EXPORT_SYMBOL(fuse_mount_destroy); static void fuse_kill_sb_anon(struct super_block *sb) { fuse_sb_destroy(sb); kill_anon_super(sb); fuse_mount_destroy(get_fuse_mount_super(sb)); } static struct file_system_type fuse_fs_type = { .owner = THIS_MODULE, .name = "fuse", .fs_flags = FS_HAS_SUBTYPE | FS_USERNS_MOUNT | FS_ALLOW_IDMAP, .init_fs_context = fuse_init_fs_context, .parameters = fuse_fs_parameters, .kill_sb = fuse_kill_sb_anon, }; MODULE_ALIAS_FS("fuse"); #ifdef CONFIG_BLOCK static void fuse_kill_sb_blk(struct super_block *sb) { fuse_sb_destroy(sb); kill_block_super(sb); fuse_mount_destroy(get_fuse_mount_super(sb)); } static struct file_system_type fuseblk_fs_type = { .owner = THIS_MODULE, .name = "fuseblk", .init_fs_context = fuse_init_fs_context, .parameters = fuse_fs_parameters, .kill_sb = fuse_kill_sb_blk, .fs_flags = FS_REQUIRES_DEV | FS_HAS_SUBTYPE | FS_ALLOW_IDMAP, }; MODULE_ALIAS_FS("fuseblk"); static inline int register_fuseblk(void) { return register_filesystem(&fuseblk_fs_type); } static inline void unregister_fuseblk(void) { unregister_filesystem(&fuseblk_fs_type); } #else static inline int register_fuseblk(void) { return 0; } static inline void unregister_fuseblk(void) { } #endif static void fuse_inode_init_once(void *foo) { struct inode *inode = foo; inode_init_once(inode); } static int __init fuse_fs_init(void) { int err; fuse_inode_cachep = kmem_cache_create("fuse_inode", sizeof(struct fuse_inode), 0, SLAB_HWCACHE_ALIGN|SLAB_ACCOUNT|SLAB_RECLAIM_ACCOUNT, fuse_inode_init_once); err = -ENOMEM; if (!fuse_inode_cachep) goto out; err = register_fuseblk(); if (err) goto out2; err = register_filesystem(&fuse_fs_type); if (err) goto out3; err = fuse_sysctl_register(); if (err) goto out4; return 0; out4: unregister_filesystem(&fuse_fs_type); out3: unregister_fuseblk(); out2: kmem_cache_destroy(fuse_inode_cachep); out: return err; } static void fuse_fs_cleanup(void) { fuse_sysctl_unregister(); unregister_filesystem(&fuse_fs_type); unregister_fuseblk(); /* * Make sure all delayed rcu free inodes are flushed before we * destroy cache. 
*/ rcu_barrier(); kmem_cache_destroy(fuse_inode_cachep); } static struct kobject *fuse_kobj; static int fuse_sysfs_init(void) { int err; fuse_kobj = kobject_create_and_add("fuse", fs_kobj); if (!fuse_kobj) { err = -ENOMEM; goto out_err; } err = sysfs_create_mount_point(fuse_kobj, "connections"); if (err) goto out_fuse_unregister; return 0; out_fuse_unregister: kobject_put(fuse_kobj); out_err: return err; } static void fuse_sysfs_cleanup(void) { sysfs_remove_mount_point(fuse_kobj, "connections"); kobject_put(fuse_kobj); } static int __init fuse_init(void) { int res; pr_info("init (API version %i.%i)\n", FUSE_KERNEL_VERSION, FUSE_KERNEL_MINOR_VERSION); INIT_LIST_HEAD(&fuse_conn_list); res = fuse_fs_init(); if (res) goto err; res = fuse_dev_init(); if (res) goto err_fs_cleanup; res = fuse_sysfs_init(); if (res) goto err_dev_cleanup; res = fuse_ctl_init(); if (res) goto err_sysfs_cleanup; fuse_dentry_tree_init(); sanitize_global_limit(&max_user_bgreq); sanitize_global_limit(&max_user_congthresh); return 0; err_sysfs_cleanup: fuse_sysfs_cleanup(); err_dev_cleanup: fuse_dev_cleanup(); err_fs_cleanup: fuse_fs_cleanup(); err: return res; } static void __exit fuse_exit(void) { pr_debug("exit\n"); fuse_dentry_tree_cleanup(); fuse_ctl_cleanup(); fuse_sysfs_cleanup(); fuse_fs_cleanup(); fuse_dev_cleanup(); } module_init(fuse_init); module_exit(fuse_exit);
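The background-request and congestion-threshold defaults fed through sanitize_global_limit() above scale with installed RAM: ((totalram_pages() << PAGE_SHIFT) >> 13) / 392 works out to roughly one background request per 3 MiB of memory, capped at 65535. A minimal stand-alone sketch of that arithmetic follows; the helper name and the example sizes are ours, not part of the driver.

#include <stdio.h>

/* Hypothetical user-space restatement of sanitize_global_limit()'s default. */
static unsigned int default_bg_limit(unsigned long long ram_bytes)
{
    /* totalram_pages() << PAGE_SHIFT is the RAM size in bytes */
    unsigned long long limit = (ram_bytes >> 13) / 392;

    if (limit >= 1ULL << 16)        /* same cap as the kernel code: must fit in 16 bits */
        limit = (1ULL << 16) - 1;
    return (unsigned int)limit;
}

int main(void)
{
    printf("1 GiB -> %u\n", default_bg_limit(1ULL << 30)); /* 334 */
    printf("8 GiB -> %u\n", default_bg_limit(1ULL << 33)); /* 2675 */
    printf("1 TiB -> %u\n", default_bg_limit(1ULL << 40)); /* 65535 (capped) */
    return 0;
}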
// SPDX-License-Identifier: GPL-2.0+ /* * linux/fs/jbd2/checkpoint.c * * Written by Stephen C. Tweedie <sct@redhat.com>, 1999 * * Copyright 1999 Red Hat Software --- All Rights Reserved * * Checkpoint routines for the generic filesystem journaling code. * Part of the ext2fs journaling system. * * Checkpointing is the process of ensuring that a section of the log is * committed fully to disk, so that that portion of the log can be * reused. */ #include <linux/time.h> #include <linux/fs.h> #include <linux/jbd2.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/blkdev.h> #include <trace/events/jbd2.h> /* * Unlink a buffer from a transaction checkpoint list. * * Called with j_list_lock held.
*/ static inline void __buffer_unlink(struct journal_head *jh) { transaction_t *transaction = jh->b_cp_transaction; jh->b_cpnext->b_cpprev = jh->b_cpprev; jh->b_cpprev->b_cpnext = jh->b_cpnext; if (transaction->t_checkpoint_list == jh) { transaction->t_checkpoint_list = jh->b_cpnext; if (transaction->t_checkpoint_list == jh) transaction->t_checkpoint_list = NULL; } } /* * __jbd2_log_wait_for_space: wait until there is space in the journal. * * Called under j-state_lock *only*. It will be unlocked if we have to wait * for a checkpoint to free up some space in the log. */ void __jbd2_log_wait_for_space(journal_t *journal) __acquires(&journal->j_state_lock) __releases(&journal->j_state_lock) { int nblocks, space_left; /* assert_spin_locked(&journal->j_state_lock); */ nblocks = journal->j_max_transaction_buffers; while (jbd2_log_space_left(journal) < nblocks) { write_unlock(&journal->j_state_lock); mutex_lock_io(&journal->j_checkpoint_mutex); /* * Test again, another process may have checkpointed while we * were waiting for the checkpoint lock. If there are no * transactions ready to be checkpointed, try to recover * journal space by calling cleanup_journal_tail(), and if * that doesn't work, by waiting for the currently committing * transaction to complete. If there is absolutely no way * to make progress, this is either a BUG or corrupted * filesystem, so abort the journal and leave a stack * trace for forensic evidence. */ write_lock(&journal->j_state_lock); if (journal->j_flags & JBD2_ABORT) { mutex_unlock(&journal->j_checkpoint_mutex); return; } spin_lock(&journal->j_list_lock); space_left = jbd2_log_space_left(journal); if (space_left < nblocks) { int chkpt = journal->j_checkpoint_transactions != NULL; tid_t tid = 0; bool has_transaction = false; if (journal->j_committing_transaction) { tid = journal->j_committing_transaction->t_tid; has_transaction = true; } spin_unlock(&journal->j_list_lock); write_unlock(&journal->j_state_lock); if (chkpt) { jbd2_log_do_checkpoint(journal); } else if (jbd2_cleanup_journal_tail(journal) <= 0) { /* * We were able to recover space or the * journal was aborted due to an error. */ ; } else if (has_transaction) { /* * jbd2_journal_commit_transaction() may want * to take the checkpoint_mutex if JBD2_FLUSHED * is set. So we need to temporarily drop it. */ mutex_unlock(&journal->j_checkpoint_mutex); jbd2_log_wait_commit(journal, tid); write_lock(&journal->j_state_lock); continue; } else { printk(KERN_ERR "%s: needed %d blocks and " "only had %d space available\n", __func__, nblocks, space_left); printk(KERN_ERR "%s: no way to get more " "journal space in %s\n", __func__, journal->j_devname); WARN_ON(1); jbd2_journal_abort(journal, -ENOSPC); } write_lock(&journal->j_state_lock); } else { spin_unlock(&journal->j_list_lock); } mutex_unlock(&journal->j_checkpoint_mutex); } } static void __flush_batch(journal_t *journal, int *batch_count) { int i; struct blk_plug plug; blk_start_plug(&plug); for (i = 0; i < *batch_count; i++) write_dirty_buffer(journal->j_chkpt_bhs[i], JBD2_JOURNAL_REQ_FLAGS); blk_finish_plug(&plug); for (i = 0; i < *batch_count; i++) { struct buffer_head *bh = journal->j_chkpt_bhs[i]; BUFFER_TRACE(bh, "brelse"); __brelse(bh); journal->j_chkpt_bhs[i] = NULL; } *batch_count = 0; } /* * Perform an actual checkpoint. We take the first transaction on the * list of transactions to be checkpointed and send all its buffers * to disk. We submit larger chunks of data at once. * * The journal should be locked before calling this function. 
* Called with j_checkpoint_mutex held. */ int jbd2_log_do_checkpoint(journal_t *journal) { struct journal_head *jh; struct buffer_head *bh; transaction_t *transaction; tid_t this_tid; int result, batch_count = 0; jbd2_debug(1, "Start checkpoint\n"); /* * First thing: if there are any transactions in the log which * don't need checkpointing, just eliminate them from the * journal straight away. */ result = jbd2_cleanup_journal_tail(journal); trace_jbd2_checkpoint(journal, result); jbd2_debug(1, "cleanup_journal_tail returned %d\n", result); if (result <= 0) return result; /* * OK, we need to start writing disk blocks. Take one transaction * and write it. */ spin_lock(&journal->j_list_lock); if (!journal->j_checkpoint_transactions) goto out; transaction = journal->j_checkpoint_transactions; if (transaction->t_chp_stats.cs_chp_time == 0) transaction->t_chp_stats.cs_chp_time = jiffies; this_tid = transaction->t_tid; restart: /* * If someone cleaned up this transaction while we slept, we're * done (maybe it's a new transaction, but it fell at the same * address). */ if (journal->j_checkpoint_transactions != transaction || transaction->t_tid != this_tid) goto out; /* checkpoint all of the transaction's buffers */ while (transaction->t_checkpoint_list) { jh = transaction->t_checkpoint_list; bh = jh2bh(jh); if (jh->b_transaction != NULL) { transaction_t *t = jh->b_transaction; tid_t tid = t->t_tid; transaction->t_chp_stats.cs_forced_to_close++; spin_unlock(&journal->j_list_lock); if (unlikely(journal->j_flags & JBD2_UNMOUNT)) /* * The journal thread is dead; so * starting and waiting for a commit * to finish will cause us to wait for * a _very_ long time. */ printk(KERN_ERR "JBD2: %s: Waiting for Godot: block %llu\n", journal->j_devname, (unsigned long long) bh->b_blocknr); if (batch_count) __flush_batch(journal, &batch_count); jbd2_log_start_commit(journal, tid); /* * jbd2_journal_commit_transaction() may want * to take the checkpoint_mutex if JBD2_FLUSHED * is set, jbd2_update_log_tail() called by * jbd2_journal_commit_transaction() may also take * checkpoint_mutex. So we need to temporarily * drop it. */ mutex_unlock(&journal->j_checkpoint_mutex); jbd2_log_wait_commit(journal, tid); mutex_lock_io(&journal->j_checkpoint_mutex); spin_lock(&journal->j_list_lock); goto restart; } if (!trylock_buffer(bh)) { /* * The buffer is locked, it may be writing back, or * flushing out in the last couple of cycles, or * re-adding into a new transaction, need to check * it again until it's unlocked. */ get_bh(bh); spin_unlock(&journal->j_list_lock); wait_on_buffer(bh); /* the journal_head may have gone by now */ BUFFER_TRACE(bh, "brelse"); __brelse(bh); goto retry; } else if (!buffer_dirty(bh)) { unlock_buffer(bh); BUFFER_TRACE(bh, "remove from checkpoint"); /* * If the transaction was released or the checkpoint * list was empty, we're done. */ if (__jbd2_journal_remove_checkpoint(jh) || !transaction->t_checkpoint_list) goto out; } else { unlock_buffer(bh); /* * We are about to write the buffer, it could be * raced by some other transaction shrink or buffer * re-log logic once we release the j_list_lock, * leave it on the checkpoint list and check status * again to make sure it's clean. 
*/ BUFFER_TRACE(bh, "queue"); get_bh(bh); J_ASSERT_BH(bh, !buffer_jwrite(bh)); journal->j_chkpt_bhs[batch_count++] = bh; transaction->t_chp_stats.cs_written++; transaction->t_checkpoint_list = jh->b_cpnext; } if ((batch_count == JBD2_NR_BATCH) || need_resched() || spin_needbreak(&journal->j_list_lock) || jh2bh(transaction->t_checkpoint_list) == journal->j_chkpt_bhs[0]) goto unlock_and_flush; } if (batch_count) { unlock_and_flush: spin_unlock(&journal->j_list_lock); retry: if (batch_count) __flush_batch(journal, &batch_count); cond_resched(); spin_lock(&journal->j_list_lock); goto restart; } out: spin_unlock(&journal->j_list_lock); result = jbd2_cleanup_journal_tail(journal); return (result < 0) ? result : 0; } /* * Check the list of checkpoint transactions for the journal to see if * we have already got rid of any since the last update of the log tail * in the journal superblock. If so, we can instantly roll the * superblock forward to remove those transactions from the log. * * Return <0 on error, 0 on success, 1 if there was nothing to clean up. * * Called with the journal lock held. * * This is the only part of the journaling code which really needs to be * aware of transaction aborts. Checkpointing involves writing to the * main filesystem area rather than to the journal, so it can proceed * even in abort state, but we must not update the super block if * checkpointing may have failed. Otherwise, we would lose some metadata * buffers which should be written-back to the filesystem. */ int jbd2_cleanup_journal_tail(journal_t *journal) { tid_t first_tid; unsigned long blocknr; if (is_journal_aborted(journal)) return -EIO; if (!jbd2_journal_get_log_tail(journal, &first_tid, &blocknr)) return 1; J_ASSERT(blocknr != 0); /* * We need to make sure that any blocks that were recently written out * --- perhaps by jbd2_log_do_checkpoint() --- are flushed out before * we drop the transactions from the journal. It's unlikely this will * be necessary, especially with an appropriately sized journal, but we * need this to guarantee correctness. Fortunately * jbd2_cleanup_journal_tail() doesn't get called all that often. */ if (journal->j_flags & JBD2_BARRIER) blkdev_issue_flush(journal->j_fs_dev); return __jbd2_update_log_tail(journal, first_tid, blocknr); } /* Checkpoint list management */ /* * journal_shrink_one_cp_list * * Find all the written-back checkpoint buffers in the given list * and try to release them. If the whole transaction is released, set * the 'released' parameter. Return the number of released checkpointed * buffers. * * Called with j_list_lock held. */ static unsigned long journal_shrink_one_cp_list(struct journal_head *jh, enum jbd2_shrink_type type, bool *released) { struct journal_head *last_jh; struct journal_head *next_jh = jh; unsigned long nr_freed = 0; int ret; *released = false; if (!jh) return 0; last_jh = jh->b_cpprev; do { jh = next_jh; next_jh = jh->b_cpnext; if (type == JBD2_SHRINK_DESTROY) { ret = __jbd2_journal_remove_checkpoint(jh); } else { ret = jbd2_journal_try_remove_checkpoint(jh); if (ret < 0) { if (type == JBD2_SHRINK_BUSY_SKIP) continue; break; } } nr_freed++; if (ret) { *released = true; break; } if (need_resched()) break; } while (jh != last_jh); return nr_freed; } /* * jbd2_journal_shrink_checkpoint_list * * Find 'nr_to_scan' written-back checkpoint buffers in the journal * and try to release them. Return the number of released checkpointed * buffers. * * Called with j_list_lock held. 
*/ unsigned long jbd2_journal_shrink_checkpoint_list(journal_t *journal, unsigned long *nr_to_scan) { transaction_t *transaction, *last_transaction, *next_transaction; bool __maybe_unused released; tid_t first_tid = 0, last_tid = 0, next_tid = 0; tid_t tid = 0; unsigned long nr_freed = 0; unsigned long freed; bool first_set = false; again: spin_lock(&journal->j_list_lock); if (!journal->j_checkpoint_transactions) { spin_unlock(&journal->j_list_lock); goto out; } /* * Get next shrink transaction, resume previous scan or start * over again. If some others do checkpoint and drop transaction * from the checkpoint list, we ignore saved j_shrink_transaction * and start over unconditionally. */ if (journal->j_shrink_transaction) transaction = journal->j_shrink_transaction; else transaction = journal->j_checkpoint_transactions; if (!first_set) { first_tid = transaction->t_tid; first_set = true; } last_transaction = journal->j_checkpoint_transactions->t_cpprev; next_transaction = transaction; last_tid = last_transaction->t_tid; do { transaction = next_transaction; next_transaction = transaction->t_cpnext; tid = transaction->t_tid; freed = journal_shrink_one_cp_list(transaction->t_checkpoint_list, JBD2_SHRINK_BUSY_SKIP, &released); nr_freed += freed; (*nr_to_scan) -= min(*nr_to_scan, freed); if (*nr_to_scan == 0) break; if (need_resched() || spin_needbreak(&journal->j_list_lock)) break; } while (transaction != last_transaction); if (transaction != last_transaction) { journal->j_shrink_transaction = next_transaction; next_tid = next_transaction->t_tid; } else { journal->j_shrink_transaction = NULL; next_tid = 0; } spin_unlock(&journal->j_list_lock); cond_resched(); if (*nr_to_scan && journal->j_shrink_transaction) goto again; out: trace_jbd2_shrink_checkpoint_list(journal, first_tid, tid, last_tid, nr_freed, next_tid); return nr_freed; } /* * journal_clean_checkpoint_list * * Find all the written-back checkpoint buffers in the journal and release them. * If 'type' is JBD2_SHRINK_DESTROY, release all buffers unconditionally. If * 'type' is JBD2_SHRINK_BUSY_STOP, will stop release buffers if encounters a * busy buffer. To avoid wasting CPU cycles scanning the buffer list in some * cases, don't pass JBD2_SHRINK_BUSY_SKIP 'type' for this function. * * Called with j_list_lock held. */ void __jbd2_journal_clean_checkpoint_list(journal_t *journal, enum jbd2_shrink_type type) { transaction_t *transaction, *last_transaction, *next_transaction; bool released; WARN_ON_ONCE(type == JBD2_SHRINK_BUSY_SKIP); transaction = journal->j_checkpoint_transactions; if (!transaction) return; last_transaction = transaction->t_cpprev; next_transaction = transaction; do { transaction = next_transaction; next_transaction = transaction->t_cpnext; journal_shrink_one_cp_list(transaction->t_checkpoint_list, type, &released); /* * This function only frees up some memory if possible so we * dont have an obligation to finish processing. Bail out if * preemption requested: */ if (need_resched()) return; /* * Stop scanning if we couldn't free the transaction. This * avoids pointless scanning of transactions which still * weren't checkpointed. */ if (!released) return; } while (transaction != last_transaction); } /* * Remove buffers from all checkpoint lists as journal is aborted and we just * need to free memory */ void jbd2_journal_destroy_checkpoint(journal_t *journal) { /* * We loop because __jbd2_journal_clean_checkpoint_list() may abort * early due to a need of rescheduling. 
*/ while (1) { spin_lock(&journal->j_list_lock); if (!journal->j_checkpoint_transactions) { spin_unlock(&journal->j_list_lock); break; } __jbd2_journal_clean_checkpoint_list(journal, JBD2_SHRINK_DESTROY); spin_unlock(&journal->j_list_lock); cond_resched(); } } /* * journal_remove_checkpoint: called after a buffer has been committed * to disk (either by being write-back flushed to disk, or being * committed to the log). * * We cannot safely clean a transaction out of the log until all of the * buffer updates committed in that transaction have safely been stored * elsewhere on disk. To achieve this, all of the buffers in a * transaction need to be maintained on the transaction's checkpoint * lists until they have been rewritten, at which point this function is * called to remove the buffer from the existing transaction's * checkpoint lists. * * The function returns 1 if it frees the transaction, 0 otherwise. * The function can free jh and bh. * * This function is called with j_list_lock held. */ int __jbd2_journal_remove_checkpoint(struct journal_head *jh) { struct transaction_chp_stats_s *stats; transaction_t *transaction; journal_t *journal; JBUFFER_TRACE(jh, "entry"); transaction = jh->b_cp_transaction; if (!transaction) { JBUFFER_TRACE(jh, "not on transaction"); return 0; } journal = transaction->t_journal; JBUFFER_TRACE(jh, "removing from transaction"); __buffer_unlink(jh); jh->b_cp_transaction = NULL; percpu_counter_dec(&journal->j_checkpoint_jh_count); jbd2_journal_put_journal_head(jh); /* Is this transaction empty? */ if (transaction->t_checkpoint_list) return 0; /* * There is one special case to worry about: if we have just pulled the * buffer off a running or committing transaction's checkpoing list, * then even if the checkpoint list is empty, the transaction obviously * cannot be dropped! * * The locking here around t_state is a bit sleazy. * See the comment at the end of jbd2_journal_commit_transaction(). */ if (transaction->t_state != T_FINISHED) return 0; /* * OK, that was the last buffer for the transaction, we can now * safely remove this transaction from the log. */ stats = &transaction->t_chp_stats; if (stats->cs_chp_time) stats->cs_chp_time = jbd2_time_diff(stats->cs_chp_time, jiffies); trace_jbd2_checkpoint_stats(journal->j_fs_dev->bd_dev, transaction->t_tid, stats); __jbd2_journal_drop_transaction(journal, transaction); jbd2_journal_free_transaction(transaction); return 1; } /* * Check the checkpoint buffer and try to remove it from the checkpoint * list if it's clean. Returns -EBUSY if it is not clean, returns 1 if * it frees the transaction, 0 otherwise. * * This function is called with j_list_lock held. */ int jbd2_journal_try_remove_checkpoint(struct journal_head *jh) { struct buffer_head *bh = jh2bh(jh); if (jh->b_transaction) return -EBUSY; if (!trylock_buffer(bh)) return -EBUSY; if (buffer_dirty(bh)) { unlock_buffer(bh); return -EBUSY; } unlock_buffer(bh); /* * Buffer is clean and the IO has finished (we held the buffer * lock) so the checkpoint is done. We can safely remove the * buffer from this transaction. */ JBUFFER_TRACE(jh, "remove from checkpoint list"); return __jbd2_journal_remove_checkpoint(jh); } /* * journal_insert_checkpoint: put a committed buffer onto a checkpoint * list so that we know when it is safe to clean the transaction out of * the log. * * Called with the journal locked. * Called with j_list_lock held. 
*/ void __jbd2_journal_insert_checkpoint(struct journal_head *jh, transaction_t *transaction) { JBUFFER_TRACE(jh, "entry"); J_ASSERT_JH(jh, buffer_dirty(jh2bh(jh)) || buffer_jbddirty(jh2bh(jh))); J_ASSERT_JH(jh, jh->b_cp_transaction == NULL); /* Get reference for checkpointing transaction */ jbd2_journal_grab_journal_head(jh2bh(jh)); jh->b_cp_transaction = transaction; if (!transaction->t_checkpoint_list) { jh->b_cpnext = jh->b_cpprev = jh; } else { jh->b_cpnext = transaction->t_checkpoint_list; jh->b_cpprev = transaction->t_checkpoint_list->b_cpprev; jh->b_cpprev->b_cpnext = jh; jh->b_cpnext->b_cpprev = jh; } transaction->t_checkpoint_list = jh; percpu_counter_inc(&transaction->t_journal->j_checkpoint_jh_count); } /* * We've finished with this transaction structure: adios... * * The transaction must have no links except for the checkpoint by this * point. * * Called with the journal locked. * Called with j_list_lock held. */ void __jbd2_journal_drop_transaction(journal_t *journal, transaction_t *transaction) { assert_spin_locked(&journal->j_list_lock); journal->j_shrink_transaction = NULL; if (transaction->t_cpnext) { transaction->t_cpnext->t_cpprev = transaction->t_cpprev; transaction->t_cpprev->t_cpnext = transaction->t_cpnext; if (journal->j_checkpoint_transactions == transaction) journal->j_checkpoint_transactions = transaction->t_cpnext; if (journal->j_checkpoint_transactions == transaction) journal->j_checkpoint_transactions = NULL; } J_ASSERT(transaction->t_state == T_FINISHED); J_ASSERT(transaction->t_buffers == NULL); J_ASSERT(transaction->t_forget == NULL); J_ASSERT(transaction->t_shadow_list == NULL); J_ASSERT(transaction->t_checkpoint_list == NULL); J_ASSERT(atomic_read(&transaction->t_updates) == 0); J_ASSERT(journal->j_committing_transaction != transaction); J_ASSERT(journal->j_running_transaction != transaction); trace_jbd2_drop_transaction(journal, transaction); jbd2_debug(1, "Dropping transaction %d, all done\n", transaction->t_tid); }
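The checkpoint list manipulated by __jbd2_journal_insert_checkpoint() and __buffer_unlink() above is a circular doubly-linked list threaded through journal_head's b_cpnext/b_cpprev fields, with t_checkpoint_list acting as the anchor pointer. Below is a minimal sketch of the same insert and unlink steps on a hypothetical toy node type; it is not part of checkpoint.c.

#include <stddef.h>

struct cp_node {
    struct cp_node *next, *prev;
};

/* Insert @n into the ring anchored at *@head, mirroring
 * __jbd2_journal_insert_checkpoint(): the new node becomes the anchor. */
static void cp_insert(struct cp_node **head, struct cp_node *n)
{
    if (!*head) {
        n->next = n->prev = n;          /* sole element points at itself */
    } else {
        n->next = *head;
        n->prev = (*head)->prev;
        n->prev->next = n;
        n->next->prev = n;
    }
    *head = n;
}

/* Unlink @n, mirroring __buffer_unlink(): clear *@head when @n was the
 * last element of the ring. */
static void cp_unlink(struct cp_node **head, struct cp_node *n)
{
    n->next->prev = n->prev;
    n->prev->next = n->next;
    if (*head == n) {
        *head = n->next;
        if (*head == n)                 /* n pointed at itself: ring is empty */
            *head = NULL;
    }
}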
// SPDX-License-Identifier: GPL-2.0 #include <linux/skbuff.h> #include <linux/netfilter.h> #include <linux/netfilter_ipv4.h> #include <linux/netfilter_ipv6.h> #include <linux/netfilter/nfnetlink.h> #include <linux/netfilter/nf_tables.h> #include <net/netfilter/nf_tables.h> #include <net/netfilter/nf_tables_ipv4.h> #include <net/netfilter/nf_tables_ipv6.h> #include <net/route.h> #include <net/ip.h> #ifdef CONFIG_NF_TABLES_IPV4 static unsigned int nf_route_table_hook4(void *priv, struct sk_buff *skb, const struct nf_hook_state *state) { const struct iphdr *iph; struct nft_pktinfo pkt; __be32 saddr, daddr; unsigned int ret; u32 mark; int err; u8 tos; nft_set_pktinfo(&pkt, skb, state); nft_set_pktinfo_ipv4(&pkt); mark = skb->mark; iph = ip_hdr(skb); saddr = iph->saddr; daddr = iph->daddr; tos = iph->tos; ret = nft_do_chain(&pkt, priv); if (ret == NF_ACCEPT) { iph = ip_hdr(skb); if (iph->saddr != saddr || iph->daddr != daddr || skb->mark != mark || iph->tos != tos) { err = ip_route_me_harder(state->net, state->sk, skb, RTN_UNSPEC); if (err < 0) ret = NF_DROP_ERR(err); } } return ret; } static const struct nft_chain_type nft_chain_route_ipv4 = { .name = "route", .type = NFT_CHAIN_T_ROUTE, .family = NFPROTO_IPV4, .hook_mask = (1 << NF_INET_LOCAL_OUT), .hooks = { [NF_INET_LOCAL_OUT] = nf_route_table_hook4, }, }; #endif #ifdef CONFIG_NF_TABLES_IPV6 static unsigned int nf_route_table_hook6(void *priv, struct sk_buff *skb, const struct nf_hook_state *state) { struct in6_addr saddr, daddr; struct nft_pktinfo pkt; u32 mark, flowlabel; unsigned int ret; u8 hop_limit; int err; nft_set_pktinfo(&pkt, skb, state); nft_set_pktinfo_ipv6(&pkt); /* save source/dest address, mark, hoplimit, flowlabel, priority */ memcpy(&saddr, &ipv6_hdr(skb)->saddr, sizeof(saddr)); memcpy(&daddr, &ipv6_hdr(skb)->daddr, sizeof(daddr)); mark = skb->mark; hop_limit = ipv6_hdr(skb)->hop_limit; /* flowlabel and prio (includes version, which shouldn't change either) */ flowlabel = *((u32 *)ipv6_hdr(skb)); ret = nft_do_chain(&pkt, priv); if (ret == NF_ACCEPT && (memcmp(&ipv6_hdr(skb)->saddr, &saddr, sizeof(saddr)) || memcmp(&ipv6_hdr(skb)->daddr, &daddr, sizeof(daddr)) || skb->mark != mark || ipv6_hdr(skb)->hop_limit != hop_limit || flowlabel != *((u32 *)ipv6_hdr(skb)))) { err = nf_ip6_route_me_harder(state->net, state->sk, skb); if (err < 0) ret = NF_DROP_ERR(err); } return ret; } static const struct nft_chain_type nft_chain_route_ipv6 = { .name = "route", .type = NFT_CHAIN_T_ROUTE, .family = NFPROTO_IPV6, .hook_mask = (1 << NF_INET_LOCAL_OUT), .hooks = { [NF_INET_LOCAL_OUT] = nf_route_table_hook6, }, }; #endif #ifdef CONFIG_NF_TABLES_INET static unsigned int nf_route_table_inet(void *priv, struct sk_buff *skb, const struct nf_hook_state *state) { struct nft_pktinfo pkt; switch (state->pf) { case NFPROTO_IPV4: return nf_route_table_hook4(priv, skb, state); case NFPROTO_IPV6: return nf_route_table_hook6(priv, skb, state); default: nft_set_pktinfo(&pkt, skb, state); break;
} return nft_do_chain(&pkt, priv); } static const struct nft_chain_type nft_chain_route_inet = { .name = "route", .type = NFT_CHAIN_T_ROUTE, .family = NFPROTO_INET, .hook_mask = (1 << NF_INET_LOCAL_OUT), .hooks = { [NF_INET_LOCAL_OUT] = nf_route_table_inet, }, }; #endif void __init nft_chain_route_init(void) { #ifdef CONFIG_NF_TABLES_IPV6 nft_register_chain_type(&nft_chain_route_ipv6); #endif #ifdef CONFIG_NF_TABLES_IPV4 nft_register_chain_type(&nft_chain_route_ipv4); #endif #ifdef CONFIG_NF_TABLES_INET nft_register_chain_type(&nft_chain_route_inet); #endif } void __exit nft_chain_route_fini(void) { #ifdef CONFIG_NF_TABLES_IPV6 nft_unregister_chain_type(&nft_chain_route_ipv6); #endif #ifdef CONFIG_NF_TABLES_IPV4 nft_unregister_chain_type(&nft_chain_route_ipv4); #endif #ifdef CONFIG_NF_TABLES_INET nft_unregister_chain_type(&nft_chain_route_inet); #endif }
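In nf_route_table_hook6() above, a single 32-bit load from the start of the IPv6 header is enough to detect changes to the version, traffic class, and flow label at once, because all three fields share the first word; the kernel simply compares that raw word before and after running the chain, without decoding it. The stand-alone sketch below only illustrates how those fields pack into the word; the helper and the sample value are ours, not part of the kernel code.

#include <stdint.h>
#include <stdio.h>

/* First 32 bits of an IPv6 header, after conversion to host byte order:
 * | version (4 bits) | traffic class (8 bits) | flow label (20 bits) | */
static void unpack_v6_word0(uint32_t w, unsigned *version,
                            unsigned *tclass, unsigned *flowlabel)
{
    *version   = (w >> 28) & 0xf;
    *tclass    = (w >> 20) & 0xff;
    *flowlabel = w & 0xfffff;
}

int main(void)
{
    unsigned v, tc, fl;

    /* 0x60012345: version 6, traffic class 0x00, flow label 0x12345 */
    unpack_v6_word0(0x60012345u, &v, &tc, &fl);
    printf("version=%u tclass=0x%02x flowlabel=0x%05x\n", v, tc, fl);
    return 0;
}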
// SPDX-License-Identifier: GPL-2.0-only /* Copyright (C) 2011-2013 Jozsef Kadlecsik <kadlec@netfilter.org> */ /* Kernel module implementing an IP set type: the hash:net,iface type */ #include <linux/jhash.h> #include <linux/module.h> #include <linux/ip.h> #include <linux/skbuff.h> #include <linux/errno.h> #include <linux/random.h> #include <net/ip.h> #include <net/ipv6.h> #include <net/netlink.h> #include <linux/netfilter.h> #include <linux/netfilter_bridge.h> #include <linux/netfilter/ipset/pfxlen.h> #include <linux/netfilter/ipset/ip_set.h> #include <linux/netfilter/ipset/ip_set_hash.h> #define IPSET_TYPE_REV_MIN 0 /* 1 nomatch flag support added */ /* 2 /0 support added */ /* 3 Counters support added */ /* 4 Comments support added */ /* 5 Forceadd support added */ /* 6 skbinfo support added */ /* 7 interface wildcard support added */ #define IPSET_TYPE_REV_MAX 8 /* bucketsize, initval support added */ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@netfilter.org>"); IP_SET_MODULE_DESC("hash:net,iface", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX); MODULE_ALIAS("ip_set_hash:net,iface"); /* Type specific function prefix */ #define HTYPE hash_netiface #define IP_SET_HASH_WITH_NETS #define IP_SET_HASH_WITH_MULTI #define IP_SET_HASH_WITH_NET0 #define STRSCPY(a, b) strscpy(a, b, IFNAMSIZ) /* IPv4 variant */ struct hash_netiface4_elem_hashed { __be32 ip; u8 physdev; u8 cidr; u8 nomatch; u8 elem; }; /* Member elements */ struct hash_netiface4_elem { __be32 ip; u8 physdev; u8 cidr; u8 nomatch; u8 elem; u8 wildcard;
char iface[IFNAMSIZ]; }; /* Common functions */ static bool hash_netiface4_data_equal(const struct hash_netiface4_elem *ip1, const struct hash_netiface4_elem *ip2, u32 *multi) { return ip1->ip == ip2->ip && ip1->cidr == ip2->cidr && (++*multi) && ip1->physdev == ip2->physdev && (ip1->wildcard ? strncmp(ip1->iface, ip2->iface, strlen(ip1->iface)) == 0 : strcmp(ip1->iface, ip2->iface) == 0); } static int hash_netiface4_do_data_match(const struct hash_netiface4_elem *elem) { return elem->nomatch ? -ENOTEMPTY : 1; } static void hash_netiface4_data_set_flags(struct hash_netiface4_elem *elem, u32 flags) { elem->nomatch = (flags >> 16) & IPSET_FLAG_NOMATCH; } static void hash_netiface4_data_reset_flags(struct hash_netiface4_elem *elem, u8 *flags) { swap(*flags, elem->nomatch); } static void hash_netiface4_data_netmask(struct hash_netiface4_elem *elem, u8 cidr) { elem->ip &= ip_set_netmask(cidr); elem->cidr = cidr; } static bool hash_netiface4_data_list(struct sk_buff *skb, const struct hash_netiface4_elem *data) { u32 flags = (data->physdev ? IPSET_FLAG_PHYSDEV : 0) | (data->wildcard ? IPSET_FLAG_IFACE_WILDCARD : 0); if (data->nomatch) flags |= IPSET_FLAG_NOMATCH; if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) || nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr) || nla_put_string(skb, IPSET_ATTR_IFACE, data->iface) || (flags && nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)))) goto nla_put_failure; return false; nla_put_failure: return true; } static void hash_netiface4_data_next(struct hash_netiface4_elem *next, const struct hash_netiface4_elem *d) { next->ip = d->ip; } #define MTYPE hash_netiface4 #define HOST_MASK 32 #define HKEY_DATALEN sizeof(struct hash_netiface4_elem_hashed) #include "ip_set_hash_gen.h" #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) static const char *get_physindev_name(const struct sk_buff *skb, struct net *net) { struct net_device *dev = nf_bridge_get_physindev(skb, net); return dev ? dev->name : NULL; } static const char *get_physoutdev_name(const struct sk_buff *skb) { struct net_device *dev = nf_bridge_get_physoutdev(skb); return dev ? dev->name : NULL; } #endif static int hash_netiface4_kadt(struct ip_set *set, const struct sk_buff *skb, const struct xt_action_param *par, enum ipset_adt adt, struct ip_set_adt_opt *opt) { struct hash_netiface4 *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_netiface4_elem e = { .cidr = INIT_CIDR(h->nets[0].cidr[0], HOST_MASK), .elem = 1, }; struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); if (adt == IPSET_TEST) e.cidr = HOST_MASK; ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip); e.ip &= ip_set_netmask(e.cidr); #define IFACE(dir) (par->state->dir ? par->state->dir->name : "") #define SRCDIR (opt->flags & IPSET_DIM_TWO_SRC) if (opt->cmdflags & IPSET_FLAG_PHYSDEV) { #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) const char *eiface = SRCDIR ? get_physindev_name(skb, xt_net(par)) : get_physoutdev_name(skb); if (!eiface) return -EINVAL; STRSCPY(e.iface, eiface); e.physdev = 1; #endif } else { STRSCPY(e.iface, SRCDIR ? 
IFACE(in) : IFACE(out)); } if (strlen(e.iface) == 0) return -EINVAL; return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags); } static int hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[], enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) { struct hash_netiface4 *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_netiface4_elem e = { .cidr = HOST_MASK, .elem = 1 }; struct ip_set_ext ext = IP_SET_INIT_UEXT(set); u32 ip = 0, ip_to = 0, i = 0; int ret; if (tb[IPSET_ATTR_LINENO]) *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IFACE] || !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS))) return -IPSET_ERR_PROTOCOL; ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip); if (ret) return ret; ret = ip_set_get_extensions(set, tb, &ext); if (ret) return ret; if (tb[IPSET_ATTR_CIDR]) { e.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); if (e.cidr > HOST_MASK) return -IPSET_ERR_INVALID_CIDR; } nla_strscpy(e.iface, tb[IPSET_ATTR_IFACE], IFNAMSIZ); if (tb[IPSET_ATTR_CADT_FLAGS]) { u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]); if (cadt_flags & IPSET_FLAG_PHYSDEV) e.physdev = 1; if (cadt_flags & IPSET_FLAG_NOMATCH) flags |= (IPSET_FLAG_NOMATCH << 16); if (cadt_flags & IPSET_FLAG_IFACE_WILDCARD) e.wildcard = 1; } if (adt == IPSET_TEST || !tb[IPSET_ATTR_IP_TO]) { e.ip = htonl(ip & ip_set_hostmask(e.cidr)); ret = adtfn(set, &e, &ext, &ext, flags); return ip_set_enomatch(ret, flags, adt, set) ? -ret : ip_set_eexist(ret, flags) ? 0 : ret; } if (tb[IPSET_ATTR_IP_TO]) { ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to); if (ret) return ret; if (ip_to < ip) swap(ip, ip_to); if (ip + UINT_MAX == ip_to) return -IPSET_ERR_HASH_RANGE; } else { ip_set_mask_from_to(ip, ip_to, e.cidr); } if (retried) ip = ntohl(h->next.ip); do { i++; e.ip = htonl(ip); if (i > IPSET_MAX_RANGE) { hash_netiface4_data_next(&h->next, &e); return -ERANGE; } ip = ip_set_range_to_cidr(ip, ip_to, &e.cidr); ret = adtfn(set, &e, &ext, &ext, flags); if (ret && !ip_set_eexist(ret, flags)) return ret; ret = 0; } while (ip++ < ip_to); return ret; } /* IPv6 variant */ struct hash_netiface6_elem_hashed { union nf_inet_addr ip; u8 physdev; u8 cidr; u8 nomatch; u8 elem; }; struct hash_netiface6_elem { union nf_inet_addr ip; u8 physdev; u8 cidr; u8 nomatch; u8 elem; u8 wildcard; char iface[IFNAMSIZ]; }; /* Common functions */ static bool hash_netiface6_data_equal(const struct hash_netiface6_elem *ip1, const struct hash_netiface6_elem *ip2, u32 *multi) { return ipv6_addr_equal(&ip1->ip.in6, &ip2->ip.in6) && ip1->cidr == ip2->cidr && (++*multi) && ip1->physdev == ip2->physdev && (ip1->wildcard ? strncmp(ip1->iface, ip2->iface, strlen(ip1->iface)) == 0 : strcmp(ip1->iface, ip2->iface) == 0); } static int hash_netiface6_do_data_match(const struct hash_netiface6_elem *elem) { return elem->nomatch ? -ENOTEMPTY : 1; } static void hash_netiface6_data_set_flags(struct hash_netiface6_elem *elem, u32 flags) { elem->nomatch = (flags >> 16) & IPSET_FLAG_NOMATCH; } static void hash_netiface6_data_reset_flags(struct hash_netiface6_elem *elem, u8 *flags) { swap(*flags, elem->nomatch); } static void hash_netiface6_data_netmask(struct hash_netiface6_elem *elem, u8 cidr) { ip6_netmask(&elem->ip, cidr); elem->cidr = cidr; } static bool hash_netiface6_data_list(struct sk_buff *skb, const struct hash_netiface6_elem *data) { u32 flags = (data->physdev ? IPSET_FLAG_PHYSDEV : 0) | (data->wildcard ? 
IPSET_FLAG_IFACE_WILDCARD : 0); if (data->nomatch) flags |= IPSET_FLAG_NOMATCH; if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) || nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr) || nla_put_string(skb, IPSET_ATTR_IFACE, data->iface) || (flags && nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)))) goto nla_put_failure; return false; nla_put_failure: return true; } static void hash_netiface6_data_next(struct hash_netiface6_elem *next, const struct hash_netiface6_elem *d) { } #undef MTYPE #undef HOST_MASK #define MTYPE hash_netiface6 #define HOST_MASK 128 #define HKEY_DATALEN sizeof(struct hash_netiface6_elem_hashed) #define IP_SET_EMIT_CREATE #include "ip_set_hash_gen.h" static int hash_netiface6_kadt(struct ip_set *set, const struct sk_buff *skb, const struct xt_action_param *par, enum ipset_adt adt, struct ip_set_adt_opt *opt) { struct hash_netiface6 *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_netiface6_elem e = { .cidr = INIT_CIDR(h->nets[0].cidr[0], HOST_MASK), .elem = 1, }; struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); if (adt == IPSET_TEST) e.cidr = HOST_MASK; ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip.in6); ip6_netmask(&e.ip, e.cidr); if (opt->cmdflags & IPSET_FLAG_PHYSDEV) { #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) const char *eiface = SRCDIR ? get_physindev_name(skb, xt_net(par)) : get_physoutdev_name(skb); if (!eiface) return -EINVAL; STRSCPY(e.iface, eiface); e.physdev = 1; #endif } else { STRSCPY(e.iface, SRCDIR ? IFACE(in) : IFACE(out)); } if (strlen(e.iface) == 0) return -EINVAL; return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags); } static int hash_netiface6_uadt(struct ip_set *set, struct nlattr *tb[], enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) { ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_netiface6_elem e = { .cidr = HOST_MASK, .elem = 1 }; struct ip_set_ext ext = IP_SET_INIT_UEXT(set); int ret; if (tb[IPSET_ATTR_LINENO]) *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IFACE] || !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS))) return -IPSET_ERR_PROTOCOL; if (unlikely(tb[IPSET_ATTR_IP_TO])) return -IPSET_ERR_HASH_RANGE_UNSUPPORTED; ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip); if (ret) return ret; ret = ip_set_get_extensions(set, tb, &ext); if (ret) return ret; if (tb[IPSET_ATTR_CIDR]) { e.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); if (e.cidr > HOST_MASK) return -IPSET_ERR_INVALID_CIDR; } ip6_netmask(&e.ip, e.cidr); nla_strscpy(e.iface, tb[IPSET_ATTR_IFACE], IFNAMSIZ); if (tb[IPSET_ATTR_CADT_FLAGS]) { u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]); if (cadt_flags & IPSET_FLAG_PHYSDEV) e.physdev = 1; if (cadt_flags & IPSET_FLAG_NOMATCH) flags |= (IPSET_FLAG_NOMATCH << 16); if (cadt_flags & IPSET_FLAG_IFACE_WILDCARD) e.wildcard = 1; } ret = adtfn(set, &e, &ext, &ext, flags); return ip_set_enomatch(ret, flags, adt, set) ? -ret : ip_set_eexist(ret, flags) ? 
0 : ret; } static struct ip_set_type hash_netiface_type __read_mostly = { .name = "hash:net,iface", .protocol = IPSET_PROTOCOL, .features = IPSET_TYPE_IP | IPSET_TYPE_IFACE | IPSET_TYPE_NOMATCH, .dimension = IPSET_DIM_TWO, .family = NFPROTO_UNSPEC, .revision_min = IPSET_TYPE_REV_MIN, .revision_max = IPSET_TYPE_REV_MAX, .create_flags[IPSET_TYPE_REV_MAX] = IPSET_CREATE_FLAG_BUCKETSIZE, .create = hash_netiface_create, .create_policy = { [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 }, [IPSET_ATTR_MAXELEM] = { .type = NLA_U32 }, [IPSET_ATTR_INITVAL] = { .type = NLA_U32 }, [IPSET_ATTR_BUCKETSIZE] = { .type = NLA_U8 }, [IPSET_ATTR_RESIZE] = { .type = NLA_U8 }, [IPSET_ATTR_PROTO] = { .type = NLA_U8 }, [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 }, }, .adt_policy = { [IPSET_ATTR_IP] = { .type = NLA_NESTED }, [IPSET_ATTR_IP_TO] = { .type = NLA_NESTED }, [IPSET_ATTR_IFACE] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 }, [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 }, [IPSET_ATTR_CIDR] = { .type = NLA_U8 }, [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, [IPSET_ATTR_LINENO] = { .type = NLA_U32 }, [IPSET_ATTR_BYTES] = { .type = NLA_U64 }, [IPSET_ATTR_PACKETS] = { .type = NLA_U64 }, [IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING, .len = IPSET_MAX_COMMENT_SIZE }, [IPSET_ATTR_SKBMARK] = { .type = NLA_U64 }, [IPSET_ATTR_SKBPRIO] = { .type = NLA_U32 }, [IPSET_ATTR_SKBQUEUE] = { .type = NLA_U16 }, }, .me = THIS_MODULE, }; static int __init hash_netiface_init(void) { return ip_set_type_register(&hash_netiface_type); } static void __exit hash_netiface_fini(void) { rcu_barrier(); ip_set_type_unregister(&hash_netiface_type); } module_init(hash_netiface_init); module_exit(hash_netiface_fini);
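/*
 * Illustrative userspace sketch (not part of the ipset sources above): the
 * data_equal() helpers compare interface names exactly, or, when the stored
 * element carries IPSET_FLAG_IFACE_WILDCARD, as a prefix match over the
 * stored name's length (the strncmp() branch in hash_netiface6_data_equal()).
 * iface_match() below mirrors only that comparison; the helper name is made
 * up for this example.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool iface_match(const char *stored, const char *probed, bool wildcard)
{
	return wildcard ? strncmp(stored, probed, strlen(stored)) == 0
			: strcmp(stored, probed) == 0;
}

int main(void)
{
	/* Exact element "eth0" matches only "eth0". */
	printf("%d\n", iface_match("eth0", "eth0", false));	/* 1 */
	printf("%d\n", iface_match("eth0", "eth1", false));	/* 0 */
	/* Wildcard element "eth" matches "eth0", "eth1", ... */
	printf("%d\n", iface_match("eth", "eth1", true));	/* 1 */
	printf("%d\n", iface_match("eth", "wlan0", true));	/* 0 */
	return 0;
}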
/* * Linear conversion Plug-In * Copyright (c) 1999 by Jaroslav Kysela <perex@perex.cz>, * Abramo Bagnara <abramo@alsa-project.org> * * * This library is free software; you can redistribute it and/or modify * it under the terms of the GNU Library General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Library General Public License for more details. * * You should have received a copy of the GNU Library General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/time.h> #include <sound/core.h> #include <sound/pcm.h> #include "pcm_plugin.h" /* * Basic linear conversion plugin */ struct linear_priv { int cvt_endian; /* need endian conversion? */ unsigned int src_ofs; /* byte offset in source format */ unsigned int dst_ofs; /* byte offset in destination format */ unsigned int copy_ofs; /* byte offset in temporary u32 data */ unsigned int dst_bytes; /* byte size of destination format */ unsigned int copy_bytes; /* bytes to copy per conversion */ unsigned int flip; /* MSB flip for signedness, done after endian conv */ }; static inline void do_convert(struct linear_priv *data, unsigned char *dst, unsigned char *src) { unsigned int tmp = 0; unsigned char *p = (unsigned char *)&tmp; memcpy(p + data->copy_ofs, src + data->src_ofs, data->copy_bytes); if (data->cvt_endian) tmp = swab32(tmp); tmp ^= data->flip; memcpy(dst, p + data->dst_ofs, data->dst_bytes); } static void convert(struct snd_pcm_plugin *plugin, const struct snd_pcm_plugin_channel *src_channels, struct snd_pcm_plugin_channel *dst_channels, snd_pcm_uframes_t frames) { struct linear_priv *data = (struct linear_priv *)plugin->extra_data; int channel; int nchannels = plugin->src_format.channels; for (channel = 0; channel < nchannels; ++channel) { char *src; char *dst; int src_step, dst_step; snd_pcm_uframes_t frames1; if (!src_channels[channel].enabled) { if (dst_channels[channel].wanted) snd_pcm_area_silence(&dst_channels[channel].area, 0, frames, plugin->dst_format.format); dst_channels[channel].enabled = 0; continue; } dst_channels[channel].enabled = 1; src = src_channels[channel].area.addr + src_channels[channel].area.first / 8; dst = dst_channels[channel].area.addr + dst_channels[channel].area.first / 8; src_step = src_channels[channel].area.step / 8; dst_step = dst_channels[channel].area.step / 8; frames1 = frames; while (frames1-- > 0) { do_convert(data, dst, src); src += src_step; dst += dst_step; } } } static snd_pcm_sframes_t linear_transfer(struct
snd_pcm_plugin *plugin, const struct snd_pcm_plugin_channel *src_channels, struct snd_pcm_plugin_channel *dst_channels, snd_pcm_uframes_t frames) { if (snd_BUG_ON(!plugin || !src_channels || !dst_channels)) return -ENXIO; if (frames == 0) return 0; #ifdef CONFIG_SND_DEBUG { unsigned int channel; for (channel = 0; channel < plugin->src_format.channels; channel++) { if (snd_BUG_ON(src_channels[channel].area.first % 8 || src_channels[channel].area.step % 8)) return -ENXIO; if (snd_BUG_ON(dst_channels[channel].area.first % 8 || dst_channels[channel].area.step % 8)) return -ENXIO; } } #endif if (frames > dst_channels[0].frames) frames = dst_channels[0].frames; convert(plugin, src_channels, dst_channels, frames); return frames; } static void init_data(struct linear_priv *data, snd_pcm_format_t src_format, snd_pcm_format_t dst_format) { int src_le, dst_le, src_bytes, dst_bytes; src_bytes = snd_pcm_format_width(src_format) / 8; dst_bytes = snd_pcm_format_width(dst_format) / 8; src_le = snd_pcm_format_little_endian(src_format) > 0; dst_le = snd_pcm_format_little_endian(dst_format) > 0; data->dst_bytes = dst_bytes; data->cvt_endian = src_le != dst_le; data->copy_bytes = src_bytes < dst_bytes ? src_bytes : dst_bytes; if (src_le) { data->copy_ofs = 4 - data->copy_bytes; data->src_ofs = src_bytes - data->copy_bytes; } else data->src_ofs = snd_pcm_format_physical_width(src_format) / 8 - src_bytes; if (dst_le) data->dst_ofs = 4 - data->dst_bytes; else data->dst_ofs = snd_pcm_format_physical_width(dst_format) / 8 - dst_bytes; if (snd_pcm_format_signed(src_format) != snd_pcm_format_signed(dst_format)) { if (dst_le) data->flip = (__force u32)cpu_to_le32(0x80000000); else data->flip = (__force u32)cpu_to_be32(0x80000000); } } int snd_pcm_plugin_build_linear(struct snd_pcm_substream *plug, struct snd_pcm_plugin_format *src_format, struct snd_pcm_plugin_format *dst_format, struct snd_pcm_plugin **r_plugin) { int err; struct linear_priv *data; struct snd_pcm_plugin *plugin; if (snd_BUG_ON(!r_plugin)) return -ENXIO; *r_plugin = NULL; if (snd_BUG_ON(src_format->rate != dst_format->rate)) return -ENXIO; if (snd_BUG_ON(src_format->channels != dst_format->channels)) return -ENXIO; if (snd_BUG_ON(!snd_pcm_format_linear(src_format->format) || !snd_pcm_format_linear(dst_format->format))) return -ENXIO; err = snd_pcm_plugin_build(plug, "linear format conversion", src_format, dst_format, sizeof(struct linear_priv), &plugin); if (err < 0) return err; data = (struct linear_priv *)plugin->extra_data; init_data(data, src_format->format, dst_format->format); plugin->transfer = linear_transfer; *r_plugin = plugin; return 0; }
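/*
 * Illustrative userspace sketch (not a kernel plugin): init_data() above sets
 * data->flip so that do_convert() can turn a signed sample into an unsigned
 * one (or back) by XOR-ing the most significant bit after any endian
 * conversion. The 16-bit example below shows just that step: S16 in the range
 * -32768..32767 maps onto U16 0..65535 by flipping bit 15.
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t s16_to_u16(int16_t s)
{
	return (uint16_t)s ^ 0x8000u;	/* same effect as "tmp ^= data->flip" */
}

int main(void)
{
	printf("%u\n", s16_to_u16(-32768));	/* 0 */
	printf("%u\n", s16_to_u16(0));		/* 32768 */
	printf("%u\n", s16_to_u16(32767));	/* 65535 */
	return 0;
}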
// SPDX-License-Identifier: GPL-2.0-only #include <linux/kernel.h> #include <linux/gcd.h> #include <linux/export.h> /* * This implements the binary GCD algorithm. (Often attributed to Stein, * but as Knuth has noted, appears in a first-century Chinese math text.) * * This is faster than the division-based algorithm even on x86, which * has decent hardware division. */ DEFINE_STATIC_KEY_TRUE(efficient_ffs_key); #if !defined(CONFIG_CPU_NO_EFFICIENT_FFS) /* If __ffs is available, the even/odd algorithm benchmarks slower. */ static unsigned long binary_gcd(unsigned long a, unsigned long b) { unsigned long r = a | b; b >>= __ffs(b); if (b == 1) return r & -r; for (;;) { a >>= __ffs(a); if (a == 1) return r & -r; if (a == b) return a << __ffs(r); if (a < b) swap(a, b); a -= b; } } #endif /* If normalization is done by loops, the even/odd algorithm is a win. */ /** * gcd - calculate and return the greatest common divisor of 2 unsigned longs * @a: first value * @b: second value */ unsigned long gcd(unsigned long a, unsigned long b) { unsigned long r = a | b; if (!a || !b) return r; #if !defined(CONFIG_CPU_NO_EFFICIENT_FFS) if (static_branch_likely(&efficient_ffs_key)) return binary_gcd(a, b); #endif /* Isolate lsbit of r */ r &= -r; while (!(b & r)) b >>= 1; if (b == r) return r; for (;;) { while (!(a & r)) a >>= 1; if (a == r) return r; if (a == b) return a; if (a < b) swap(a, b); a -= b; a >>= 1; if (a & r) a += b; a >>= 1; } } EXPORT_SYMBOL_GPL(gcd);
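/*
 * Illustrative userspace port (not the kernel build): the same binary (Stein)
 * GCD loop as binary_gcd() above, with the compiler builtin __builtin_ctzl
 * standing in for __ffs, checked against the division-based Euclidean form.
 * Function names here are chosen for the example only.
 */
#include <assert.h>
#include <stdio.h>

static unsigned long stein_gcd(unsigned long a, unsigned long b)
{
	unsigned long r = a | b;

	if (!a || !b)
		return r;
	b >>= __builtin_ctzl(b);
	if (b == 1)
		return r & -r;
	for (;;) {
		a >>= __builtin_ctzl(a);
		if (a == 1)
			return r & -r;
		if (a == b)
			return a << __builtin_ctzl(r);
		if (a < b) {
			unsigned long t = a; a = b; b = t;
		}
		a -= b;
	}
}

static unsigned long euclid_gcd(unsigned long a, unsigned long b)
{
	while (b) {
		unsigned long t = a % b;
		a = b;
		b = t;
	}
	return a;
}

int main(void)
{
	unsigned long pairs[][2] = { {12, 18}, {270, 192}, {17, 5}, {0, 42}, {1UL << 20, 48} };

	for (unsigned int i = 0; i < sizeof(pairs) / sizeof(pairs[0]); i++)
		assert(stein_gcd(pairs[i][0], pairs[i][1]) ==
		       euclid_gcd(pairs[i][0], pairs[i][1]));
	printf("binary GCD matches Euclid on all test pairs\n");
	return 0;
}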
/* * Copyright © 2012 Red Hat * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. * * Authors: * Dave Airlie <airlied@redhat.com> * Rob Clark <rob.clark@linaro.org> * */ #include <linux/export.h> #include <linux/dma-buf.h> #include <linux/rbtree.h> #include <linux/module.h> #include <drm/drm.h> #include <drm/drm_drv.h> #include <drm/drm_file.h> #include <drm/drm_framebuffer.h> #include <drm/drm_gem.h> #include <drm/drm_prime.h> #include <drm/drm_print.h> #include "drm_internal.h" MODULE_IMPORT_NS("DMA_BUF"); /** * DOC: overview and lifetime rules * * Similar to GEM global names, PRIME file descriptors are also used to share * buffer objects across processes. They offer additional security: as file * descriptors must be explicitly sent over UNIX domain sockets to be shared * between applications, they can't be guessed like the globally unique GEM * names. * * Drivers that support the PRIME API implement the drm_gem_object_funcs.export * and &drm_driver.gem_prime_import hooks. &dma_buf_ops implementations for * drivers are all individually exported for drivers which need to overwrite * or reimplement some of them. * * Reference Counting for GEM Drivers * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * On the export the &dma_buf holds a reference to the exported buffer object, * usually a &drm_gem_object.
It takes this reference in the PRIME_HANDLE_TO_FD * IOCTL, when it first calls &drm_gem_object_funcs.export * and stores the exporting GEM object in the &dma_buf.priv field. This * reference needs to be released when the final reference to the &dma_buf * itself is dropped and its &dma_buf_ops.release function is called. For * GEM-based drivers, the &dma_buf should be exported using * drm_gem_dmabuf_export() and then released by drm_gem_dmabuf_release(). * * Thus the chain of references always flows in one direction, avoiding loops: * importing GEM object -> dma-buf -> exported GEM bo. A further complication * are the lookup caches for import and export. These are required to guarantee * that any given object will always have only one unique userspace handle. This * is required to allow userspace to detect duplicated imports, since some GEM * drivers do fail command submissions if a given buffer object is listed more * than once. These import and export caches in &drm_prime_file_private only * retain a weak reference, which is cleaned up when the corresponding object is * released. * * Self-importing: If userspace is using PRIME as a replacement for flink then * it will get a fd->handle request for a GEM object that it created. Drivers * should detect this situation and return back the underlying object from the * dma-buf private. For GEM based drivers this is handled in * drm_gem_prime_import() already. */ struct drm_prime_member { struct dma_buf *dma_buf; uint32_t handle; struct rb_node dmabuf_rb; struct rb_node handle_rb; }; int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle) { struct drm_prime_member *member; struct rb_node **p, *rb; member = kmalloc(sizeof(*member), GFP_KERNEL); if (!member) return -ENOMEM; get_dma_buf(dma_buf); member->dma_buf = dma_buf; member->handle = handle; rb = NULL; p = &prime_fpriv->dmabufs.rb_node; while (*p) { struct drm_prime_member *pos; rb = *p; pos = rb_entry(rb, struct drm_prime_member, dmabuf_rb); if (dma_buf > pos->dma_buf) p = &rb->rb_right; else p = &rb->rb_left; } rb_link_node(&member->dmabuf_rb, rb, p); rb_insert_color(&member->dmabuf_rb, &prime_fpriv->dmabufs); rb = NULL; p = &prime_fpriv->handles.rb_node; while (*p) { struct drm_prime_member *pos; rb = *p; pos = rb_entry(rb, struct drm_prime_member, handle_rb); if (handle > pos->handle) p = &rb->rb_right; else p = &rb->rb_left; } rb_link_node(&member->handle_rb, rb, p); rb_insert_color(&member->handle_rb, &prime_fpriv->handles); return 0; } static struct dma_buf *drm_prime_lookup_buf_by_handle(struct drm_prime_file_private *prime_fpriv, uint32_t handle) { struct rb_node *rb; rb = prime_fpriv->handles.rb_node; while (rb) { struct drm_prime_member *member; member = rb_entry(rb, struct drm_prime_member, handle_rb); if (member->handle == handle) return member->dma_buf; else if (member->handle < handle) rb = rb->rb_right; else rb = rb->rb_left; } return NULL; } static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle) { struct rb_node *rb; rb = prime_fpriv->dmabufs.rb_node; while (rb) { struct drm_prime_member *member; member = rb_entry(rb, struct drm_prime_member, dmabuf_rb); if (member->dma_buf == dma_buf) { *handle = member->handle; return 0; } else if (member->dma_buf < dma_buf) { rb = rb->rb_right; } else { rb = rb->rb_left; } } return -ENOENT; } void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv, uint32_t handle) { struct rb_node *rb; rb = 
prime_fpriv->handles.rb_node; while (rb) { struct drm_prime_member *member; member = rb_entry(rb, struct drm_prime_member, handle_rb); if (member->handle == handle) { rb_erase(&member->handle_rb, &prime_fpriv->handles); rb_erase(&member->dmabuf_rb, &prime_fpriv->dmabufs); dma_buf_put(member->dma_buf); kfree(member); break; } else if (member->handle < handle) { rb = rb->rb_right; } else { rb = rb->rb_left; } } } void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv) { mutex_init(&prime_fpriv->lock); prime_fpriv->dmabufs = RB_ROOT; prime_fpriv->handles = RB_ROOT; } void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv) { /* by now drm_gem_release should've made sure the list is empty */ WARN_ON(!RB_EMPTY_ROOT(&prime_fpriv->dmabufs)); } /** * drm_gem_dmabuf_export - &dma_buf export implementation for GEM * @dev: parent device for the exported dmabuf * @exp_info: the export information used by dma_buf_export() * * This wraps dma_buf_export() for use by generic GEM drivers that are using * drm_gem_dmabuf_release(). In addition to calling dma_buf_export(), we take * a reference to the &drm_device and the exported &drm_gem_object (stored in * &dma_buf_export_info.priv) which is released by drm_gem_dmabuf_release(). * * Returns the new dmabuf. */ struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev, struct dma_buf_export_info *exp_info) { struct drm_gem_object *obj = exp_info->priv; struct dma_buf *dma_buf; dma_buf = dma_buf_export(exp_info); if (IS_ERR(dma_buf)) return dma_buf; drm_dev_get(dev); drm_gem_object_get(obj); dma_buf->file->f_mapping = obj->dev->anon_inode->i_mapping; return dma_buf; } EXPORT_SYMBOL(drm_gem_dmabuf_export); /** * drm_gem_dmabuf_release - &dma_buf release implementation for GEM * @dma_buf: buffer to be released * * Generic release function for dma_bufs exported as PRIME buffers. GEM drivers * must use this in their &dma_buf_ops structure as the release callback. * drm_gem_dmabuf_release() should be used in conjunction with * drm_gem_dmabuf_export(). */ void drm_gem_dmabuf_release(struct dma_buf *dma_buf) { struct drm_gem_object *obj = dma_buf->priv; struct drm_device *dev = obj->dev; /* drop the reference on the export fd holds */ drm_gem_object_put(obj); drm_dev_put(dev); } EXPORT_SYMBOL(drm_gem_dmabuf_release); /** * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers * @dev: drm_device to import into * @file_priv: drm file-private structure * @prime_fd: fd id of the dma-buf which should be imported * @handle: pointer to storage for the handle of the imported buffer object * * This is the PRIME import function which must be used mandatorily by GEM * drivers to ensure correct lifetime management of the underlying GEM object. * The actual importing of GEM object from the dma-buf is done through the * &drm_driver.gem_prime_import driver callback. * * Returns 0 on success or a negative error code on failure. 
*/ int drm_gem_prime_fd_to_handle(struct drm_device *dev, struct drm_file *file_priv, int prime_fd, uint32_t *handle) { struct dma_buf *dma_buf; struct drm_gem_object *obj; int ret; dma_buf = dma_buf_get(prime_fd); if (IS_ERR(dma_buf)) return PTR_ERR(dma_buf); mutex_lock(&file_priv->prime.lock); ret = drm_prime_lookup_buf_handle(&file_priv->prime, dma_buf, handle); if (ret == 0) goto out_put; /* never seen this one, need to import */ mutex_lock(&dev->object_name_lock); if (dev->driver->gem_prime_import) obj = dev->driver->gem_prime_import(dev, dma_buf); else obj = drm_gem_prime_import(dev, dma_buf); if (IS_ERR(obj)) { ret = PTR_ERR(obj); goto out_unlock; } if (obj->dma_buf) { WARN_ON(obj->dma_buf != dma_buf); } else { obj->dma_buf = dma_buf; get_dma_buf(dma_buf); } /* _handle_create_tail unconditionally unlocks dev->object_name_lock. */ ret = drm_gem_handle_create_tail(file_priv, obj, handle); drm_gem_object_put(obj); if (ret) goto out_put; ret = drm_prime_add_buf_handle(&file_priv->prime, dma_buf, *handle); mutex_unlock(&file_priv->prime.lock); if (ret) goto fail; dma_buf_put(dma_buf); return 0; fail: /* hmm, if driver attached, we are relying on the free-object path * to detach.. which seems ok.. */ drm_gem_handle_delete(file_priv, *handle); dma_buf_put(dma_buf); return ret; out_unlock: mutex_unlock(&dev->object_name_lock); out_put: mutex_unlock(&file_priv->prime.lock); dma_buf_put(dma_buf); return ret; } EXPORT_SYMBOL(drm_gem_prime_fd_to_handle); int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_prime_handle *args = data; if (dev->driver->prime_fd_to_handle) { return dev->driver->prime_fd_to_handle(dev, file_priv, args->fd, &args->handle); } return drm_gem_prime_fd_to_handle(dev, file_priv, args->fd, &args->handle); } static struct dma_buf *export_and_register_object(struct drm_device *dev, struct drm_gem_object *obj, uint32_t flags) { struct dma_buf *dmabuf; /* prevent races with concurrent gem_close. */ if (obj->handle_count == 0) { dmabuf = ERR_PTR(-ENOENT); return dmabuf; } if (obj->funcs && obj->funcs->export) dmabuf = obj->funcs->export(obj, flags); else dmabuf = drm_gem_prime_export(obj, flags); if (IS_ERR(dmabuf)) { /* normally the created dma-buf takes ownership of the ref, * but if that fails then drop the ref */ return dmabuf; } /* * Note that callers do not need to clean up the export cache * since the check for obj->handle_count guarantees that someone * will clean it up. */ obj->dma_buf = dmabuf; get_dma_buf(obj->dma_buf); return dmabuf; } /** * drm_gem_prime_handle_to_dmabuf - PRIME export function for GEM drivers * @dev: dev to export the buffer from * @file_priv: drm file-private structure * @handle: buffer handle to export * @flags: flags like DRM_CLOEXEC * * This is the PRIME export function which must be used mandatorily by GEM * drivers to ensure correct lifetime management of the underlying GEM object. * The actual exporting from GEM object to a dma-buf is done through the * &drm_gem_object_funcs.export callback. * * Unlike drm_gem_prime_handle_to_fd(), it returns the struct dma_buf it * has created, without attaching it to any file descriptors. 
The difference * between those two is similar to that between anon_inode_getfile() and * anon_inode_getfd(); insertion into descriptor table is something you * can not revert if any cleanup is needed, so the descriptor-returning * variants should only be used when you are past the last failure exit * and the only thing left is passing the new file descriptor to userland. * When all you need is the object itself or when you need to do something * else that might fail, use that one instead. */ struct dma_buf *drm_gem_prime_handle_to_dmabuf(struct drm_device *dev, struct drm_file *file_priv, uint32_t handle, uint32_t flags) { struct drm_gem_object *obj; int ret = 0; struct dma_buf *dmabuf; mutex_lock(&file_priv->prime.lock); obj = drm_gem_object_lookup(file_priv, handle); if (!obj) { dmabuf = ERR_PTR(-ENOENT); goto out_unlock; } dmabuf = drm_prime_lookup_buf_by_handle(&file_priv->prime, handle); if (dmabuf) { get_dma_buf(dmabuf); goto out; } mutex_lock(&dev->object_name_lock); /* re-export the original imported object */ if (obj->import_attach) { dmabuf = obj->import_attach->dmabuf; get_dma_buf(dmabuf); goto out_have_obj; } if (obj->dma_buf) { get_dma_buf(obj->dma_buf); dmabuf = obj->dma_buf; goto out_have_obj; } dmabuf = export_and_register_object(dev, obj, flags); if (IS_ERR(dmabuf)) { /* normally the created dma-buf takes ownership of the ref, * but if that fails then drop the ref */ mutex_unlock(&dev->object_name_lock); goto out; } out_have_obj: /* * If we've exported this buffer then cheat and add it to the import list * so we get the correct handle back. We must do this under the * protection of dev->object_name_lock to ensure that a racing gem close * ioctl doesn't miss to remove this buffer handle from the cache. */ ret = drm_prime_add_buf_handle(&file_priv->prime, dmabuf, handle); mutex_unlock(&dev->object_name_lock); if (ret) { dma_buf_put(dmabuf); dmabuf = ERR_PTR(ret); } out: drm_gem_object_put(obj); out_unlock: mutex_unlock(&file_priv->prime.lock); return dmabuf; } EXPORT_SYMBOL(drm_gem_prime_handle_to_dmabuf); /** * drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers * @dev: dev to export the buffer from * @file_priv: drm file-private structure * @handle: buffer handle to export * @flags: flags like DRM_CLOEXEC * @prime_fd: pointer to storage for the fd id of the create dma-buf * * This is the PRIME export function which must be used mandatorily by GEM * drivers to ensure correct lifetime management of the underlying GEM object. * The actual exporting from GEM object to a dma-buf is done through the * &drm_gem_object_funcs.export callback. 
*/ int drm_gem_prime_handle_to_fd(struct drm_device *dev, struct drm_file *file_priv, uint32_t handle, uint32_t flags, int *prime_fd) { struct dma_buf *dmabuf; int fd = get_unused_fd_flags(flags); if (fd < 0) return fd; dmabuf = drm_gem_prime_handle_to_dmabuf(dev, file_priv, handle, flags); if (IS_ERR(dmabuf)) { put_unused_fd(fd); return PTR_ERR(dmabuf); } fd_install(fd, dmabuf->file); *prime_fd = fd; return 0; } EXPORT_SYMBOL(drm_gem_prime_handle_to_fd); int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_prime_handle *args = data; /* check flags are valid */ if (args->flags & ~(DRM_CLOEXEC | DRM_RDWR)) return -EINVAL; if (dev->driver->prime_handle_to_fd) { return dev->driver->prime_handle_to_fd(dev, file_priv, args->handle, args->flags, &args->fd); } return drm_gem_prime_handle_to_fd(dev, file_priv, args->handle, args->flags, &args->fd); } /** * DOC: PRIME Helpers * * Drivers can implement &drm_gem_object_funcs.export and * &drm_driver.gem_prime_import in terms of simpler APIs by using the helper * functions drm_gem_prime_export() and drm_gem_prime_import(). These functions * implement dma-buf support in terms of some lower-level helpers, which are * again exported for drivers to use individually: * * Exporting buffers * ~~~~~~~~~~~~~~~~~ * * Optional pinning of buffers is handled at dma-buf attach and detach time in * drm_gem_map_attach() and drm_gem_map_detach(). Backing storage itself is * handled by drm_gem_map_dma_buf() and drm_gem_unmap_dma_buf(), which relies on * &drm_gem_object_funcs.get_sg_table. If &drm_gem_object_funcs.get_sg_table is * unimplemented, exports into another device are rejected. * * For kernel-internal access there's drm_gem_dmabuf_vmap() and * drm_gem_dmabuf_vunmap(). Userspace mmap support is provided by * drm_gem_dmabuf_mmap(). * * Note that these export helpers can only be used if the underlying backing * storage is fully coherent and either permanently pinned, or it is safe to pin * it indefinitely. * * FIXME: The underlying helper functions are named rather inconsistently. * * Importing buffers * ~~~~~~~~~~~~~~~~~ * * Importing dma-bufs using drm_gem_prime_import() relies on * &drm_driver.gem_prime_import_sg_table. * * Note that similarly to the export helpers this permanently pins the * underlying backing storage. Which is ok for scanout, but is not the best * option for sharing lots of buffers for rendering. */ /** * drm_gem_map_attach - dma_buf attach implementation for GEM * @dma_buf: buffer to attach device to * @attach: buffer attachment data * * Calls &drm_gem_object_funcs.pin for device specific handling. This can be * used as the &dma_buf_ops.attach callback. Must be used together with * drm_gem_map_detach(). * * Returns 0 on success, negative error code on failure. */ int drm_gem_map_attach(struct dma_buf *dma_buf, struct dma_buf_attachment *attach) { struct drm_gem_object *obj = dma_buf->priv; int ret; /* * drm_gem_map_dma_buf() requires obj->get_sg_table(), but drivers * that implement their own ->map_dma_buf() do not. 
*/ if (dma_buf->ops->map_dma_buf == drm_gem_map_dma_buf && !obj->funcs->get_sg_table) return -ENOSYS; if (!obj->funcs->pin) return 0; ret = dma_resv_lock(obj->resv, NULL); if (ret) return ret; ret = obj->funcs->pin(obj); dma_resv_unlock(obj->resv); return ret; } EXPORT_SYMBOL(drm_gem_map_attach); /** * drm_gem_map_detach - dma_buf detach implementation for GEM * @dma_buf: buffer to detach from * @attach: attachment to be detached * * Calls &drm_gem_object_funcs.pin for device specific handling. Cleans up * &dma_buf_attachment from drm_gem_map_attach(). This can be used as the * &dma_buf_ops.detach callback. */ void drm_gem_map_detach(struct dma_buf *dma_buf, struct dma_buf_attachment *attach) { struct drm_gem_object *obj = dma_buf->priv; int ret; if (!obj->funcs->unpin) return; ret = dma_resv_lock(obj->resv, NULL); if (drm_WARN_ON(obj->dev, ret)) return; obj->funcs->unpin(obj); dma_resv_unlock(obj->resv); } EXPORT_SYMBOL(drm_gem_map_detach); /** * drm_gem_map_dma_buf - map_dma_buf implementation for GEM * @attach: attachment whose scatterlist is to be returned * @dir: direction of DMA transfer * * Calls &drm_gem_object_funcs.get_sg_table and then maps the scatterlist. This * can be used as the &dma_buf_ops.map_dma_buf callback. Should be used together * with drm_gem_unmap_dma_buf(). * * Returns:sg_table containing the scatterlist to be returned; returns ERR_PTR * on error. May return -EINTR if it is interrupted by a signal. */ struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach, enum dma_data_direction dir) { struct drm_gem_object *obj = attach->dmabuf->priv; struct sg_table *sgt; int ret; if (WARN_ON(dir == DMA_NONE)) return ERR_PTR(-EINVAL); if (WARN_ON(!obj->funcs->get_sg_table)) return ERR_PTR(-ENOSYS); sgt = obj->funcs->get_sg_table(obj); if (IS_ERR(sgt)) return sgt; ret = dma_map_sgtable(attach->dev, sgt, dir, DMA_ATTR_SKIP_CPU_SYNC); if (ret) { sg_free_table(sgt); kfree(sgt); sgt = ERR_PTR(ret); } return sgt; } EXPORT_SYMBOL(drm_gem_map_dma_buf); /** * drm_gem_unmap_dma_buf - unmap_dma_buf implementation for GEM * @attach: attachment to unmap buffer from * @sgt: scatterlist info of the buffer to unmap * @dir: direction of DMA transfer * * This can be used as the &dma_buf_ops.unmap_dma_buf callback. */ void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach, struct sg_table *sgt, enum dma_data_direction dir) { if (!sgt) return; dma_unmap_sgtable(attach->dev, sgt, dir, DMA_ATTR_SKIP_CPU_SYNC); sg_free_table(sgt); kfree(sgt); } EXPORT_SYMBOL(drm_gem_unmap_dma_buf); /** * drm_gem_dmabuf_vmap - dma_buf vmap implementation for GEM * @dma_buf: buffer to be mapped * @map: the virtual address of the buffer * * Sets up a kernel virtual mapping. This can be used as the &dma_buf_ops.vmap * callback. Calls into &drm_gem_object_funcs.vmap for device specific handling. * The kernel virtual address is returned in map. * * Returns 0 on success or a negative errno code otherwise. */ int drm_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct iosys_map *map) { struct drm_gem_object *obj = dma_buf->priv; return drm_gem_vmap_locked(obj, map); } EXPORT_SYMBOL(drm_gem_dmabuf_vmap); /** * drm_gem_dmabuf_vunmap - dma_buf vunmap implementation for GEM * @dma_buf: buffer to be unmapped * @map: the virtual address of the buffer * * Releases a kernel virtual mapping. This can be used as the * &dma_buf_ops.vunmap callback. Calls into &drm_gem_object_funcs.vunmap for device specific handling. 
*/ void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct iosys_map *map) { struct drm_gem_object *obj = dma_buf->priv; drm_gem_vunmap_locked(obj, map); } EXPORT_SYMBOL(drm_gem_dmabuf_vunmap); /** * drm_gem_prime_mmap - PRIME mmap function for GEM drivers * @obj: GEM object * @vma: Virtual address range * * This function sets up a userspace mapping for PRIME exported buffers using * the same codepath that is used for regular GEM buffer mapping on the DRM fd. * The fake GEM offset is added to vma->vm_pgoff and &drm_driver->fops->mmap is * called to set up the mapping. */ int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) { struct drm_file *priv; struct file *fil; int ret; /* Add the fake offset */ vma->vm_pgoff += drm_vma_node_start(&obj->vma_node); if (obj->funcs && obj->funcs->mmap) { vma->vm_ops = obj->funcs->vm_ops; drm_gem_object_get(obj); ret = obj->funcs->mmap(obj, vma); if (ret) { drm_gem_object_put(obj); return ret; } vma->vm_private_data = obj; return 0; } priv = kzalloc(sizeof(*priv), GFP_KERNEL); fil = kzalloc(sizeof(*fil), GFP_KERNEL); if (!priv || !fil) { ret = -ENOMEM; goto out; } /* Used by drm_gem_mmap() to lookup the GEM object */ priv->minor = obj->dev->primary; fil->private_data = priv; ret = drm_vma_node_allow(&obj->vma_node, priv); if (ret) goto out; ret = obj->dev->driver->fops->mmap(fil, vma); drm_vma_node_revoke(&obj->vma_node, priv); out: kfree(priv); kfree(fil); return ret; } EXPORT_SYMBOL(drm_gem_prime_mmap); /** * drm_gem_dmabuf_mmap - dma_buf mmap implementation for GEM * @dma_buf: buffer to be mapped * @vma: virtual address range * * Provides memory mapping for the buffer. This can be used as the * &dma_buf_ops.mmap callback. It just forwards to drm_gem_prime_mmap(). * * Returns 0 on success or a negative error code on failure. */ int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma) { struct drm_gem_object *obj = dma_buf->priv; return drm_gem_prime_mmap(obj, vma); } EXPORT_SYMBOL(drm_gem_dmabuf_mmap); static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = { .attach = drm_gem_map_attach, .detach = drm_gem_map_detach, .map_dma_buf = drm_gem_map_dma_buf, .unmap_dma_buf = drm_gem_unmap_dma_buf, .release = drm_gem_dmabuf_release, .mmap = drm_gem_dmabuf_mmap, .vmap = drm_gem_dmabuf_vmap, .vunmap = drm_gem_dmabuf_vunmap, }; /** * drm_prime_pages_to_sg - converts a page array into an sg list * @dev: DRM device * @pages: pointer to the array of page pointers to convert * @nr_pages: length of the page vector * * This helper creates an sg table object from a set of pages * the driver is responsible for mapping the pages into the * importers address space for use with dma_buf itself. * * This is useful for implementing &drm_gem_object_funcs.get_sg_table. 
*/ struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev, struct page **pages, unsigned int nr_pages) { struct sg_table *sg; size_t max_segment = 0; int err; sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL); if (!sg) return ERR_PTR(-ENOMEM); if (dev) max_segment = dma_max_mapping_size(dev->dev); if (max_segment == 0) max_segment = UINT_MAX; err = sg_alloc_table_from_pages_segment(sg, pages, nr_pages, 0, (unsigned long)nr_pages << PAGE_SHIFT, max_segment, GFP_KERNEL); if (err) { kfree(sg); sg = ERR_PTR(err); } return sg; } EXPORT_SYMBOL(drm_prime_pages_to_sg); /** * drm_prime_get_contiguous_size - returns the contiguous size of the buffer * @sgt: sg_table describing the buffer to check * * This helper calculates the contiguous size in the DMA address space * of the buffer described by the provided sg_table. * * This is useful for implementing * &drm_gem_object_funcs.gem_prime_import_sg_table. */ unsigned long drm_prime_get_contiguous_size(struct sg_table *sgt) { dma_addr_t expected = sg_dma_address(sgt->sgl); struct scatterlist *sg; unsigned long size = 0; int i; for_each_sgtable_dma_sg(sgt, sg, i) { unsigned int len = sg_dma_len(sg); if (!len) break; if (sg_dma_address(sg) != expected) break; expected += len; size += len; } return size; } EXPORT_SYMBOL(drm_prime_get_contiguous_size); /** * drm_gem_prime_export - helper library implementation of the export callback * @obj: GEM object to export * @flags: flags like DRM_CLOEXEC and DRM_RDWR * * This is the implementation of the &drm_gem_object_funcs.export functions for GEM drivers * using the PRIME helpers. It is used as the default in * drm_gem_prime_handle_to_fd(). */ struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj, int flags) { struct drm_device *dev = obj->dev; struct dma_buf_export_info exp_info = { .exp_name = KBUILD_MODNAME, /* white lie for debug */ .owner = dev->driver->fops->owner, .ops = &drm_gem_prime_dmabuf_ops, .size = obj->size, .flags = flags, .priv = obj, .resv = obj->resv, }; return drm_gem_dmabuf_export(dev, &exp_info); } EXPORT_SYMBOL(drm_gem_prime_export); /** * drm_gem_is_prime_exported_dma_buf - * checks if the DMA-BUF was exported from a GEM object belonging to @dev. * @dev: drm_device to check against * @dma_buf: dma-buf object to import * * Return: true if the DMA-BUF was exported from a GEM object belonging * to @dev, false otherwise. */ bool drm_gem_is_prime_exported_dma_buf(struct drm_device *dev, struct dma_buf *dma_buf) { struct drm_gem_object *obj = dma_buf->priv; return (dma_buf->ops == &drm_gem_prime_dmabuf_ops) && (obj->dev == dev); } EXPORT_SYMBOL(drm_gem_is_prime_exported_dma_buf); /** * drm_gem_prime_import_dev - core implementation of the import callback * @dev: drm_device to import into * @dma_buf: dma-buf object to import * @attach_dev: struct device to dma_buf attach * * This is the core of drm_gem_prime_import(). It's designed to be called by * drivers who want to use a different device structure than &drm_device.dev for * attaching via dma_buf. This function calls * &drm_driver.gem_prime_import_sg_table internally. * * Drivers must arrange to call drm_prime_gem_destroy() from their * &drm_gem_object_funcs.free hook when using this function. 
*/ struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev, struct dma_buf *dma_buf, struct device *attach_dev) { struct dma_buf_attachment *attach; struct sg_table *sgt; struct drm_gem_object *obj; int ret; if (drm_gem_is_prime_exported_dma_buf(dev, dma_buf)) { /* * Importing dmabuf exported from our own gem increases * refcount on gem itself instead of f_count of dmabuf. */ obj = dma_buf->priv; drm_gem_object_get(obj); return obj; } if (!dev->driver->gem_prime_import_sg_table) return ERR_PTR(-EINVAL); attach = dma_buf_attach(dma_buf, attach_dev); if (IS_ERR(attach)) return ERR_CAST(attach); get_dma_buf(dma_buf); sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL); if (IS_ERR(sgt)) { ret = PTR_ERR(sgt); goto fail_detach; } obj = dev->driver->gem_prime_import_sg_table(dev, attach, sgt); if (IS_ERR(obj)) { ret = PTR_ERR(obj); goto fail_unmap; } obj->import_attach = attach; obj->resv = dma_buf->resv; return obj; fail_unmap: dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL); fail_detach: dma_buf_detach(dma_buf, attach); dma_buf_put(dma_buf); return ERR_PTR(ret); } EXPORT_SYMBOL(drm_gem_prime_import_dev); /** * drm_gem_prime_import - helper library implementation of the import callback * @dev: drm_device to import into * @dma_buf: dma-buf object to import * * This is the implementation of the gem_prime_import functions for GEM drivers * using the PRIME helpers. Drivers can use this as their * &drm_driver.gem_prime_import implementation. It is used as the default * implementation in drm_gem_prime_fd_to_handle(). * * Drivers must arrange to call drm_prime_gem_destroy() from their * &drm_gem_object_funcs.free hook when using this function. */ struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf) { return drm_gem_prime_import_dev(dev, dma_buf, drm_dev_dma_dev(dev)); } EXPORT_SYMBOL(drm_gem_prime_import); /** * drm_prime_sg_to_page_array - convert an sg table into a page array * @sgt: scatter-gather table to convert * @pages: array of page pointers to store the pages in * @max_entries: size of the passed-in array * * Exports an sg table into an array of pages. * * This function is deprecated and strongly discouraged to be used. * The page array is only useful for page faults and those can corrupt fields * in the struct page if they are not handled by the exporting driver. */ int __deprecated drm_prime_sg_to_page_array(struct sg_table *sgt, struct page **pages, int max_entries) { struct sg_page_iter page_iter; struct page **p = pages; for_each_sgtable_page(sgt, &page_iter, 0) { if (WARN_ON(p - pages >= max_entries)) return -1; *p++ = sg_page_iter_page(&page_iter); } return 0; } EXPORT_SYMBOL(drm_prime_sg_to_page_array); /** * drm_prime_sg_to_dma_addr_array - convert an sg table into a dma addr array * @sgt: scatter-gather table to convert * @addrs: array to store the dma bus address of each page * @max_entries: size of both the passed-in arrays * * Exports an sg table into an array of addresses. * * Drivers should use this in their &drm_driver.gem_prime_import_sg_table * implementation. 
*/ int drm_prime_sg_to_dma_addr_array(struct sg_table *sgt, dma_addr_t *addrs, int max_entries) { struct sg_dma_page_iter dma_iter; dma_addr_t *a = addrs; for_each_sgtable_dma_page(sgt, &dma_iter, 0) { if (WARN_ON(a - addrs >= max_entries)) return -1; *a++ = sg_page_iter_dma_address(&dma_iter); } return 0; } EXPORT_SYMBOL(drm_prime_sg_to_dma_addr_array); /** * drm_prime_gem_destroy - helper to clean up a PRIME-imported GEM object * @obj: GEM object which was created from a dma-buf * @sg: the sg-table which was pinned at import time * * This is the cleanup functions which GEM drivers need to call when they use * drm_gem_prime_import() or drm_gem_prime_import_dev() to import dma-bufs. */ void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg) { struct dma_buf_attachment *attach; struct dma_buf *dma_buf; attach = obj->import_attach; if (sg) dma_buf_unmap_attachment_unlocked(attach, sg, DMA_BIDIRECTIONAL); dma_buf = attach->dmabuf; dma_buf_detach(attach->dmabuf, attach); /* remove the reference */ dma_buf_put(dma_buf); } EXPORT_SYMBOL(drm_prime_gem_destroy);
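/*
 * Hedged sketch, not a buildable driver: it only shows where the helpers in
 * this file are meant to be plugged in, following the DOC comments above.
 * Every my_*() function named here is hypothetical driver code, not something
 * defined in this file or elsewhere in the kernel.
 */
static const struct drm_gem_object_funcs my_gem_funcs = {
	/* hypothetical free hook; for imported BOs it must end up calling
	 * drm_prime_gem_destroy() with the sg_table pinned at import time */
	.free		= my_gem_free,
	/* default exporter provided by this file */
	.export		= drm_gem_prime_export,
	/* hypothetical; required so drm_gem_map_dma_buf() can build an sgt */
	.get_sg_table	= my_gem_get_sg_table,
};

static const struct drm_driver my_driver = {
	/* default importer provided by this file */
	.gem_prime_import		= drm_gem_prime_import,
	/* hypothetical; turns the pinned sg_table into a driver BO */
	.gem_prime_import_sg_table	= my_gem_import_sg_table,
};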
// SPDX-License-Identifier: GPL-2.0-only /* * Network Service Header * * Copyright (c) 2017 Red Hat, Inc. -- Jiri Benc <jbenc@redhat.com> */ #include <linux/module.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <net/gso.h> #include <net/nsh.h> #include <net/tun_proto.h> int nsh_push(struct sk_buff *skb, const struct nshhdr *pushed_nh) { struct nshhdr *nh; size_t length = nsh_hdr_len(pushed_nh); u8 next_proto; if (skb->mac_len) { next_proto = TUN_P_ETHERNET; } else { next_proto = tun_p_from_eth_p(skb->protocol); if (!next_proto) return -EAFNOSUPPORT; } /* Add the NSH header */ if (skb_cow_head(skb, length) < 0) return -ENOMEM; skb_push(skb, length); nh = (struct nshhdr *)(skb->data); memcpy(nh, pushed_nh, length); nh->np = next_proto; skb_postpush_rcsum(skb, nh, length); skb->protocol = htons(ETH_P_NSH); skb_reset_mac_header(skb); skb_reset_network_header(skb); skb_reset_mac_len(skb); return 0; } EXPORT_SYMBOL_GPL(nsh_push); int nsh_pop(struct sk_buff *skb) { struct nshhdr *nh; size_t length; __be16 inner_proto; if (!pskb_may_pull(skb, NSH_BASE_HDR_LEN)) return -ENOMEM; nh = (struct nshhdr *)(skb->data); length = nsh_hdr_len(nh); if (length < NSH_BASE_HDR_LEN) return -EINVAL; inner_proto = tun_p_to_eth_p(nh->np); if (!pskb_may_pull(skb, length)) return -ENOMEM; if (!inner_proto) return -EAFNOSUPPORT; skb_pull_rcsum(skb, length); skb_reset_mac_header(skb); skb_reset_network_header(skb); skb_reset_mac_len(skb); skb->protocol = inner_proto; return 0; } EXPORT_SYMBOL_GPL(nsh_pop); static struct sk_buff *nsh_gso_segment(struct sk_buff *skb, netdev_features_t features) { unsigned int outer_hlen, mac_len, nsh_len; struct sk_buff *segs = ERR_PTR(-EINVAL); u16 mac_offset = skb->mac_header; __be16 outer_proto, proto; skb_reset_network_header(skb); outer_proto = skb->protocol; outer_hlen = skb_mac_header_len(skb); mac_len = skb->mac_len; if (unlikely(!pskb_may_pull(skb, NSH_BASE_HDR_LEN))) goto out; nsh_len = nsh_hdr_len(nsh_hdr(skb)); if (nsh_len < NSH_BASE_HDR_LEN) goto out; if (unlikely(!pskb_may_pull(skb, nsh_len))) goto out; proto = tun_p_to_eth_p(nsh_hdr(skb)->np); if (!proto) goto out; __skb_pull(skb, nsh_len); skb_reset_mac_header(skb); skb->mac_len = proto == htons(ETH_P_TEB) ?
ETH_HLEN : 0; skb->protocol = proto; features &= NETIF_F_SG; segs = skb_mac_gso_segment(skb, features); if (IS_ERR_OR_NULL(segs)) { skb_gso_error_unwind(skb, htons(ETH_P_NSH), nsh_len, mac_offset, mac_len); goto out; } for (skb = segs; skb; skb = skb->next) { skb->protocol = outer_proto; __skb_push(skb, nsh_len + outer_hlen); skb_reset_mac_header(skb); skb_set_network_header(skb, outer_hlen); skb->mac_len = mac_len; } out: return segs; } static struct packet_offload nsh_packet_offload __read_mostly = { .type = htons(ETH_P_NSH), .priority = 15, .callbacks = { .gso_segment = nsh_gso_segment, }, }; static int __init nsh_init_module(void) { dev_add_offload(&nsh_packet_offload); return 0; } static void __exit nsh_cleanup_module(void) { dev_remove_offload(&nsh_packet_offload); } module_init(nsh_init_module); module_exit(nsh_cleanup_module); MODULE_AUTHOR("Jiri Benc <jbenc@redhat.com>"); MODULE_DESCRIPTION("NSH protocol"); MODULE_LICENSE("GPL v2");
/* * Copyright (c) 2016 Laurent Pinchart <laurent.pinchart@ideasonboard.com> * * Permission to use, copy, modify, distribute, and sell this software and its * documentation for any purpose is hereby granted without fee, provided that * the above copyright notice appear in all copies and that both that copyright * notice and this permission notice appear in supporting documentation, and * that the name of the copyright holders not be used in advertising or * publicity pertaining to distribution of the software without specific, * written prior permission. The copyright holders make no representations * about the suitability of this software for any purpose. It is provided "as * is" without express or implied warranty. * * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE * OF THIS SOFTWARE. */ #ifndef __DRM_FOURCC_H__ #define __DRM_FOURCC_H__ #include <linux/math.h> #include <linux/types.h> #include <uapi/drm/drm_fourcc.h> /** * DRM_FORMAT_MAX_PLANES - maximum number of planes a DRM format can have */ #define DRM_FORMAT_MAX_PLANES 4u /* * DRM formats are little endian. Define host endian variants for the * most common formats here, to reduce the #ifdefs needed in drivers. * * Note that the DRM_FORMAT_BIG_ENDIAN flag should only be used in * case the format can't be specified otherwise, so we don't end up * with two values describing the same format.
*/ #ifdef __BIG_ENDIAN # define DRM_FORMAT_HOST_XRGB1555 (DRM_FORMAT_XRGB1555 | \ DRM_FORMAT_BIG_ENDIAN) # define DRM_FORMAT_HOST_RGB565 (DRM_FORMAT_RGB565 | \ DRM_FORMAT_BIG_ENDIAN) # define DRM_FORMAT_HOST_XRGB8888 DRM_FORMAT_BGRX8888 # define DRM_FORMAT_HOST_ARGB8888 DRM_FORMAT_BGRA8888 #else # define DRM_FORMAT_HOST_XRGB1555 DRM_FORMAT_XRGB1555 # define DRM_FORMAT_HOST_RGB565 DRM_FORMAT_RGB565 # define DRM_FORMAT_HOST_XRGB8888 DRM_FORMAT_XRGB8888 # define DRM_FORMAT_HOST_ARGB8888 DRM_FORMAT_ARGB8888 #endif struct drm_device; /** * struct drm_format_info - information about a DRM format */ struct drm_format_info { /** @format: 4CC format identifier (DRM_FORMAT_*) */ u32 format; /** * @depth: * * Color depth (number of bits per pixel excluding padding bits), * valid for a subset of RGB formats only. This is a legacy field, do * not use in new code and set to 0 for new formats. */ u8 depth; /** @num_planes: Number of color planes (1 to 3) */ u8 num_planes; union { /** * @cpp: * * Number of bytes per pixel (per plane), this is aliased with * @char_per_block. It is deprecated in favour of using the * triplet @char_per_block, @block_w, @block_h for better * describing the pixel format. */ u8 cpp[DRM_FORMAT_MAX_PLANES]; /** * @char_per_block: * * Number of bytes per block (per plane), where blocks are * defined as a rectangle of pixels which are stored next to * each other in a byte aligned memory region. Together with * @block_w and @block_h this is used to properly describe tiles * in tiled formats or to describe groups of pixels in packed * formats for which the memory needed for a single pixel is not * byte aligned. * * @cpp has been kept for historical reasons because there are * a lot of places in drivers where it's used. In drm core for * generic code paths the preferred way is to use * @char_per_block, drm_format_info_block_width() and * drm_format_info_block_height() which allows handling both * block and non-block formats in the same way. * * For formats that are intended to be used only with non-linear * modifiers both @cpp and @char_per_block must be 0 in the * generic format table. Drivers could supply accurate * information from their drm_mode_config.get_format_info hook * if they want the core to be validating the pitch. */ u8 char_per_block[DRM_FORMAT_MAX_PLANES]; }; /** * @block_w: * * Block width in pixels, this is intended to be accessed through * drm_format_info_block_width() */ u8 block_w[DRM_FORMAT_MAX_PLANES]; /** * @block_h: * * Block height in pixels, this is intended to be accessed through * drm_format_info_block_height() */ u8 block_h[DRM_FORMAT_MAX_PLANES]; /** @hsub: Horizontal chroma subsampling factor */ u8 hsub; /** @vsub: Vertical chroma subsampling factor */ u8 vsub; /** @has_alpha: Does the format embeds an alpha component? */ bool has_alpha; /** @is_yuv: Is it a YUV format? */ bool is_yuv; /** @is_color_indexed: Is it a color-indexed format? */ bool is_color_indexed; }; /** * drm_format_info_is_yuv_packed - check that the format info matches a YUV * format with data laid in a single plane * @info: format info * * Returns: * A boolean indicating whether the format info matches a packed YUV format. 
*/ static inline bool drm_format_info_is_yuv_packed(const struct drm_format_info *info) { return info->is_yuv && info->num_planes == 1; } /** * drm_format_info_is_yuv_semiplanar - check that the format info matches a YUV * format with data laid in two planes (luminance and chrominance) * @info: format info * * Returns: * A boolean indicating whether the format info matches a semiplanar YUV format. */ static inline bool drm_format_info_is_yuv_semiplanar(const struct drm_format_info *info) { return info->is_yuv && info->num_planes == 2; } /** * drm_format_info_is_yuv_planar - check that the format info matches a YUV * format with data laid in three planes (one for each YUV component) * @info: format info * * Returns: * A boolean indicating whether the format info matches a planar YUV format. */ static inline bool drm_format_info_is_yuv_planar(const struct drm_format_info *info) { return info->is_yuv && info->num_planes == 3; } /** * drm_format_info_is_yuv_sampling_410 - check that the format info matches a * YUV format with 4:1:0 sub-sampling * @info: format info * * Returns: * A boolean indicating whether the format info matches a YUV format with 4:1:0 * sub-sampling. */ static inline bool drm_format_info_is_yuv_sampling_410(const struct drm_format_info *info) { return info->is_yuv && info->hsub == 4 && info->vsub == 4; } /** * drm_format_info_is_yuv_sampling_411 - check that the format info matches a * YUV format with 4:1:1 sub-sampling * @info: format info * * Returns: * A boolean indicating whether the format info matches a YUV format with 4:1:1 * sub-sampling. */ static inline bool drm_format_info_is_yuv_sampling_411(const struct drm_format_info *info) { return info->is_yuv && info->hsub == 4 && info->vsub == 1; } /** * drm_format_info_is_yuv_sampling_420 - check that the format info matches a * YUV format with 4:2:0 sub-sampling * @info: format info * * Returns: * A boolean indicating whether the format info matches a YUV format with 4:2:0 * sub-sampling. */ static inline bool drm_format_info_is_yuv_sampling_420(const struct drm_format_info *info) { return info->is_yuv && info->hsub == 2 && info->vsub == 2; } /** * drm_format_info_is_yuv_sampling_422 - check that the format info matches a * YUV format with 4:2:2 sub-sampling * @info: format info * * Returns: * A boolean indicating whether the format info matches a YUV format with 4:2:2 * sub-sampling. */ static inline bool drm_format_info_is_yuv_sampling_422(const struct drm_format_info *info) { return info->is_yuv && info->hsub == 2 && info->vsub == 1; } /** * drm_format_info_is_yuv_sampling_444 - check that the format info matches a * YUV format with 4:4:4 sub-sampling * @info: format info * * Returns: * A boolean indicating whether the format info matches a YUV format with 4:4:4 * sub-sampling. */ static inline bool drm_format_info_is_yuv_sampling_444(const struct drm_format_info *info) { return info->is_yuv && info->hsub == 1 && info->vsub == 1; } /** * drm_format_info_plane_width - width of the plane given the first plane * @info: pixel format info * @width: width of the first plane * @plane: plane index * * Returns: * The width of @plane, given that the width of the first plane is @width. 
*/ static inline int drm_format_info_plane_width(const struct drm_format_info *info, int width, int plane) { if (!info || plane >= info->num_planes) return 0; if (plane == 0) return width; return DIV_ROUND_UP(width, info->hsub); } /** * drm_format_info_plane_height - height of the plane given the first plane * @info: pixel format info * @height: height of the first plane * @plane: plane index * * Returns: * The height of @plane, given that the height of the first plane is @height. */ static inline int drm_format_info_plane_height(const struct drm_format_info *info, int height, int plane) { if (!info || plane >= info->num_planes) return 0; if (plane == 0) return height; return DIV_ROUND_UP(height, info->vsub); } const struct drm_format_info *__drm_format_info(u32 format); const struct drm_format_info *drm_format_info(u32 format); const struct drm_format_info * drm_get_format_info(struct drm_device *dev, u32 pixel_format, u64 modifier); uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth); uint32_t drm_driver_legacy_fb_format(struct drm_device *dev, uint32_t bpp, uint32_t depth); uint32_t drm_driver_color_mode_format(struct drm_device *dev, unsigned int color_mode); unsigned int drm_format_info_block_width(const struct drm_format_info *info, int plane); unsigned int drm_format_info_block_height(const struct drm_format_info *info, int plane); unsigned int drm_format_info_bpp(const struct drm_format_info *info, int plane); uint64_t drm_format_info_min_pitch(const struct drm_format_info *info, int plane, unsigned int buffer_width); #endif /* __DRM_FOURCC_H__ */
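The declarations above only give the helper signatures; as a hedged illustration of how they compose, the sketch below shows one plausible way a driver could combine drm_format_info(), drm_format_info_plane_width() and drm_format_info_min_pitch() to validate user-supplied pitches for a two-plane format such as NV12. The function name and the exact check policy are assumptions made for this example, not code from the header.

/*
 * Illustrative sketch only: validate per-plane pitches for DRM_FORMAT_NV12
 * using the helpers declared in drm_fourcc.h.
 */
#include <linux/errno.h>
#include <drm/drm_fourcc.h>

static int example_check_nv12_pitches(unsigned int width,
				      const unsigned int pitches[2])
{
	const struct drm_format_info *info = drm_format_info(DRM_FORMAT_NV12);
	int plane;

	if (!info || !drm_format_info_is_yuv_semiplanar(info))
		return -EINVAL;

	for (plane = 0; plane < info->num_planes; plane++) {
		/* The chroma plane width is derived from plane 0 via @hsub. */
		int plane_width = drm_format_info_plane_width(info, width, plane);
		uint64_t min_pitch = drm_format_info_min_pitch(info, plane,
							       plane_width);

		if (!min_pitch || pitches[plane] < min_pitch)
			return -EINVAL;
	}

	return 0;
}

Because drm_format_info_min_pitch() is documented to build on @char_per_block and the block dimensions, the same pattern should also apply to block-based linear formats, not only simple per-pixel ones.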
// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
 *
 * Driver for Option High Speed Mobile Devices.
 *
 * Copyright (C) 2008 Option International
 *                    Filip Aben <f.aben@option.com>
 *                    Denis Joseph Barrow <d.barow@option.com>
 *                    Jan Dumon <j.dumon@option.com>
 * Copyright (C) 2007 Andrew Bird (Sphere Systems Ltd)
 *                    <ajb@spheresystems.co.uk>
 * Copyright (C) 2008 Greg Kroah-Hartman <gregkh@suse.de>
 * Copyright (C) 2008 Novell, Inc.
 *
 *****************************************************************************/
/******************************************************************************
 *
 * Description of the device:
 *
 * Interface 0: Contains the IP network interface on the bulk endpoints.
 *              The multiplexed serial ports use the interrupt and
 *              control endpoints.
 *              The interrupt endpoint carries a bitmap telling which
 *              multiplexed serial port needs servicing.
 *
 * Interface 1: Diagnostics port; uses bulk only. Do not submit URBs until
 *              the port is opened, as this has a huge impact on the network
 *              port throughput.
 *
 * Interface 2: Standard modem interface - circuit switched interface. This
 *              can be used to make a standard PPP connection; however, it
 *              should not be used while the IP network interface is enabled,
 *              for USB performance reasons, i.e. if using this interface,
 *              ideally set disable_net=1.
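 *
 * Usage illustration (an assumption for this example, not taken from this
 * file): when only the circuit-switched modem port is needed, and assuming
 * disable_net is exposed as a module parameter, the driver could be loaded
 * with the IP network interface disabled, e.g.:
 *
 *     modprobe hso disable_net=1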
* *****************************************************************************/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/sched/signal.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/netdevice.h> #include <linux/module.h> #include <linux/ethtool.h> #include <linux/usb.h> #include <linux/tty.h> #include <linux/tty_driver.h> #include <linux/tty_flip.h> #include <linux/kmod.h> #include <linux/rfkill.h> #include <linux/ip.h> #include <linux/uaccess.h> #include <linux/usb/cdc.h> #include <net/arp.h> #include <asm/byteorder.h> #include <linux/serial_core.h> #include <linux/serial.h> #define MOD_AUTHOR "Option Wireless" #define MOD_DESCRIPTION "USB High Speed Option driver" #define HSO_MAX_NET_DEVICES 10 #define HSO__MAX_MTU 2048 #define DEFAULT_MTU 1500 #define DEFAULT_MRU 1500 #define CTRL_URB_RX_SIZE 1024 #define CTRL_URB_TX_SIZE 64 #define BULK_URB_RX_SIZE 4096 #define BULK_URB_TX_SIZE 8192 #define MUX_BULK_RX_BUF_SIZE HSO__MAX_MTU #define MUX_BULK_TX_BUF_SIZE HSO__MAX_MTU #define MUX_BULK_RX_BUF_COUNT 4 #define USB_TYPE_OPTION_VENDOR 0x20 /* These definitions are used with the struct hso_net flags element */ /* - use *_bit operations on it. (bit indices not values.) */ #define HSO_NET_RUNNING 0 #define HSO_NET_TX_TIMEOUT (HZ*10) #define HSO_SERIAL_MAGIC 0x48534f31 /* Number of ttys to handle */ #define HSO_SERIAL_TTY_MINORS 256 #define MAX_RX_URBS 2 /*****************************************************************************/ /* Debugging functions */ /*****************************************************************************/ #define hso_dbg(lvl, fmt, ...) \ do { \ if ((lvl) & debug) \ pr_info("[%d:%s] " fmt, \ __LINE__, __func__, ##__VA_ARGS__); \ } while (0) /*****************************************************************************/ /* Enumerators */ /*****************************************************************************/ enum pkt_parse_state { WAIT_IP, WAIT_DATA, WAIT_SYNC }; /*****************************************************************************/ /* Structs */ /*****************************************************************************/ struct hso_shared_int { struct usb_endpoint_descriptor *intr_endp; void *shared_intr_buf; struct urb *shared_intr_urb; struct usb_device *usb; int use_count; int ref_count; struct mutex shared_int_lock; }; struct hso_net { struct hso_device *parent; struct net_device *net; struct rfkill *rfkill; char name[24]; struct usb_endpoint_descriptor *in_endp; struct usb_endpoint_descriptor *out_endp; struct urb *mux_bulk_rx_urb_pool[MUX_BULK_RX_BUF_COUNT]; struct urb *mux_bulk_tx_urb; void *mux_bulk_rx_buf_pool[MUX_BULK_RX_BUF_COUNT]; void *mux_bulk_tx_buf; struct sk_buff *skb_rx_buf; struct sk_buff *skb_tx_buf; enum pkt_parse_state rx_parse_state; spinlock_t net_lock; unsigned short rx_buf_size; unsigned short rx_buf_missing; struct iphdr rx_ip_hdr; unsigned long flags; }; enum rx_ctrl_state{ RX_IDLE, RX_SENT, RX_PENDING }; #define BM_REQUEST_TYPE (0xa1) #define B_NOTIFICATION (0x20) #define W_VALUE (0x0) #define W_LENGTH (0x2) #define B_OVERRUN (0x1<<6) #define B_PARITY (0x1<<5) #define B_FRAMING (0x1<<4) #define B_RING_SIGNAL (0x1<<3) #define B_BREAK (0x1<<2) #define B_TX_CARRIER (0x1<<1) #define B_RX_CARRIER (0x1<<0) struct hso_serial_state_notification { u8 bmRequestType; u8 bNotification; u16 wValue; u16 wIndex; u16 wLength; u16 UART_state_bitmap; } __packed; struct hso_tiocmget { struct mutex mutex; wait_queue_head_t waitq; int intr_completed; struct 
usb_endpoint_descriptor *endp; struct urb *urb; struct hso_serial_state_notification *serial_state_notification; u16 prev_UART_state_bitmap; struct uart_icount icount; }; struct hso_serial { struct hso_device *parent; int magic; u8 minor; struct hso_shared_int *shared_int; /* rx/tx urb could be either a bulk urb or a control urb depending on which serial port it is used on. */ struct urb *rx_urb[MAX_RX_URBS]; u8 num_rx_urbs; u8 *rx_data[MAX_RX_URBS]; u16 rx_data_length; /* should contain allocated length */ struct urb *tx_urb; u8 *tx_data; u8 *tx_buffer; u16 tx_data_length; /* should contain allocated length */ u16 tx_data_count; u16 tx_buffer_count; struct usb_ctrlrequest ctrl_req_tx; struct usb_ctrlrequest ctrl_req_rx; struct usb_endpoint_descriptor *in_endp; struct usb_endpoint_descriptor *out_endp; enum rx_ctrl_state rx_state; u8 rts_state; u8 dtr_state; unsigned tx_urb_used:1; struct tty_port port; /* from usb_serial_port */ spinlock_t serial_lock; int (*write_data) (struct hso_serial *serial); struct hso_tiocmget *tiocmget; /* Hacks required to get flow control * working on the serial receive buffers * so as not to drop characters on the floor. */ int curr_rx_urb_idx; u8 rx_urb_filled[MAX_RX_URBS]; struct tasklet_struct unthrottle_tasklet; }; struct hso_device { union { struct hso_serial *dev_serial; struct hso_net *dev_net; } port_data; u32 port_spec; u8 is_active; u8 usb_gone; struct work_struct async_get_intf; struct work_struct async_put_intf; struct usb_device *usb; struct usb_interface *interface; struct device *dev; struct kref ref; struct mutex mutex; }; /* Type of interface */ #define HSO_INTF_MASK 0xFF00 #define HSO_INTF_MUX 0x0100 #define HSO_INTF_BULK 0x0200 /* Type of port */ #define HSO_PORT_MASK 0xFF #define HSO_PORT_NO_PORT 0x0 #define HSO_PORT_CONTROL 0x1 #define HSO_PORT_APP 0x2 #define HSO_PORT_GPS 0x3 #define HSO_PORT_PCSC 0x4 #define HSO_PORT_APP2 0x5 #define HSO_PORT_GPS_CONTROL 0x6 #define HSO_PORT_MSD 0x7 #define HSO_PORT_VOICE 0x8 #define HSO_PORT_DIAG2 0x9 #define HSO_PORT_DIAG 0x10 #define HSO_PORT_MODEM 0x11 #define HSO_PORT_NETWORK 0x12 /* Additional device info */ #define HSO_INFO_MASK 0xFF000000 #define HSO_INFO_CRC_BUG 0x01000000 /*****************************************************************************/ /* Prototypes */ /*****************************************************************************/ /* Serial driver functions */ static int hso_serial_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear); static void ctrl_callback(struct urb *urb); static int put_rxbuf_data(struct urb *urb, struct hso_serial *serial); static void hso_kick_transmit(struct hso_serial *serial); /* Helper functions */ static int hso_mux_submit_intr_urb(struct hso_shared_int *mux_int, struct usb_device *usb, gfp_t gfp); static void handle_usb_error(int status, const char *function, struct hso_device *hso_dev); static struct usb_endpoint_descriptor *hso_get_ep(struct usb_interface *intf, int type, int dir); static int hso_get_mux_ports(struct usb_interface *intf, unsigned char *ports); static void hso_free_interface(struct usb_interface *intf); static int hso_start_serial_device(struct hso_device *hso_dev, gfp_t flags); static int hso_stop_serial_device(struct hso_device *hso_dev); static int hso_start_net_device(struct hso_device *hso_dev); static void hso_free_shared_int(struct hso_shared_int *shared_int); static int hso_stop_net_device(struct hso_device *hso_dev); static void hso_serial_ref_free(struct kref *ref); static void 
hso_std_serial_read_bulk_callback(struct urb *urb); static int hso_mux_serial_read(struct hso_serial *serial); static void async_get_intf(struct work_struct *data); static void async_put_intf(struct work_struct *data); static int hso_put_activity(struct hso_device *hso_dev); static int hso_get_activity(struct hso_device *hso_dev); static void tiocmget_intr_callback(struct urb *urb); /*****************************************************************************/ /* Helping functions */ /*****************************************************************************/ /* #define DEBUG */ static inline struct hso_net *dev2net(struct hso_device *hso_dev) { return hso_dev->port_data.dev_net; } static inline struct hso_serial *dev2ser(struct hso_device *hso_dev) { return hso_dev->port_data.dev_serial; } /* Debugging functions */ #ifdef DEBUG static void dbg_dump(int line_count, const char *func_name, unsigned char *buf, unsigned int len) { static char name[255]; sprintf(name, "hso[%d:%s]", line_count, func_name); print_hex_dump_bytes(name, DUMP_PREFIX_NONE, buf, len); } #define DUMP(buf_, len_) \ dbg_dump(__LINE__, __func__, (unsigned char *)buf_, len_) #define DUMP1(buf_, len_) \ do { \ if (0x01 & debug) \ DUMP(buf_, len_); \ } while (0) #else #define DUMP(buf_, len_) #define DUMP1(buf_, len_) #endif /* module parameters */ static int debug; static int tty_major; static int disable_net; /* driver info */ static const char driver_name[] = "hso"; static const char tty_filename[] = "ttyHS"; /* the usb driver itself (registered in hso_init) */ static struct usb_driver hso_driver; /* serial structures */ static struct tty_driver *tty_drv; static struct hso_device *serial_table[HSO_SERIAL_TTY_MINORS]; static struct hso_device *network_table[HSO_MAX_NET_DEVICES]; static DEFINE_SPINLOCK(serial_table_lock); static const s32 default_port_spec[] = { HSO_INTF_MUX | HSO_PORT_NETWORK, HSO_INTF_BULK | HSO_PORT_DIAG, HSO_INTF_BULK | HSO_PORT_MODEM, 0 }; static const s32 icon321_port_spec[] = { HSO_INTF_MUX | HSO_PORT_NETWORK, HSO_INTF_BULK | HSO_PORT_DIAG2, HSO_INTF_BULK | HSO_PORT_MODEM, HSO_INTF_BULK | HSO_PORT_DIAG, 0 }; #define default_port_device(vendor, product) \ USB_DEVICE(vendor, product), \ .driver_info = (kernel_ulong_t)default_port_spec #define icon321_port_device(vendor, product) \ USB_DEVICE(vendor, product), \ .driver_info = (kernel_ulong_t)icon321_port_spec /* list of devices we support */ static const struct usb_device_id hso_ids[] = { {default_port_device(0x0af0, 0x6711)}, {default_port_device(0x0af0, 0x6731)}, {default_port_device(0x0af0, 0x6751)}, {default_port_device(0x0af0, 0x6771)}, {default_port_device(0x0af0, 0x6791)}, {default_port_device(0x0af0, 0x6811)}, {default_port_device(0x0af0, 0x6911)}, {default_port_device(0x0af0, 0x6951)}, {default_port_device(0x0af0, 0x6971)}, {default_port_device(0x0af0, 0x7011)}, {default_port_device(0x0af0, 0x7031)}, {default_port_device(0x0af0, 0x7051)}, {default_port_device(0x0af0, 0x7071)}, {default_port_device(0x0af0, 0x7111)}, {default_port_device(0x0af0, 0x7211)}, {default_port_device(0x0af0, 0x7251)}, {default_port_device(0x0af0, 0x7271)}, {default_port_device(0x0af0, 0x7311)}, {default_port_device(0x0af0, 0xc031)}, /* Icon-Edge */ {icon321_port_device(0x0af0, 0xd013)}, /* Module HSxPA */ {icon321_port_device(0x0af0, 0xd031)}, /* Icon-321 */ {icon321_port_device(0x0af0, 0xd033)}, /* Icon-322 */ {USB_DEVICE(0x0af0, 0x7301)}, /* GE40x */ {USB_DEVICE(0x0af0, 0x7361)}, /* GE40x */ {USB_DEVICE(0x0af0, 0x7381)}, /* GE40x */ {USB_DEVICE(0x0af0, 0x7401)}, /* 
GI 0401 */ {USB_DEVICE(0x0af0, 0x7501)}, /* GTM 382 */ {USB_DEVICE(0x0af0, 0x7601)}, /* GE40x */ {USB_DEVICE(0x0af0, 0x7701)}, {USB_DEVICE(0x0af0, 0x7706)}, {USB_DEVICE(0x0af0, 0x7801)}, {USB_DEVICE(0x0af0, 0x7901)}, {USB_DEVICE(0x0af0, 0x7A01)}, {USB_DEVICE(0x0af0, 0x7A05)}, {USB_DEVICE(0x0af0, 0x8200)}, {USB_DEVICE(0x0af0, 0x8201)}, {USB_DEVICE(0x0af0, 0x8300)}, {USB_DEVICE(0x0af0, 0x8302)}, {USB_DEVICE(0x0af0, 0x8304)}, {USB_DEVICE(0x0af0, 0x8400)}, {USB_DEVICE(0x0af0, 0x8600)}, {USB_DEVICE(0x0af0, 0x8800)}, {USB_DEVICE(0x0af0, 0x8900)}, {USB_DEVICE(0x0af0, 0x9000)}, {USB_DEVICE(0x0af0, 0x9200)}, /* Option GTM671WFS */ {USB_DEVICE(0x0af0, 0xd035)}, {USB_DEVICE(0x0af0, 0xd055)}, {USB_DEVICE(0x0af0, 0xd155)}, {USB_DEVICE(0x0af0, 0xd255)}, {USB_DEVICE(0x0af0, 0xd057)}, {USB_DEVICE(0x0af0, 0xd157)}, {USB_DEVICE(0x0af0, 0xd257)}, {USB_DEVICE(0x0af0, 0xd357)}, {USB_DEVICE(0x0af0, 0xd058)}, {USB_DEVICE(0x0af0, 0xc100)}, {} }; MODULE_DEVICE_TABLE(usb, hso_ids); /* Sysfs attribute */ static ssize_t hsotype_show(struct device *dev, struct device_attribute *attr, char *buf) { struct hso_device *hso_dev = dev_get_drvdata(dev); char *port_name; if (!hso_dev) return 0; switch (hso_dev->port_spec & HSO_PORT_MASK) { case HSO_PORT_CONTROL: port_name = "Control"; break; case HSO_PORT_APP: port_name = "Application"; break; case HSO_PORT_APP2: port_name = "Application2"; break; case HSO_PORT_GPS: port_name = "GPS"; break; case HSO_PORT_GPS_CONTROL: port_name = "GPS Control"; break; case HSO_PORT_PCSC: port_name = "PCSC"; break; case HSO_PORT_DIAG: port_name = "Diagnostic"; break; case HSO_PORT_DIAG2: port_name = "Diagnostic2"; break; case HSO_PORT_MODEM: port_name = "Modem"; break; case HSO_PORT_NETWORK: port_name = "Network"; break; default: port_name = "Unknown"; break; } return sprintf(buf, "%s\n", port_name); } static DEVICE_ATTR_RO(hsotype); static struct attribute *hso_serial_dev_attrs[] = { &dev_attr_hsotype.attr, NULL }; ATTRIBUTE_GROUPS(hso_serial_dev); static int hso_urb_to_index(struct hso_serial *serial, struct urb *urb) { int idx; for (idx = 0; idx < serial->num_rx_urbs; idx++) if (serial->rx_urb[idx] == urb) return idx; dev_err(serial->parent->dev, "hso_urb_to_index failed\n"); return -1; } /* converts mux value to a port spec value */ static u32 hso_mux_to_port(int mux) { u32 result; switch (mux) { case 0x1: result = HSO_PORT_CONTROL; break; case 0x2: result = HSO_PORT_APP; break; case 0x4: result = HSO_PORT_PCSC; break; case 0x8: result = HSO_PORT_GPS; break; case 0x10: result = HSO_PORT_APP2; break; default: result = HSO_PORT_NO_PORT; } return result; } /* converts port spec value to a mux value */ static u32 hso_port_to_mux(int port) { u32 result; switch (port & HSO_PORT_MASK) { case HSO_PORT_CONTROL: result = 0x0; break; case HSO_PORT_APP: result = 0x1; break; case HSO_PORT_PCSC: result = 0x2; break; case HSO_PORT_GPS: result = 0x3; break; case HSO_PORT_APP2: result = 0x4; break; default: result = 0x0; } return result; } static struct hso_serial *get_serial_by_shared_int_and_type( struct hso_shared_int *shared_int, int mux) { int i, port; port = hso_mux_to_port(mux); for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) { if (serial_table[i] && (dev2ser(serial_table[i])->shared_int == shared_int) && ((serial_table[i]->port_spec & HSO_PORT_MASK) == port)) { return dev2ser(serial_table[i]); } } return NULL; } static struct hso_serial *get_serial_by_index(unsigned index) { struct hso_serial *serial = NULL; unsigned long flags; spin_lock_irqsave(&serial_table_lock, flags); if (serial_table[index]) 
serial = dev2ser(serial_table[index]); spin_unlock_irqrestore(&serial_table_lock, flags); return serial; } static int obtain_minor(struct hso_serial *serial) { int index; unsigned long flags; spin_lock_irqsave(&serial_table_lock, flags); for (index = 0; index < HSO_SERIAL_TTY_MINORS; index++) { if (serial_table[index] == NULL) { serial_table[index] = serial->parent; serial->minor = index; spin_unlock_irqrestore(&serial_table_lock, flags); return 0; } } spin_unlock_irqrestore(&serial_table_lock, flags); pr_err("%s: no free serial devices in table\n", __func__); return -1; } static void release_minor(struct hso_serial *serial) { unsigned long flags; spin_lock_irqsave(&serial_table_lock, flags); serial_table[serial->minor] = NULL; spin_unlock_irqrestore(&serial_table_lock, flags); } static void handle_usb_error(int status, const char *function, struct hso_device *hso_dev) { char *explanation; switch (status) { case -ENODEV: explanation = "no device"; break; case -ENOENT: explanation = "endpoint not enabled"; break; case -EPIPE: explanation = "endpoint stalled"; break; case -ENOSPC: explanation = "not enough bandwidth"; break; case -ESHUTDOWN: explanation = "device disabled"; break; case -EHOSTUNREACH: explanation = "device suspended"; break; case -EINVAL: case -EAGAIN: case -EFBIG: case -EMSGSIZE: explanation = "internal error"; break; case -EILSEQ: case -EPROTO: case -ETIME: case -ETIMEDOUT: explanation = "protocol error"; if (hso_dev) usb_queue_reset_device(hso_dev->interface); break; default: explanation = "unknown status"; break; } /* log a meaningful explanation of an USB status */ hso_dbg(0x1, "%s: received USB status - %s (%d)\n", function, explanation, status); } /* Network interface functions */ /* called when net interface is brought up by ifconfig */ static int hso_net_open(struct net_device *net) { struct hso_net *odev = netdev_priv(net); unsigned long flags = 0; if (!odev) { dev_err(&net->dev, "No net device !\n"); return -ENODEV; } odev->skb_tx_buf = NULL; /* setup environment */ spin_lock_irqsave(&odev->net_lock, flags); odev->rx_parse_state = WAIT_IP; odev->rx_buf_size = 0; odev->rx_buf_missing = sizeof(struct iphdr); spin_unlock_irqrestore(&odev->net_lock, flags); /* We are up and running. */ set_bit(HSO_NET_RUNNING, &odev->flags); hso_start_net_device(odev->parent); /* Tell the kernel we are ready to start receiving from it */ netif_start_queue(net); return 0; } /* called when interface is brought down by ifconfig */ static int hso_net_close(struct net_device *net) { struct hso_net *odev = netdev_priv(net); /* we don't need the queue anymore */ netif_stop_queue(net); /* no longer running */ clear_bit(HSO_NET_RUNNING, &odev->flags); hso_stop_net_device(odev->parent); /* done */ return 0; } /* USB tells is xmit done, we should start the netqueue again */ static void write_bulk_callback(struct urb *urb) { struct hso_net *odev = urb->context; int status = urb->status; /* Sanity check */ if (!odev || !test_bit(HSO_NET_RUNNING, &odev->flags)) { dev_err(&urb->dev->dev, "%s: device not running\n", __func__); return; } /* Do we still have a valid kernel network device? 
*/ if (!netif_device_present(odev->net)) { dev_err(&urb->dev->dev, "%s: net device not present\n", __func__); return; } /* log status, but don't act on it, we don't need to resubmit anything * anyhow */ if (status) handle_usb_error(status, __func__, odev->parent); hso_put_activity(odev->parent); /* Tell the network interface we are ready for another frame */ netif_wake_queue(odev->net); } /* called by kernel when we need to transmit a packet */ static netdev_tx_t hso_net_start_xmit(struct sk_buff *skb, struct net_device *net) { struct hso_net *odev = netdev_priv(net); int result; /* Tell the kernel, "No more frames 'til we are done with this one." */ netif_stop_queue(net); if (hso_get_activity(odev->parent) == -EAGAIN) { odev->skb_tx_buf = skb; return NETDEV_TX_OK; } /* log if asked */ DUMP1(skb->data, skb->len); /* Copy it from kernel memory to OUR memory */ memcpy(odev->mux_bulk_tx_buf, skb->data, skb->len); hso_dbg(0x1, "len: %d/%d\n", skb->len, MUX_BULK_TX_BUF_SIZE); /* Fill in the URB for shipping it out. */ usb_fill_bulk_urb(odev->mux_bulk_tx_urb, odev->parent->usb, usb_sndbulkpipe(odev->parent->usb, odev->out_endp-> bEndpointAddress & 0x7F), odev->mux_bulk_tx_buf, skb->len, write_bulk_callback, odev); /* Deal with the Zero Length packet problem, I hope */ odev->mux_bulk_tx_urb->transfer_flags |= URB_ZERO_PACKET; /* Send the URB on its merry way. */ result = usb_submit_urb(odev->mux_bulk_tx_urb, GFP_ATOMIC); if (result) { dev_warn(&odev->parent->interface->dev, "failed mux_bulk_tx_urb %d\n", result); net->stats.tx_errors++; netif_start_queue(net); } else { net->stats.tx_packets++; net->stats.tx_bytes += skb->len; } dev_kfree_skb(skb); /* we're done */ return NETDEV_TX_OK; } static const struct ethtool_ops ops = { .get_link = ethtool_op_get_link }; /* called when a packet did not ack after watchdogtimeout */ static void hso_net_tx_timeout(struct net_device *net, unsigned int txqueue) { struct hso_net *odev = netdev_priv(net); if (!odev) return; /* Tell syslog we are hosed. */ dev_warn(&net->dev, "Tx timed out.\n"); /* Tear the waiting frame off the list */ if (odev->mux_bulk_tx_urb) usb_unlink_urb(odev->mux_bulk_tx_urb); /* Update statistics */ net->stats.tx_errors++; } /* make a real packet from the received USB buffer */ static void packetizeRx(struct hso_net *odev, unsigned char *ip_pkt, unsigned int count, unsigned char is_eop) { unsigned short temp_bytes; unsigned short buffer_offset = 0; unsigned short frame_len; /* log if needed */ hso_dbg(0x1, "Rx %d bytes\n", count); DUMP(ip_pkt, min(128, (int)count)); while (count) { switch (odev->rx_parse_state) { case WAIT_IP: /* waiting for IP header. */ /* wanted bytes - size of ip header */ temp_bytes = (count < odev->rx_buf_missing) ? count : odev-> rx_buf_missing; memcpy(((unsigned char *)(&odev->rx_ip_hdr)) + odev->rx_buf_size, ip_pkt + buffer_offset, temp_bytes); odev->rx_buf_size += temp_bytes; buffer_offset += temp_bytes; odev->rx_buf_missing -= temp_bytes; count -= temp_bytes; if (!odev->rx_buf_missing) { /* header is complete allocate an sk_buffer and * continue to WAIT_DATA */ frame_len = ntohs(odev->rx_ip_hdr.tot_len); if ((frame_len > DEFAULT_MRU) || (frame_len < sizeof(struct iphdr))) { dev_err(&odev->net->dev, "Invalid frame (%d) length\n", frame_len); odev->rx_parse_state = WAIT_SYNC; continue; } /* Allocate an sk_buff */ odev->skb_rx_buf = netdev_alloc_skb(odev->net, frame_len); if (!odev->skb_rx_buf) { /* We got no receive buffer. 
*/ hso_dbg(0x1, "could not allocate memory\n"); odev->rx_parse_state = WAIT_SYNC; continue; } /* Copy what we got so far. make room for iphdr * after tail. */ skb_put_data(odev->skb_rx_buf, (char *)&(odev->rx_ip_hdr), sizeof(struct iphdr)); /* ETH_HLEN */ odev->rx_buf_size = sizeof(struct iphdr); /* Filip actually use .tot_len */ odev->rx_buf_missing = frame_len - sizeof(struct iphdr); odev->rx_parse_state = WAIT_DATA; } break; case WAIT_DATA: temp_bytes = (count < odev->rx_buf_missing) ? count : odev->rx_buf_missing; /* Copy the rest of the bytes that are left in the * buffer into the waiting sk_buf. */ /* Make room for temp_bytes after tail. */ skb_put_data(odev->skb_rx_buf, ip_pkt + buffer_offset, temp_bytes); odev->rx_buf_missing -= temp_bytes; count -= temp_bytes; buffer_offset += temp_bytes; odev->rx_buf_size += temp_bytes; if (!odev->rx_buf_missing) { /* Packet is complete. Inject into stack. */ /* We have IP packet here */ odev->skb_rx_buf->protocol = cpu_to_be16(ETH_P_IP); skb_reset_mac_header(odev->skb_rx_buf); /* Ship it off to the kernel */ netif_rx(odev->skb_rx_buf); /* No longer our buffer. */ odev->skb_rx_buf = NULL; /* update out statistics */ odev->net->stats.rx_packets++; odev->net->stats.rx_bytes += odev->rx_buf_size; odev->rx_buf_size = 0; odev->rx_buf_missing = sizeof(struct iphdr); odev->rx_parse_state = WAIT_IP; } break; case WAIT_SYNC: hso_dbg(0x1, " W_S\n"); count = 0; break; default: hso_dbg(0x1, "\n"); count--; break; } } /* Recovery mechanism for WAIT_SYNC state. */ if (is_eop) { if (odev->rx_parse_state == WAIT_SYNC) { odev->rx_parse_state = WAIT_IP; odev->rx_buf_size = 0; odev->rx_buf_missing = sizeof(struct iphdr); } } } static void fix_crc_bug(struct urb *urb, __le16 max_packet_size) { static const u8 crc_check[4] = { 0xDE, 0xAD, 0xBE, 0xEF }; u32 rest = urb->actual_length % le16_to_cpu(max_packet_size); if (((rest == 5) || (rest == 6)) && !memcmp(((u8 *)urb->transfer_buffer) + urb->actual_length - 4, crc_check, 4)) { urb->actual_length -= 4; } } /* Moving data from usb to kernel (in interrupt state) */ static void read_bulk_callback(struct urb *urb) { struct hso_net *odev = urb->context; struct net_device *net; int result; unsigned long flags; int status = urb->status; /* is al ok? (Filip: Who's Al ?) */ if (status) { handle_usb_error(status, __func__, odev->parent); return; } /* Sanity check */ if (!odev || !test_bit(HSO_NET_RUNNING, &odev->flags)) { hso_dbg(0x1, "BULK IN callback but driver is not active!\n"); return; } usb_mark_last_busy(urb->dev); net = odev->net; if (!netif_device_present(net)) { /* Somebody killed our network interface... */ return; } if (odev->parent->port_spec & HSO_INFO_CRC_BUG) fix_crc_bug(urb, odev->in_endp->wMaxPacketSize); /* do we even have a packet? */ if (urb->actual_length) { /* Handle the IP stream, add header and push it onto network * stack if the packet is complete. */ spin_lock_irqsave(&odev->net_lock, flags); packetizeRx(odev, urb->transfer_buffer, urb->actual_length, (urb->transfer_buffer_length > urb->actual_length) ? 1 : 0); spin_unlock_irqrestore(&odev->net_lock, flags); } /* We are done with this URB, resubmit it. Prep the USB to wait for * another frame. Reuse same as received. */ usb_fill_bulk_urb(urb, odev->parent->usb, usb_rcvbulkpipe(odev->parent->usb, odev->in_endp-> bEndpointAddress & 0x7F), urb->transfer_buffer, MUX_BULK_RX_BUF_SIZE, read_bulk_callback, odev); /* Give this to the USB subsystem so it can tell us when more data * arrives. 
*/ result = usb_submit_urb(urb, GFP_ATOMIC); if (result) dev_warn(&odev->parent->interface->dev, "%s failed submit mux_bulk_rx_urb %d\n", __func__, result); } /* Serial driver functions */ static void hso_init_termios(struct ktermios *termios) { /* * The default requirements for this device are: */ termios->c_iflag &= ~(IGNBRK /* disable ignore break */ | BRKINT /* disable break causes interrupt */ | PARMRK /* disable mark parity errors */ | ISTRIP /* disable clear high bit of input characters */ | INLCR /* disable translate NL to CR */ | IGNCR /* disable ignore CR */ | ICRNL /* disable translate CR to NL */ | IXON); /* disable enable XON/XOFF flow control */ /* disable postprocess output characters */ termios->c_oflag &= ~OPOST; termios->c_lflag &= ~(ECHO /* disable echo input characters */ | ECHONL /* disable echo new line */ | ICANON /* disable erase, kill, werase, and rprnt special characters */ | ISIG /* disable interrupt, quit, and suspend special characters */ | IEXTEN); /* disable non-POSIX special characters */ termios->c_cflag &= ~(CSIZE /* no size */ | PARENB /* disable parity bit */ | CBAUD /* clear current baud rate */ | CBAUDEX); /* clear current buad rate */ termios->c_cflag |= CS8; /* character size 8 bits */ /* baud rate 115200 */ tty_termios_encode_baud_rate(termios, 115200, 115200); } static void _hso_serial_set_termios(struct tty_struct *tty) { struct hso_serial *serial = tty->driver_data; if (!serial) { pr_err("%s: no tty structures", __func__); return; } hso_dbg(0x8, "port %d\n", serial->minor); /* * Fix up unsupported bits */ tty->termios.c_iflag &= ~IXON; /* disable enable XON/XOFF flow control */ tty->termios.c_cflag &= ~(CSIZE /* no size */ | PARENB /* disable parity bit */ | CBAUD /* clear current baud rate */ | CBAUDEX); /* clear current buad rate */ tty->termios.c_cflag |= CS8; /* character size 8 bits */ /* baud rate 115200 */ tty_encode_baud_rate(tty, 115200, 115200); } static void hso_resubmit_rx_bulk_urb(struct hso_serial *serial, struct urb *urb) { int result; /* We are done with this URB, resubmit it. Prep the USB to wait for * another frame */ usb_fill_bulk_urb(urb, serial->parent->usb, usb_rcvbulkpipe(serial->parent->usb, serial->in_endp-> bEndpointAddress & 0x7F), urb->transfer_buffer, serial->rx_data_length, hso_std_serial_read_bulk_callback, serial); /* Give this to the USB subsystem so it can tell us when more data * arrives. */ result = usb_submit_urb(urb, GFP_ATOMIC); if (result) { dev_err(&urb->dev->dev, "%s failed submit serial rx_urb %d\n", __func__, result); } } static void put_rxbuf_data_and_resubmit_bulk_urb(struct hso_serial *serial) { int count; struct urb *curr_urb; while (serial->rx_urb_filled[serial->curr_rx_urb_idx]) { curr_urb = serial->rx_urb[serial->curr_rx_urb_idx]; count = put_rxbuf_data(curr_urb, serial); if (count == -1) return; if (count == 0) { serial->curr_rx_urb_idx++; if (serial->curr_rx_urb_idx >= serial->num_rx_urbs) serial->curr_rx_urb_idx = 0; hso_resubmit_rx_bulk_urb(serial, curr_urb); } } } static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial) { int count = 0; struct urb *urb; urb = serial->rx_urb[0]; if (serial->port.count > 0) { count = put_rxbuf_data(urb, serial); if (count == -1) return; } /* Re issue a read as long as we receive data. 
*/ if (count == 0 && ((urb->actual_length != 0) || (serial->rx_state == RX_PENDING))) { serial->rx_state = RX_SENT; hso_mux_serial_read(serial); } else serial->rx_state = RX_IDLE; } /* read callback for Diag and CS port */ static void hso_std_serial_read_bulk_callback(struct urb *urb) { struct hso_serial *serial = urb->context; int status = urb->status; unsigned long flags; hso_dbg(0x8, "--- Got serial_read_bulk callback %02x ---\n", status); /* sanity check */ if (!serial) { hso_dbg(0x1, "serial == NULL\n"); return; } if (status) { handle_usb_error(status, __func__, serial->parent); return; } hso_dbg(0x1, "Actual length = %d\n", urb->actual_length); DUMP1(urb->transfer_buffer, urb->actual_length); /* Anyone listening? */ if (serial->port.count == 0) return; if (serial->parent->port_spec & HSO_INFO_CRC_BUG) fix_crc_bug(urb, serial->in_endp->wMaxPacketSize); /* Valid data, handle RX data */ spin_lock_irqsave(&serial->serial_lock, flags); serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 1; put_rxbuf_data_and_resubmit_bulk_urb(serial); spin_unlock_irqrestore(&serial->serial_lock, flags); } /* * This needs to be a tasklet otherwise we will * end up recursively calling this function. */ static void hso_unthrottle_tasklet(struct tasklet_struct *t) { struct hso_serial *serial = from_tasklet(serial, t, unthrottle_tasklet); unsigned long flags; spin_lock_irqsave(&serial->serial_lock, flags); if ((serial->parent->port_spec & HSO_INTF_MUX)) put_rxbuf_data_and_resubmit_ctrl_urb(serial); else put_rxbuf_data_and_resubmit_bulk_urb(serial); spin_unlock_irqrestore(&serial->serial_lock, flags); } static void hso_unthrottle(struct tty_struct *tty) { struct hso_serial *serial = tty->driver_data; tasklet_hi_schedule(&serial->unthrottle_tasklet); } /* open the requested serial port */ static int hso_serial_open(struct tty_struct *tty, struct file *filp) { struct hso_serial *serial = get_serial_by_index(tty->index); int result; /* sanity check */ if (serial == NULL || serial->magic != HSO_SERIAL_MAGIC) { WARN_ON(1); tty->driver_data = NULL; hso_dbg(0x1, "Failed to open port\n"); return -ENODEV; } mutex_lock(&serial->parent->mutex); result = usb_autopm_get_interface(serial->parent->interface); if (result < 0) goto err_out; hso_dbg(0x1, "Opening %d\n", serial->minor); /* setup */ tty->driver_data = serial; tty_port_tty_set(&serial->port, tty); /* check for port already opened, if not set the termios */ serial->port.count++; if (serial->port.count == 1) { serial->rx_state = RX_IDLE; /* Force default termio settings */ _hso_serial_set_termios(tty); tasklet_setup(&serial->unthrottle_tasklet, hso_unthrottle_tasklet); result = hso_start_serial_device(serial->parent, GFP_KERNEL); if (result) { hso_stop_serial_device(serial->parent); serial->port.count--; } else { kref_get(&serial->parent->ref); } } else { hso_dbg(0x1, "Port was already open\n"); } usb_autopm_put_interface(serial->parent->interface); /* done */ if (result) hso_serial_tiocmset(tty, TIOCM_RTS | TIOCM_DTR, 0); err_out: mutex_unlock(&serial->parent->mutex); return result; } /* close the requested serial port */ static void hso_serial_close(struct tty_struct *tty, struct file *filp) { struct hso_serial *serial = tty->driver_data; u8 usb_gone; hso_dbg(0x1, "Closing serial port\n"); /* Open failed, no close cleanup required */ if (serial == NULL) return; mutex_lock(&serial->parent->mutex); usb_gone = serial->parent->usb_gone; if (!usb_gone) usb_autopm_get_interface(serial->parent->interface); /* reset the rts and dtr */ /* do the actual close */ 
serial->port.count--; if (serial->port.count <= 0) { serial->port.count = 0; tty_port_tty_set(&serial->port, NULL); if (!usb_gone) hso_stop_serial_device(serial->parent); tasklet_kill(&serial->unthrottle_tasklet); } if (!usb_gone) usb_autopm_put_interface(serial->parent->interface); mutex_unlock(&serial->parent->mutex); } /* close the requested serial port */ static ssize_t hso_serial_write(struct tty_struct *tty, const u8 *buf, size_t count) { struct hso_serial *serial = tty->driver_data; unsigned long flags; /* sanity check */ if (serial == NULL) { pr_err("%s: serial is NULL\n", __func__); return -ENODEV; } spin_lock_irqsave(&serial->serial_lock, flags); count = min_t(size_t, serial->tx_data_length - serial->tx_buffer_count, count); memcpy(serial->tx_buffer + serial->tx_buffer_count, buf, count); serial->tx_buffer_count += count; spin_unlock_irqrestore(&serial->serial_lock, flags); hso_kick_transmit(serial); /* done */ return count; } /* how much room is there for writing */ static unsigned int hso_serial_write_room(struct tty_struct *tty) { struct hso_serial *serial = tty->driver_data; unsigned int room; unsigned long flags; spin_lock_irqsave(&serial->serial_lock, flags); room = serial->tx_data_length - serial->tx_buffer_count; spin_unlock_irqrestore(&serial->serial_lock, flags); /* return free room */ return room; } static void hso_serial_cleanup(struct tty_struct *tty) { struct hso_serial *serial = tty->driver_data; if (!serial) return; kref_put(&serial->parent->ref, hso_serial_ref_free); } /* setup the term */ static void hso_serial_set_termios(struct tty_struct *tty, const struct ktermios *old) { struct hso_serial *serial = tty->driver_data; unsigned long flags; if (old) hso_dbg(0x16, "Termios called with: cflags new[%u] - old[%u]\n", (unsigned int)tty->termios.c_cflag, (unsigned int)old->c_cflag); /* the actual setup */ spin_lock_irqsave(&serial->serial_lock, flags); if (serial->port.count) _hso_serial_set_termios(tty); else tty->termios = *old; spin_unlock_irqrestore(&serial->serial_lock, flags); /* done */ } /* how many characters in the buffer */ static unsigned int hso_serial_chars_in_buffer(struct tty_struct *tty) { struct hso_serial *serial = tty->driver_data; unsigned long flags; unsigned int chars; /* sanity check */ if (serial == NULL) return 0; spin_lock_irqsave(&serial->serial_lock, flags); chars = serial->tx_buffer_count; spin_unlock_irqrestore(&serial->serial_lock, flags); return chars; } static int tiocmget_submit_urb(struct hso_serial *serial, struct hso_tiocmget *tiocmget, struct usb_device *usb) { int result; if (serial->parent->usb_gone) return -ENODEV; usb_fill_int_urb(tiocmget->urb, usb, usb_rcvintpipe(usb, tiocmget->endp-> bEndpointAddress & 0x7F), tiocmget->serial_state_notification, sizeof(struct hso_serial_state_notification), tiocmget_intr_callback, serial, tiocmget->endp->bInterval); result = usb_submit_urb(tiocmget->urb, GFP_ATOMIC); if (result) { dev_warn(&usb->dev, "%s usb_submit_urb failed %d\n", __func__, result); } return result; } static void tiocmget_intr_callback(struct urb *urb) { struct hso_serial *serial = urb->context; struct hso_tiocmget *tiocmget; int status = urb->status; u16 UART_state_bitmap, prev_UART_state_bitmap; struct uart_icount *icount; struct hso_serial_state_notification *serial_state_notification; struct usb_device *usb; struct usb_interface *interface; int if_num; /* Sanity checks */ if (!serial) return; if (status) { handle_usb_error(status, __func__, serial->parent); return; } /* tiocmget is only supported on HSO_PORT_MODEM 
*/ tiocmget = serial->tiocmget; if (!tiocmget) return; BUG_ON((serial->parent->port_spec & HSO_PORT_MASK) != HSO_PORT_MODEM); usb = serial->parent->usb; interface = serial->parent->interface; if_num = interface->cur_altsetting->desc.bInterfaceNumber; /* wIndex should be the USB interface number of the port to which the * notification applies, which should always be the Modem port. */ serial_state_notification = tiocmget->serial_state_notification; if (serial_state_notification->bmRequestType != BM_REQUEST_TYPE || serial_state_notification->bNotification != B_NOTIFICATION || le16_to_cpu(serial_state_notification->wValue) != W_VALUE || le16_to_cpu(serial_state_notification->wIndex) != if_num || le16_to_cpu(serial_state_notification->wLength) != W_LENGTH) { dev_warn(&usb->dev, "hso received invalid serial state notification\n"); DUMP(serial_state_notification, sizeof(struct hso_serial_state_notification)); } else { unsigned long flags; UART_state_bitmap = le16_to_cpu(serial_state_notification-> UART_state_bitmap); prev_UART_state_bitmap = tiocmget->prev_UART_state_bitmap; icount = &tiocmget->icount; spin_lock_irqsave(&serial->serial_lock, flags); if ((UART_state_bitmap & B_OVERRUN) != (prev_UART_state_bitmap & B_OVERRUN)) icount->parity++; if ((UART_state_bitmap & B_PARITY) != (prev_UART_state_bitmap & B_PARITY)) icount->parity++; if ((UART_state_bitmap & B_FRAMING) != (prev_UART_state_bitmap & B_FRAMING)) icount->frame++; if ((UART_state_bitmap & B_RING_SIGNAL) && !(prev_UART_state_bitmap & B_RING_SIGNAL)) icount->rng++; if ((UART_state_bitmap & B_BREAK) != (prev_UART_state_bitmap & B_BREAK)) icount->brk++; if ((UART_state_bitmap & B_TX_CARRIER) != (prev_UART_state_bitmap & B_TX_CARRIER)) icount->dsr++; if ((UART_state_bitmap & B_RX_CARRIER) != (prev_UART_state_bitmap & B_RX_CARRIER)) icount->dcd++; tiocmget->prev_UART_state_bitmap = UART_state_bitmap; spin_unlock_irqrestore(&serial->serial_lock, flags); tiocmget->intr_completed = 1; wake_up_interruptible(&tiocmget->waitq); } memset(serial_state_notification, 0, sizeof(struct hso_serial_state_notification)); tiocmget_submit_urb(serial, tiocmget, serial->parent->usb); } /* * next few functions largely stolen from drivers/serial/serial_core.c */ /* Wait for any of the 4 modem inputs (DCD,RI,DSR,CTS) to change * - mask passed in arg for lines of interest * (use |'ed TIOCM_RNG/DSR/CD/CTS for masking) * Caller should use TIOCGICOUNT to see which one it was */ static int hso_wait_modem_status(struct hso_serial *serial, unsigned long arg) { DECLARE_WAITQUEUE(wait, current); struct uart_icount cprev, cnow; struct hso_tiocmget *tiocmget; int ret; tiocmget = serial->tiocmget; if (!tiocmget) return -ENOENT; /* * note the counters on entry */ spin_lock_irq(&serial->serial_lock); memcpy(&cprev, &tiocmget->icount, sizeof(struct uart_icount)); spin_unlock_irq(&serial->serial_lock); add_wait_queue(&tiocmget->waitq, &wait); for (;;) { spin_lock_irq(&serial->serial_lock); memcpy(&cnow, &tiocmget->icount, sizeof(struct uart_icount)); spin_unlock_irq(&serial->serial_lock); set_current_state(TASK_INTERRUPTIBLE); if (((arg & TIOCM_RNG) && (cnow.rng != cprev.rng)) || ((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) || ((arg & TIOCM_CD) && (cnow.dcd != cprev.dcd))) { ret = 0; break; } schedule(); /* see if a signal did it */ if (signal_pending(current)) { ret = -ERESTARTSYS; break; } cprev = cnow; } __set_current_state(TASK_RUNNING); remove_wait_queue(&tiocmget->waitq, &wait); return ret; } /* * Get counter of input serial line interrupts (DCD,RI,DSR,CTS) * Return: 
write counters to the user passed counter struct * NB: both 1->0 and 0->1 transitions are counted except for * RI where only 0->1 is counted. */ static int hso_get_count(struct tty_struct *tty, struct serial_icounter_struct *icount) { struct uart_icount cnow; struct hso_serial *serial = tty->driver_data; struct hso_tiocmget *tiocmget = serial->tiocmget; memset(icount, 0, sizeof(struct serial_icounter_struct)); if (!tiocmget) return -ENOENT; spin_lock_irq(&serial->serial_lock); memcpy(&cnow, &tiocmget->icount, sizeof(struct uart_icount)); spin_unlock_irq(&serial->serial_lock); icount->cts = cnow.cts; icount->dsr = cnow.dsr; icount->rng = cnow.rng; icount->dcd = cnow.dcd; icount->rx = cnow.rx; icount->tx = cnow.tx; icount->frame = cnow.frame; icount->overrun = cnow.overrun; icount->parity = cnow.parity; icount->brk = cnow.brk; icount->buf_overrun = cnow.buf_overrun; return 0; } static int hso_serial_tiocmget(struct tty_struct *tty) { int retval; struct hso_serial *serial = tty->driver_data; struct hso_tiocmget *tiocmget; u16 UART_state_bitmap; /* sanity check */ if (!serial) { hso_dbg(0x1, "no tty structures\n"); return -EINVAL; } spin_lock_irq(&serial->serial_lock); retval = ((serial->rts_state) ? TIOCM_RTS : 0) | ((serial->dtr_state) ? TIOCM_DTR : 0); tiocmget = serial->tiocmget; if (tiocmget) { UART_state_bitmap = le16_to_cpu( tiocmget->prev_UART_state_bitmap); if (UART_state_bitmap & B_RING_SIGNAL) retval |= TIOCM_RNG; if (UART_state_bitmap & B_RX_CARRIER) retval |= TIOCM_CD; if (UART_state_bitmap & B_TX_CARRIER) retval |= TIOCM_DSR; } spin_unlock_irq(&serial->serial_lock); return retval; } static int hso_serial_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear) { int val = 0; unsigned long flags; int if_num; struct hso_serial *serial = tty->driver_data; struct usb_interface *interface; /* sanity check */ if (!serial) { hso_dbg(0x1, "no tty structures\n"); return -EINVAL; } if ((serial->parent->port_spec & HSO_PORT_MASK) != HSO_PORT_MODEM) return -EINVAL; interface = serial->parent->interface; if_num = interface->cur_altsetting->desc.bInterfaceNumber; spin_lock_irqsave(&serial->serial_lock, flags); if (set & TIOCM_RTS) serial->rts_state = 1; if (set & TIOCM_DTR) serial->dtr_state = 1; if (clear & TIOCM_RTS) serial->rts_state = 0; if (clear & TIOCM_DTR) serial->dtr_state = 0; if (serial->dtr_state) val |= 0x01; if (serial->rts_state) val |= 0x02; spin_unlock_irqrestore(&serial->serial_lock, flags); return usb_control_msg(serial->parent->usb, usb_sndctrlpipe(serial->parent->usb, 0), 0x22, 0x21, val, if_num, NULL, 0, USB_CTRL_SET_TIMEOUT); } static int hso_serial_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) { struct hso_serial *serial = tty->driver_data; int ret = 0; hso_dbg(0x8, "IOCTL cmd: %d, arg: %ld\n", cmd, arg); if (!serial) return -ENODEV; switch (cmd) { case TIOCMIWAIT: ret = hso_wait_modem_status(serial, arg); break; default: ret = -ENOIOCTLCMD; break; } return ret; } /* starts a transmit */ static void hso_kick_transmit(struct hso_serial *serial) { unsigned long flags; int res; spin_lock_irqsave(&serial->serial_lock, flags); if (!serial->tx_buffer_count) goto out; if (serial->tx_urb_used) goto out; /* Wakeup USB interface if necessary */ if (hso_get_activity(serial->parent) == -EAGAIN) goto out; /* Switch pointers around to avoid memcpy */ swap(serial->tx_buffer, serial->tx_data); serial->tx_data_count = serial->tx_buffer_count; serial->tx_buffer_count = 0; /* If serial->tx_data is set, it means we switched buffers */ if (serial->tx_data 
&& serial->write_data) { res = serial->write_data(serial); if (res >= 0) serial->tx_urb_used = 1; } out: spin_unlock_irqrestore(&serial->serial_lock, flags); } /* make a request (for reading and writing data to muxed serial port) */ static int mux_device_request(struct hso_serial *serial, u8 type, u16 port, struct urb *ctrl_urb, struct usb_ctrlrequest *ctrl_req, u8 *ctrl_urb_data, u32 size) { int result; int pipe; /* Sanity check */ if (!serial || !ctrl_urb || !ctrl_req) { pr_err("%s: Wrong arguments\n", __func__); return -EINVAL; } /* initialize */ ctrl_req->wValue = 0; ctrl_req->wIndex = cpu_to_le16(hso_port_to_mux(port)); ctrl_req->wLength = cpu_to_le16(size); if (type == USB_CDC_GET_ENCAPSULATED_RESPONSE) { /* Reading command */ ctrl_req->bRequestType = USB_DIR_IN | USB_TYPE_OPTION_VENDOR | USB_RECIP_INTERFACE; ctrl_req->bRequest = USB_CDC_GET_ENCAPSULATED_RESPONSE; pipe = usb_rcvctrlpipe(serial->parent->usb, 0); } else { /* Writing command */ ctrl_req->bRequestType = USB_DIR_OUT | USB_TYPE_OPTION_VENDOR | USB_RECIP_INTERFACE; ctrl_req->bRequest = USB_CDC_SEND_ENCAPSULATED_COMMAND; pipe = usb_sndctrlpipe(serial->parent->usb, 0); } /* syslog */ hso_dbg(0x2, "%s command (%02x) len: %d, port: %d\n", type == USB_CDC_GET_ENCAPSULATED_RESPONSE ? "Read" : "Write", ctrl_req->bRequestType, ctrl_req->wLength, port); /* Load ctrl urb */ ctrl_urb->transfer_flags = 0; usb_fill_control_urb(ctrl_urb, serial->parent->usb, pipe, (u8 *) ctrl_req, ctrl_urb_data, size, ctrl_callback, serial); /* Send it on merry way */ result = usb_submit_urb(ctrl_urb, GFP_ATOMIC); if (result) { dev_err(&ctrl_urb->dev->dev, "%s failed submit ctrl_urb %d type %d\n", __func__, result, type); return result; } /* done */ return size; } /* called by intr_callback when read occurs */ static int hso_mux_serial_read(struct hso_serial *serial) { if (!serial) return -EINVAL; /* clean data */ memset(serial->rx_data[0], 0, CTRL_URB_RX_SIZE); /* make the request */ if (serial->num_rx_urbs != 1) { dev_err(&serial->parent->interface->dev, "ERROR: mux'd reads with multiple buffers " "not possible\n"); return 0; } return mux_device_request(serial, USB_CDC_GET_ENCAPSULATED_RESPONSE, serial->parent->port_spec & HSO_PORT_MASK, serial->rx_urb[0], &serial->ctrl_req_rx, serial->rx_data[0], serial->rx_data_length); } /* used for muxed serial port callback (muxed serial read) */ static void intr_callback(struct urb *urb) { struct hso_shared_int *shared_int = urb->context; struct hso_serial *serial; unsigned char *port_req; int status = urb->status; unsigned long flags; int i; usb_mark_last_busy(urb->dev); /* sanity check */ if (!shared_int) return; /* status check */ if (status) { handle_usb_error(status, __func__, NULL); return; } hso_dbg(0x8, "--- Got intr callback 0x%02X ---\n", status); /* what request? 
*/ port_req = urb->transfer_buffer; hso_dbg(0x8, "port_req = 0x%.2X\n", *port_req); /* loop over all muxed ports to find the one sending this */ for (i = 0; i < 8; i++) { /* max 8 channels on MUX */ if (*port_req & (1 << i)) { serial = get_serial_by_shared_int_and_type(shared_int, (1 << i)); if (serial != NULL) { hso_dbg(0x1, "Pending read interrupt on port %d\n", i); spin_lock_irqsave(&serial->serial_lock, flags); if (serial->rx_state == RX_IDLE && serial->port.count > 0) { /* Setup and send a ctrl req read on * port i */ if (!serial->rx_urb_filled[0]) { serial->rx_state = RX_SENT; hso_mux_serial_read(serial); } else serial->rx_state = RX_PENDING; } else { hso_dbg(0x1, "Already a read pending on port %d or port not open\n", i); } spin_unlock_irqrestore(&serial->serial_lock, flags); } } } /* Resubmit interrupt urb */ hso_mux_submit_intr_urb(shared_int, urb->dev, GFP_ATOMIC); } /* called for writing to muxed serial port */ static int hso_mux_serial_write_data(struct hso_serial *serial) { if (NULL == serial) return -EINVAL; return mux_device_request(serial, USB_CDC_SEND_ENCAPSULATED_COMMAND, serial->parent->port_spec & HSO_PORT_MASK, serial->tx_urb, &serial->ctrl_req_tx, serial->tx_data, serial->tx_data_count); } /* write callback for Diag and CS port */ static void hso_std_serial_write_bulk_callback(struct urb *urb) { struct hso_serial *serial = urb->context; int status = urb->status; unsigned long flags; /* sanity check */ if (!serial) { hso_dbg(0x1, "serial == NULL\n"); return; } spin_lock_irqsave(&serial->serial_lock, flags); serial->tx_urb_used = 0; spin_unlock_irqrestore(&serial->serial_lock, flags); if (status) { handle_usb_error(status, __func__, serial->parent); return; } hso_put_activity(serial->parent); tty_port_tty_wakeup(&serial->port); hso_kick_transmit(serial); hso_dbg(0x1, "\n"); } /* called for writing diag or CS serial port */ static int hso_std_serial_write_data(struct hso_serial *serial) { int count = serial->tx_data_count; int result; usb_fill_bulk_urb(serial->tx_urb, serial->parent->usb, usb_sndbulkpipe(serial->parent->usb, serial->out_endp-> bEndpointAddress & 0x7F), serial->tx_data, serial->tx_data_count, hso_std_serial_write_bulk_callback, serial); result = usb_submit_urb(serial->tx_urb, GFP_ATOMIC); if (result) { dev_warn(&serial->parent->usb->dev, "Failed to submit urb - res %d\n", result); return result; } return count; } /* callback after read or write on muxed serial port */ static void ctrl_callback(struct urb *urb) { struct hso_serial *serial = urb->context; struct usb_ctrlrequest *req; int status = urb->status; unsigned long flags; /* sanity check */ if (!serial) return; spin_lock_irqsave(&serial->serial_lock, flags); serial->tx_urb_used = 0; spin_unlock_irqrestore(&serial->serial_lock, flags); if (status) { handle_usb_error(status, __func__, serial->parent); return; } /* what request? 
*/ req = (struct usb_ctrlrequest *)(urb->setup_packet); hso_dbg(0x8, "--- Got muxed ctrl callback 0x%02X ---\n", status); hso_dbg(0x8, "Actual length of urb = %d\n", urb->actual_length); DUMP1(urb->transfer_buffer, urb->actual_length); if (req->bRequestType == (USB_DIR_IN | USB_TYPE_OPTION_VENDOR | USB_RECIP_INTERFACE)) { /* response to a read command */ serial->rx_urb_filled[0] = 1; spin_lock_irqsave(&serial->serial_lock, flags); put_rxbuf_data_and_resubmit_ctrl_urb(serial); spin_unlock_irqrestore(&serial->serial_lock, flags); } else { hso_put_activity(serial->parent); tty_port_tty_wakeup(&serial->port); /* response to a write command */ hso_kick_transmit(serial); } } /* handle RX data for serial port */ static int put_rxbuf_data(struct urb *urb, struct hso_serial *serial) { struct tty_struct *tty; int count; /* Sanity check */ if (urb == NULL || serial == NULL) { hso_dbg(0x1, "serial = NULL\n"); return -2; } tty = tty_port_tty_get(&serial->port); if (tty && tty_throttled(tty)) { tty_kref_put(tty); return -1; } /* Push data to tty */ hso_dbg(0x1, "data to push to tty\n"); count = tty_buffer_request_room(&serial->port, urb->actual_length); if (count >= urb->actual_length) { tty_insert_flip_string(&serial->port, urb->transfer_buffer, urb->actual_length); tty_flip_buffer_push(&serial->port); } else { dev_warn(&serial->parent->usb->dev, "dropping data, %d bytes lost\n", urb->actual_length); } tty_kref_put(tty); serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 0; return 0; } /* Base driver functions */ static void hso_log_port(struct hso_device *hso_dev) { char *port_type; char port_dev[20]; switch (hso_dev->port_spec & HSO_PORT_MASK) { case HSO_PORT_CONTROL: port_type = "Control"; break; case HSO_PORT_APP: port_type = "Application"; break; case HSO_PORT_GPS: port_type = "GPS"; break; case HSO_PORT_GPS_CONTROL: port_type = "GPS control"; break; case HSO_PORT_APP2: port_type = "Application2"; break; case HSO_PORT_PCSC: port_type = "PCSC"; break; case HSO_PORT_DIAG: port_type = "Diagnostic"; break; case HSO_PORT_DIAG2: port_type = "Diagnostic2"; break; case HSO_PORT_MODEM: port_type = "Modem"; break; case HSO_PORT_NETWORK: port_type = "Network"; break; default: port_type = "Unknown"; break; } if ((hso_dev->port_spec & HSO_PORT_MASK) == HSO_PORT_NETWORK) { sprintf(port_dev, "%s", dev2net(hso_dev)->net->name); } else sprintf(port_dev, "/dev/%s%d", tty_filename, dev2ser(hso_dev)->minor); dev_dbg(&hso_dev->interface->dev, "HSO: Found %s port %s\n", port_type, port_dev); } static int hso_start_net_device(struct hso_device *hso_dev) { int i, result = 0; struct hso_net *hso_net = dev2net(hso_dev); if (!hso_net) return -ENODEV; /* send URBs for all read buffers */ for (i = 0; i < MUX_BULK_RX_BUF_COUNT; i++) { /* Prep a receive URB */ usb_fill_bulk_urb(hso_net->mux_bulk_rx_urb_pool[i], hso_dev->usb, usb_rcvbulkpipe(hso_dev->usb, hso_net->in_endp-> bEndpointAddress & 0x7F), hso_net->mux_bulk_rx_buf_pool[i], MUX_BULK_RX_BUF_SIZE, read_bulk_callback, hso_net); /* Put it out there so the device can send us stuff */ result = usb_submit_urb(hso_net->mux_bulk_rx_urb_pool[i], GFP_NOIO); if (result) dev_warn(&hso_dev->usb->dev, "%s failed mux_bulk_rx_urb[%d] %d\n", __func__, i, result); } return result; } static int hso_stop_net_device(struct hso_device *hso_dev) { int i; struct hso_net *hso_net = dev2net(hso_dev); if (!hso_net) return -ENODEV; for (i = 0; i < MUX_BULK_RX_BUF_COUNT; i++) { if (hso_net->mux_bulk_rx_urb_pool[i]) usb_kill_urb(hso_net->mux_bulk_rx_urb_pool[i]); } if (hso_net->mux_bulk_tx_urb) 
usb_kill_urb(hso_net->mux_bulk_tx_urb); return 0; } static int hso_start_serial_device(struct hso_device *hso_dev, gfp_t flags) { int i, result = 0; struct hso_serial *serial = dev2ser(hso_dev); if (!serial) return -ENODEV; /* If it is not the MUX port fill in and submit a bulk urb (already * allocated in hso_serial_start) */ if (!(serial->parent->port_spec & HSO_INTF_MUX)) { for (i = 0; i < serial->num_rx_urbs; i++) { usb_fill_bulk_urb(serial->rx_urb[i], serial->parent->usb, usb_rcvbulkpipe(serial->parent->usb, serial->in_endp-> bEndpointAddress & 0x7F), serial->rx_data[i], serial->rx_data_length, hso_std_serial_read_bulk_callback, serial); result = usb_submit_urb(serial->rx_urb[i], flags); if (result) { dev_warn(&serial->parent->usb->dev, "Failed to submit urb - res %d\n", result); break; } } } else { mutex_lock(&serial->shared_int->shared_int_lock); if (!serial->shared_int->use_count) { result = hso_mux_submit_intr_urb(serial->shared_int, hso_dev->usb, flags); } serial->shared_int->use_count++; mutex_unlock(&serial->shared_int->shared_int_lock); } if (serial->tiocmget) tiocmget_submit_urb(serial, serial->tiocmget, serial->parent->usb); return result; } static int hso_stop_serial_device(struct hso_device *hso_dev) { int i; struct hso_serial *serial = dev2ser(hso_dev); struct hso_tiocmget *tiocmget; if (!serial) return -ENODEV; for (i = 0; i < serial->num_rx_urbs; i++) { if (serial->rx_urb[i]) { usb_kill_urb(serial->rx_urb[i]); serial->rx_urb_filled[i] = 0; } } serial->curr_rx_urb_idx = 0; if (serial->tx_urb) usb_kill_urb(serial->tx_urb); if (serial->shared_int) { mutex_lock(&serial->shared_int->shared_int_lock); if (serial->shared_int->use_count && (--serial->shared_int->use_count == 0)) { struct urb *urb; urb = serial->shared_int->shared_intr_urb; if (urb) usb_kill_urb(urb); } mutex_unlock(&serial->shared_int->shared_int_lock); } tiocmget = serial->tiocmget; if (tiocmget) { wake_up_interruptible(&tiocmget->waitq); usb_kill_urb(tiocmget->urb); } return 0; } static void hso_serial_tty_unregister(struct hso_serial *serial) { tty_unregister_device(tty_drv, serial->minor); release_minor(serial); } static void hso_serial_common_free(struct hso_serial *serial) { int i; for (i = 0; i < serial->num_rx_urbs; i++) { /* unlink and free RX URB */ usb_free_urb(serial->rx_urb[i]); /* free the RX buffer */ kfree(serial->rx_data[i]); } /* unlink and free TX URB */ usb_free_urb(serial->tx_urb); kfree(serial->tx_buffer); kfree(serial->tx_data); tty_port_destroy(&serial->port); } static int hso_serial_common_create(struct hso_serial *serial, int num_urbs, int rx_size, int tx_size) { int i; tty_port_init(&serial->port); if (obtain_minor(serial)) goto exit2; /* register our minor number */ serial->parent->dev = tty_port_register_device_attr(&serial->port, tty_drv, serial->minor, &serial->parent->interface->dev, serial->parent, hso_serial_dev_groups); if (IS_ERR(serial->parent->dev)) { release_minor(serial); goto exit2; } serial->magic = HSO_SERIAL_MAGIC; spin_lock_init(&serial->serial_lock); serial->num_rx_urbs = num_urbs; /* RX, allocate urb and initialize */ /* prepare our RX buffer */ serial->rx_data_length = rx_size; for (i = 0; i < serial->num_rx_urbs; i++) { serial->rx_urb[i] = usb_alloc_urb(0, GFP_KERNEL); if (!serial->rx_urb[i]) goto exit; serial->rx_urb[i]->transfer_buffer = NULL; serial->rx_urb[i]->transfer_buffer_length = 0; serial->rx_data[i] = kzalloc(serial->rx_data_length, GFP_KERNEL); if (!serial->rx_data[i]) goto exit; } /* TX, allocate urb and initialize */ serial->tx_urb = usb_alloc_urb(0, 
GFP_KERNEL); if (!serial->tx_urb) goto exit; serial->tx_urb->transfer_buffer = NULL; serial->tx_urb->transfer_buffer_length = 0; /* prepare our TX buffer */ serial->tx_data_count = 0; serial->tx_buffer_count = 0; serial->tx_data_length = tx_size; serial->tx_data = kzalloc(serial->tx_data_length, GFP_KERNEL); if (!serial->tx_data) goto exit; serial->tx_buffer = kzalloc(serial->tx_data_length, GFP_KERNEL); if (!serial->tx_buffer) goto exit; return 0; exit: hso_serial_tty_unregister(serial); exit2: hso_serial_common_free(serial); return -1; } /* Creates a general hso device */ static struct hso_device *hso_create_device(struct usb_interface *intf, int port_spec) { struct hso_device *hso_dev; hso_dev = kzalloc(sizeof(*hso_dev), GFP_KERNEL); if (!hso_dev) return NULL; hso_dev->port_spec = port_spec; hso_dev->usb = interface_to_usbdev(intf); hso_dev->interface = intf; kref_init(&hso_dev->ref); mutex_init(&hso_dev->mutex); INIT_WORK(&hso_dev->async_get_intf, async_get_intf); INIT_WORK(&hso_dev->async_put_intf, async_put_intf); return hso_dev; } /* Removes a network device in the network device table */ static int remove_net_device(struct hso_device *hso_dev) { int i; for (i = 0; i < HSO_MAX_NET_DEVICES; i++) { if (network_table[i] == hso_dev) { network_table[i] = NULL; break; } } if (i == HSO_MAX_NET_DEVICES) return -1; return 0; } /* Frees our network device */ static void hso_free_net_device(struct hso_device *hso_dev) { int i; struct hso_net *hso_net = dev2net(hso_dev); if (!hso_net) return; remove_net_device(hso_net->parent); if (hso_net->net) unregister_netdev(hso_net->net); /* start freeing */ for (i = 0; i < MUX_BULK_RX_BUF_COUNT; i++) { usb_free_urb(hso_net->mux_bulk_rx_urb_pool[i]); kfree(hso_net->mux_bulk_rx_buf_pool[i]); hso_net->mux_bulk_rx_buf_pool[i] = NULL; } usb_free_urb(hso_net->mux_bulk_tx_urb); kfree(hso_net->mux_bulk_tx_buf); hso_net->mux_bulk_tx_buf = NULL; if (hso_net->net) free_netdev(hso_net->net); kfree(hso_dev); } static const struct net_device_ops hso_netdev_ops = { .ndo_open = hso_net_open, .ndo_stop = hso_net_close, .ndo_start_xmit = hso_net_start_xmit, .ndo_tx_timeout = hso_net_tx_timeout, }; /* initialize the network interface */ static void hso_net_init(struct net_device *net) { struct hso_net *hso_net = netdev_priv(net); hso_dbg(0x1, "sizeof hso_net is %zu\n", sizeof(*hso_net)); /* fill in the other fields */ net->netdev_ops = &hso_netdev_ops; net->watchdog_timeo = HSO_NET_TX_TIMEOUT; net->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; net->type = ARPHRD_NONE; net->mtu = DEFAULT_MTU - 14; net->tx_queue_len = 10; net->ethtool_ops = &ops; /* and initialize the semaphore */ spin_lock_init(&hso_net->net_lock); } /* Adds a network device in the network device table */ static int add_net_device(struct hso_device *hso_dev) { int i; for (i = 0; i < HSO_MAX_NET_DEVICES; i++) { if (network_table[i] == NULL) { network_table[i] = hso_dev; break; } } if (i == HSO_MAX_NET_DEVICES) return -1; return 0; } static int hso_rfkill_set_block(void *data, bool blocked) { struct hso_device *hso_dev = data; int enabled = !blocked; int rv; mutex_lock(&hso_dev->mutex); if (hso_dev->usb_gone) rv = 0; else rv = usb_control_msg(hso_dev->usb, usb_sndctrlpipe(hso_dev->usb, 0), enabled ? 
0x82 : 0x81, 0x40, 0, 0, NULL, 0, USB_CTRL_SET_TIMEOUT); mutex_unlock(&hso_dev->mutex); return rv; } static const struct rfkill_ops hso_rfkill_ops = { .set_block = hso_rfkill_set_block, }; /* Creates and sets up everything for rfkill */ static void hso_create_rfkill(struct hso_device *hso_dev, struct usb_interface *interface) { struct hso_net *hso_net = dev2net(hso_dev); struct device *dev = &hso_net->net->dev; static u32 rfkill_counter; snprintf(hso_net->name, sizeof(hso_net->name), "hso-%d", rfkill_counter++); hso_net->rfkill = rfkill_alloc(hso_net->name, &interface_to_usbdev(interface)->dev, RFKILL_TYPE_WWAN, &hso_rfkill_ops, hso_dev); if (!hso_net->rfkill) return; if (rfkill_register(hso_net->rfkill) < 0) { rfkill_destroy(hso_net->rfkill); hso_net->rfkill = NULL; dev_err(dev, "%s - Failed to register rfkill\n", __func__); return; } } static const struct device_type hso_type = { .name = "wwan", }; /* Creates our network device */ static struct hso_device *hso_create_net_device(struct usb_interface *interface, int port_spec) { int result, i; struct net_device *net; struct hso_net *hso_net; struct hso_device *hso_dev; hso_dev = hso_create_device(interface, port_spec); if (!hso_dev) return NULL; /* allocate our network device, then we can put in our private data */ /* call hso_net_init to do the basic initialization */ net = alloc_netdev(sizeof(struct hso_net), "hso%d", NET_NAME_UNKNOWN, hso_net_init); if (!net) { dev_err(&interface->dev, "Unable to create ethernet device\n"); goto err_hso_dev; } hso_net = netdev_priv(net); hso_dev->port_data.dev_net = hso_net; hso_net->net = net; hso_net->parent = hso_dev; hso_net->in_endp = hso_get_ep(interface, USB_ENDPOINT_XFER_BULK, USB_DIR_IN); if (!hso_net->in_endp) { dev_err(&interface->dev, "Can't find BULK IN endpoint\n"); goto err_net; } hso_net->out_endp = hso_get_ep(interface, USB_ENDPOINT_XFER_BULK, USB_DIR_OUT); if (!hso_net->out_endp) { dev_err(&interface->dev, "Can't find BULK OUT endpoint\n"); goto err_net; } SET_NETDEV_DEV(net, &interface->dev); SET_NETDEV_DEVTYPE(net, &hso_type); /* start allocating */ for (i = 0; i < MUX_BULK_RX_BUF_COUNT; i++) { hso_net->mux_bulk_rx_urb_pool[i] = usb_alloc_urb(0, GFP_KERNEL); if (!hso_net->mux_bulk_rx_urb_pool[i]) goto err_mux_bulk_rx; hso_net->mux_bulk_rx_buf_pool[i] = kzalloc(MUX_BULK_RX_BUF_SIZE, GFP_KERNEL); if (!hso_net->mux_bulk_rx_buf_pool[i]) goto err_mux_bulk_rx; } hso_net->mux_bulk_tx_urb = usb_alloc_urb(0, GFP_KERNEL); if (!hso_net->mux_bulk_tx_urb) goto err_mux_bulk_rx; hso_net->mux_bulk_tx_buf = kzalloc(MUX_BULK_TX_BUF_SIZE, GFP_KERNEL); if (!hso_net->mux_bulk_tx_buf) goto err_free_tx_urb; result = add_net_device(hso_dev); if (result) { dev_err(&interface->dev, "Failed to add net device\n"); goto err_free_tx_buf; } /* registering our net device */ result = register_netdev(net); if (result) { dev_err(&interface->dev, "Failed to register device\n"); goto err_rmv_ndev; } hso_log_port(hso_dev); hso_create_rfkill(hso_dev, interface); return hso_dev; err_rmv_ndev: remove_net_device(hso_dev); err_free_tx_buf: kfree(hso_net->mux_bulk_tx_buf); err_free_tx_urb: usb_free_urb(hso_net->mux_bulk_tx_urb); err_mux_bulk_rx: for (i = 0; i < MUX_BULK_RX_BUF_COUNT; i++) { usb_free_urb(hso_net->mux_bulk_rx_urb_pool[i]); kfree(hso_net->mux_bulk_rx_buf_pool[i]); } err_net: free_netdev(net); err_hso_dev: kfree(hso_dev); return NULL; } static void hso_free_tiomget(struct hso_serial *serial) { struct hso_tiocmget *tiocmget; if (!serial) return; tiocmget = serial->tiocmget; if (tiocmget) { 
usb_free_urb(tiocmget->urb); tiocmget->urb = NULL; serial->tiocmget = NULL; kfree(tiocmget->serial_state_notification); tiocmget->serial_state_notification = NULL; kfree(tiocmget); } } /* Frees an AT channel ( goes for both mux and non-mux ) */ static void hso_free_serial_device(struct hso_device *hso_dev) { struct hso_serial *serial = dev2ser(hso_dev); if (!serial) return; hso_serial_common_free(serial); if (serial->shared_int) { mutex_lock(&serial->shared_int->shared_int_lock); if (--serial->shared_int->ref_count == 0) hso_free_shared_int(serial->shared_int); else mutex_unlock(&serial->shared_int->shared_int_lock); } hso_free_tiomget(serial); kfree(serial); kfree(hso_dev); } /* Creates a bulk AT channel */ static struct hso_device *hso_create_bulk_serial_device( struct usb_interface *interface, int port) { struct hso_device *hso_dev; struct hso_serial *serial; int num_urbs; struct hso_tiocmget *tiocmget; hso_dev = hso_create_device(interface, port); if (!hso_dev) return NULL; serial = kzalloc(sizeof(*serial), GFP_KERNEL); if (!serial) goto exit; serial->parent = hso_dev; hso_dev->port_data.dev_serial = serial; if ((port & HSO_PORT_MASK) == HSO_PORT_MODEM) { num_urbs = 2; serial->tiocmget = kzalloc(sizeof(struct hso_tiocmget), GFP_KERNEL); if (!serial->tiocmget) goto exit; serial->tiocmget->serial_state_notification = kzalloc(sizeof(struct hso_serial_state_notification), GFP_KERNEL); if (!serial->tiocmget->serial_state_notification) goto exit; tiocmget = serial->tiocmget; tiocmget->endp = hso_get_ep(interface, USB_ENDPOINT_XFER_INT, USB_DIR_IN); if (!tiocmget->endp) { dev_err(&interface->dev, "Failed to find INT IN ep\n"); goto exit; } tiocmget->urb = usb_alloc_urb(0, GFP_KERNEL); if (!tiocmget->urb) goto exit; mutex_init(&tiocmget->mutex); init_waitqueue_head(&tiocmget->waitq); } else { num_urbs = 1; } if (hso_serial_common_create(serial, num_urbs, BULK_URB_RX_SIZE, BULK_URB_TX_SIZE)) goto exit; serial->in_endp = hso_get_ep(interface, USB_ENDPOINT_XFER_BULK, USB_DIR_IN); if (!serial->in_endp) { dev_err(&interface->dev, "Failed to find BULK IN ep\n"); goto exit2; } if (! 
(serial->out_endp = hso_get_ep(interface, USB_ENDPOINT_XFER_BULK, USB_DIR_OUT))) { dev_err(&interface->dev, "Failed to find BULK OUT ep\n"); goto exit2; } serial->write_data = hso_std_serial_write_data; /* setup the proc dirs and files if needed */ hso_log_port(hso_dev); /* done, return it */ return hso_dev; exit2: hso_serial_tty_unregister(serial); hso_serial_common_free(serial); exit: hso_free_tiomget(serial); kfree(serial); kfree(hso_dev); return NULL; } /* Creates a multiplexed AT channel */ static struct hso_device *hso_create_mux_serial_device(struct usb_interface *interface, int port, struct hso_shared_int *mux) { struct hso_device *hso_dev; struct hso_serial *serial; int port_spec; port_spec = HSO_INTF_MUX; port_spec &= ~HSO_PORT_MASK; port_spec |= hso_mux_to_port(port); if ((port_spec & HSO_PORT_MASK) == HSO_PORT_NO_PORT) return NULL; hso_dev = hso_create_device(interface, port_spec); if (!hso_dev) return NULL; serial = kzalloc(sizeof(*serial), GFP_KERNEL); if (!serial) goto err_free_dev; hso_dev->port_data.dev_serial = serial; serial->parent = hso_dev; if (hso_serial_common_create (serial, 1, CTRL_URB_RX_SIZE, CTRL_URB_TX_SIZE)) goto err_free_serial; serial->tx_data_length--; serial->write_data = hso_mux_serial_write_data; serial->shared_int = mux; mutex_lock(&serial->shared_int->shared_int_lock); serial->shared_int->ref_count++; mutex_unlock(&serial->shared_int->shared_int_lock); /* setup the proc dirs and files if needed */ hso_log_port(hso_dev); /* done, return it */ return hso_dev; err_free_serial: kfree(serial); err_free_dev: kfree(hso_dev); return NULL; } static void hso_free_shared_int(struct hso_shared_int *mux) { usb_free_urb(mux->shared_intr_urb); kfree(mux->shared_intr_buf); mutex_unlock(&mux->shared_int_lock); kfree(mux); } static struct hso_shared_int *hso_create_shared_int(struct usb_interface *interface) { struct hso_shared_int *mux = kzalloc(sizeof(*mux), GFP_KERNEL); if (!mux) return NULL; mux->intr_endp = hso_get_ep(interface, USB_ENDPOINT_XFER_INT, USB_DIR_IN); if (!mux->intr_endp) { dev_err(&interface->dev, "Can't find INT IN endpoint\n"); goto exit; } mux->shared_intr_urb = usb_alloc_urb(0, GFP_KERNEL); if (!mux->shared_intr_urb) goto exit; mux->shared_intr_buf = kzalloc(le16_to_cpu(mux->intr_endp->wMaxPacketSize), GFP_KERNEL); if (!mux->shared_intr_buf) goto exit; mutex_init(&mux->shared_int_lock); return mux; exit: kfree(mux->shared_intr_buf); usb_free_urb(mux->shared_intr_urb); kfree(mux); return NULL; } /* Gets the port spec for a certain interface */ static int hso_get_config_data(struct usb_interface *interface) { struct usb_device *usbdev = interface_to_usbdev(interface); u8 *config_data = kmalloc(17, GFP_KERNEL); u32 if_num = interface->cur_altsetting->desc.bInterfaceNumber; s32 result; if (!config_data) return -ENOMEM; if (usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 0x86, 0xC0, 0, 0, config_data, 17, USB_CTRL_SET_TIMEOUT) != 0x11) { kfree(config_data); return -EIO; } /* check if we have a valid interface */ if (if_num > 16) { kfree(config_data); return -EINVAL; } switch (config_data[if_num]) { case 0x0: result = 0; break; case 0x1: result = HSO_PORT_DIAG; break; case 0x2: result = HSO_PORT_GPS; break; case 0x3: result = HSO_PORT_GPS_CONTROL; break; case 0x4: result = HSO_PORT_APP; break; case 0x5: result = HSO_PORT_APP2; break; case 0x6: result = HSO_PORT_CONTROL; break; case 0x7: result = HSO_PORT_NETWORK; break; case 0x8: result = HSO_PORT_MODEM; break; case 0x9: result = HSO_PORT_MSD; break; case 0xa: result = HSO_PORT_PCSC; break; case 
0xb: result = HSO_PORT_VOICE; break; default: result = 0; } if (result) result |= HSO_INTF_BULK; if (config_data[16] & 0x1) result |= HSO_INFO_CRC_BUG; kfree(config_data); return result; } /* called once for each interface upon device insertion */ static int hso_probe(struct usb_interface *interface, const struct usb_device_id *id) { int mux, i, if_num, port_spec; unsigned char port_mask; struct hso_device *hso_dev = NULL; struct hso_shared_int *shared_int; struct hso_device *tmp_dev = NULL; if (interface->cur_altsetting->desc.bInterfaceClass != 0xFF) { dev_err(&interface->dev, "Not our interface\n"); return -ENODEV; } if_num = interface->cur_altsetting->desc.bInterfaceNumber; /* Get the interface/port specification from either driver_info or from * the device itself */ if (id->driver_info) { /* if_num is controlled by the device, driver_info is a 0 terminated * array. Make sure, the access is in bounds! */ for (i = 0; i <= if_num; ++i) if (((u32 *)(id->driver_info))[i] == 0) goto exit; port_spec = ((u32 *)(id->driver_info))[if_num]; } else { port_spec = hso_get_config_data(interface); if (port_spec < 0) goto exit; } /* Check if we need to switch to alt interfaces prior to port * configuration */ if (interface->num_altsetting > 1) usb_set_interface(interface_to_usbdev(interface), if_num, 1); interface->needs_remote_wakeup = 1; /* Allocate new hso device(s) */ switch (port_spec & HSO_INTF_MASK) { case HSO_INTF_MUX: if ((port_spec & HSO_PORT_MASK) == HSO_PORT_NETWORK) { /* Create the network device */ if (!disable_net) { hso_dev = hso_create_net_device(interface, port_spec); if (!hso_dev) goto exit; tmp_dev = hso_dev; } } if (hso_get_mux_ports(interface, &port_mask)) /* TODO: de-allocate everything */ goto exit; shared_int = hso_create_shared_int(interface); if (!shared_int) goto exit; for (i = 1, mux = 0; i < 0x100; i = i << 1, mux++) { if (port_mask & i) { hso_dev = hso_create_mux_serial_device( interface, i, shared_int); if (!hso_dev) goto exit; } } if (tmp_dev) hso_dev = tmp_dev; break; case HSO_INTF_BULK: /* It's a regular bulk interface */ if ((port_spec & HSO_PORT_MASK) == HSO_PORT_NETWORK) { if (!disable_net) hso_dev = hso_create_net_device(interface, port_spec); } else { hso_dev = hso_create_bulk_serial_device(interface, port_spec); } if (!hso_dev) goto exit; break; default: goto exit; } /* save our data pointer in this device */ usb_set_intfdata(interface, hso_dev); /* done */ return 0; exit: hso_free_interface(interface); return -ENODEV; } /* device removed, cleaning up */ static void hso_disconnect(struct usb_interface *interface) { hso_free_interface(interface); /* remove reference of our private data */ usb_set_intfdata(interface, NULL); } static void async_get_intf(struct work_struct *data) { struct hso_device *hso_dev = container_of(data, struct hso_device, async_get_intf); usb_autopm_get_interface(hso_dev->interface); } static void async_put_intf(struct work_struct *data) { struct hso_device *hso_dev = container_of(data, struct hso_device, async_put_intf); usb_autopm_put_interface(hso_dev->interface); } static int hso_get_activity(struct hso_device *hso_dev) { if (hso_dev->usb->state == USB_STATE_SUSPENDED) { if (!hso_dev->is_active) { hso_dev->is_active = 1; schedule_work(&hso_dev->async_get_intf); } } if (hso_dev->usb->state != USB_STATE_CONFIGURED) return -EAGAIN; usb_mark_last_busy(hso_dev->usb); return 0; } static int hso_put_activity(struct hso_device *hso_dev) { if (hso_dev->usb->state != USB_STATE_SUSPENDED) { if (hso_dev->is_active) { hso_dev->is_active = 0; 
schedule_work(&hso_dev->async_put_intf); return -EAGAIN; } } hso_dev->is_active = 0; return 0; } /* called by kernel when we need to suspend device */ static int hso_suspend(struct usb_interface *iface, pm_message_t message) { int i, result; /* Stop all serial ports */ for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) { if (serial_table[i] && (serial_table[i]->interface == iface)) { result = hso_stop_serial_device(serial_table[i]); if (result) goto out; } } /* Stop all network ports */ for (i = 0; i < HSO_MAX_NET_DEVICES; i++) { if (network_table[i] && (network_table[i]->interface == iface)) { result = hso_stop_net_device(network_table[i]); if (result) goto out; } } out: return 0; } /* called by kernel when we need to resume device */ static int hso_resume(struct usb_interface *iface) { int i, result = 0; struct hso_net *hso_net; /* Start all serial ports */ for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) { if (serial_table[i] && (serial_table[i]->interface == iface)) { if (dev2ser(serial_table[i])->port.count) { result = hso_start_serial_device(serial_table[i], GFP_NOIO); hso_kick_transmit(dev2ser(serial_table[i])); if (result) goto out; } } } /* Start all network ports */ for (i = 0; i < HSO_MAX_NET_DEVICES; i++) { if (network_table[i] && (network_table[i]->interface == iface)) { hso_net = dev2net(network_table[i]); if (hso_net->flags & IFF_UP) { /* First transmit any lingering data, then restart the device. */ if (hso_net->skb_tx_buf) { dev_dbg(&iface->dev, "Transmitting" " lingering data\n"); hso_net_start_xmit(hso_net->skb_tx_buf, hso_net->net); hso_net->skb_tx_buf = NULL; } result = hso_start_net_device(network_table[i]); if (result) goto out; } } } out: return result; } static void hso_serial_ref_free(struct kref *ref) { struct hso_device *hso_dev = container_of(ref, struct hso_device, ref); hso_free_serial_device(hso_dev); } static void hso_free_interface(struct usb_interface *interface) { struct hso_serial *serial; int i; for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) { if (serial_table[i] && (serial_table[i]->interface == interface)) { serial = dev2ser(serial_table[i]); tty_port_tty_hangup(&serial->port, false); mutex_lock(&serial->parent->mutex); serial->parent->usb_gone = 1; mutex_unlock(&serial->parent->mutex); cancel_work_sync(&serial_table[i]->async_put_intf); cancel_work_sync(&serial_table[i]->async_get_intf); hso_serial_tty_unregister(serial); kref_put(&serial->parent->ref, hso_serial_ref_free); } } for (i = 0; i < HSO_MAX_NET_DEVICES; i++) { if (network_table[i] && (network_table[i]->interface == interface)) { struct rfkill *rfk = dev2net(network_table[i])->rfkill; /* hso_stop_net_device doesn't stop the net queue since * traffic needs to start it again when suspended */ netif_stop_queue(dev2net(network_table[i])->net); hso_stop_net_device(network_table[i]); cancel_work_sync(&network_table[i]->async_put_intf); cancel_work_sync(&network_table[i]->async_get_intf); if (rfk) { rfkill_unregister(rfk); rfkill_destroy(rfk); } hso_free_net_device(network_table[i]); } } } /* Helper functions */ /* Get the endpoint ! 
*/ static struct usb_endpoint_descriptor *hso_get_ep(struct usb_interface *intf, int type, int dir) { int i; struct usb_host_interface *iface = intf->cur_altsetting; struct usb_endpoint_descriptor *endp; for (i = 0; i < iface->desc.bNumEndpoints; i++) { endp = &iface->endpoint[i].desc; if (((endp->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == dir) && (usb_endpoint_type(endp) == type)) return endp; } return NULL; } /* Get the byte that describes which ports are enabled */ static int hso_get_mux_ports(struct usb_interface *intf, unsigned char *ports) { int i; struct usb_host_interface *iface = intf->cur_altsetting; if (iface->extralen == 3) { *ports = iface->extra[2]; return 0; } for (i = 0; i < iface->desc.bNumEndpoints; i++) { if (iface->endpoint[i].extralen == 3) { *ports = iface->endpoint[i].extra[2]; return 0; } } return -1; } /* interrupt urb needs to be submitted, used for serial read of muxed port */ static int hso_mux_submit_intr_urb(struct hso_shared_int *shared_int, struct usb_device *usb, gfp_t gfp) { int result; usb_fill_int_urb(shared_int->shared_intr_urb, usb, usb_rcvintpipe(usb, shared_int->intr_endp->bEndpointAddress & 0x7F), shared_int->shared_intr_buf, 1, intr_callback, shared_int, shared_int->intr_endp->bInterval); result = usb_submit_urb(shared_int->shared_intr_urb, gfp); if (result) dev_warn(&usb->dev, "%s failed mux_intr_urb %d\n", __func__, result); return result; } /* operations setup of the serial interface */ static const struct tty_operations hso_serial_ops = { .open = hso_serial_open, .close = hso_serial_close, .write = hso_serial_write, .write_room = hso_serial_write_room, .cleanup = hso_serial_cleanup, .ioctl = hso_serial_ioctl, .set_termios = hso_serial_set_termios, .chars_in_buffer = hso_serial_chars_in_buffer, .tiocmget = hso_serial_tiocmget, .tiocmset = hso_serial_tiocmset, .get_icount = hso_get_count, .unthrottle = hso_unthrottle }; static struct usb_driver hso_driver = { .name = driver_name, .probe = hso_probe, .disconnect = hso_disconnect, .id_table = hso_ids, .suspend = hso_suspend, .resume = hso_resume, .reset_resume = hso_resume, .supports_autosuspend = 1, .disable_hub_initiated_lpm = 1, }; static int __init hso_init(void) { int result; /* allocate our driver using the proper amount of supported minors */ tty_drv = tty_alloc_driver(HSO_SERIAL_TTY_MINORS, TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV); if (IS_ERR(tty_drv)) return PTR_ERR(tty_drv); /* fill in all needed values */ tty_drv->driver_name = driver_name; tty_drv->name = tty_filename; /* if major number is provided as parameter, use that one */ if (tty_major) tty_drv->major = tty_major; tty_drv->minor_start = 0; tty_drv->type = TTY_DRIVER_TYPE_SERIAL; tty_drv->subtype = SERIAL_TYPE_NORMAL; tty_drv->init_termios = tty_std_termios; hso_init_termios(&tty_drv->init_termios); tty_set_operations(tty_drv, &hso_serial_ops); /* register the tty driver */ result = tty_register_driver(tty_drv); if (result) { pr_err("%s - tty_register_driver failed(%d)\n", __func__, result); goto err_free_tty; } /* register this module as an usb driver */ result = usb_register(&hso_driver); if (result) { pr_err("Could not register hso driver - error: %d\n", result); goto err_unreg_tty; } /* done */ return 0; err_unreg_tty: tty_unregister_driver(tty_drv); err_free_tty: tty_driver_kref_put(tty_drv); return result; } static void __exit hso_exit(void) { tty_unregister_driver(tty_drv); /* deregister the usb driver */ usb_deregister(&hso_driver); tty_driver_kref_put(tty_drv); } /* Module definitions */ module_init(hso_init); 
module_exit(hso_exit); MODULE_AUTHOR(MOD_AUTHOR); MODULE_DESCRIPTION(MOD_DESCRIPTION); MODULE_LICENSE("GPL"); /* change the debug level (eg: insmod hso.ko debug=0x04) */ MODULE_PARM_DESC(debug, "debug level mask [0x01 | 0x02 | 0x04 | 0x08 | 0x10]"); module_param(debug, int, 0644); /* set the major tty number (eg: insmod hso.ko tty_major=245) */ MODULE_PARM_DESC(tty_major, "Set the major tty number"); module_param(tty_major, int, 0644); /* disable network interface (eg: insmod hso.ko disable_net=1) */ MODULE_PARM_DESC(disable_net, "Disable the network interface"); module_param(disable_net, int, 0644);
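/*
 * Editor's note: hso_init() above registers the tty driver first and the USB
 * driver second, unwinding in reverse order when a later step fails.  The
 * stand-alone, user-space sketch below illustrates that goto-based unwind
 * idiom only; it is not part of hso.c, the resource names (demo_ctx,
 * demo_init, demo_exit) are hypothetical, and it assumes nothing beyond the
 * standard C library.
 */
#include <stdio.h>
#include <stdlib.h>

struct demo_ctx {
	void *tty_like;		/* stands in for the registered tty driver */
	void *usb_like;		/* stands in for the registered usb driver */
};

static int demo_init(struct demo_ctx *ctx)
{
	ctx->tty_like = malloc(16);	/* step 1: acquire the first resource */
	if (!ctx->tty_like)
		goto err_out;

	ctx->usb_like = malloc(16);	/* step 2: acquire the second resource */
	if (!ctx->usb_like)
		goto err_free_tty;

	return 0;			/* success: both resources stay held */

err_free_tty:
	free(ctx->tty_like);		/* undo step 1 only */
err_out:
	return -1;
}

static void demo_exit(struct demo_ctx *ctx)
{
	free(ctx->usb_like);		/* release in reverse acquisition order */
	free(ctx->tty_like);
}

int main(void)
{
	struct demo_ctx ctx = { 0 };

	if (demo_init(&ctx)) {
		fprintf(stderr, "init failed\n");
		return EXIT_FAILURE;
	}
	demo_exit(&ctx);
	return EXIT_SUCCESS;
}
/*
 * Each error label releases exactly what was acquired before the failing
 * step, so a failure at any point leaves no resource behind.
 */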
3620 3621 3622 3623 3624 3625 3626 3627 3628 3629 3630 3631 3632 3633 3634 3635 3636 3637 3638 3639 3640 3641 3642 3643 3644 3645 3646 3647 3648 3649 3650 3651 3652 3653 3654 3655 3656 3657 3658 3659 3660 3661 3662 3663 3664 3665 3666 3667 3668 3669 3670 3671 3672 3673 3674 3675 3676 3677 3678 3679 3680 3681 3682 3683 3684 3685 3686 3687 3688 3689 3690 3691 3692 3693 3694 3695 3696 3697 3698 3699 3700 3701 3702 3703 3704 3705 3706 3707 3708 3709 3710 3711 3712 3713 3714 3715 3716 3717 3718 3719 3720 3721 3722 3723 3724 3725 3726 3727 3728 3729 3730 3731 3732 3733 3734 3735 3736 3737 3738 3739 3740 3741 3742 3743 3744 3745 3746 3747 3748 3749 3750 3751 3752 3753 3754 3755 3756 3757 3758 3759 3760 3761 3762 3763 3764 3765 3766 3767 3768 3769 3770 3771 3772 3773 3774 3775 3776 3777 3778 3779 3780 3781 3782 3783 3784 3785 3786 3787 3788 3789 3790 3791 3792 3793 3794 3795 3796 3797 3798 3799 3800 3801 3802 3803 3804 3805 3806 3807 3808 3809 3810 3811 3812 3813 3814 3815 3816 3817 3818 3819 3820 3821 3822 3823 3824 3825 3826 3827 3828 3829 3830 3831 3832 3833 3834 3835 3836 3837 3838 3839 3840 3841 3842 3843 3844 3845 3846 3847 3848 3849 3850 3851 3852 3853 3854 3855 3856 3857 3858 3859 3860 3861 3862 3863 3864 3865 3866 3867 3868 3869 3870 3871 3872 3873 3874 3875 3876 3877 3878 3879 3880 3881 3882 3883 3884 3885 3886 3887 3888 3889 3890 3891 3892 3893 3894 3895 3896 3897 3898 3899 3900 3901 3902 3903 3904 3905 3906 3907 3908 3909 3910 3911 3912 3913 3914 3915 3916 3917 3918 3919 3920 3921 3922 3923 3924 3925 3926 3927 3928 3929 3930 3931 3932 3933 3934 3935 3936 3937 3938 3939 3940 3941 3942 3943 3944 3945 3946 3947 3948 3949 3950 3951 3952 3953 3954 3955 3956 3957 3958 3959 3960 3961 3962 3963 3964 3965 3966 3967 3968 3969 3970 3971 3972 3973 3974 3975 3976 3977 3978 3979 3980 3981 3982 3983 3984 3985 3986 3987 3988 3989 3990 3991 3992 3993 3994 3995 3996 3997 3998 3999 4000 4001 4002 4003 4004 4005 4006 4007 4008 4009 4010 4011 4012 4013 4014 4015 4016 4017 4018 4019 4020 4021 4022 4023 4024 4025 4026 4027 4028 4029 4030 4031 4032 4033 4034 4035 4036 4037 4038 4039 4040 4041 4042 4043 4044 4045 4046 4047 4048 4049 4050 4051 4052 4053 4054 4055 4056 4057 4058 4059 4060 4061 4062 4063 4064 4065 4066 4067 4068 4069 4070 4071 4072 4073 4074 4075 4076 // SPDX-License-Identifier: GPL-2.0-only /* * linux/mm/swapfile.c * * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds * Swap reorganised 29.12.95, Stephen Tweedie */ #include <linux/blkdev.h> #include <linux/mm.h> #include <linux/sched/mm.h> #include <linux/sched/task.h> #include <linux/hugetlb.h> #include <linux/mman.h> #include <linux/slab.h> #include <linux/kernel_stat.h> #include <linux/swap.h> #include <linux/vmalloc.h> #include <linux/pagemap.h> #include <linux/namei.h> #include <linux/shmem_fs.h> #include <linux/blk-cgroup.h> #include <linux/random.h> #include <linux/writeback.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/init.h> #include <linux/ksm.h> #include <linux/rmap.h> #include <linux/security.h> #include <linux/backing-dev.h> #include <linux/mutex.h> #include <linux/capability.h> #include <linux/syscalls.h> #include <linux/memcontrol.h> #include <linux/poll.h> #include <linux/oom.h> #include <linux/swapfile.h> #include <linux/export.h> #include <linux/sort.h> #include <linux/completion.h> #include <linux/suspend.h> #include <linux/zswap.h> #include <linux/plist.h> #include <asm/tlbflush.h> #include <linux/leafops.h> #include <linux/swap_cgroup.h> #include "swap_table.h" #include "internal.h" #include 
"swap.h" static bool swap_count_continued(struct swap_info_struct *, pgoff_t, unsigned char); static void free_swap_count_continuations(struct swap_info_struct *); static void swap_entries_free(struct swap_info_struct *si, struct swap_cluster_info *ci, swp_entry_t entry, unsigned int nr_pages); static void swap_range_alloc(struct swap_info_struct *si, unsigned int nr_entries); static bool folio_swapcache_freeable(struct folio *folio); static void move_cluster(struct swap_info_struct *si, struct swap_cluster_info *ci, struct list_head *list, enum swap_cluster_flags new_flags); static DEFINE_SPINLOCK(swap_lock); static unsigned int nr_swapfiles; atomic_long_t nr_swap_pages; /* * Some modules use swappable objects and may try to swap them out under * memory pressure (via the shrinker). Before doing so, they may wish to * check to see if any swap space is available. */ EXPORT_SYMBOL_GPL(nr_swap_pages); /* protected with swap_lock. reading in vm_swap_full() doesn't need lock */ long total_swap_pages; #define DEF_SWAP_PRIO -1 unsigned long swapfile_maximum_size; #ifdef CONFIG_MIGRATION bool swap_migration_ad_supported; #endif /* CONFIG_MIGRATION */ static const char Bad_file[] = "Bad swap file entry "; static const char Unused_file[] = "Unused swap file entry "; static const char Bad_offset[] = "Bad swap offset entry "; static const char Unused_offset[] = "Unused swap offset entry "; /* * all active swap_info_structs * protected with swap_lock, and ordered by priority. */ static PLIST_HEAD(swap_active_head); /* * all available (active, not full) swap_info_structs * protected with swap_avail_lock, ordered by priority. * This is used by folio_alloc_swap() instead of swap_active_head * because swap_active_head includes all swap_info_structs, * but folio_alloc_swap() doesn't need to look at full ones. * This uses its own lock instead of swap_lock because when a * swap_info_struct changes between not-full/full, it needs to * add/remove itself to/from this list, but the swap_info_struct->lock * is held and the locking order requires swap_lock to be taken * before any swap_info_struct->lock. 
*/ static PLIST_HEAD(swap_avail_head); static DEFINE_SPINLOCK(swap_avail_lock); struct swap_info_struct *swap_info[MAX_SWAPFILES]; static struct kmem_cache *swap_table_cachep; static DEFINE_MUTEX(swapon_mutex); static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait); /* Activity counter to indicate that a swapon or swapoff has occurred */ static atomic_t proc_poll_event = ATOMIC_INIT(0); atomic_t nr_rotate_swap = ATOMIC_INIT(0); struct percpu_swap_cluster { struct swap_info_struct *si[SWAP_NR_ORDERS]; unsigned long offset[SWAP_NR_ORDERS]; local_lock_t lock; }; static DEFINE_PER_CPU(struct percpu_swap_cluster, percpu_swap_cluster) = { .si = { NULL }, .offset = { SWAP_ENTRY_INVALID }, .lock = INIT_LOCAL_LOCK(), }; /* May return NULL on invalid type, caller must check for NULL return */ static struct swap_info_struct *swap_type_to_info(int type) { if (type >= MAX_SWAPFILES) return NULL; return READ_ONCE(swap_info[type]); /* rcu_dereference() */ } /* May return NULL on invalid entry, caller must check for NULL return */ static struct swap_info_struct *swap_entry_to_info(swp_entry_t entry) { return swap_type_to_info(swp_type(entry)); } static inline unsigned char swap_count(unsigned char ent) { return ent & ~SWAP_HAS_CACHE; /* may include COUNT_CONTINUED flag */ } /* * Use the second highest bit of inuse_pages counter as the indicator * if one swap device is on the available plist, so the atomic can * still be updated arithmetically while having special data embedded. * * inuse_pages counter is the only thing indicating if a device should * be on avail_lists or not (except swapon / swapoff). By embedding the * off-list bit in the atomic counter, updates no longer need any lock * to check the list status. * * This bit will be set if the device is not on the plist and not * usable, will be cleared if the device is on the plist. */ #define SWAP_USAGE_OFFLIST_BIT (1UL << (BITS_PER_TYPE(atomic_t) - 2)) #define SWAP_USAGE_COUNTER_MASK (~SWAP_USAGE_OFFLIST_BIT) static long swap_usage_in_pages(struct swap_info_struct *si) { return atomic_long_read(&si->inuse_pages) & SWAP_USAGE_COUNTER_MASK; } /* Reclaim the swap entry anyway if possible */ #define TTRS_ANYWAY 0x1 /* * Reclaim the swap entry if there are no more mappings of the * corresponding page */ #define TTRS_UNMAPPED 0x2 /* Reclaim the swap entry if swap is getting full */ #define TTRS_FULL 0x4 static bool swap_only_has_cache(struct swap_info_struct *si, unsigned long offset, int nr_pages) { unsigned char *map = si->swap_map + offset; unsigned char *map_end = map + nr_pages; do { VM_BUG_ON(!(*map & SWAP_HAS_CACHE)); if (*map != SWAP_HAS_CACHE) return false; } while (++map < map_end); return true; } static bool swap_is_last_map(struct swap_info_struct *si, unsigned long offset, int nr_pages, bool *has_cache) { unsigned char *map = si->swap_map + offset; unsigned char *map_end = map + nr_pages; unsigned char count = *map; if (swap_count(count) != 1 && swap_count(count) != SWAP_MAP_SHMEM) return false; while (++map < map_end) { if (*map != count) return false; } *has_cache = !!(count & SWAP_HAS_CACHE); return true; } /* * returns number of pages in the folio that backs the swap entry. If positive, * the folio was reclaimed. If negative, the folio was not reclaimed. If 0, no * folio was associated with the swap entry. 
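 *
 * A sketch of how a scanning caller is expected to consume the return
 * value (free_swap_and_cache_nr() below does essentially this):
 *
 *	nr = __try_to_reclaim_swap(si, offset, flags);
 *	if (nr == 0)
 *		nr = 1;			// no folio: step over one slot
 *	else if (nr < 0)
 *		nr = -nr;		// folio kept: still skip its slots
 *	nr = ALIGN(offset + 1, nr) - offset;	// folios are naturally aligned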
*/ static int __try_to_reclaim_swap(struct swap_info_struct *si, unsigned long offset, unsigned long flags) { const swp_entry_t entry = swp_entry(si->type, offset); struct swap_cluster_info *ci; struct folio *folio; int ret, nr_pages; bool need_reclaim; again: folio = swap_cache_get_folio(entry); if (!folio) return 0; nr_pages = folio_nr_pages(folio); ret = -nr_pages; /* * We hold a folio lock here. We have to use trylock for * avoiding deadlock. This is a special case and you should * use folio_free_swap() with explicit folio_lock() in usual * operations. */ if (!folio_trylock(folio)) goto out; /* * Offset could point to the middle of a large folio, or folio * may no longer point to the expected offset before it's locked. */ if (!folio_matches_swap_entry(folio, entry)) { folio_unlock(folio); folio_put(folio); goto again; } offset = swp_offset(folio->swap); need_reclaim = ((flags & TTRS_ANYWAY) || ((flags & TTRS_UNMAPPED) && !folio_mapped(folio)) || ((flags & TTRS_FULL) && mem_cgroup_swap_full(folio))); if (!need_reclaim || !folio_swapcache_freeable(folio)) goto out_unlock; /* * It's safe to delete the folio from swap cache only if the folio's * swap_map is HAS_CACHE only, which means the slots have no page table * reference or pending writeback, and can't be allocated to others. */ ci = swap_cluster_lock(si, offset); need_reclaim = swap_only_has_cache(si, offset, nr_pages); swap_cluster_unlock(ci); if (!need_reclaim) goto out_unlock; swap_cache_del_folio(folio); folio_set_dirty(folio); ret = nr_pages; out_unlock: folio_unlock(folio); out: folio_put(folio); return ret; } static inline struct swap_extent *first_se(struct swap_info_struct *sis) { struct rb_node *rb = rb_first(&sis->swap_extent_root); return rb_entry(rb, struct swap_extent, rb_node); } static inline struct swap_extent *next_se(struct swap_extent *se) { struct rb_node *rb = rb_next(&se->rb_node); return rb ? rb_entry(rb, struct swap_extent, rb_node) : NULL; } /* * swapon tell device that all the old swap contents can be discarded, * to allow the swap device to optimize its wear-levelling. */ static int discard_swap(struct swap_info_struct *si) { struct swap_extent *se; sector_t start_block; sector_t nr_blocks; int err = 0; /* Do not discard the swap header page! 
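 * That is why the first extent is discarded starting at se->start_block + 1
 * for se->nr_pages - 1 pages below, while every later extent is discarded
 * in full.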
*/ se = first_se(si); start_block = (se->start_block + 1) << (PAGE_SHIFT - 9); nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9); if (nr_blocks) { err = blkdev_issue_discard(si->bdev, start_block, nr_blocks, GFP_KERNEL); if (err) return err; cond_resched(); } for (se = next_se(se); se; se = next_se(se)) { start_block = se->start_block << (PAGE_SHIFT - 9); nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9); err = blkdev_issue_discard(si->bdev, start_block, nr_blocks, GFP_KERNEL); if (err) break; cond_resched(); } return err; /* That will often be -EOPNOTSUPP */ } static struct swap_extent * offset_to_swap_extent(struct swap_info_struct *sis, unsigned long offset) { struct swap_extent *se; struct rb_node *rb; rb = sis->swap_extent_root.rb_node; while (rb) { se = rb_entry(rb, struct swap_extent, rb_node); if (offset < se->start_page) rb = rb->rb_left; else if (offset >= se->start_page + se->nr_pages) rb = rb->rb_right; else return se; } /* It *must* be present */ BUG(); } sector_t swap_folio_sector(struct folio *folio) { struct swap_info_struct *sis = __swap_entry_to_info(folio->swap); struct swap_extent *se; sector_t sector; pgoff_t offset; offset = swp_offset(folio->swap); se = offset_to_swap_extent(sis, offset); sector = se->start_block + (offset - se->start_page); return sector << (PAGE_SHIFT - 9); } /* * swap allocation tell device that a cluster of swap can now be discarded, * to allow the swap device to optimize its wear-levelling. */ static void discard_swap_cluster(struct swap_info_struct *si, pgoff_t start_page, pgoff_t nr_pages) { struct swap_extent *se = offset_to_swap_extent(si, start_page); while (nr_pages) { pgoff_t offset = start_page - se->start_page; sector_t start_block = se->start_block + offset; sector_t nr_blocks = se->nr_pages - offset; if (nr_blocks > nr_pages) nr_blocks = nr_pages; start_page += nr_blocks; nr_pages -= nr_blocks; start_block <<= PAGE_SHIFT - 9; nr_blocks <<= PAGE_SHIFT - 9; if (blkdev_issue_discard(si->bdev, start_block, nr_blocks, GFP_NOIO)) break; se = next_se(se); } } #define LATENCY_LIMIT 256 static inline bool cluster_is_empty(struct swap_cluster_info *info) { return info->count == 0; } static inline bool cluster_is_discard(struct swap_cluster_info *info) { return info->flags == CLUSTER_FLAG_DISCARD; } static inline bool cluster_table_is_alloced(struct swap_cluster_info *ci) { return rcu_dereference_protected(ci->table, lockdep_is_held(&ci->lock)); } static inline bool cluster_is_usable(struct swap_cluster_info *ci, int order) { if (unlikely(ci->flags > CLUSTER_FLAG_USABLE)) return false; if (!cluster_table_is_alloced(ci)) return false; if (!order) return true; return cluster_is_empty(ci) || order == ci->order; } static inline unsigned int cluster_index(struct swap_info_struct *si, struct swap_cluster_info *ci) { return ci - si->cluster_info; } static inline unsigned int cluster_offset(struct swap_info_struct *si, struct swap_cluster_info *ci) { return cluster_index(si, ci) * SWAPFILE_CLUSTER; } static struct swap_table *swap_table_alloc(gfp_t gfp) { struct folio *folio; if (!SWP_TABLE_USE_PAGE) return kmem_cache_zalloc(swap_table_cachep, gfp); folio = folio_alloc(gfp | __GFP_ZERO, 0); if (folio) return folio_address(folio); return NULL; } static void swap_table_free_folio_rcu_cb(struct rcu_head *head) { struct folio *folio; folio = page_folio(container_of(head, struct page, rcu_head)); folio_put(folio); } static void swap_table_free(struct swap_table *table) { if (!SWP_TABLE_USE_PAGE) { kmem_cache_free(swap_table_cachep, table); 
return; } call_rcu(&(folio_page(virt_to_folio(table), 0)->rcu_head), swap_table_free_folio_rcu_cb); } static void swap_cluster_free_table(struct swap_cluster_info *ci) { unsigned int ci_off; struct swap_table *table; /* Only empty cluster's table is allow to be freed */ lockdep_assert_held(&ci->lock); VM_WARN_ON_ONCE(!cluster_is_empty(ci)); for (ci_off = 0; ci_off < SWAPFILE_CLUSTER; ci_off++) VM_WARN_ON_ONCE(!swp_tb_is_null(__swap_table_get(ci, ci_off))); table = (void *)rcu_dereference_protected(ci->table, true); rcu_assign_pointer(ci->table, NULL); swap_table_free(table); } /* * Allocate swap table for one cluster. Attempt an atomic allocation first, * then fallback to sleeping allocation. */ static struct swap_cluster_info * swap_cluster_alloc_table(struct swap_info_struct *si, struct swap_cluster_info *ci) { struct swap_table *table; /* * Only cluster isolation from the allocator does table allocation. * Swap allocator uses percpu clusters and holds the local lock. */ lockdep_assert_held(&ci->lock); lockdep_assert_held(&this_cpu_ptr(&percpu_swap_cluster)->lock); /* The cluster must be free and was just isolated from the free list. */ VM_WARN_ON_ONCE(ci->flags || !cluster_is_empty(ci)); table = swap_table_alloc(__GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN); if (table) { rcu_assign_pointer(ci->table, table); return ci; } /* * Try a sleep allocation. Each isolated free cluster may cause * a sleep allocation, but there is a limited number of them, so * the potential recursive allocation is limited. */ spin_unlock(&ci->lock); if (!(si->flags & SWP_SOLIDSTATE)) spin_unlock(&si->global_cluster_lock); local_unlock(&percpu_swap_cluster.lock); table = swap_table_alloc(__GFP_HIGH | __GFP_NOMEMALLOC | GFP_KERNEL); /* * Back to atomic context. We might have migrated to a new CPU with a * usable percpu cluster. But just keep using the isolated cluster to * make things easier. Migration indicates a slight change of workload * so using a new free cluster might not be a bad idea, and the worst * could happen with ignoring the percpu cluster is fragmentation, * which is acceptable since this fallback and race is rare. */ local_lock(&percpu_swap_cluster.lock); if (!(si->flags & SWP_SOLIDSTATE)) spin_lock(&si->global_cluster_lock); spin_lock(&ci->lock); /* Nothing except this helper should touch a dangling empty cluster. 
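 * The cluster was isolated from the free list and is on no list while the
 * locks were dropped above, so the regular allocation and reclaim paths
 * cannot find it; only this fallback path will install its table or put it
 * back on the free list.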
*/ if (WARN_ON_ONCE(cluster_table_is_alloced(ci))) { if (table) swap_table_free(table); return ci; } if (!table) { move_cluster(si, ci, &si->free_clusters, CLUSTER_FLAG_FREE); spin_unlock(&ci->lock); return NULL; } rcu_assign_pointer(ci->table, table); return ci; } static void move_cluster(struct swap_info_struct *si, struct swap_cluster_info *ci, struct list_head *list, enum swap_cluster_flags new_flags) { VM_WARN_ON(ci->flags == new_flags); BUILD_BUG_ON(1 << sizeof(ci->flags) * BITS_PER_BYTE < CLUSTER_FLAG_MAX); lockdep_assert_held(&ci->lock); spin_lock(&si->lock); if (ci->flags == CLUSTER_FLAG_NONE) list_add_tail(&ci->list, list); else list_move_tail(&ci->list, list); spin_unlock(&si->lock); ci->flags = new_flags; } /* Add a cluster to discard list and schedule it to do discard */ static void swap_cluster_schedule_discard(struct swap_info_struct *si, struct swap_cluster_info *ci) { VM_BUG_ON(ci->flags == CLUSTER_FLAG_FREE); move_cluster(si, ci, &si->discard_clusters, CLUSTER_FLAG_DISCARD); schedule_work(&si->discard_work); } static void __free_cluster(struct swap_info_struct *si, struct swap_cluster_info *ci) { swap_cluster_free_table(ci); move_cluster(si, ci, &si->free_clusters, CLUSTER_FLAG_FREE); ci->order = 0; } /* * Isolate and lock the first cluster that is not contented on a list, * clean its flag before taken off-list. Cluster flag must be in sync * with list status, so cluster updaters can always know the cluster * list status without touching si lock. * * Note it's possible that all clusters on a list are contented so * this returns NULL for an non-empty list. */ static struct swap_cluster_info *isolate_lock_cluster( struct swap_info_struct *si, struct list_head *list) { struct swap_cluster_info *ci, *found = NULL; spin_lock(&si->lock); list_for_each_entry(ci, list, list) { if (!spin_trylock(&ci->lock)) continue; /* We may only isolate and clear flags of following lists */ VM_BUG_ON(!ci->flags); VM_BUG_ON(ci->flags > CLUSTER_FLAG_USABLE && ci->flags != CLUSTER_FLAG_FULL); list_del(&ci->list); ci->flags = CLUSTER_FLAG_NONE; found = ci; break; } spin_unlock(&si->lock); if (found && !cluster_table_is_alloced(found)) { /* Only an empty free cluster's swap table can be freed. */ VM_WARN_ON_ONCE(list != &si->free_clusters); VM_WARN_ON_ONCE(!cluster_is_empty(found)); return swap_cluster_alloc_table(si, found); } return found; } /* * Doing discard actually. After a cluster discard is finished, the cluster * will be added to free cluster list. Discard cluster is a bit special as * they don't participate in allocation or reclaim, so clusters marked as * CLUSTER_FLAG_DISCARD must remain off-list or on discard list. */ static bool swap_do_scheduled_discard(struct swap_info_struct *si) { struct swap_cluster_info *ci; bool ret = false; unsigned int idx; spin_lock(&si->lock); while (!list_empty(&si->discard_clusters)) { ci = list_first_entry(&si->discard_clusters, struct swap_cluster_info, list); /* * Delete the cluster from list to prepare for discard, but keep * the CLUSTER_FLAG_DISCARD flag, percpu_swap_cluster could be * pointing to it, or ran into by relocate_cluster. */ list_del(&ci->list); idx = cluster_index(si, ci); spin_unlock(&si->lock); discard_swap_cluster(si, idx * SWAPFILE_CLUSTER, SWAPFILE_CLUSTER); spin_lock(&ci->lock); /* * Discard is done, clear its flags as it's off-list, then * return the cluster to allocation list. 
*/ ci->flags = CLUSTER_FLAG_NONE; __free_cluster(si, ci); spin_unlock(&ci->lock); ret = true; spin_lock(&si->lock); } spin_unlock(&si->lock); return ret; } static void swap_discard_work(struct work_struct *work) { struct swap_info_struct *si; si = container_of(work, struct swap_info_struct, discard_work); swap_do_scheduled_discard(si); } static void swap_users_ref_free(struct percpu_ref *ref) { struct swap_info_struct *si; si = container_of(ref, struct swap_info_struct, users); complete(&si->comp); } /* * Must be called after freeing if ci->count == 0, moves the cluster to free * or discard list. */ static void free_cluster(struct swap_info_struct *si, struct swap_cluster_info *ci) { VM_BUG_ON(ci->count != 0); VM_BUG_ON(ci->flags == CLUSTER_FLAG_FREE); lockdep_assert_held(&ci->lock); /* * If the swap is discardable, prepare discard the cluster * instead of free it immediately. The cluster will be freed * after discard. */ if ((si->flags & (SWP_WRITEOK | SWP_PAGE_DISCARD)) == (SWP_WRITEOK | SWP_PAGE_DISCARD)) { swap_cluster_schedule_discard(si, ci); return; } __free_cluster(si, ci); } /* * Must be called after freeing if ci->count != 0, moves the cluster to * nonfull list. */ static void partial_free_cluster(struct swap_info_struct *si, struct swap_cluster_info *ci) { VM_BUG_ON(!ci->count || ci->count == SWAPFILE_CLUSTER); lockdep_assert_held(&ci->lock); if (ci->flags != CLUSTER_FLAG_NONFULL) move_cluster(si, ci, &si->nonfull_clusters[ci->order], CLUSTER_FLAG_NONFULL); } /* * Must be called after allocation, moves the cluster to full or frag list. * Note: allocation doesn't acquire si lock, and may drop the ci lock for * reclaim, so the cluster could be any where when called. */ static void relocate_cluster(struct swap_info_struct *si, struct swap_cluster_info *ci) { lockdep_assert_held(&ci->lock); /* Discard cluster must remain off-list or on discard list */ if (cluster_is_discard(ci)) return; if (!ci->count) { if (ci->flags != CLUSTER_FLAG_FREE) free_cluster(si, ci); } else if (ci->count != SWAPFILE_CLUSTER) { if (ci->flags != CLUSTER_FLAG_FRAG) move_cluster(si, ci, &si->frag_clusters[ci->order], CLUSTER_FLAG_FRAG); } else { if (ci->flags != CLUSTER_FLAG_FULL) move_cluster(si, ci, &si->full_clusters, CLUSTER_FLAG_FULL); } } /* * The cluster corresponding to @offset will be accounted as having one bad * slot. The cluster will not be added to the free cluster list, and its * usage counter will be increased by 1. Only used for initialization. 
*/ static int swap_cluster_setup_bad_slot(struct swap_cluster_info *cluster_info, unsigned long offset) { unsigned long idx = offset / SWAPFILE_CLUSTER; struct swap_table *table; struct swap_cluster_info *ci; ci = cluster_info + idx; if (!ci->table) { table = swap_table_alloc(GFP_KERNEL); if (!table) return -ENOMEM; rcu_assign_pointer(ci->table, table); } ci->count++; WARN_ON(ci->count > SWAPFILE_CLUSTER); WARN_ON(ci->flags); return 0; } static bool cluster_reclaim_range(struct swap_info_struct *si, struct swap_cluster_info *ci, unsigned long start, unsigned long end) { unsigned char *map = si->swap_map; unsigned long offset = start; int nr_reclaim; spin_unlock(&ci->lock); do { switch (READ_ONCE(map[offset])) { case 0: offset++; break; case SWAP_HAS_CACHE: nr_reclaim = __try_to_reclaim_swap(si, offset, TTRS_ANYWAY); if (nr_reclaim > 0) offset += nr_reclaim; else goto out; break; default: goto out; } } while (offset < end); out: spin_lock(&ci->lock); /* * Recheck the range no matter reclaim succeeded or not, the slot * could have been be freed while we are not holding the lock. */ for (offset = start; offset < end; offset++) if (READ_ONCE(map[offset])) return false; return true; } static bool cluster_scan_range(struct swap_info_struct *si, struct swap_cluster_info *ci, unsigned long start, unsigned int nr_pages, bool *need_reclaim) { unsigned long offset, end = start + nr_pages; unsigned char *map = si->swap_map; if (cluster_is_empty(ci)) return true; for (offset = start; offset < end; offset++) { switch (READ_ONCE(map[offset])) { case 0: continue; case SWAP_HAS_CACHE: if (!vm_swap_full()) return false; *need_reclaim = true; continue; default: return false; } } return true; } /* * Currently, the swap table is not used for count tracking, just * do a sanity check here to ensure nothing leaked, so the swap * table should be empty upon freeing. */ static void swap_cluster_assert_table_empty(struct swap_cluster_info *ci, unsigned int start, unsigned int nr) { unsigned int ci_off = start % SWAPFILE_CLUSTER; unsigned int ci_end = ci_off + nr; unsigned long swp_tb; if (IS_ENABLED(CONFIG_DEBUG_VM)) { do { swp_tb = __swap_table_get(ci, ci_off); VM_WARN_ON_ONCE(!swp_tb_is_null(swp_tb)); } while (++ci_off < ci_end); } } static bool cluster_alloc_range(struct swap_info_struct *si, struct swap_cluster_info *ci, unsigned int start, unsigned char usage, unsigned int order) { unsigned int nr_pages = 1 << order; lockdep_assert_held(&ci->lock); if (!(si->flags & SWP_WRITEOK)) return false; /* * The first allocation in a cluster makes the * cluster exclusive to this order */ if (cluster_is_empty(ci)) ci->order = order; memset(si->swap_map + start, usage, nr_pages); swap_cluster_assert_table_empty(ci, start, nr_pages); swap_range_alloc(si, nr_pages); ci->count += nr_pages; return true; } /* Try use a new cluster for current CPU and allocate from it. 
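 * When the scan stops, the next candidate offset (or SWAP_ENTRY_INVALID)
 * is stashed in the per-CPU cursor for SWP_SOLIDSTATE devices or in the
 * per-device cursor otherwise, so the next allocation of the same order
 * can pick up where this one left off.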
*/ static unsigned int alloc_swap_scan_cluster(struct swap_info_struct *si, struct swap_cluster_info *ci, unsigned long offset, unsigned int order, unsigned char usage) { unsigned int next = SWAP_ENTRY_INVALID, found = SWAP_ENTRY_INVALID; unsigned long start = ALIGN_DOWN(offset, SWAPFILE_CLUSTER); unsigned long end = min(start + SWAPFILE_CLUSTER, si->max); unsigned int nr_pages = 1 << order; bool need_reclaim, ret; lockdep_assert_held(&ci->lock); if (end < nr_pages || ci->count + nr_pages > SWAPFILE_CLUSTER) goto out; for (end -= nr_pages; offset <= end; offset += nr_pages) { need_reclaim = false; if (!cluster_scan_range(si, ci, offset, nr_pages, &need_reclaim)) continue; if (need_reclaim) { ret = cluster_reclaim_range(si, ci, offset, offset + nr_pages); /* * Reclaim drops ci->lock and cluster could be used * by another order. Not checking flag as off-list * cluster has no flag set, and change of list * won't cause fragmentation. */ if (!cluster_is_usable(ci, order)) goto out; if (cluster_is_empty(ci)) offset = start; /* Reclaim failed but cluster is usable, try next */ if (!ret) continue; } if (!cluster_alloc_range(si, ci, offset, usage, order)) break; found = offset; offset += nr_pages; if (ci->count < SWAPFILE_CLUSTER && offset <= end) next = offset; break; } out: relocate_cluster(si, ci); swap_cluster_unlock(ci); if (si->flags & SWP_SOLIDSTATE) { this_cpu_write(percpu_swap_cluster.offset[order], next); this_cpu_write(percpu_swap_cluster.si[order], si); } else { si->global_cluster->next[order] = next; } return found; } static unsigned int alloc_swap_scan_list(struct swap_info_struct *si, struct list_head *list, unsigned int order, unsigned char usage, bool scan_all) { unsigned int found = SWAP_ENTRY_INVALID; do { struct swap_cluster_info *ci = isolate_lock_cluster(si, list); unsigned long offset; if (!ci) break; offset = cluster_offset(si, ci); found = alloc_swap_scan_cluster(si, ci, offset, order, usage); if (found) break; } while (scan_all); return found; } static void swap_reclaim_full_clusters(struct swap_info_struct *si, bool force) { long to_scan = 1; unsigned long offset, end; struct swap_cluster_info *ci; unsigned char *map = si->swap_map; int nr_reclaim; if (force) to_scan = swap_usage_in_pages(si) / SWAPFILE_CLUSTER; while ((ci = isolate_lock_cluster(si, &si->full_clusters))) { offset = cluster_offset(si, ci); end = min(si->max, offset + SWAPFILE_CLUSTER); to_scan--; while (offset < end) { if (READ_ONCE(map[offset]) == SWAP_HAS_CACHE) { spin_unlock(&ci->lock); nr_reclaim = __try_to_reclaim_swap(si, offset, TTRS_ANYWAY); spin_lock(&ci->lock); if (nr_reclaim) { offset += abs(nr_reclaim); continue; } } offset++; } /* in case no swap cache is reclaimed */ if (ci->flags == CLUSTER_FLAG_NONE) relocate_cluster(si, ci); swap_cluster_unlock(ci); if (to_scan <= 0) break; } } static void swap_reclaim_work(struct work_struct *work) { struct swap_info_struct *si; si = container_of(work, struct swap_info_struct, reclaim_work); swap_reclaim_full_clusters(si, true); } /* * Try to allocate swap entries with specified order and try set a new * cluster for current CPU too. */ static unsigned long cluster_alloc_swap_entry(struct swap_info_struct *si, int order, unsigned char usage) { struct swap_cluster_info *ci; unsigned int offset = SWAP_ENTRY_INVALID, found = SWAP_ENTRY_INVALID; /* * Swapfile is not block device so unable * to allocate large entries. */ if (order && !(si->flags & SWP_BLKDEV)) return 0; if (!(si->flags & SWP_SOLIDSTATE)) { /* Serialize HDD SWAP allocation for each device. 
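 * Non-SSD devices share a single per-device cursor (si->global_cluster)
 * protected by global_cluster_lock, whereas SWP_SOLIDSTATE devices rely on
 * the per-CPU cluster cache instead.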
*/ spin_lock(&si->global_cluster_lock); offset = si->global_cluster->next[order]; if (offset == SWAP_ENTRY_INVALID) goto new_cluster; ci = swap_cluster_lock(si, offset); /* Cluster could have been used by another order */ if (cluster_is_usable(ci, order)) { if (cluster_is_empty(ci)) offset = cluster_offset(si, ci); found = alloc_swap_scan_cluster(si, ci, offset, order, usage); } else { swap_cluster_unlock(ci); } if (found) goto done; } new_cluster: /* * If the device need discard, prefer new cluster over nonfull * to spread out the writes. */ if (si->flags & SWP_PAGE_DISCARD) { found = alloc_swap_scan_list(si, &si->free_clusters, order, usage, false); if (found) goto done; } if (order < PMD_ORDER) { found = alloc_swap_scan_list(si, &si->nonfull_clusters[order], order, usage, true); if (found) goto done; } if (!(si->flags & SWP_PAGE_DISCARD)) { found = alloc_swap_scan_list(si, &si->free_clusters, order, usage, false); if (found) goto done; } /* Try reclaim full clusters if free and nonfull lists are drained */ if (vm_swap_full()) swap_reclaim_full_clusters(si, false); if (order < PMD_ORDER) { /* * Scan only one fragment cluster is good enough. Order 0 * allocation will surely success, and large allocation * failure is not critical. Scanning one cluster still * keeps the list rotated and reclaimed (for HAS_CACHE). */ found = alloc_swap_scan_list(si, &si->frag_clusters[order], order, usage, false); if (found) goto done; } if (order) goto done; /* Order 0 stealing from higher order */ for (int o = 1; o < SWAP_NR_ORDERS; o++) { /* * Clusters here have at least one usable slots and can't fail order 0 * allocation, but reclaim may drop si->lock and race with another user. */ found = alloc_swap_scan_list(si, &si->frag_clusters[o], 0, usage, true); if (found) goto done; found = alloc_swap_scan_list(si, &si->nonfull_clusters[o], 0, usage, true); if (found) goto done; } done: if (!(si->flags & SWP_SOLIDSTATE)) spin_unlock(&si->global_cluster_lock); return found; } /* SWAP_USAGE_OFFLIST_BIT can only be set by this helper. */ static void del_from_avail_list(struct swap_info_struct *si, bool swapoff) { unsigned long pages; spin_lock(&swap_avail_lock); if (swapoff) { /* * Forcefully remove it. Clear the SWP_WRITEOK flags for * swapoff here so it's synchronized by both si->lock and * swap_avail_lock, to ensure the result can be seen by * add_to_avail_list. */ lockdep_assert_held(&si->lock); si->flags &= ~SWP_WRITEOK; atomic_long_or(SWAP_USAGE_OFFLIST_BIT, &si->inuse_pages); } else { /* * If not called by swapoff, take it off-list only if it's * full and SWAP_USAGE_OFFLIST_BIT is not set (strictly * si->inuse_pages == pages), any concurrent slot freeing, * or device already removed from plist by someone else * will make this return false. */ pages = si->pages; if (!atomic_long_try_cmpxchg(&si->inuse_pages, &pages, pages | SWAP_USAGE_OFFLIST_BIT)) goto skip; } plist_del(&si->avail_list, &swap_avail_head); skip: spin_unlock(&swap_avail_lock); } /* SWAP_USAGE_OFFLIST_BIT can only be cleared by this helper. 
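 * This is the counterpart of del_from_avail_list() above: both run under
 * swap_avail_lock, so the off-list bit and the plist membership are always
 * updated together.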
*/ static void add_to_avail_list(struct swap_info_struct *si, bool swapon) { long val; unsigned long pages; spin_lock(&swap_avail_lock); /* Corresponding to SWP_WRITEOK clearing in del_from_avail_list */ if (swapon) { lockdep_assert_held(&si->lock); si->flags |= SWP_WRITEOK; } else { if (!(READ_ONCE(si->flags) & SWP_WRITEOK)) goto skip; } if (!(atomic_long_read(&si->inuse_pages) & SWAP_USAGE_OFFLIST_BIT)) goto skip; val = atomic_long_fetch_and_relaxed(~SWAP_USAGE_OFFLIST_BIT, &si->inuse_pages); /* * When device is full and device is on the plist, only one updater will * see (inuse_pages == si->pages) and will call del_from_avail_list. If * that updater happen to be here, just skip adding. */ pages = si->pages; if (val == pages) { /* Just like the cmpxchg in del_from_avail_list */ if (atomic_long_try_cmpxchg(&si->inuse_pages, &pages, pages | SWAP_USAGE_OFFLIST_BIT)) goto skip; } plist_add(&si->avail_list, &swap_avail_head); skip: spin_unlock(&swap_avail_lock); } /* * swap_usage_add / swap_usage_sub of each slot are serialized by ci->lock * within each cluster, so the total contribution to the global counter should * always be positive and cannot exceed the total number of usable slots. */ static bool swap_usage_add(struct swap_info_struct *si, unsigned int nr_entries) { long val = atomic_long_add_return_relaxed(nr_entries, &si->inuse_pages); /* * If device is full, and SWAP_USAGE_OFFLIST_BIT is not set, * remove it from the plist. */ if (unlikely(val == si->pages)) { del_from_avail_list(si, false); return true; } return false; } static void swap_usage_sub(struct swap_info_struct *si, unsigned int nr_entries) { long val = atomic_long_sub_return_relaxed(nr_entries, &si->inuse_pages); /* * If device is not full, and SWAP_USAGE_OFFLIST_BIT is set, * add it to the plist. */ if (unlikely(val & SWAP_USAGE_OFFLIST_BIT)) add_to_avail_list(si, false); } static void swap_range_alloc(struct swap_info_struct *si, unsigned int nr_entries) { if (swap_usage_add(si, nr_entries)) { if (vm_swap_full()) schedule_work(&si->reclaim_work); } atomic_long_sub(nr_entries, &nr_swap_pages); } static void swap_range_free(struct swap_info_struct *si, unsigned long offset, unsigned int nr_entries) { unsigned long begin = offset; unsigned long end = offset + nr_entries - 1; void (*swap_slot_free_notify)(struct block_device *, unsigned long); unsigned int i; /* * Use atomic clear_bit operations only on zeromap instead of non-atomic * bitmap_clear to prevent adjacent bits corruption due to simultaneous writes. */ for (i = 0; i < nr_entries; i++) { clear_bit(offset + i, si->zeromap); zswap_invalidate(swp_entry(si->type, offset + i)); } if (si->flags & SWP_BLKDEV) swap_slot_free_notify = si->bdev->bd_disk->fops->swap_slot_free_notify; else swap_slot_free_notify = NULL; while (offset <= end) { arch_swap_invalidate_page(si->type, offset); if (swap_slot_free_notify) swap_slot_free_notify(si->bdev, offset); offset++; } __swap_cache_clear_shadow(swp_entry(si->type, begin), nr_entries); /* * Make sure that try_to_unuse() observes si->inuse_pages reaching 0 * only after the above cleanups are done. */ smp_wmb(); atomic_long_add(nr_entries, &nr_swap_pages); swap_usage_sub(si, nr_entries); } static bool get_swap_device_info(struct swap_info_struct *si) { if (!percpu_ref_tryget_live(&si->users)) return false; /* * Guarantee the si->users are checked before accessing other * fields of swap_info_struct, and si->flags (SWP_WRITEOK) is * up to dated. 
* * Paired with the spin_unlock() after setup_swap_info() in * enable_swap_info(), and smp_wmb() in swapoff. */ smp_rmb(); return true; } /* * Fast path try to get swap entries with specified order from current * CPU's swap entry pool (a cluster). */ static bool swap_alloc_fast(swp_entry_t *entry, int order) { struct swap_cluster_info *ci; struct swap_info_struct *si; unsigned int offset, found = SWAP_ENTRY_INVALID; /* * Once allocated, swap_info_struct will never be completely freed, * so checking it's liveness by get_swap_device_info is enough. */ si = this_cpu_read(percpu_swap_cluster.si[order]); offset = this_cpu_read(percpu_swap_cluster.offset[order]); if (!si || !offset || !get_swap_device_info(si)) return false; ci = swap_cluster_lock(si, offset); if (cluster_is_usable(ci, order)) { if (cluster_is_empty(ci)) offset = cluster_offset(si, ci); found = alloc_swap_scan_cluster(si, ci, offset, order, SWAP_HAS_CACHE); if (found) *entry = swp_entry(si->type, found); } else { swap_cluster_unlock(ci); } put_swap_device(si); return !!found; } /* Rotate the device and switch to a new cluster */ static void swap_alloc_slow(swp_entry_t *entry, int order) { unsigned long offset; struct swap_info_struct *si, *next; spin_lock(&swap_avail_lock); start_over: plist_for_each_entry_safe(si, next, &swap_avail_head, avail_list) { /* Rotate the device and switch to a new cluster */ plist_requeue(&si->avail_list, &swap_avail_head); spin_unlock(&swap_avail_lock); if (get_swap_device_info(si)) { offset = cluster_alloc_swap_entry(si, order, SWAP_HAS_CACHE); put_swap_device(si); if (offset) { *entry = swp_entry(si->type, offset); return; } if (order) return; } spin_lock(&swap_avail_lock); /* * if we got here, it's likely that si was almost full before, * multiple callers probably all tried to get a page from the * same si and it filled up before we could get one; or, the si * filled up between us dropping swap_avail_lock. * Since we dropped the swap_avail_lock, the swap_avail_list * may have been modified; so if next is still in the * swap_avail_head list then try it, otherwise start over if we * have not gotten any slots. */ if (plist_node_empty(&next->avail_list)) goto start_over; } spin_unlock(&swap_avail_lock); } /* * Discard pending clusters in a synchronized way when under high pressure. * Return: true if any cluster is discarded. */ static bool swap_sync_discard(void) { bool ret = false; struct swap_info_struct *si, *next; spin_lock(&swap_lock); start_over: plist_for_each_entry_safe(si, next, &swap_active_head, list) { spin_unlock(&swap_lock); if (get_swap_device_info(si)) { if (si->flags & SWP_PAGE_DISCARD) ret = swap_do_scheduled_discard(si); put_swap_device(si); } if (ret) return true; spin_lock(&swap_lock); if (plist_node_empty(&next->list)) goto start_over; } spin_unlock(&swap_lock); return false; } /** * folio_alloc_swap - allocate swap space for a folio * @folio: folio we want to move to swap * * Allocate swap space for the folio and add the folio to the * swap cache. * * Context: Caller needs to hold the folio lock. * Return: Whether the folio was added to the swap cache. */ int folio_alloc_swap(struct folio *folio) { unsigned int order = folio_order(folio); unsigned int size = 1 << order; swp_entry_t entry = {}; VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); VM_BUG_ON_FOLIO(!folio_test_uptodate(folio), folio); if (order) { /* * Reject large allocation when THP_SWAP is disabled, * the caller should split the folio and try again. 
*/ if (!IS_ENABLED(CONFIG_THP_SWAP)) return -EAGAIN; /* * Allocation size should never exceed cluster size * (HPAGE_PMD_SIZE). */ if (size > SWAPFILE_CLUSTER) { VM_WARN_ON_ONCE(1); return -EINVAL; } } again: local_lock(&percpu_swap_cluster.lock); if (!swap_alloc_fast(&entry, order)) swap_alloc_slow(&entry, order); local_unlock(&percpu_swap_cluster.lock); if (unlikely(!order && !entry.val)) { if (swap_sync_discard()) goto again; } /* Need to call this even if allocation failed, for MEMCG_SWAP_FAIL. */ if (mem_cgroup_try_charge_swap(folio, entry)) goto out_free; if (!entry.val) return -ENOMEM; swap_cache_add_folio(folio, entry, NULL); return 0; out_free: put_swap_folio(folio, entry); return -ENOMEM; } static struct swap_info_struct *_swap_info_get(swp_entry_t entry) { struct swap_info_struct *si; unsigned long offset; if (!entry.val) goto out; si = swap_entry_to_info(entry); if (!si) goto bad_nofile; if (data_race(!(si->flags & SWP_USED))) goto bad_device; offset = swp_offset(entry); if (offset >= si->max) goto bad_offset; if (data_race(!si->swap_map[swp_offset(entry)])) goto bad_free; return si; bad_free: pr_err("%s: %s%08lx\n", __func__, Unused_offset, entry.val); goto out; bad_offset: pr_err("%s: %s%08lx\n", __func__, Bad_offset, entry.val); goto out; bad_device: pr_err("%s: %s%08lx\n", __func__, Unused_file, entry.val); goto out; bad_nofile: pr_err("%s: %s%08lx\n", __func__, Bad_file, entry.val); out: return NULL; } static unsigned char swap_entry_put_locked(struct swap_info_struct *si, struct swap_cluster_info *ci, swp_entry_t entry, unsigned char usage) { unsigned long offset = swp_offset(entry); unsigned char count; unsigned char has_cache; count = si->swap_map[offset]; has_cache = count & SWAP_HAS_CACHE; count &= ~SWAP_HAS_CACHE; if (usage == SWAP_HAS_CACHE) { VM_BUG_ON(!has_cache); has_cache = 0; } else if (count == SWAP_MAP_SHMEM) { /* * Or we could insist on shmem.c using a special * swap_shmem_free() and free_shmem_swap_and_cache()... */ count = 0; } else if ((count & ~COUNT_CONTINUED) <= SWAP_MAP_MAX) { if (count == COUNT_CONTINUED) { if (swap_count_continued(si, offset, count)) count = SWAP_MAP_MAX | COUNT_CONTINUED; else count = SWAP_MAP_MAX; } else count--; } usage = count | has_cache; if (usage) WRITE_ONCE(si->swap_map[offset], usage); else swap_entries_free(si, ci, entry, 1); return usage; } /* * When we get a swap entry, if there aren't some other ways to * prevent swapoff, such as the folio in swap cache is locked, RCU * reader side is locked, etc., the swap entry may become invalid * because of swapoff. Then, we need to enclose all swap related * functions with get_swap_device() and put_swap_device(), unless the * swap functions call get/put_swap_device() by themselves. * * RCU reader side lock (including any spinlock) is sufficient to * prevent swapoff, because synchronize_rcu() is called in swapoff() * before freeing data structures. * * Check whether swap entry is valid in the swap device. If so, * return pointer to swap_info_struct, and keep the swap entry valid * via preventing the swap device from being swapoff, until * put_swap_device() is called. Otherwise return NULL. * * Notice that swapoff or swapoff+swapon can still happen before the * percpu_ref_tryget_live() in get_swap_device() or after the * percpu_ref_put() in put_swap_device() if there isn't any other way * to prevent swapoff. The caller must be prepared for that. For * example, the following situation is possible. * * CPU1 CPU2 * do_swap_page() * ... 
swapoff+swapon * __read_swap_cache_async() * swapcache_prepare() * __swap_duplicate() * // check swap_map * // verify PTE not changed * * In __swap_duplicate(), the swap_map need to be checked before * changing partly because the specified swap entry may be for another * swap device which has been swapoff. And in do_swap_page(), after * the page is read from the swap device, the PTE is verified not * changed with the page table locked to check whether the swap device * has been swapoff or swapoff+swapon. */ struct swap_info_struct *get_swap_device(swp_entry_t entry) { struct swap_info_struct *si; unsigned long offset; if (!entry.val) goto out; si = swap_entry_to_info(entry); if (!si) goto bad_nofile; if (!get_swap_device_info(si)) goto out; offset = swp_offset(entry); if (offset >= si->max) goto put_out; return si; bad_nofile: pr_err("%s: %s%08lx\n", __func__, Bad_file, entry.val); out: return NULL; put_out: pr_err("%s: %s%08lx\n", __func__, Bad_offset, entry.val); percpu_ref_put(&si->users); return NULL; } static void swap_entries_put_cache(struct swap_info_struct *si, swp_entry_t entry, int nr) { unsigned long offset = swp_offset(entry); struct swap_cluster_info *ci; ci = swap_cluster_lock(si, offset); if (swap_only_has_cache(si, offset, nr)) { swap_entries_free(si, ci, entry, nr); } else { for (int i = 0; i < nr; i++, entry.val++) swap_entry_put_locked(si, ci, entry, SWAP_HAS_CACHE); } swap_cluster_unlock(ci); } static bool swap_entries_put_map(struct swap_info_struct *si, swp_entry_t entry, int nr) { unsigned long offset = swp_offset(entry); struct swap_cluster_info *ci; bool has_cache = false; unsigned char count; int i; if (nr <= 1) goto fallback; count = swap_count(data_race(si->swap_map[offset])); if (count != 1 && count != SWAP_MAP_SHMEM) goto fallback; ci = swap_cluster_lock(si, offset); if (!swap_is_last_map(si, offset, nr, &has_cache)) { goto locked_fallback; } if (!has_cache) swap_entries_free(si, ci, entry, nr); else for (i = 0; i < nr; i++) WRITE_ONCE(si->swap_map[offset + i], SWAP_HAS_CACHE); swap_cluster_unlock(ci); return has_cache; fallback: ci = swap_cluster_lock(si, offset); locked_fallback: for (i = 0; i < nr; i++, entry.val++) { count = swap_entry_put_locked(si, ci, entry, 1); if (count == SWAP_HAS_CACHE) has_cache = true; } swap_cluster_unlock(ci); return has_cache; } /* * Only functions with "_nr" suffix are able to free entries spanning * cross multi clusters, so ensure the range is within a single cluster * when freeing entries with functions without "_nr" suffix. */ static bool swap_entries_put_map_nr(struct swap_info_struct *si, swp_entry_t entry, int nr) { int cluster_nr, cluster_rest; unsigned long offset = swp_offset(entry); bool has_cache = false; cluster_rest = SWAPFILE_CLUSTER - offset % SWAPFILE_CLUSTER; while (nr) { cluster_nr = min(nr, cluster_rest); has_cache |= swap_entries_put_map(si, entry, cluster_nr); cluster_rest = SWAPFILE_CLUSTER; nr -= cluster_nr; entry.val += cluster_nr; } return has_cache; } /* * Check if it's the last ref of swap entry in the freeing path. * Qualified value includes 1, SWAP_HAS_CACHE or SWAP_MAP_SHMEM. */ static inline bool __maybe_unused swap_is_last_ref(unsigned char count) { return (count == SWAP_HAS_CACHE) || (count == 1) || (count == SWAP_MAP_SHMEM); } /* * Drop the last ref of swap entries, caller have to ensure all entries * belong to the same cgroup and cluster. 
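 * The cluster constraint is checked with a VM_BUG_ON below, and the whole
 * range is uncharged with a single mem_cgroup_uncharge_swap() call, which
 * is only correct if every entry was charged to the same cgroup.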
*/ static void swap_entries_free(struct swap_info_struct *si, struct swap_cluster_info *ci, swp_entry_t entry, unsigned int nr_pages) { unsigned long offset = swp_offset(entry); unsigned char *map = si->swap_map + offset; unsigned char *map_end = map + nr_pages; /* It should never free entries across different clusters */ VM_BUG_ON(ci != __swap_offset_to_cluster(si, offset + nr_pages - 1)); VM_BUG_ON(cluster_is_empty(ci)); VM_BUG_ON(ci->count < nr_pages); ci->count -= nr_pages; do { VM_BUG_ON(!swap_is_last_ref(*map)); *map = 0; } while (++map < map_end); mem_cgroup_uncharge_swap(entry, nr_pages); swap_range_free(si, offset, nr_pages); swap_cluster_assert_table_empty(ci, offset, nr_pages); if (!ci->count) free_cluster(si, ci); else partial_free_cluster(si, ci); } /* * Caller has made sure that the swap device corresponding to entry * is still around or has not been recycled. */ void swap_free_nr(swp_entry_t entry, int nr_pages) { int nr; struct swap_info_struct *sis; unsigned long offset = swp_offset(entry); sis = _swap_info_get(entry); if (!sis) return; while (nr_pages) { nr = min_t(int, nr_pages, SWAPFILE_CLUSTER - offset % SWAPFILE_CLUSTER); swap_entries_put_map(sis, swp_entry(sis->type, offset), nr); offset += nr; nr_pages -= nr; } } /* * Called after dropping swapcache to decrease refcnt to swap entries. */ void put_swap_folio(struct folio *folio, swp_entry_t entry) { struct swap_info_struct *si; int size = 1 << swap_entry_order(folio_order(folio)); si = _swap_info_get(entry); if (!si) return; swap_entries_put_cache(si, entry, size); } int __swap_count(swp_entry_t entry) { struct swap_info_struct *si = __swap_entry_to_info(entry); pgoff_t offset = swp_offset(entry); return swap_count(si->swap_map[offset]); } /* * How many references to @entry are currently swapped out? * This does not give an exact answer when swap count is continued, * but does include the high COUNT_CONTINUED flag to allow for that. */ bool swap_entry_swapped(struct swap_info_struct *si, swp_entry_t entry) { pgoff_t offset = swp_offset(entry); struct swap_cluster_info *ci; int count; ci = swap_cluster_lock(si, offset); count = swap_count(si->swap_map[offset]); swap_cluster_unlock(ci); return !!count; } /* * How many references to @entry are currently swapped out? * This considers COUNT_CONTINUED so it returns exact answer. 
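 *
 * The continuation pages effectively store the extra digits of the count:
 * the swap_map byte holds the lowest digit (radix SWAP_MAP_MAX + 1) and
 * each continuation page adds one digit of radix SWAP_CONT_MAX + 1, so
 * roughly:
 *
 *	count = map + cont[0] * (SWAP_MAP_MAX + 1)
 *		    + cont[1] * (SWAP_MAP_MAX + 1) * (SWAP_CONT_MAX + 1)
 *		    + ...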
*/ int swp_swapcount(swp_entry_t entry) { int count, tmp_count, n; struct swap_info_struct *si; struct swap_cluster_info *ci; struct page *page; pgoff_t offset; unsigned char *map; si = _swap_info_get(entry); if (!si) return 0; offset = swp_offset(entry); ci = swap_cluster_lock(si, offset); count = swap_count(si->swap_map[offset]); if (!(count & COUNT_CONTINUED)) goto out; count &= ~COUNT_CONTINUED; n = SWAP_MAP_MAX + 1; page = vmalloc_to_page(si->swap_map + offset); offset &= ~PAGE_MASK; VM_BUG_ON(page_private(page) != SWP_CONTINUED); do { page = list_next_entry(page, lru); map = kmap_local_page(page); tmp_count = map[offset]; kunmap_local(map); count += (tmp_count & ~COUNT_CONTINUED) * n; n *= (SWAP_CONT_MAX + 1); } while (tmp_count & COUNT_CONTINUED); out: swap_cluster_unlock(ci); return count; } static bool swap_page_trans_huge_swapped(struct swap_info_struct *si, swp_entry_t entry, int order) { struct swap_cluster_info *ci; unsigned char *map = si->swap_map; unsigned int nr_pages = 1 << order; unsigned long roffset = swp_offset(entry); unsigned long offset = round_down(roffset, nr_pages); int i; bool ret = false; ci = swap_cluster_lock(si, offset); if (nr_pages == 1) { if (swap_count(map[roffset])) ret = true; goto unlock_out; } for (i = 0; i < nr_pages; i++) { if (swap_count(map[offset + i])) { ret = true; break; } } unlock_out: swap_cluster_unlock(ci); return ret; } static bool folio_swapped(struct folio *folio) { swp_entry_t entry = folio->swap; struct swap_info_struct *si = _swap_info_get(entry); if (!si) return false; if (!IS_ENABLED(CONFIG_THP_SWAP) || likely(!folio_test_large(folio))) return swap_entry_swapped(si, entry); return swap_page_trans_huge_swapped(si, entry, folio_order(folio)); } static bool folio_swapcache_freeable(struct folio *folio) { VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); if (!folio_test_swapcache(folio)) return false; if (folio_test_writeback(folio)) return false; /* * Once hibernation has begun to create its image of memory, * there's a danger that one of the calls to folio_free_swap() * - most probably a call from __try_to_reclaim_swap() while * hibernation is allocating its own swap pages for the image, * but conceivably even a call from memory reclaim - will free * the swap from a folio which has already been recorded in the * image as a clean swapcache folio, and then reuse its swap for * another page of the image. On waking from hibernation, the * original folio might be freed under memory pressure, then * later read back in from swap, now with the wrong data. * * Hibernation suspends storage while it is writing the image * to disk so check that here. */ if (pm_suspended_storage()) return false; return true; } /** * folio_free_swap() - Free the swap space used for this folio. * @folio: The folio to remove. * * If swap is getting full, or if there are no more mappings of this folio, * then call folio_free_swap to free its swap space. * * Return: true if we were able to release the swap space. */ bool folio_free_swap(struct folio *folio) { if (!folio_swapcache_freeable(folio)) return false; if (folio_swapped(folio)) return false; swap_cache_del_folio(folio); folio_set_dirty(folio); return true; } /** * free_swap_and_cache_nr() - Release reference on range of swap entries and * reclaim their cache if no more references remain. * @entry: First entry of range. * @nr: Number of entries in range. * * For each swap entry in the contiguous range, release a reference. If any swap * entries become free, try to reclaim their underlying folios, if present. 
The * offset range is defined by [entry.offset, entry.offset + nr). */ void free_swap_and_cache_nr(swp_entry_t entry, int nr) { const unsigned long start_offset = swp_offset(entry); const unsigned long end_offset = start_offset + nr; struct swap_info_struct *si; bool any_only_cache = false; unsigned long offset; si = get_swap_device(entry); if (!si) return; if (WARN_ON(end_offset > si->max)) goto out; /* * First free all entries in the range. */ any_only_cache = swap_entries_put_map_nr(si, entry, nr); /* * Short-circuit the below loop if none of the entries had their * reference drop to zero. */ if (!any_only_cache) goto out; /* * Now go back over the range trying to reclaim the swap cache. */ for (offset = start_offset; offset < end_offset; offset += nr) { nr = 1; if (READ_ONCE(si->swap_map[offset]) == SWAP_HAS_CACHE) { /* * Folios are always naturally aligned in swap so * advance forward to the next boundary. Zero means no * folio was found for the swap entry, so advance by 1 * in this case. Negative value means folio was found * but could not be reclaimed. Here we can still advance * to the next boundary. */ nr = __try_to_reclaim_swap(si, offset, TTRS_UNMAPPED | TTRS_FULL); if (nr == 0) nr = 1; else if (nr < 0) nr = -nr; nr = ALIGN(offset + 1, nr) - offset; } } out: put_swap_device(si); } #ifdef CONFIG_HIBERNATION swp_entry_t get_swap_page_of_type(int type) { struct swap_info_struct *si = swap_type_to_info(type); unsigned long offset; swp_entry_t entry = {0}; if (!si) goto fail; /* This is called for allocating swap entry, not cache */ if (get_swap_device_info(si)) { if (si->flags & SWP_WRITEOK) { /* * Grab the local lock to be complaint * with swap table allocation. */ local_lock(&percpu_swap_cluster.lock); offset = cluster_alloc_swap_entry(si, 0, 1); local_unlock(&percpu_swap_cluster.lock); if (offset) entry = swp_entry(si->type, offset); } put_swap_device(si); } fail: return entry; } /* * Find the swap type that corresponds to given device (if any). * * @offset - number of the PAGE_SIZE-sized block of the device, starting * from 0, in which the swap header is expected to be located. * * This is needed for the suspend to disk (aka swsusp). */ int swap_type_of(dev_t device, sector_t offset) { int type; if (!device) return -1; spin_lock(&swap_lock); for (type = 0; type < nr_swapfiles; type++) { struct swap_info_struct *sis = swap_info[type]; if (!(sis->flags & SWP_WRITEOK)) continue; if (device == sis->bdev->bd_dev) { struct swap_extent *se = first_se(sis); if (se->start_block == offset) { spin_unlock(&swap_lock); return type; } } } spin_unlock(&swap_lock); return -ENODEV; } int find_first_swap(dev_t *device) { int type; spin_lock(&swap_lock); for (type = 0; type < nr_swapfiles; type++) { struct swap_info_struct *sis = swap_info[type]; if (!(sis->flags & SWP_WRITEOK)) continue; *device = sis->bdev->bd_dev; spin_unlock(&swap_lock); return type; } spin_unlock(&swap_lock); return -ENODEV; } /* * Get the (PAGE_SIZE) block corresponding to given offset on the swapdev * corresponding to given index in swap_info (swap type). 
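 * Within the extent covering @offset this is a plain linear translation:
 *
 *	block = se->start_block + (offset - se->start_page);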
*/ sector_t swapdev_block(int type, pgoff_t offset) { struct swap_info_struct *si = swap_type_to_info(type); struct swap_extent *se; if (!si || !(si->flags & SWP_WRITEOK)) return 0; se = offset_to_swap_extent(si, offset); return se->start_block + (offset - se->start_page); } /* * Return either the total number of swap pages of given type, or the number * of free pages of that type (depending on @free) * * This is needed for software suspend */ unsigned int count_swap_pages(int type, int free) { unsigned int n = 0; spin_lock(&swap_lock); if ((unsigned int)type < nr_swapfiles) { struct swap_info_struct *sis = swap_info[type]; spin_lock(&sis->lock); if (sis->flags & SWP_WRITEOK) { n = sis->pages; if (free) n -= swap_usage_in_pages(sis); } spin_unlock(&sis->lock); } spin_unlock(&swap_lock); return n; } #endif /* CONFIG_HIBERNATION */ static inline int pte_same_as_swp(pte_t pte, pte_t swp_pte) { return pte_same(pte_swp_clear_flags(pte), swp_pte); } /* * No need to decide whether this PTE shares the swap entry with others, * just let do_wp_page work it out if a write is requested later - to * force COW, vm_page_prot omits write permission from any private vma. */ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, swp_entry_t entry, struct folio *folio) { struct page *page; struct folio *swapcache; spinlock_t *ptl; pte_t *pte, new_pte, old_pte; bool hwpoisoned = false; int ret = 1; /* * If the folio is removed from swap cache by others, continue to * unuse other PTEs. try_to_unuse may try again if we missed this one. */ if (!folio_matches_swap_entry(folio, entry)) return 0; swapcache = folio; folio = ksm_might_need_to_copy(folio, vma, addr); if (unlikely(!folio)) return -ENOMEM; else if (unlikely(folio == ERR_PTR(-EHWPOISON))) { hwpoisoned = true; folio = swapcache; } page = folio_file_page(folio, swp_offset(entry)); if (PageHWPoison(page)) hwpoisoned = true; pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); if (unlikely(!pte || !pte_same_as_swp(ptep_get(pte), swp_entry_to_pte(entry)))) { ret = 0; goto out; } old_pte = ptep_get(pte); if (unlikely(hwpoisoned || !folio_test_uptodate(folio))) { swp_entry_t swp_entry; dec_mm_counter(vma->vm_mm, MM_SWAPENTS); if (hwpoisoned) { swp_entry = make_hwpoison_entry(page); } else { swp_entry = make_poisoned_swp_entry(); } new_pte = swp_entry_to_pte(swp_entry); ret = 0; goto setpte; } /* * Some architectures may have to restore extra metadata to the page * when reading from swap. This metadata may be indexed by swap entry * so this must be called before swap_free(). */ arch_swap_restore(folio_swap(entry, folio), folio); dec_mm_counter(vma->vm_mm, MM_SWAPENTS); inc_mm_counter(vma->vm_mm, MM_ANONPAGES); folio_get(folio); if (folio == swapcache) { rmap_t rmap_flags = RMAP_NONE; /* * See do_swap_page(): writeback would be problematic. * However, we do a folio_wait_writeback() just before this * call and have the folio locked. */ VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio); if (pte_swp_exclusive(old_pte)) rmap_flags |= RMAP_EXCLUSIVE; /* * We currently only expect small !anon folios, which are either * fully exclusive or fully shared. If we ever get large folios * here, we have to be careful. 
*/ if (!folio_test_anon(folio)) { VM_WARN_ON_ONCE(folio_test_large(folio)); VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio); folio_add_new_anon_rmap(folio, vma, addr, rmap_flags); } else { folio_add_anon_rmap_pte(folio, page, vma, addr, rmap_flags); } } else { /* ksm created a completely new copy */ folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE); folio_add_lru_vma(folio, vma); } new_pte = pte_mkold(mk_pte(page, vma->vm_page_prot)); if (pte_swp_soft_dirty(old_pte)) new_pte = pte_mksoft_dirty(new_pte); if (pte_swp_uffd_wp(old_pte)) new_pte = pte_mkuffd_wp(new_pte); setpte: set_pte_at(vma->vm_mm, addr, pte, new_pte); swap_free(entry); out: if (pte) pte_unmap_unlock(pte, ptl); if (folio != swapcache) { folio_unlock(folio); folio_put(folio); } return ret; } static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, unsigned long end, unsigned int type) { pte_t *pte = NULL; struct swap_info_struct *si; si = swap_info[type]; do { struct folio *folio; unsigned long offset; unsigned char swp_count; softleaf_t entry; int ret; pte_t ptent; if (!pte++) { pte = pte_offset_map(pmd, addr); if (!pte) break; } ptent = ptep_get_lockless(pte); entry = softleaf_from_pte(ptent); if (!softleaf_is_swap(entry)) continue; if (swp_type(entry) != type) continue; offset = swp_offset(entry); pte_unmap(pte); pte = NULL; folio = swap_cache_get_folio(entry); if (!folio) { struct vm_fault vmf = { .vma = vma, .address = addr, .real_address = addr, .pmd = pmd, }; folio = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE, &vmf); } if (!folio) { swp_count = READ_ONCE(si->swap_map[offset]); if (swp_count == 0 || swp_count == SWAP_MAP_BAD) continue; return -ENOMEM; } folio_lock(folio); folio_wait_writeback(folio); ret = unuse_pte(vma, pmd, addr, entry, folio); if (ret < 0) { folio_unlock(folio); folio_put(folio); return ret; } folio_free_swap(folio); folio_unlock(folio); folio_put(folio); } while (addr += PAGE_SIZE, addr != end); if (pte) pte_unmap(pte); return 0; } static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud, unsigned long addr, unsigned long end, unsigned int type) { pmd_t *pmd; unsigned long next; int ret; pmd = pmd_offset(pud, addr); do { cond_resched(); next = pmd_addr_end(addr, end); ret = unuse_pte_range(vma, pmd, addr, next, type); if (ret) return ret; } while (pmd++, addr = next, addr != end); return 0; } static inline int unuse_pud_range(struct vm_area_struct *vma, p4d_t *p4d, unsigned long addr, unsigned long end, unsigned int type) { pud_t *pud; unsigned long next; int ret; pud = pud_offset(p4d, addr); do { next = pud_addr_end(addr, end); if (pud_none_or_clear_bad(pud)) continue; ret = unuse_pmd_range(vma, pud, addr, next, type); if (ret) return ret; } while (pud++, addr = next, addr != end); return 0; } static inline int unuse_p4d_range(struct vm_area_struct *vma, pgd_t *pgd, unsigned long addr, unsigned long end, unsigned int type) { p4d_t *p4d; unsigned long next; int ret; p4d = p4d_offset(pgd, addr); do { next = p4d_addr_end(addr, end); if (p4d_none_or_clear_bad(p4d)) continue; ret = unuse_pud_range(vma, p4d, addr, next, type); if (ret) return ret; } while (p4d++, addr = next, addr != end); return 0; } static int unuse_vma(struct vm_area_struct *vma, unsigned int type) { pgd_t *pgd; unsigned long addr, end, next; int ret; addr = vma->vm_start; end = vma->vm_end; pgd = pgd_offset(vma->vm_mm, addr); do { next = pgd_addr_end(addr, end); if (pgd_none_or_clear_bad(pgd)) continue; ret = unuse_p4d_range(vma, pgd, addr, next, type); if (ret) return ret; } 
while (pgd++, addr = next, addr != end); return 0; } static int unuse_mm(struct mm_struct *mm, unsigned int type) { struct vm_area_struct *vma; int ret = 0; VMA_ITERATOR(vmi, mm, 0); mmap_read_lock(mm); if (check_stable_address_space(mm)) goto unlock; for_each_vma(vmi, vma) { if (vma->anon_vma && !is_vm_hugetlb_page(vma)) { ret = unuse_vma(vma, type); if (ret) break; } cond_resched(); } unlock: mmap_read_unlock(mm); return ret; } /* * Scan swap_map from current position to next entry still in use. * Return 0 if there are no inuse entries after prev till end of * the map. */ static unsigned int find_next_to_unuse(struct swap_info_struct *si, unsigned int prev) { unsigned int i; unsigned char count; /* * No need for swap_lock here: we're just looking * for whether an entry is in use, not modifying it; false * hits are okay, and sys_swapoff() has already prevented new * allocations from this area (while holding swap_lock). */ for (i = prev + 1; i < si->max; i++) { count = READ_ONCE(si->swap_map[i]); if (count && swap_count(count) != SWAP_MAP_BAD) break; if ((i % LATENCY_LIMIT) == 0) cond_resched(); } if (i == si->max) i = 0; return i; } static int try_to_unuse(unsigned int type) { struct mm_struct *prev_mm; struct mm_struct *mm; struct list_head *p; int retval = 0; struct swap_info_struct *si = swap_info[type]; struct folio *folio; swp_entry_t entry; unsigned int i; if (!swap_usage_in_pages(si)) goto success; retry: retval = shmem_unuse(type); if (retval) return retval; prev_mm = &init_mm; mmget(prev_mm); spin_lock(&mmlist_lock); p = &init_mm.mmlist; while (swap_usage_in_pages(si) && !signal_pending(current) && (p = p->next) != &init_mm.mmlist) { mm = list_entry(p, struct mm_struct, mmlist); if (!mmget_not_zero(mm)) continue; spin_unlock(&mmlist_lock); mmput(prev_mm); prev_mm = mm; retval = unuse_mm(mm, type); if (retval) { mmput(prev_mm); return retval; } /* * Make sure that we aren't completely killing * interactive performance. */ cond_resched(); spin_lock(&mmlist_lock); } spin_unlock(&mmlist_lock); mmput(prev_mm); i = 0; while (swap_usage_in_pages(si) && !signal_pending(current) && (i = find_next_to_unuse(si, i)) != 0) { entry = swp_entry(type, i); folio = swap_cache_get_folio(entry); if (!folio) continue; /* * It is conceivable that a racing task removed this folio from * swap cache just before we acquired the page lock. The folio * might even be back in swap cache on another swap area. But * that is okay, folio_free_swap() only removes stale folios. */ folio_lock(folio); folio_wait_writeback(folio); folio_free_swap(folio); folio_unlock(folio); folio_put(folio); } /* * Lets check again to see if there are still swap entries in the map. * If yes, we would need to do retry the unuse logic again. * Under global memory pressure, swap entries can be reinserted back * into process space after the mmlist loop above passes over them. * * Limit the number of retries? No: when mmget_not_zero() * above fails, that mm is likely to be freeing swap from * exit_mmap(), which proceeds at its own independent pace; * and even shmem_writeout() could have been preempted after * folio_alloc_swap(), temporarily hiding that swap. It's easy * and robust (though cpu-intensive) just to keep retrying. */ if (swap_usage_in_pages(si)) { if (!signal_pending(current)) goto retry; return -EINTR; } success: /* * Make sure that further cleanups after try_to_unuse() returns happen * after swap_range_free() reduces si->inuse_pages to 0. 
*/ smp_mb(); return 0; } /* * After a successful try_to_unuse, if no swap is now in use, we know * we can empty the mmlist. swap_lock must be held on entry and exit. * Note that mmlist_lock nests inside swap_lock, and an mm must be * added to the mmlist just after page_duplicate - before would be racy. */ static void drain_mmlist(void) { struct list_head *p, *next; unsigned int type; for (type = 0; type < nr_swapfiles; type++) if (swap_usage_in_pages(swap_info[type])) return; spin_lock(&mmlist_lock); list_for_each_safe(p, next, &init_mm.mmlist) list_del_init(p); spin_unlock(&mmlist_lock); } /* * Free all of a swapdev's extent information */ static void destroy_swap_extents(struct swap_info_struct *sis) { while (!RB_EMPTY_ROOT(&sis->swap_extent_root)) { struct rb_node *rb = sis->swap_extent_root.rb_node; struct swap_extent *se = rb_entry(rb, struct swap_extent, rb_node); rb_erase(rb, &sis->swap_extent_root); kfree(se); } if (sis->flags & SWP_ACTIVATED) { struct file *swap_file = sis->swap_file; struct address_space *mapping = swap_file->f_mapping; sis->flags &= ~SWP_ACTIVATED; if (mapping->a_ops->swap_deactivate) mapping->a_ops->swap_deactivate(swap_file); } } /* * Add a block range (and the corresponding page range) into this swapdev's * extent tree. * * This function rather assumes that it is called in ascending page order. */ int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page, unsigned long nr_pages, sector_t start_block) { struct rb_node **link = &sis->swap_extent_root.rb_node, *parent = NULL; struct swap_extent *se; struct swap_extent *new_se; /* * place the new node at the right most since the * function is called in ascending page order. */ while (*link) { parent = *link; link = &parent->rb_right; } if (parent) { se = rb_entry(parent, struct swap_extent, rb_node); BUG_ON(se->start_page + se->nr_pages != start_page); if (se->start_block + se->nr_pages == start_block) { /* Merge it */ se->nr_pages += nr_pages; return 0; } } /* No merge, insert a new extent. */ new_se = kmalloc(sizeof(*se), GFP_KERNEL); if (new_se == NULL) return -ENOMEM; new_se->start_page = start_page; new_se->nr_pages = nr_pages; new_se->start_block = start_block; rb_link_node(&new_se->rb_node, parent, link); rb_insert_color(&new_se->rb_node, &sis->swap_extent_root); return 1; } EXPORT_SYMBOL_GPL(add_swap_extent); /* * A `swap extent' is a simple thing which maps a contiguous range of pages * onto a contiguous range of disk blocks. A rbtree of swap extents is * built at swapon time and is then used at swap_writepage/swap_read_folio * time for locating where on disk a page belongs. * * If the swapfile is an S_ISBLK block device, a single extent is installed. * This is done so that the main operating code can treat S_ISBLK and S_ISREG * swap files identically. * * Whether the swapdev is an S_ISREG file or an S_ISBLK blockdev, the swap * extent rbtree operates in PAGE_SIZE disk blocks. Both S_ISREG and S_ISBLK * swapfiles are handled *identically* after swapon time. * * For S_ISREG swapfiles, setup_swap_extents() will walk all the file's blocks * and will parse them into a rbtree, in PAGE_SIZE chunks. If some stray * blocks are found which do not fall within the PAGE_SIZE alignment * requirements, they are simply tossed out - we will never use those blocks * for swapping. * * For all swap devices we set S_SWAPFILE across the life of the swapon. This * prevents users from writing to the swap device, which will corrupt memory. 
* * The amount of disk space which a single swap extent represents varies. * Typically it is in the 1-4 megabyte range. So we can have hundreds of * extents in the rbtree. - akpm. */ static int setup_swap_extents(struct swap_info_struct *sis, sector_t *span) { struct file *swap_file = sis->swap_file; struct address_space *mapping = swap_file->f_mapping; struct inode *inode = mapping->host; int ret; if (S_ISBLK(inode->i_mode)) { ret = add_swap_extent(sis, 0, sis->max, 0); *span = sis->pages; return ret; } if (mapping->a_ops->swap_activate) { ret = mapping->a_ops->swap_activate(sis, swap_file, span); if (ret < 0) return ret; sis->flags |= SWP_ACTIVATED; if ((sis->flags & SWP_FS_OPS) && sio_pool_init() != 0) { destroy_swap_extents(sis); return -ENOMEM; } return ret; } return generic_swapfile_activate(sis, swap_file, span); } static void setup_swap_info(struct swap_info_struct *si, int prio, unsigned char *swap_map, struct swap_cluster_info *cluster_info, unsigned long *zeromap) { si->prio = prio; /* * the plist prio is negated because plist ordering is * low-to-high, while swap ordering is high-to-low */ si->list.prio = -si->prio; si->avail_list.prio = -si->prio; si->swap_map = swap_map; si->cluster_info = cluster_info; si->zeromap = zeromap; } static void _enable_swap_info(struct swap_info_struct *si) { atomic_long_add(si->pages, &nr_swap_pages); total_swap_pages += si->pages; assert_spin_locked(&swap_lock); plist_add(&si->list, &swap_active_head); /* Add back to available list */ add_to_avail_list(si, true); } static void enable_swap_info(struct swap_info_struct *si, int prio, unsigned char *swap_map, struct swap_cluster_info *cluster_info, unsigned long *zeromap) { spin_lock(&swap_lock); spin_lock(&si->lock); setup_swap_info(si, prio, swap_map, cluster_info, zeromap); spin_unlock(&si->lock); spin_unlock(&swap_lock); /* * Finished initializing swap device, now it's safe to reference it. */ percpu_ref_resurrect(&si->users); spin_lock(&swap_lock); spin_lock(&si->lock); _enable_swap_info(si); spin_unlock(&si->lock); spin_unlock(&swap_lock); } static void reinsert_swap_info(struct swap_info_struct *si) { spin_lock(&swap_lock); spin_lock(&si->lock); setup_swap_info(si, si->prio, si->swap_map, si->cluster_info, si->zeromap); _enable_swap_info(si); spin_unlock(&si->lock); spin_unlock(&swap_lock); } /* * Called after clearing SWP_WRITEOK, ensures cluster_alloc_range * see the updated flags, so there will be no more allocations. */ static void wait_for_allocation(struct swap_info_struct *si) { unsigned long offset; unsigned long end = ALIGN(si->max, SWAPFILE_CLUSTER); struct swap_cluster_info *ci; BUG_ON(si->flags & SWP_WRITEOK); for (offset = 0; offset < end; offset += SWAPFILE_CLUSTER) { ci = swap_cluster_lock(si, offset); swap_cluster_unlock(ci); } } static void free_cluster_info(struct swap_cluster_info *cluster_info, unsigned long maxpages) { struct swap_cluster_info *ci; int i, nr_clusters = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER); if (!cluster_info) return; for (i = 0; i < nr_clusters; i++) { ci = cluster_info + i; /* Cluster with bad marks count will have a remaining table */ spin_lock(&ci->lock); if (rcu_dereference_protected(ci->table, true)) { ci->count = 0; swap_cluster_free_table(ci); } spin_unlock(&ci->lock); } kvfree(cluster_info); } /* * Called after swap device's reference count is dead, so * neither scan nor allocation will use it. 
*/ static void flush_percpu_swap_cluster(struct swap_info_struct *si) { int cpu, i; struct swap_info_struct **pcp_si; for_each_possible_cpu(cpu) { pcp_si = per_cpu_ptr(percpu_swap_cluster.si, cpu); /* * Invalidate the percpu swap cluster cache, si->users * is dead, so no new user will point to it, just flush * any existing user. */ for (i = 0; i < SWAP_NR_ORDERS; i++) cmpxchg(&pcp_si[i], si, NULL); } } SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) { struct swap_info_struct *p = NULL; unsigned char *swap_map; unsigned long *zeromap; struct swap_cluster_info *cluster_info; struct file *swap_file, *victim; struct address_space *mapping; struct inode *inode; struct filename *pathname; unsigned int maxpages; int err, found = 0; if (!capable(CAP_SYS_ADMIN)) return -EPERM; BUG_ON(!current->mm); pathname = getname(specialfile); if (IS_ERR(pathname)) return PTR_ERR(pathname); victim = file_open_name(pathname, O_RDWR|O_LARGEFILE, 0); err = PTR_ERR(victim); if (IS_ERR(victim)) goto out; mapping = victim->f_mapping; spin_lock(&swap_lock); plist_for_each_entry(p, &swap_active_head, list) { if (p->flags & SWP_WRITEOK) { if (p->swap_file->f_mapping == mapping) { found = 1; break; } } } if (!found) { err = -EINVAL; spin_unlock(&swap_lock); goto out_dput; } if (!security_vm_enough_memory_mm(current->mm, p->pages)) vm_unacct_memory(p->pages); else { err = -ENOMEM; spin_unlock(&swap_lock); goto out_dput; } spin_lock(&p->lock); del_from_avail_list(p, true); plist_del(&p->list, &swap_active_head); atomic_long_sub(p->pages, &nr_swap_pages); total_swap_pages -= p->pages; spin_unlock(&p->lock); spin_unlock(&swap_lock); wait_for_allocation(p); set_current_oom_origin(); err = try_to_unuse(p->type); clear_current_oom_origin(); if (err) { /* re-insert swap space back into swap_list */ reinsert_swap_info(p); goto out_dput; } /* * Wait for swap operations protected by get/put_swap_device() * to complete. Because of synchronize_rcu() here, all swap * operations protected by RCU reader side lock (including any * spinlock) will be waited too. This makes it easy to * prevent folio_test_swapcache() and the following swap cache * operations from racing with swapoff. */ percpu_ref_kill(&p->users); synchronize_rcu(); wait_for_completion(&p->comp); flush_work(&p->discard_work); flush_work(&p->reclaim_work); flush_percpu_swap_cluster(p); destroy_swap_extents(p); if (p->flags & SWP_CONTINUED) free_swap_count_continuations(p); if (!(p->flags & SWP_SOLIDSTATE)) atomic_dec(&nr_rotate_swap); mutex_lock(&swapon_mutex); spin_lock(&swap_lock); spin_lock(&p->lock); drain_mmlist(); swap_file = p->swap_file; p->swap_file = NULL; swap_map = p->swap_map; p->swap_map = NULL; zeromap = p->zeromap; p->zeromap = NULL; maxpages = p->max; cluster_info = p->cluster_info; p->max = 0; p->cluster_info = NULL; spin_unlock(&p->lock); spin_unlock(&swap_lock); arch_swap_invalidate_area(p->type); zswap_swapoff(p->type); mutex_unlock(&swapon_mutex); kfree(p->global_cluster); p->global_cluster = NULL; vfree(swap_map); kvfree(zeromap); free_cluster_info(cluster_info, maxpages); /* Destroy swap account information */ swap_cgroup_swapoff(p->type); inode = mapping->host; inode_lock(inode); inode->i_flags &= ~S_SWAPFILE; inode_unlock(inode); filp_close(swap_file, NULL); /* * Clear the SWP_USED flag after all resources are freed so that swapon * can reuse this swap_info in alloc_swap_info() safely. It is ok to * not hold p->lock after we cleared its SWP_WRITEOK. 
*/ spin_lock(&swap_lock); p->flags = 0; spin_unlock(&swap_lock); err = 0; atomic_inc(&proc_poll_event); wake_up_interruptible(&proc_poll_wait); out_dput: filp_close(victim, NULL); out: putname(pathname); return err; } #ifdef CONFIG_PROC_FS static __poll_t swaps_poll(struct file *file, poll_table *wait) { struct seq_file *seq = file->private_data; poll_wait(file, &proc_poll_wait, wait); if (seq->poll_event != atomic_read(&proc_poll_event)) { seq->poll_event = atomic_read(&proc_poll_event); return EPOLLIN | EPOLLRDNORM | EPOLLERR | EPOLLPRI; } return EPOLLIN | EPOLLRDNORM; } /* iterator */ static void *swap_start(struct seq_file *swap, loff_t *pos) { struct swap_info_struct *si; int type; loff_t l = *pos; mutex_lock(&swapon_mutex); if (!l) return SEQ_START_TOKEN; for (type = 0; (si = swap_type_to_info(type)); type++) { if (!(si->flags & SWP_USED) || !si->swap_map) continue; if (!--l) return si; } return NULL; } static void *swap_next(struct seq_file *swap, void *v, loff_t *pos) { struct swap_info_struct *si = v; int type; if (v == SEQ_START_TOKEN) type = 0; else type = si->type + 1; ++(*pos); for (; (si = swap_type_to_info(type)); type++) { if (!(si->flags & SWP_USED) || !si->swap_map) continue; return si; } return NULL; } static void swap_stop(struct seq_file *swap, void *v) { mutex_unlock(&swapon_mutex); } static int swap_show(struct seq_file *swap, void *v) { struct swap_info_struct *si = v; struct file *file; int len; unsigned long bytes, inuse; if (si == SEQ_START_TOKEN) { seq_puts(swap, "Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority\n"); return 0; } bytes = K(si->pages); inuse = K(swap_usage_in_pages(si)); file = si->swap_file; len = seq_file_path(swap, file, " \t\n\\"); seq_printf(swap, "%*s%s\t%lu\t%s%lu\t%s%d\n", len < 40 ? 40 - len : 1, " ", S_ISBLK(file_inode(file)->i_mode) ? "partition" : "file\t", bytes, bytes < 10000000 ? "\t" : "", inuse, inuse < 10000000 ? "\t" : "", si->prio); return 0; } static const struct seq_operations swaps_op = { .start = swap_start, .next = swap_next, .stop = swap_stop, .show = swap_show }; static int swaps_open(struct inode *inode, struct file *file) { struct seq_file *seq; int ret; ret = seq_open(file, &swaps_op); if (ret) return ret; seq = file->private_data; seq->poll_event = atomic_read(&proc_poll_event); return 0; } static const struct proc_ops swaps_proc_ops = { .proc_flags = PROC_ENTRY_PERMANENT, .proc_open = swaps_open, .proc_read = seq_read, .proc_lseek = seq_lseek, .proc_release = seq_release, .proc_poll = swaps_poll, }; static int __init procswaps_init(void) { proc_create("swaps", 0, NULL, &swaps_proc_ops); return 0; } __initcall(procswaps_init); #endif /* CONFIG_PROC_FS */ #ifdef MAX_SWAPFILES_CHECK static int __init max_swapfiles_check(void) { MAX_SWAPFILES_CHECK(); return 0; } late_initcall(max_swapfiles_check); #endif static struct swap_info_struct *alloc_swap_info(void) { struct swap_info_struct *p; struct swap_info_struct *defer = NULL; unsigned int type; p = kvzalloc(sizeof(struct swap_info_struct), GFP_KERNEL); if (!p) return ERR_PTR(-ENOMEM); if (percpu_ref_init(&p->users, swap_users_ref_free, PERCPU_REF_INIT_DEAD, GFP_KERNEL)) { kvfree(p); return ERR_PTR(-ENOMEM); } spin_lock(&swap_lock); for (type = 0; type < nr_swapfiles; type++) { if (!(swap_info[type]->flags & SWP_USED)) break; } if (type >= MAX_SWAPFILES) { spin_unlock(&swap_lock); percpu_ref_exit(&p->users); kvfree(p); return ERR_PTR(-EPERM); } if (type >= nr_swapfiles) { p->type = type; /* * Publish the swap_info_struct after initializing it. 
* Note that kvzalloc() above zeroes all its fields. */ smp_store_release(&swap_info[type], p); /* rcu_assign_pointer() */ nr_swapfiles++; } else { defer = p; p = swap_info[type]; /* * Do not memset this entry: a racing procfs swap_next() * would be relying on p->type to remain valid. */ } p->swap_extent_root = RB_ROOT; plist_node_init(&p->list, 0); plist_node_init(&p->avail_list, 0); p->flags = SWP_USED; spin_unlock(&swap_lock); if (defer) { percpu_ref_exit(&defer->users); kvfree(defer); } spin_lock_init(&p->lock); spin_lock_init(&p->cont_lock); atomic_long_set(&p->inuse_pages, SWAP_USAGE_OFFLIST_BIT); init_completion(&p->comp); return p; } static int claim_swapfile(struct swap_info_struct *si, struct inode *inode) { if (S_ISBLK(inode->i_mode)) { si->bdev = I_BDEV(inode); /* * Zoned block devices contain zones that have a sequential * write only restriction. Hence zoned block devices are not * suitable for swapping. Disallow them here. */ if (bdev_is_zoned(si->bdev)) return -EINVAL; si->flags |= SWP_BLKDEV; } else if (S_ISREG(inode->i_mode)) { si->bdev = inode->i_sb->s_bdev; } return 0; } /* * Find out how many pages are allowed for a single swap device. There * are two limiting factors: * 1) the number of bits for the swap offset in the swp_entry_t type, and * 2) the number of bits in the swap pte, as defined by the different * architectures. * * In order to find the largest possible bit mask, a swap entry with * swap type 0 and swap offset ~0UL is created, encoded to a swap pte, * decoded to a swp_entry_t again, and finally the swap offset is * extracted. * * This will mask all the bits from the initial ~0UL mask that can't * be encoded in either the swp_entry_t or the architecture definition * of a swap pte. */ unsigned long generic_max_swapfile_size(void) { swp_entry_t entry = swp_entry(0, ~0UL); const pte_t pte = softleaf_to_pte(entry); /* * Since the PTE can be an invalid softleaf entry (e.g. the none PTE), * we need to do this manually. */ entry = __pte_to_swp_entry(pte); entry = swp_entry(__swp_type(entry), __swp_offset(entry)); return swp_offset(entry) + 1; } /* Can be overridden by an architecture for additional checks. */ __weak unsigned long arch_max_swapfile_size(void) { return generic_max_swapfile_size(); } static unsigned long read_swap_header(struct swap_info_struct *si, union swap_header *swap_header, struct inode *inode) { int i; unsigned long maxpages; unsigned long swapfilepages; unsigned long last_page; if (memcmp("SWAPSPACE2", swap_header->magic.magic, 10)) { pr_err("Unable to find swap-space signature\n"); return 0; } /* swap partition endianness hack... 
*/ if (swab32(swap_header->info.version) == 1) { swab32s(&swap_header->info.version); swab32s(&swap_header->info.last_page); swab32s(&swap_header->info.nr_badpages); if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES) return 0; for (i = 0; i < swap_header->info.nr_badpages; i++) swab32s(&swap_header->info.badpages[i]); } /* Check the swap header's sub-version */ if (swap_header->info.version != 1) { pr_warn("Unable to handle swap header version %d\n", swap_header->info.version); return 0; } maxpages = swapfile_maximum_size; last_page = swap_header->info.last_page; if (!last_page) { pr_warn("Empty swap-file\n"); return 0; } if (last_page > maxpages) { pr_warn("Truncating oversized swap area, only using %luk out of %luk\n", K(maxpages), K(last_page)); } if (maxpages > last_page) { maxpages = last_page + 1; /* p->max is an unsigned int: don't overflow it */ if ((unsigned int)maxpages == 0) maxpages = UINT_MAX; } if (!maxpages) return 0; swapfilepages = i_size_read(inode) >> PAGE_SHIFT; if (swapfilepages && maxpages > swapfilepages) { pr_warn("Swap area shorter than signature indicates\n"); return 0; } if (swap_header->info.nr_badpages && S_ISREG(inode->i_mode)) return 0; if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES) return 0; return maxpages; } static int setup_swap_map(struct swap_info_struct *si, union swap_header *swap_header, unsigned char *swap_map, unsigned long maxpages) { unsigned long i; swap_map[0] = SWAP_MAP_BAD; /* omit header page */ for (i = 0; i < swap_header->info.nr_badpages; i++) { unsigned int page_nr = swap_header->info.badpages[i]; if (page_nr == 0 || page_nr > swap_header->info.last_page) return -EINVAL; if (page_nr < maxpages) { swap_map[page_nr] = SWAP_MAP_BAD; si->pages--; } } if (!si->pages) { pr_warn("Empty swap-file\n"); return -EINVAL; } return 0; } static struct swap_cluster_info *setup_clusters(struct swap_info_struct *si, union swap_header *swap_header, unsigned long maxpages) { unsigned long nr_clusters = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER); struct swap_cluster_info *cluster_info; int err = -ENOMEM; unsigned long i; cluster_info = kvcalloc(nr_clusters, sizeof(*cluster_info), GFP_KERNEL); if (!cluster_info) goto err; for (i = 0; i < nr_clusters; i++) spin_lock_init(&cluster_info[i].lock); if (!(si->flags & SWP_SOLIDSTATE)) { si->global_cluster = kmalloc(sizeof(*si->global_cluster), GFP_KERNEL); if (!si->global_cluster) goto err; for (i = 0; i < SWAP_NR_ORDERS; i++) si->global_cluster->next[i] = SWAP_ENTRY_INVALID; spin_lock_init(&si->global_cluster_lock); } /* * Mark unusable pages as unavailable. The clusters aren't * marked free yet, so no list operations are involved yet. * * See setup_swap_map(): header page, bad pages, * and the EOF part of the last cluster. 
*/ err = swap_cluster_setup_bad_slot(cluster_info, 0); if (err) goto err; for (i = 0; i < swap_header->info.nr_badpages; i++) { unsigned int page_nr = swap_header->info.badpages[i]; if (page_nr >= maxpages) continue; err = swap_cluster_setup_bad_slot(cluster_info, page_nr); if (err) goto err; } for (i = maxpages; i < round_up(maxpages, SWAPFILE_CLUSTER); i++) { err = swap_cluster_setup_bad_slot(cluster_info, i); if (err) goto err; } INIT_LIST_HEAD(&si->free_clusters); INIT_LIST_HEAD(&si->full_clusters); INIT_LIST_HEAD(&si->discard_clusters); for (i = 0; i < SWAP_NR_ORDERS; i++) { INIT_LIST_HEAD(&si->nonfull_clusters[i]); INIT_LIST_HEAD(&si->frag_clusters[i]); } for (i = 0; i < nr_clusters; i++) { struct swap_cluster_info *ci = &cluster_info[i]; if (ci->count) { ci->flags = CLUSTER_FLAG_NONFULL; list_add_tail(&ci->list, &si->nonfull_clusters[0]); } else { ci->flags = CLUSTER_FLAG_FREE; list_add_tail(&ci->list, &si->free_clusters); } } return cluster_info; err: free_cluster_info(cluster_info, maxpages); return ERR_PTR(err); } SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) { struct swap_info_struct *si; struct filename *name; struct file *swap_file = NULL; struct address_space *mapping; struct dentry *dentry; int prio; int error; union swap_header *swap_header; int nr_extents; sector_t span; unsigned long maxpages; unsigned char *swap_map = NULL; unsigned long *zeromap = NULL; struct swap_cluster_info *cluster_info = NULL; struct folio *folio = NULL; struct inode *inode = NULL; bool inced_nr_rotate_swap = false; if (swap_flags & ~SWAP_FLAGS_VALID) return -EINVAL; if (!capable(CAP_SYS_ADMIN)) return -EPERM; si = alloc_swap_info(); if (IS_ERR(si)) return PTR_ERR(si); INIT_WORK(&si->discard_work, swap_discard_work); INIT_WORK(&si->reclaim_work, swap_reclaim_work); name = getname(specialfile); if (IS_ERR(name)) { error = PTR_ERR(name); name = NULL; goto bad_swap; } swap_file = file_open_name(name, O_RDWR | O_LARGEFILE | O_EXCL, 0); if (IS_ERR(swap_file)) { error = PTR_ERR(swap_file); swap_file = NULL; goto bad_swap; } si->swap_file = swap_file; mapping = swap_file->f_mapping; dentry = swap_file->f_path.dentry; inode = mapping->host; error = claim_swapfile(si, inode); if (unlikely(error)) goto bad_swap; inode_lock(inode); if (d_unlinked(dentry) || cant_mount(dentry)) { error = -ENOENT; goto bad_swap_unlock_inode; } if (IS_SWAPFILE(inode)) { error = -EBUSY; goto bad_swap_unlock_inode; } /* * The swap subsystem needs a major overhaul to support this. * It doesn't work yet so just disable it for now. */ if (mapping_min_folio_order(mapping) > 0) { error = -EINVAL; goto bad_swap_unlock_inode; } /* * Read the swap header. 
*/ if (!mapping->a_ops->read_folio) { error = -EINVAL; goto bad_swap_unlock_inode; } folio = read_mapping_folio(mapping, 0, swap_file); if (IS_ERR(folio)) { error = PTR_ERR(folio); goto bad_swap_unlock_inode; } swap_header = kmap_local_folio(folio, 0); maxpages = read_swap_header(si, swap_header, inode); if (unlikely(!maxpages)) { error = -EINVAL; goto bad_swap_unlock_inode; } si->max = maxpages; si->pages = maxpages - 1; nr_extents = setup_swap_extents(si, &span); if (nr_extents < 0) { error = nr_extents; goto bad_swap_unlock_inode; } if (si->pages != si->max - 1) { pr_err("swap:%u != (max:%u - 1)\n", si->pages, si->max); error = -EINVAL; goto bad_swap_unlock_inode; } maxpages = si->max; /* OK, set up the swap map and apply the bad block list */ swap_map = vzalloc(maxpages); if (!swap_map) { error = -ENOMEM; goto bad_swap_unlock_inode; } error = swap_cgroup_swapon(si->type, maxpages); if (error) goto bad_swap_unlock_inode; error = setup_swap_map(si, swap_header, swap_map, maxpages); if (error) goto bad_swap_unlock_inode; /* * Use kvmalloc_array instead of bitmap_zalloc as the allocation order might * be above MAX_PAGE_ORDER incase of a large swap file. */ zeromap = kvmalloc_array(BITS_TO_LONGS(maxpages), sizeof(long), GFP_KERNEL | __GFP_ZERO); if (!zeromap) { error = -ENOMEM; goto bad_swap_unlock_inode; } if (si->bdev && bdev_stable_writes(si->bdev)) si->flags |= SWP_STABLE_WRITES; if (si->bdev && bdev_synchronous(si->bdev)) si->flags |= SWP_SYNCHRONOUS_IO; if (si->bdev && bdev_nonrot(si->bdev)) { si->flags |= SWP_SOLIDSTATE; } else { atomic_inc(&nr_rotate_swap); inced_nr_rotate_swap = true; } cluster_info = setup_clusters(si, swap_header, maxpages); if (IS_ERR(cluster_info)) { error = PTR_ERR(cluster_info); cluster_info = NULL; goto bad_swap_unlock_inode; } if ((swap_flags & SWAP_FLAG_DISCARD) && si->bdev && bdev_max_discard_sectors(si->bdev)) { /* * When discard is enabled for swap with no particular * policy flagged, we set all swap discard flags here in * order to sustain backward compatibility with older * swapon(8) releases. */ si->flags |= (SWP_DISCARDABLE | SWP_AREA_DISCARD | SWP_PAGE_DISCARD); /* * By flagging sys_swapon, a sysadmin can tell us to * either do single-time area discards only, or to just * perform discards for released swap page-clusters. * Now it's time to adjust the p->flags accordingly. */ if (swap_flags & SWAP_FLAG_DISCARD_ONCE) si->flags &= ~SWP_PAGE_DISCARD; else if (swap_flags & SWAP_FLAG_DISCARD_PAGES) si->flags &= ~SWP_AREA_DISCARD; /* issue a swapon-time discard if it's still required */ if (si->flags & SWP_AREA_DISCARD) { int err = discard_swap(si); if (unlikely(err)) pr_err("swapon: discard_swap(%p): %d\n", si, err); } } error = zswap_swapon(si->type, maxpages); if (error) goto bad_swap_unlock_inode; /* * Flush any pending IO and dirty mappings before we start using this * swap device. */ inode->i_flags |= S_SWAPFILE; error = inode_drain_writes(inode); if (error) { inode->i_flags &= ~S_SWAPFILE; goto free_swap_zswap; } mutex_lock(&swapon_mutex); prio = DEF_SWAP_PRIO; if (swap_flags & SWAP_FLAG_PREFER) prio = swap_flags & SWAP_FLAG_PRIO_MASK; enable_swap_info(si, prio, swap_map, cluster_info, zeromap); pr_info("Adding %uk swap on %s. Priority:%d extents:%d across:%lluk %s%s%s%s\n", K(si->pages), name->name, si->prio, nr_extents, K((unsigned long long)span), (si->flags & SWP_SOLIDSTATE) ? "SS" : "", (si->flags & SWP_DISCARDABLE) ? "D" : "", (si->flags & SWP_AREA_DISCARD) ? "s" : "", (si->flags & SWP_PAGE_DISCARD) ? 
"c" : ""); mutex_unlock(&swapon_mutex); atomic_inc(&proc_poll_event); wake_up_interruptible(&proc_poll_wait); error = 0; goto out; free_swap_zswap: zswap_swapoff(si->type); bad_swap_unlock_inode: inode_unlock(inode); bad_swap: kfree(si->global_cluster); si->global_cluster = NULL; inode = NULL; destroy_swap_extents(si); swap_cgroup_swapoff(si->type); spin_lock(&swap_lock); si->swap_file = NULL; si->flags = 0; spin_unlock(&swap_lock); vfree(swap_map); kvfree(zeromap); if (cluster_info) free_cluster_info(cluster_info, maxpages); if (inced_nr_rotate_swap) atomic_dec(&nr_rotate_swap); if (swap_file) filp_close(swap_file, NULL); out: if (!IS_ERR_OR_NULL(folio)) folio_release_kmap(folio, swap_header); if (name) putname(name); if (inode) inode_unlock(inode); return error; } void si_swapinfo(struct sysinfo *val) { unsigned int type; unsigned long nr_to_be_unused = 0; spin_lock(&swap_lock); for (type = 0; type < nr_swapfiles; type++) { struct swap_info_struct *si = swap_info[type]; if ((si->flags & SWP_USED) && !(si->flags & SWP_WRITEOK)) nr_to_be_unused += swap_usage_in_pages(si); } val->freeswap = atomic_long_read(&nr_swap_pages) + nr_to_be_unused; val->totalswap = total_swap_pages + nr_to_be_unused; spin_unlock(&swap_lock); } /* * Verify that nr swap entries are valid and increment their swap map counts. * * Returns error code in following case. * - success -> 0 * - swp_entry is invalid -> EINVAL * - swap-cache reference is requested but there is already one. -> EEXIST * - swap-cache reference is requested but the entry is not used. -> ENOENT * - swap-mapped reference requested but needs continued swap count. -> ENOMEM */ static int __swap_duplicate(swp_entry_t entry, unsigned char usage, int nr) { struct swap_info_struct *si; struct swap_cluster_info *ci; unsigned long offset; unsigned char count; unsigned char has_cache; int err, i; si = swap_entry_to_info(entry); if (WARN_ON_ONCE(!si)) { pr_err("%s%08lx\n", Bad_file, entry.val); return -EINVAL; } offset = swp_offset(entry); VM_WARN_ON(nr > SWAPFILE_CLUSTER - offset % SWAPFILE_CLUSTER); VM_WARN_ON(usage == 1 && nr > 1); ci = swap_cluster_lock(si, offset); err = 0; for (i = 0; i < nr; i++) { count = si->swap_map[offset + i]; /* * swapin_readahead() doesn't check if a swap entry is valid, so the * swap entry could be SWAP_MAP_BAD. Check here with lock held. */ if (unlikely(swap_count(count) == SWAP_MAP_BAD)) { err = -ENOENT; goto unlock_out; } has_cache = count & SWAP_HAS_CACHE; count &= ~SWAP_HAS_CACHE; if (!count && !has_cache) { err = -ENOENT; } else if (usage == SWAP_HAS_CACHE) { if (has_cache) err = -EEXIST; } else if ((count & ~COUNT_CONTINUED) > SWAP_MAP_MAX) { err = -EINVAL; } if (err) goto unlock_out; } for (i = 0; i < nr; i++) { count = si->swap_map[offset + i]; has_cache = count & SWAP_HAS_CACHE; count &= ~SWAP_HAS_CACHE; if (usage == SWAP_HAS_CACHE) has_cache = SWAP_HAS_CACHE; else if ((count & ~COUNT_CONTINUED) < SWAP_MAP_MAX) count += usage; else if (swap_count_continued(si, offset + i, count)) count = COUNT_CONTINUED; else { /* * Don't need to rollback changes, because if * usage == 1, there must be nr == 1. */ err = -ENOMEM; goto unlock_out; } WRITE_ONCE(si->swap_map[offset + i], count | has_cache); } unlock_out: swap_cluster_unlock(ci); return err; } /* * Help swapoff by noting that swap entry belongs to shmem/tmpfs * (in which case its reference count is never incremented). */ void swap_shmem_alloc(swp_entry_t entry, int nr) { __swap_duplicate(entry, SWAP_MAP_SHMEM, nr); } /* * Increase reference count of swap entry by 1. 
* Returns 0 for success, or -ENOMEM if a swap_count_continuation is required * but could not be atomically allocated. Returns 0, just as if it succeeded, * if __swap_duplicate() fails for another reason (-EINVAL or -ENOENT), which * might occur if a page table entry has got corrupted. */ int swap_duplicate(swp_entry_t entry) { int err = 0; while (!err && __swap_duplicate(entry, 1, 1) == -ENOMEM) err = add_swap_count_continuation(entry, GFP_ATOMIC); return err; } /* * @entry: first swap entry from which we allocate nr swap cache. * * Called when allocating swap cache for existing swap entries, * This can return error codes. Returns 0 at success. * -EEXIST means there is a swap cache. * Note: return code is different from swap_duplicate(). */ int swapcache_prepare(swp_entry_t entry, int nr) { return __swap_duplicate(entry, SWAP_HAS_CACHE, nr); } /* * Caller should ensure entries belong to the same folio so * the entries won't span cross cluster boundary. */ void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry, int nr) { swap_entries_put_cache(si, entry, nr); } /* * add_swap_count_continuation - called when a swap count is duplicated * beyond SWAP_MAP_MAX, it allocates a new page and links that to the entry's * page of the original vmalloc'ed swap_map, to hold the continuation count * (for that entry and for its neighbouring PAGE_SIZE swap entries). Called * again when count is duplicated beyond SWAP_MAP_MAX * SWAP_CONT_MAX, etc. * * These continuation pages are seldom referenced: the common paths all work * on the original swap_map, only referring to a continuation page when the * low "digit" of a count is incremented or decremented through SWAP_MAP_MAX. * * add_swap_count_continuation(, GFP_ATOMIC) can be called while holding * page table locks; if it fails, add_swap_count_continuation(, GFP_KERNEL) * can be called after dropping locks. */ int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask) { struct swap_info_struct *si; struct swap_cluster_info *ci; struct page *head; struct page *page; struct page *list_page; pgoff_t offset; unsigned char count; int ret = 0; /* * When debugging, it's easier to use __GFP_ZERO here; but it's better * for latency not to zero a page while GFP_ATOMIC and holding locks. */ page = alloc_page(gfp_mask | __GFP_HIGHMEM); si = get_swap_device(entry); if (!si) { /* * An acceptable race has occurred since the failing * __swap_duplicate(): the swap device may be swapoff */ goto outer; } offset = swp_offset(entry); ci = swap_cluster_lock(si, offset); count = swap_count(si->swap_map[offset]); if ((count & ~COUNT_CONTINUED) != SWAP_MAP_MAX) { /* * The higher the swap count, the more likely it is that tasks * will race to add swap count continuation: we need to avoid * over-provisioning. */ goto out; } if (!page) { ret = -ENOMEM; goto out; } head = vmalloc_to_page(si->swap_map + offset); offset &= ~PAGE_MASK; spin_lock(&si->cont_lock); /* * Page allocation does not initialize the page's lru field, * but it does always reset its private field. */ if (!page_private(head)) { BUG_ON(count & COUNT_CONTINUED); INIT_LIST_HEAD(&head->lru); set_page_private(head, SWP_CONTINUED); si->flags |= SWP_CONTINUED; } list_for_each_entry(list_page, &head->lru, lru) { unsigned char *map; /* * If the previous map said no continuation, but we've found * a continuation page, free our allocation and use this one. 
*/ if (!(count & COUNT_CONTINUED)) goto out_unlock_cont; map = kmap_local_page(list_page) + offset; count = *map; kunmap_local(map); /* * If this continuation count now has some space in it, * free our allocation and use this one. */ if ((count & ~COUNT_CONTINUED) != SWAP_CONT_MAX) goto out_unlock_cont; } list_add_tail(&page->lru, &head->lru); page = NULL; /* now it's attached, don't free it */ out_unlock_cont: spin_unlock(&si->cont_lock); out: swap_cluster_unlock(ci); put_swap_device(si); outer: if (page) __free_page(page); return ret; } /* * swap_count_continued - when the original swap_map count is incremented * from SWAP_MAP_MAX, check if there is already a continuation page to carry * into, carry if so, or else fail until a new continuation page is allocated; * when the original swap_map count is decremented from 0 with continuation, * borrow from the continuation and report whether it still holds more. * Called while __swap_duplicate() or caller of swap_entry_put_locked() * holds cluster lock. */ static bool swap_count_continued(struct swap_info_struct *si, pgoff_t offset, unsigned char count) { struct page *head; struct page *page; unsigned char *map; bool ret; head = vmalloc_to_page(si->swap_map + offset); if (page_private(head) != SWP_CONTINUED) { BUG_ON(count & COUNT_CONTINUED); return false; /* need to add count continuation */ } spin_lock(&si->cont_lock); offset &= ~PAGE_MASK; page = list_next_entry(head, lru); map = kmap_local_page(page) + offset; if (count == SWAP_MAP_MAX) /* initial increment from swap_map */ goto init_map; /* jump over SWAP_CONT_MAX checks */ if (count == (SWAP_MAP_MAX | COUNT_CONTINUED)) { /* incrementing */ /* * Think of how you add 1 to 999 */ while (*map == (SWAP_CONT_MAX | COUNT_CONTINUED)) { kunmap_local(map); page = list_next_entry(page, lru); BUG_ON(page == head); map = kmap_local_page(page) + offset; } if (*map == SWAP_CONT_MAX) { kunmap_local(map); page = list_next_entry(page, lru); if (page == head) { ret = false; /* add count continuation */ goto out; } map = kmap_local_page(page) + offset; init_map: *map = 0; /* we didn't zero the page */ } *map += 1; kunmap_local(map); while ((page = list_prev_entry(page, lru)) != head) { map = kmap_local_page(page) + offset; *map = COUNT_CONTINUED; kunmap_local(map); } ret = true; /* incremented */ } else { /* decrementing */ /* * Think of how you subtract 1 from 1000 */ BUG_ON(count != COUNT_CONTINUED); while (*map == COUNT_CONTINUED) { kunmap_local(map); page = list_next_entry(page, lru); BUG_ON(page == head); map = kmap_local_page(page) + offset; } BUG_ON(*map == 0); *map -= 1; if (*map == 0) count = 0; kunmap_local(map); while ((page = list_prev_entry(page, lru)) != head) { map = kmap_local_page(page) + offset; *map = SWAP_CONT_MAX | count; count = COUNT_CONTINUED; kunmap_local(map); } ret = count == COUNT_CONTINUED; } out: spin_unlock(&si->cont_lock); return ret; } /* * free_swap_count_continuations - swapoff free all the continuation pages * appended to the swap_map, after swap_map is quiesced, before vfree'ing it. 
*/ static void free_swap_count_continuations(struct swap_info_struct *si) { pgoff_t offset; for (offset = 0; offset < si->max; offset += PAGE_SIZE) { struct page *head; head = vmalloc_to_page(si->swap_map + offset); if (page_private(head)) { struct page *page, *next; list_for_each_entry_safe(page, next, &head->lru, lru) { list_del(&page->lru); __free_page(page); } } } } #if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP) static bool __has_usable_swap(void) { return !plist_head_empty(&swap_active_head); } void __folio_throttle_swaprate(struct folio *folio, gfp_t gfp) { struct swap_info_struct *si; if (!(gfp & __GFP_IO)) return; if (!__has_usable_swap()) return; if (!blk_cgroup_congested()) return; /* * We've already scheduled a throttle, avoid taking the global swap * lock. */ if (current->throttle_disk) return; spin_lock(&swap_avail_lock); plist_for_each_entry(si, &swap_avail_head, avail_list) { if (si->bdev) { blkcg_schedule_throttle(si->bdev->bd_disk, true); break; } } spin_unlock(&swap_avail_lock); } #endif static int __init swapfile_init(void) { swapfile_maximum_size = arch_max_swapfile_size(); /* * Once a cluster is freed, it's swap table content is read * only, and all swap cache readers (swap_cache_*) verifies * the content before use. So it's safe to use RCU slab here. */ if (!SWP_TABLE_USE_PAGE) swap_table_cachep = kmem_cache_create("swap_table", sizeof(struct swap_table), 0, SLAB_PANIC | SLAB_TYPESAFE_BY_RCU, NULL); #ifdef CONFIG_MIGRATION if (swapfile_maximum_size >= (1UL << SWP_MIG_TOTAL_BITS)) swap_migration_ad_supported = true; #endif /* CONFIG_MIGRATION */ return 0; } subsys_initcall(swapfile_init);
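The continuation-count machinery above (add_swap_count_continuation() / swap_count_continued()) effectively stores each swap entry's reference count as a multi-digit number: the low digit lives in swap_map[] and every further digit lives at the same offset inside a chained continuation page, with COUNT_CONTINUED marking entries that have higher digits and the pages walked via the head page's lru list. The toy userspace model below is only an illustration of that carry/borrow behaviour ("add 1 to 999", "subtract 1 from 1000"), not kernel code; the digit limits and helper names (DEMO_LOW_MAX, DEMO_CONT_MAX, demo_count_inc, demo_count_dec) are made-up stand-ins for SWAP_MAP_MAX and SWAP_CONT_MAX.

#include <stdbool.h>
#include <stdio.h>

#define DEMO_DIGITS	4	/* low digit + up to three "continuation" digits */
#define DEMO_LOW_MAX	9	/* stand-in for SWAP_MAP_MAX (low digit limit) */
#define DEMO_CONT_MAX	9	/* stand-in for SWAP_CONT_MAX (continuation digit limit) */

/* Increment the multi-digit count; the carry ripples upward ("999 + 1"). */
static bool demo_count_inc(unsigned char digit[DEMO_DIGITS])
{
	int i;

	for (i = 0; i < DEMO_DIGITS; i++) {
		unsigned char max = i ? DEMO_CONT_MAX : DEMO_LOW_MAX;

		if (digit[i] < max) {
			digit[i]++;
			return true;
		}
		digit[i] = 0;	/* digit saturated: reset it and carry into the next one */
	}
	return false;		/* all digits full: a new continuation "page" would be needed */
}

/* Decrement the multi-digit count; the borrow ripples downward ("1000 - 1"). */
static bool demo_count_dec(unsigned char digit[DEMO_DIGITS])
{
	int i;

	for (i = 0; i < DEMO_DIGITS; i++) {
		if (digit[i] > 0) {
			digit[i]--;
			break;
		}
	}
	if (i == DEMO_DIGITS)
		return false;	/* count was already zero */
	while (i-- > 0)		/* lower digits that were zero wrap to their maximum */
		digit[i] = i ? DEMO_CONT_MAX : DEMO_LOW_MAX;
	return true;
}

int main(void)
{
	unsigned char count[DEMO_DIGITS] = { DEMO_LOW_MAX, 0, 0, 0 };

	demo_count_inc(count);	/* carries into the first continuation digit: 0,1,0,0 */
	demo_count_dec(count);	/* borrows back: 9,0,0,0 */
	printf("%u %u\n", count[0], count[1]);
	return 0;
}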
// SPDX-License-Identifier: GPL-2.0+ /* * A virtual v4l2-mem2mem example device. * * This is a virtual device driver for testing mem-to-mem vb2 framework. * It simulates a device that uses memory buffers for both source and * destination, processes the data and issues an "irq" (simulated by a delayed * workqueue). * The device is capable of multi-instance, multi-buffer-per-transaction * operation (via the mem2mem framework). * * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd. * Pawel Osciak, <pawel@osciak.com> * Marek Szyprowski, <m.szyprowski@samsung.com> */ #include <linux/module.h> #include <linux/delay.h> #include <linux/fs.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/platform_device.h> #include <media/v4l2-mem2mem.h> #include <media/v4l2-device.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-event.h> #include <media/videobuf2-vmalloc.h> #include <media/v4l2-common.h> MODULE_DESCRIPTION("Virtual device for mem2mem framework testing"); MODULE_AUTHOR("Pawel Osciak, <pawel@osciak.com>"); MODULE_LICENSE("GPL"); MODULE_VERSION("0.2"); MODULE_ALIAS("mem2mem_testdev"); static unsigned int debug; module_param(debug, uint, 0644); MODULE_PARM_DESC(debug, "debug level"); /* Default transaction time in msec */ static unsigned int default_transtime = 40; /* Max 25 fps */ module_param(default_transtime, uint, 0644); MODULE_PARM_DESC(default_transtime, "default transaction time in ms"); static unsigned int multiplanar = 1; module_param(multiplanar, uint, 0644); MODULE_PARM_DESC(multiplanar, "1 (default) creates a single planar device, 2 creates multiplanar device."); #define MIN_W 32 #define MIN_H 32 #define MAX_W 640 #define MAX_H 480 /* Pixel alignment for non-bayer formats */ #define WIDTH_ALIGN 2 #define HEIGHT_ALIGN 1 /* Pixel alignment for bayer formats */ #define BAYER_WIDTH_ALIGN 2 #define BAYER_HEIGHT_ALIGN 2 /* Flags that indicate a format can be used for capture/output */ #define MEM2MEM_CAPTURE BIT(0) #define MEM2MEM_OUTPUT BIT(1) #define MEM2MEM_NAME "vim2m" /* Per queue */ #define MEM2MEM_DEF_NUM_BUFS VIDEO_MAX_FRAME /* In bytes, per queue */ #define MEM2MEM_VID_MEM_LIMIT (16 * 1024 * 1024) /* Flags that indicate processing mode */ #define MEM2MEM_HFLIP BIT(0) #define MEM2MEM_VFLIP BIT(1) #define dprintk(dev, lvl, fmt, arg...)
\ v4l2_dbg(lvl, debug, &(dev)->v4l2_dev, "%s: " fmt, __func__, ## arg) static void vim2m_dev_release(struct device *dev) {} static struct platform_device vim2m_pdev = { .name = MEM2MEM_NAME, .dev.release = vim2m_dev_release, }; struct vim2m_fmt { u32 fourcc; int depth; /* Types the format can be used for */ u32 types; }; static struct vim2m_fmt formats[] = { { .fourcc = V4L2_PIX_FMT_RGB565, /* rrrrrggg gggbbbbb */ .depth = 16, .types = MEM2MEM_CAPTURE | MEM2MEM_OUTPUT, }, { .fourcc = V4L2_PIX_FMT_RGB565X, /* gggbbbbb rrrrrggg */ .depth = 16, .types = MEM2MEM_CAPTURE | MEM2MEM_OUTPUT, }, { .fourcc = V4L2_PIX_FMT_RGB24, .depth = 24, .types = MEM2MEM_CAPTURE | MEM2MEM_OUTPUT, }, { .fourcc = V4L2_PIX_FMT_BGR24, .depth = 24, .types = MEM2MEM_CAPTURE | MEM2MEM_OUTPUT, }, { .fourcc = V4L2_PIX_FMT_YUYV, .depth = 16, .types = MEM2MEM_CAPTURE, }, { .fourcc = V4L2_PIX_FMT_SBGGR8, .depth = 8, .types = MEM2MEM_CAPTURE, }, { .fourcc = V4L2_PIX_FMT_SGBRG8, .depth = 8, .types = MEM2MEM_CAPTURE, }, { .fourcc = V4L2_PIX_FMT_SGRBG8, .depth = 8, .types = MEM2MEM_CAPTURE, }, { .fourcc = V4L2_PIX_FMT_SRGGB8, .depth = 8, .types = MEM2MEM_CAPTURE, }, }; #define NUM_FORMATS ARRAY_SIZE(formats) /* Per-queue, driver-specific private data */ struct vim2m_q_data { unsigned int width; unsigned int height; unsigned int num_mem_planes; unsigned int sizeimage[VIDEO_MAX_PLANES]; unsigned int sequence; struct vim2m_fmt *fmt; }; enum { V4L2_M2M_SRC = 0, V4L2_M2M_DST = 1, }; #define V4L2_CID_TRANS_TIME_MSEC (V4L2_CID_USER_BASE + 0x1000) #define V4L2_CID_TRANS_NUM_BUFS (V4L2_CID_USER_BASE + 0x1001) static struct vim2m_fmt *find_format(u32 fourcc) { struct vim2m_fmt *fmt; unsigned int k; for (k = 0; k < NUM_FORMATS; k++) { fmt = &formats[k]; if (fmt->fourcc == fourcc) break; } if (k == NUM_FORMATS) return NULL; return &formats[k]; } static void get_alignment(u32 fourcc, unsigned int *walign, unsigned int *halign) { switch (fourcc) { case V4L2_PIX_FMT_SBGGR8: case V4L2_PIX_FMT_SGBRG8: case V4L2_PIX_FMT_SGRBG8: case V4L2_PIX_FMT_SRGGB8: *walign = BAYER_WIDTH_ALIGN; *halign = BAYER_HEIGHT_ALIGN; return; default: *walign = WIDTH_ALIGN; *halign = HEIGHT_ALIGN; return; } } struct vim2m_dev { struct v4l2_device v4l2_dev; struct video_device vfd; struct media_device mdev; atomic_t num_inst; struct mutex dev_mutex; struct v4l2_m2m_dev *m2m_dev; bool multiplanar; }; struct vim2m_ctx { struct v4l2_fh fh; struct vim2m_dev *dev; struct v4l2_ctrl_handler hdl; /* Processed buffers in this transaction */ u8 num_processed; /* Transaction length (i.e. how many buffers per transaction) */ u32 translen; /* Transaction time (i.e. 
simulated processing time) in milliseconds */ u32 transtime; struct mutex vb_mutex; struct delayed_work work_run; /* Abort requested by m2m */ int aborting; /* Processing mode */ int mode; enum v4l2_colorspace colorspace; enum v4l2_ycbcr_encoding ycbcr_enc; enum v4l2_xfer_func xfer_func; enum v4l2_quantization quant; /* Source and destination queue data */ struct vim2m_q_data q_data[2]; }; static inline struct vim2m_ctx *file2ctx(struct file *file) { return container_of(file_to_v4l2_fh(file), struct vim2m_ctx, fh); } static struct vim2m_q_data *get_q_data(struct vim2m_ctx *ctx, enum v4l2_buf_type type) { switch (type) { case V4L2_BUF_TYPE_VIDEO_OUTPUT: case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: return &ctx->q_data[V4L2_M2M_SRC]; case V4L2_BUF_TYPE_VIDEO_CAPTURE: case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: return &ctx->q_data[V4L2_M2M_DST]; default: return NULL; } } static const char *type_name(enum v4l2_buf_type type) { switch (type) { case V4L2_BUF_TYPE_VIDEO_OUTPUT: case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: return "Output"; case V4L2_BUF_TYPE_VIDEO_CAPTURE: case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: return "Capture"; default: return "Invalid"; } } static void copy_line(struct vim2m_q_data *q_data_out, u8 *src, u8 *dst, bool reverse) { int x, depth = q_data_out->fmt->depth >> 3; if (!reverse) { memcpy(dst, src, q_data_out->width * depth); } else { for (x = 0; x < q_data_out->width >> 1; x++) { memcpy(dst, src, depth); memcpy(dst + depth, src - depth, depth); src -= depth << 1; dst += depth << 1; } return; } } static void copy_two_pixels(struct vim2m_q_data *q_data_in, struct vim2m_q_data *q_data_out, u8 *src[2], u8 **dst, int ypos, bool reverse) { struct vim2m_fmt *out = q_data_out->fmt; struct vim2m_fmt *in = q_data_in->fmt; u8 _r[2], _g[2], _b[2], *r, *g, *b; int i; /* Step 1: read two consecutive pixels from src pointer */ r = _r; g = _g; b = _b; switch (in->fourcc) { case V4L2_PIX_FMT_RGB565: /* rrrrrggg gggbbbbb */ for (i = 0; i < 2; i++) { u16 pix = le16_to_cpu(*(__le16 *)(src[i])); *r++ = (u8)(((pix & 0xf800) >> 11) << 3) | 0x07; *g++ = (u8)((((pix & 0x07e0) >> 5)) << 2) | 0x03; *b++ = (u8)((pix & 0x1f) << 3) | 0x07; } break; case V4L2_PIX_FMT_RGB565X: /* gggbbbbb rrrrrggg */ for (i = 0; i < 2; i++) { u16 pix = be16_to_cpu(*(__be16 *)(src[i])); *r++ = (u8)(((pix & 0xf800) >> 11) << 3) | 0x07; *g++ = (u8)((((pix & 0x07e0) >> 5)) << 2) | 0x03; *b++ = (u8)((pix & 0x1f) << 3) | 0x07; } break; default: case V4L2_PIX_FMT_RGB24: for (i = 0; i < 2; i++) { *r++ = src[i][0]; *g++ = src[i][1]; *b++ = src[i][2]; } break; case V4L2_PIX_FMT_BGR24: for (i = 0; i < 2; i++) { *b++ = src[i][0]; *g++ = src[i][1]; *r++ = src[i][2]; } break; } /* Step 2: store two consecutive points, reversing them if needed */ r = _r; g = _g; b = _b; switch (out->fourcc) { case V4L2_PIX_FMT_RGB565: /* rrrrrggg gggbbbbb */ for (i = 0; i < 2; i++) { u16 pix; __le16 *dst_pix = (__le16 *)*dst; pix = ((*r << 8) & 0xf800) | ((*g << 3) & 0x07e0) | (*b >> 3); *dst_pix = cpu_to_le16(pix); *dst += 2; } return; case V4L2_PIX_FMT_RGB565X: /* gggbbbbb rrrrrggg */ for (i = 0; i < 2; i++) { u16 pix; __be16 *dst_pix = (__be16 *)*dst; pix = ((*r << 8) & 0xf800) | ((*g << 3) & 0x07e0) | (*b >> 3); *dst_pix = cpu_to_be16(pix); *dst += 2; } return; case V4L2_PIX_FMT_RGB24: for (i = 0; i < 2; i++) { *(*dst)++ = *r++; *(*dst)++ = *g++; *(*dst)++ = *b++; } return; case V4L2_PIX_FMT_BGR24: for (i = 0; i < 2; i++) { *(*dst)++ = *b++; *(*dst)++ = *g++; *(*dst)++ = *r++; } return; case V4L2_PIX_FMT_YUYV: default: { u8 y, y1, u, v; y = ((8453 * (*r) + 
16594 * (*g) + 3223 * (*b) + 524288) >> 15); u = ((-4878 * (*r) - 9578 * (*g) + 14456 * (*b) + 4210688) >> 15); v = ((14456 * (*r++) - 12105 * (*g++) - 2351 * (*b++) + 4210688) >> 15); y1 = ((8453 * (*r) + 16594 * (*g) + 3223 * (*b) + 524288) >> 15); *(*dst)++ = y; *(*dst)++ = u; *(*dst)++ = y1; *(*dst)++ = v; return; } case V4L2_PIX_FMT_SBGGR8: if (!(ypos & 1)) { *(*dst)++ = *b; *(*dst)++ = *++g; } else { *(*dst)++ = *g; *(*dst)++ = *++r; } return; case V4L2_PIX_FMT_SGBRG8: if (!(ypos & 1)) { *(*dst)++ = *g; *(*dst)++ = *++b; } else { *(*dst)++ = *r; *(*dst)++ = *++g; } return; case V4L2_PIX_FMT_SGRBG8: if (!(ypos & 1)) { *(*dst)++ = *g; *(*dst)++ = *++r; } else { *(*dst)++ = *b; *(*dst)++ = *++g; } return; case V4L2_PIX_FMT_SRGGB8: if (!(ypos & 1)) { *(*dst)++ = *r; *(*dst)++ = *++g; } else { *(*dst)++ = *g; *(*dst)++ = *++b; } return; } } static int device_process(struct vim2m_ctx *ctx, struct vb2_v4l2_buffer *in_vb, struct vb2_v4l2_buffer *out_vb) { struct vim2m_dev *dev = ctx->dev; struct vim2m_q_data *q_data_in, *q_data_out; u8 *p_in, *p_line, *p_in_x[2], *p, *p_out; unsigned int width, height, bytesperline, bytes_per_pixel; unsigned int x, y, y_in, y_out, x_int, x_fract, x_err, x_offset; int start, end, step; q_data_in = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT); if (!q_data_in) return 0; bytesperline = (q_data_in->width * q_data_in->fmt->depth) >> 3; bytes_per_pixel = q_data_in->fmt->depth >> 3; q_data_out = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE); if (!q_data_out) return 0; /* As we're doing scaling, use the output dimensions here */ height = q_data_out->height; width = q_data_out->width; p_in = vb2_plane_vaddr(&in_vb->vb2_buf, 0); p_out = vb2_plane_vaddr(&out_vb->vb2_buf, 0); if (!p_in || !p_out) { v4l2_err(&dev->v4l2_dev, "Acquiring kernel pointers to buffers failed\n"); return -EFAULT; } out_vb->sequence = q_data_out->sequence++; in_vb->sequence = q_data_in->sequence++; v4l2_m2m_buf_copy_metadata(in_vb, out_vb); if (ctx->mode & MEM2MEM_VFLIP) { start = height - 1; end = -1; step = -1; } else { start = 0; end = height; step = 1; } y_out = 0; /* * When format and resolution are identical, * we can use a faster copy logic */ if (q_data_in->fmt->fourcc == q_data_out->fmt->fourcc && q_data_in->width == q_data_out->width && q_data_in->height == q_data_out->height) { for (y = start; y != end; y += step, y_out++) { p = p_in + (y * bytesperline); if (ctx->mode & MEM2MEM_HFLIP) p += bytesperline - (q_data_in->fmt->depth >> 3); copy_line(q_data_out, p, p_out, ctx->mode & MEM2MEM_HFLIP); p_out += bytesperline; } return 0; } /* Slower algorithm with format conversion, hflip, vflip and scaler */ /* To speed scaler up, use Bresenham for X dimension */ x_int = q_data_in->width / q_data_out->width; x_fract = q_data_in->width % q_data_out->width; for (y = start; y != end; y += step, y_out++) { y_in = (y * q_data_in->height) / q_data_out->height; x_offset = 0; x_err = 0; p_line = p_in + (y_in * bytesperline); if (ctx->mode & MEM2MEM_HFLIP) p_line += bytesperline - (q_data_in->fmt->depth >> 3); p_in_x[0] = p_line; for (x = 0; x < width >> 1; x++) { x_offset += x_int; x_err += x_fract; if (x_err > width) { x_offset++; x_err -= width; } if (ctx->mode & MEM2MEM_HFLIP) p_in_x[1] = p_line - x_offset * bytes_per_pixel; else p_in_x[1] = p_line + x_offset * bytes_per_pixel; copy_two_pixels(q_data_in, q_data_out, p_in_x, &p_out, y_out, ctx->mode & MEM2MEM_HFLIP); /* Calculate the next p_in_x0 */ x_offset += x_int; x_err += x_fract; if (x_err > width) { x_offset++; x_err -= width; } if (ctx->mode & 
MEM2MEM_HFLIP) p_in_x[0] = p_line - x_offset * bytes_per_pixel; else p_in_x[0] = p_line + x_offset * bytes_per_pixel; } } return 0; } /* * mem2mem callbacks */ /* * job_ready() - check whether an instance is ready to be scheduled to run */ static int job_ready(void *priv) { struct vim2m_ctx *ctx = priv; if (v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) < ctx->translen || v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx) < ctx->translen) { dprintk(ctx->dev, 1, "Not enough buffers available\n"); return 0; } return 1; } static void job_abort(void *priv) { struct vim2m_ctx *ctx = priv; /* Will cancel the transaction in the next interrupt handler */ ctx->aborting = 1; } /* device_run() - prepares and starts the device * * This simulates all the immediate preparations required before starting * a device. This will be called by the framework when it decides to schedule * a particular instance. */ static void device_run(void *priv) { struct vim2m_ctx *ctx = priv; struct vb2_v4l2_buffer *src_buf, *dst_buf; src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx); /* Apply request controls if any */ v4l2_ctrl_request_setup(src_buf->vb2_buf.req_obj.req, &ctx->hdl); device_process(ctx, src_buf, dst_buf); /* Complete request controls if any */ v4l2_ctrl_request_complete(src_buf->vb2_buf.req_obj.req, &ctx->hdl); /* Run delayed work, which simulates a hardware irq */ schedule_delayed_work(&ctx->work_run, msecs_to_jiffies(ctx->transtime)); } static void device_work(struct work_struct *w) { struct vim2m_ctx *curr_ctx; struct vim2m_dev *vim2m_dev; struct vb2_v4l2_buffer *src_vb, *dst_vb; curr_ctx = container_of(w, struct vim2m_ctx, work_run.work); vim2m_dev = curr_ctx->dev; src_vb = v4l2_m2m_src_buf_remove(curr_ctx->fh.m2m_ctx); dst_vb = v4l2_m2m_dst_buf_remove(curr_ctx->fh.m2m_ctx); curr_ctx->num_processed++; v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_DONE); v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_DONE); if (curr_ctx->num_processed == curr_ctx->translen || curr_ctx->aborting) { dprintk(curr_ctx->dev, 2, "Finishing capture buffer fill\n"); curr_ctx->num_processed = 0; v4l2_m2m_job_finish(vim2m_dev->m2m_dev, curr_ctx->fh.m2m_ctx); } else { device_run(curr_ctx); } } /* * video ioctls */ static int vidioc_querycap(struct file *file, void *priv, struct v4l2_capability *cap) { strscpy(cap->driver, MEM2MEM_NAME, sizeof(cap->driver)); strscpy(cap->card, MEM2MEM_NAME, sizeof(cap->card)); snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s", MEM2MEM_NAME); return 0; } static int enum_fmt(struct v4l2_fmtdesc *f, u32 type) { int i, num; struct vim2m_fmt *fmt; num = 0; for (i = 0; i < NUM_FORMATS; ++i) { if (formats[i].types & type) { /* index-th format of type type found ? 
*/ if (num == f->index) break; /* * Correct type but haven't reached our index yet, * just increment per-type index */ ++num; } } if (i < NUM_FORMATS) { /* Format found */ fmt = &formats[i]; f->pixelformat = fmt->fourcc; return 0; } /* Format not found */ return -EINVAL; } static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv, struct v4l2_fmtdesc *f) { return enum_fmt(f, MEM2MEM_CAPTURE); } static int vidioc_enum_fmt_vid_out(struct file *file, void *priv, struct v4l2_fmtdesc *f) { return enum_fmt(f, MEM2MEM_OUTPUT); } static int vidioc_enum_framesizes(struct file *file, void *priv, struct v4l2_frmsizeenum *fsize) { if (fsize->index != 0) return -EINVAL; if (!find_format(fsize->pixel_format)) return -EINVAL; fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE; fsize->stepwise.min_width = MIN_W; fsize->stepwise.min_height = MIN_H; fsize->stepwise.max_width = MAX_W; fsize->stepwise.max_height = MAX_H; get_alignment(fsize->pixel_format, &fsize->stepwise.step_width, &fsize->stepwise.step_height); return 0; } static int vidioc_g_fmt(struct vim2m_ctx *ctx, struct v4l2_format *f) { struct vim2m_q_data *q_data; int ret; q_data = get_q_data(ctx, f->type); if (!q_data) return -EINVAL; ret = v4l2_fill_pixfmt(&f->fmt.pix, q_data->fmt->fourcc, q_data->width, q_data->height); if (ret) return ret; f->fmt.pix.field = V4L2_FIELD_NONE; f->fmt.pix.colorspace = ctx->colorspace; f->fmt.pix.xfer_func = ctx->xfer_func; f->fmt.pix.ycbcr_enc = ctx->ycbcr_enc; f->fmt.pix.quantization = ctx->quant; return 0; } static int vidioc_g_fmt_mplane(struct vim2m_ctx *ctx, struct v4l2_format *f) { struct vim2m_q_data *q_data; int ret; q_data = get_q_data(ctx, f->type); if (!q_data) return -EINVAL; ret = v4l2_fill_pixfmt_mp(&f->fmt.pix_mp, q_data->fmt->fourcc, q_data->width, q_data->height); if (ret) return ret; f->fmt.pix_mp.field = V4L2_FIELD_NONE; f->fmt.pix_mp.colorspace = ctx->colorspace; f->fmt.pix_mp.xfer_func = ctx->xfer_func; f->fmt.pix_mp.ycbcr_enc = ctx->ycbcr_enc; f->fmt.pix_mp.quantization = ctx->quant; return 0; } static int vidioc_g_fmt_vid_out(struct file *file, void *priv, struct v4l2_format *f) { struct vim2m_dev *dev = video_drvdata(file); if (dev->multiplanar) return -ENOTTY; return vidioc_g_fmt(file2ctx(file), f); } static int vidioc_g_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct vim2m_dev *dev = video_drvdata(file); if (dev->multiplanar) return -ENOTTY; return vidioc_g_fmt(file2ctx(file), f); } static int vidioc_g_fmt_vid_out_mplane(struct file *file, void *priv, struct v4l2_format *f) { struct vim2m_dev *dev = video_drvdata(file); if (!dev->multiplanar) return -ENOTTY; return vidioc_g_fmt_mplane(file2ctx(file), f); } static int vidioc_g_fmt_vid_cap_mplane(struct file *file, void *priv, struct v4l2_format *f) { struct vim2m_dev *dev = video_drvdata(file); if (!dev->multiplanar) return -ENOTTY; return vidioc_g_fmt_mplane(file2ctx(file), f); } static int vidioc_try_fmt(struct v4l2_format *f, bool is_mplane) { int walign, halign, ret; int width = (is_mplane) ? f->fmt.pix_mp.width : f->fmt.pix.width; int height = (is_mplane) ? f->fmt.pix_mp.height : f->fmt.pix.height; u32 pixfmt = (is_mplane) ? 
f->fmt.pix_mp.pixelformat : f->fmt.pix.pixelformat; width = clamp(width, MIN_W, MAX_W); height = clamp(height, MIN_H, MAX_H); get_alignment(pixfmt, &walign, &halign); width = ALIGN(width, walign); height = ALIGN(height, halign); f->fmt.pix.field = V4L2_FIELD_NONE; if (is_mplane) { ret = v4l2_fill_pixfmt_mp(&f->fmt.pix_mp, pixfmt, width, height); } else { ret = v4l2_fill_pixfmt(&f->fmt.pix, pixfmt, width, height); } return ret; } static int vidioc_try_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct vim2m_fmt *fmt; struct vim2m_ctx *ctx = file2ctx(file); struct vim2m_dev *dev = video_drvdata(file); if (dev->multiplanar) return -ENOTTY; fmt = find_format(f->fmt.pix.pixelformat); if (!fmt) { f->fmt.pix.pixelformat = formats[0].fourcc; fmt = find_format(f->fmt.pix.pixelformat); } if (!(fmt->types & MEM2MEM_CAPTURE)) { v4l2_err(&ctx->dev->v4l2_dev, "Fourcc format (0x%08x) invalid.\n", f->fmt.pix.pixelformat); return -EINVAL; } f->fmt.pix.colorspace = ctx->colorspace; f->fmt.pix.xfer_func = ctx->xfer_func; f->fmt.pix.ycbcr_enc = ctx->ycbcr_enc; f->fmt.pix.quantization = ctx->quant; return vidioc_try_fmt(f, false); } static int vidioc_try_fmt_vid_cap_mplane(struct file *file, void *priv, struct v4l2_format *f) { struct vim2m_fmt *fmt; struct vim2m_ctx *ctx = file2ctx(file); struct vim2m_dev *dev = video_drvdata(file); if (!dev->multiplanar) return -ENOTTY; fmt = find_format(f->fmt.pix_mp.pixelformat); if (!fmt) { f->fmt.pix_mp.pixelformat = formats[0].fourcc; fmt = find_format(f->fmt.pix_mp.pixelformat); } if (!(fmt->types & MEM2MEM_CAPTURE)) { v4l2_err(&ctx->dev->v4l2_dev, "Fourcc format (0x%08x) invalid.\n", f->fmt.pix.pixelformat); return -EINVAL; } f->fmt.pix_mp.colorspace = ctx->colorspace; f->fmt.pix_mp.xfer_func = ctx->xfer_func; f->fmt.pix_mp.ycbcr_enc = ctx->ycbcr_enc; f->fmt.pix_mp.quantization = ctx->quant; return vidioc_try_fmt(f, true); } static int vidioc_try_fmt_vid_out(struct file *file, void *priv, struct v4l2_format *f) { struct vim2m_fmt *fmt; struct vim2m_ctx *ctx = file2ctx(file); struct vim2m_dev *dev = video_drvdata(file); if (dev->multiplanar) return -ENOTTY; fmt = find_format(f->fmt.pix.pixelformat); if (!fmt) { f->fmt.pix.pixelformat = formats[0].fourcc; fmt = find_format(f->fmt.pix.pixelformat); } if (!(fmt->types & MEM2MEM_OUTPUT)) { v4l2_err(&ctx->dev->v4l2_dev, "Fourcc format (0x%08x) invalid.\n", f->fmt.pix.pixelformat); return -EINVAL; } if (!f->fmt.pix.colorspace) f->fmt.pix.colorspace = V4L2_COLORSPACE_REC709; return vidioc_try_fmt(f, false); } static int vidioc_try_fmt_vid_out_mplane(struct file *file, void *priv, struct v4l2_format *f) { struct vim2m_fmt *fmt; struct vim2m_ctx *ctx = file2ctx(file); struct vim2m_dev *dev = video_drvdata(file); if (!dev->multiplanar) return -ENOTTY; fmt = find_format(f->fmt.pix_mp.pixelformat); if (!fmt) { f->fmt.pix_mp.pixelformat = formats[0].fourcc; fmt = find_format(f->fmt.pix_mp.pixelformat); } if (!(fmt->types & MEM2MEM_OUTPUT)) { v4l2_err(&ctx->dev->v4l2_dev, "Fourcc format (0x%08x) invalid.\n", f->fmt.pix_mp.pixelformat); return -EINVAL; } if (!f->fmt.pix_mp.colorspace) f->fmt.pix_mp.colorspace = V4L2_COLORSPACE_REC709; return vidioc_try_fmt(f, true); } static int vidioc_s_fmt(struct vim2m_ctx *ctx, struct v4l2_format *f) { struct vim2m_q_data *q_data; struct vb2_queue *vq; unsigned int i; bool is_mplane = ctx->dev->multiplanar; u32 pixfmt = (is_mplane) ? f->fmt.pix_mp.pixelformat : f->fmt.pix.pixelformat; u32 width = (is_mplane) ? 
f->fmt.pix_mp.width : f->fmt.pix.width; u32 height = (is_mplane) ? f->fmt.pix_mp.height : f->fmt.pix.height; vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type); q_data = get_q_data(ctx, f->type); if (!q_data) return -EINVAL; if (vb2_is_busy(vq)) { v4l2_err(&ctx->dev->v4l2_dev, "%s queue busy\n", __func__); return -EBUSY; } q_data->fmt = find_format(pixfmt); q_data->width = width; q_data->height = height; if (is_mplane) { q_data->num_mem_planes = f->fmt.pix_mp.num_planes; for (i = 0; i < f->fmt.pix_mp.num_planes; i++) q_data->sizeimage[i] = f->fmt.pix_mp.plane_fmt[i].sizeimage; } else { q_data->sizeimage[0] = f->fmt.pix.sizeimage; q_data->num_mem_planes = 1; } dprintk(ctx->dev, 1, "Format for type %s: %dx%d (%d bpp), fmt: %c%c%c%c\n", type_name(f->type), q_data->width, q_data->height, q_data->fmt->depth, (q_data->fmt->fourcc & 0xff), (q_data->fmt->fourcc >> 8) & 0xff, (q_data->fmt->fourcc >> 16) & 0xff, (q_data->fmt->fourcc >> 24) & 0xff); return 0; } static int vidioc_s_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { int ret; struct vim2m_dev *dev = video_drvdata(file); if (dev->multiplanar) return -ENOTTY; ret = vidioc_try_fmt_vid_cap(file, priv, f); if (ret) return ret; return vidioc_s_fmt(file2ctx(file), f); } static int vidioc_s_fmt_vid_cap_mplane(struct file *file, void *priv, struct v4l2_format *f) { int ret; struct vim2m_dev *dev = video_drvdata(file); if (!dev->multiplanar) return -ENOTTY; ret = vidioc_try_fmt_vid_cap_mplane(file, priv, f); if (ret) return ret; return vidioc_s_fmt(file2ctx(file), f); } static int vidioc_s_fmt_vid_out(struct file *file, void *priv, struct v4l2_format *f) { struct vim2m_ctx *ctx = file2ctx(file); struct vim2m_dev *dev = video_drvdata(file); int ret; if (dev->multiplanar) return -ENOTTY; ret = vidioc_try_fmt_vid_out(file, priv, f); if (ret) return ret; ret = vidioc_s_fmt(file2ctx(file), f); if (!ret) { ctx->colorspace = f->fmt.pix.colorspace; ctx->xfer_func = f->fmt.pix.xfer_func; ctx->ycbcr_enc = f->fmt.pix.ycbcr_enc; ctx->quant = f->fmt.pix.quantization; } return ret; } static int vidioc_s_fmt_vid_out_mplane(struct file *file, void *priv, struct v4l2_format *f) { struct vim2m_ctx *ctx = file2ctx(file); struct vim2m_dev *dev = video_drvdata(file); int ret; if (!dev->multiplanar) return -ENOTTY; ret = vidioc_try_fmt_vid_out_mplane(file, priv, f); if (ret) return ret; ret = vidioc_s_fmt(file2ctx(file), f); if (!ret) { ctx->colorspace = f->fmt.pix_mp.colorspace; ctx->xfer_func = f->fmt.pix_mp.xfer_func; ctx->ycbcr_enc = f->fmt.pix_mp.ycbcr_enc; ctx->quant = f->fmt.pix_mp.quantization; } return ret; } static int vim2m_s_ctrl(struct v4l2_ctrl *ctrl) { struct vim2m_ctx *ctx = container_of(ctrl->handler, struct vim2m_ctx, hdl); switch (ctrl->id) { case V4L2_CID_HFLIP: if (ctrl->val) ctx->mode |= MEM2MEM_HFLIP; else ctx->mode &= ~MEM2MEM_HFLIP; break; case V4L2_CID_VFLIP: if (ctrl->val) ctx->mode |= MEM2MEM_VFLIP; else ctx->mode &= ~MEM2MEM_VFLIP; break; case V4L2_CID_TRANS_TIME_MSEC: ctx->transtime = ctrl->val; if (ctx->transtime < 1) ctx->transtime = 1; break; case V4L2_CID_TRANS_NUM_BUFS: ctx->translen = ctrl->val; break; default: v4l2_err(&ctx->dev->v4l2_dev, "Invalid control\n"); return -EINVAL; } return 0; } static const struct v4l2_ctrl_ops vim2m_ctrl_ops = { .s_ctrl = vim2m_s_ctrl, }; static const struct v4l2_ioctl_ops vim2m_ioctl_ops = { .vidioc_querycap = vidioc_querycap, .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap, .vidioc_enum_framesizes = vidioc_enum_framesizes, .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap, 
.vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap, .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap, .vidioc_g_fmt_vid_cap_mplane = vidioc_g_fmt_vid_cap_mplane, .vidioc_try_fmt_vid_cap_mplane = vidioc_try_fmt_vid_cap_mplane, .vidioc_s_fmt_vid_cap_mplane = vidioc_s_fmt_vid_cap_mplane, .vidioc_enum_fmt_vid_out = vidioc_enum_fmt_vid_out, .vidioc_g_fmt_vid_out = vidioc_g_fmt_vid_out, .vidioc_try_fmt_vid_out = vidioc_try_fmt_vid_out, .vidioc_s_fmt_vid_out = vidioc_s_fmt_vid_out, .vidioc_g_fmt_vid_out_mplane = vidioc_g_fmt_vid_out_mplane, .vidioc_try_fmt_vid_out_mplane = vidioc_try_fmt_vid_out_mplane, .vidioc_s_fmt_vid_out_mplane = vidioc_s_fmt_vid_out_mplane, .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs, .vidioc_querybuf = v4l2_m2m_ioctl_querybuf, .vidioc_qbuf = v4l2_m2m_ioctl_qbuf, .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf, .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf, .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs, .vidioc_expbuf = v4l2_m2m_ioctl_expbuf, .vidioc_streamon = v4l2_m2m_ioctl_streamon, .vidioc_streamoff = v4l2_m2m_ioctl_streamoff, .vidioc_subscribe_event = v4l2_ctrl_subscribe_event, .vidioc_unsubscribe_event = v4l2_event_unsubscribe, }; /* * Queue operations */ static int vim2m_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], struct device *alloc_devs[]) { struct vim2m_ctx *ctx = vb2_get_drv_priv(vq); struct vim2m_q_data *q_data; unsigned int size, p, count = *nbuffers; q_data = get_q_data(ctx, vq->type); if (!q_data) return -EINVAL; size = 0; for (p = 0; p < q_data->num_mem_planes; p++) size += q_data->sizeimage[p]; while (size * count > MEM2MEM_VID_MEM_LIMIT) (count)--; *nbuffers = count; if (*nplanes) { if (*nplanes != q_data->num_mem_planes) return -EINVAL; for (p = 0; p < q_data->num_mem_planes; p++) { if (sizes[p] < q_data->sizeimage[p]) return -EINVAL; } } else { *nplanes = q_data->num_mem_planes; for (p = 0; p < q_data->num_mem_planes; p++) sizes[p] = q_data->sizeimage[p]; } dprintk(ctx->dev, 1, "%s: get %d buffer(s) of size %d each.\n", type_name(vq->type), count, size); return 0; } static int vim2m_buf_out_validate(struct vb2_buffer *vb) { struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); struct vim2m_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); if (vbuf->field == V4L2_FIELD_ANY) vbuf->field = V4L2_FIELD_NONE; if (vbuf->field != V4L2_FIELD_NONE) { dprintk(ctx->dev, 1, "%s field isn't supported\n", __func__); return -EINVAL; } return 0; } static int vim2m_buf_prepare(struct vb2_buffer *vb) { struct vim2m_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); struct vim2m_q_data *q_data; unsigned int p; dprintk(ctx->dev, 2, "type: %s\n", type_name(vb->vb2_queue->type)); q_data = get_q_data(ctx, vb->vb2_queue->type); if (!q_data) return -EINVAL; for (p = 0; p < q_data->num_mem_planes; p++) { if (vb2_plane_size(vb, p) < q_data->sizeimage[p]) { dprintk(ctx->dev, 1, "%s data will not fit into plane (%lu < %lu)\n", __func__, vb2_plane_size(vb, p), (long)q_data->sizeimage[p]); return -EINVAL; } vb2_set_plane_payload(vb, p, q_data->sizeimage[p]); } return 0; } static void vim2m_buf_queue(struct vb2_buffer *vb) { struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); struct vim2m_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf); } static int vim2m_start_streaming(struct vb2_queue *q, unsigned int count) { struct vim2m_ctx *ctx = vb2_get_drv_priv(q); struct vim2m_q_data *q_data = get_q_data(ctx, q->type); if (!q_data) return -EINVAL; if (V4L2_TYPE_IS_OUTPUT(q->type)) ctx->aborting = 0; q_data->sequence = 0; 
return 0; } static void vim2m_stop_streaming(struct vb2_queue *q) { struct vim2m_ctx *ctx = vb2_get_drv_priv(q); struct vb2_v4l2_buffer *vbuf; cancel_delayed_work_sync(&ctx->work_run); for (;;) { if (V4L2_TYPE_IS_OUTPUT(q->type)) vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx); else vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx); if (!vbuf) return; v4l2_ctrl_request_complete(vbuf->vb2_buf.req_obj.req, &ctx->hdl); v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR); } } static void vim2m_buf_request_complete(struct vb2_buffer *vb) { struct vim2m_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); v4l2_ctrl_request_complete(vb->req_obj.req, &ctx->hdl); } static const struct vb2_ops vim2m_qops = { .queue_setup = vim2m_queue_setup, .buf_out_validate = vim2m_buf_out_validate, .buf_prepare = vim2m_buf_prepare, .buf_queue = vim2m_buf_queue, .start_streaming = vim2m_start_streaming, .stop_streaming = vim2m_stop_streaming, .buf_request_complete = vim2m_buf_request_complete, }; static int queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq) { struct vim2m_ctx *ctx = priv; int ret; src_vq->type = (ctx->dev->multiplanar) ? V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE : V4L2_BUF_TYPE_VIDEO_OUTPUT; src_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF; src_vq->drv_priv = ctx; src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer); src_vq->ops = &vim2m_qops; src_vq->mem_ops = &vb2_vmalloc_memops; src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY; src_vq->lock = &ctx->vb_mutex; src_vq->supports_requests = true; ret = vb2_queue_init(src_vq); if (ret) return ret; dst_vq->type = (ctx->dev->multiplanar) ? V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE : V4L2_BUF_TYPE_VIDEO_CAPTURE; dst_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF; dst_vq->drv_priv = ctx; dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer); dst_vq->ops = &vim2m_qops; dst_vq->mem_ops = &vb2_vmalloc_memops; dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY; dst_vq->lock = &ctx->vb_mutex; return vb2_queue_init(dst_vq); } static struct v4l2_ctrl_config vim2m_ctrl_trans_time_msec = { .ops = &vim2m_ctrl_ops, .id = V4L2_CID_TRANS_TIME_MSEC, .name = "Transaction Time (msec)", .type = V4L2_CTRL_TYPE_INTEGER, .min = 1, .max = 10001, .step = 1, }; static const struct v4l2_ctrl_config vim2m_ctrl_trans_num_bufs = { .ops = &vim2m_ctrl_ops, .id = V4L2_CID_TRANS_NUM_BUFS, .name = "Buffers Per Transaction", .type = V4L2_CTRL_TYPE_INTEGER, .def = 1, .min = 1, .max = MEM2MEM_DEF_NUM_BUFS, .step = 1, }; /* * File operations */ static int vim2m_open(struct file *file) { struct vim2m_dev *dev = video_drvdata(file); struct vim2m_ctx *ctx = NULL; struct v4l2_ctrl_handler *hdl; int rc = 0; if (mutex_lock_interruptible(&dev->dev_mutex)) return -ERESTARTSYS; ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) { rc = -ENOMEM; goto open_unlock; } v4l2_fh_init(&ctx->fh, video_devdata(file)); ctx->dev = dev; hdl = &ctx->hdl; v4l2_ctrl_handler_init(hdl, 4); v4l2_ctrl_new_std(hdl, &vim2m_ctrl_ops, V4L2_CID_HFLIP, 0, 1, 1, 0); v4l2_ctrl_new_std(hdl, &vim2m_ctrl_ops, V4L2_CID_VFLIP, 0, 1, 1, 0); vim2m_ctrl_trans_time_msec.def = default_transtime; v4l2_ctrl_new_custom(hdl, &vim2m_ctrl_trans_time_msec, NULL); v4l2_ctrl_new_custom(hdl, &vim2m_ctrl_trans_num_bufs, NULL); if (hdl->error) { rc = hdl->error; v4l2_ctrl_handler_free(hdl); kfree(ctx); goto open_unlock; } ctx->fh.ctrl_handler = hdl; v4l2_ctrl_handler_setup(hdl); ctx->q_data[V4L2_M2M_SRC].fmt = &formats[0]; ctx->q_data[V4L2_M2M_SRC].width = 640; ctx->q_data[V4L2_M2M_SRC].height = 480; 
ctx->q_data[V4L2_M2M_SRC].sizeimage[0] = ctx->q_data[V4L2_M2M_SRC].width * ctx->q_data[V4L2_M2M_SRC].height * (ctx->q_data[V4L2_M2M_SRC].fmt->depth >> 3); ctx->q_data[V4L2_M2M_SRC].num_mem_planes = 1; ctx->q_data[V4L2_M2M_DST] = ctx->q_data[V4L2_M2M_SRC]; ctx->colorspace = V4L2_COLORSPACE_REC709; ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, &queue_init); mutex_init(&ctx->vb_mutex); INIT_DELAYED_WORK(&ctx->work_run, device_work); if (IS_ERR(ctx->fh.m2m_ctx)) { rc = PTR_ERR(ctx->fh.m2m_ctx); v4l2_ctrl_handler_free(hdl); v4l2_fh_exit(&ctx->fh); kfree(ctx); goto open_unlock; } v4l2_fh_add(&ctx->fh, file); atomic_inc(&dev->num_inst); dprintk(dev, 1, "Created instance: %p, m2m_ctx: %p\n", ctx, ctx->fh.m2m_ctx); open_unlock: mutex_unlock(&dev->dev_mutex); return rc; } static int vim2m_release(struct file *file) { struct vim2m_dev *dev = video_drvdata(file); struct vim2m_ctx *ctx = file2ctx(file); dprintk(dev, 1, "Releasing instance %p\n", ctx); v4l2_fh_del(&ctx->fh, file); v4l2_fh_exit(&ctx->fh); v4l2_ctrl_handler_free(&ctx->hdl); mutex_lock(&dev->dev_mutex); v4l2_m2m_ctx_release(ctx->fh.m2m_ctx); mutex_unlock(&dev->dev_mutex); kfree(ctx); atomic_dec(&dev->num_inst); return 0; } static void vim2m_device_release(struct video_device *vdev) { struct vim2m_dev *dev = container_of(vdev, struct vim2m_dev, vfd); v4l2_device_unregister(&dev->v4l2_dev); v4l2_m2m_release(dev->m2m_dev); media_device_cleanup(&dev->mdev); kfree(dev); } static const struct v4l2_file_operations vim2m_fops = { .owner = THIS_MODULE, .open = vim2m_open, .release = vim2m_release, .poll = v4l2_m2m_fop_poll, .unlocked_ioctl = video_ioctl2, .mmap = v4l2_m2m_fop_mmap, }; static const struct video_device vim2m_videodev = { .name = MEM2MEM_NAME, .vfl_dir = VFL_DIR_M2M, .fops = &vim2m_fops, .ioctl_ops = &vim2m_ioctl_ops, .minor = -1, .release = vim2m_device_release, .device_caps = V4L2_CAP_STREAMING, }; static const struct v4l2_m2m_ops m2m_ops = { .device_run = device_run, .job_ready = job_ready, .job_abort = job_abort, }; static const struct media_device_ops m2m_media_ops = { .req_validate = vb2_request_validate, .req_queue = v4l2_m2m_request_queue, }; static int vim2m_probe(struct platform_device *pdev) { struct vim2m_dev *dev; struct video_device *vfd; int ret; dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) return -ENOMEM; ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev); if (ret) goto error_free; atomic_set(&dev->num_inst, 0); mutex_init(&dev->dev_mutex); dev->multiplanar = (multiplanar == 2); dev->vfd = vim2m_videodev; vfd = &dev->vfd; vfd->lock = &dev->dev_mutex; vfd->v4l2_dev = &dev->v4l2_dev; vfd->device_caps |= (dev->multiplanar) ? 
V4L2_CAP_VIDEO_M2M_MPLANE : V4L2_CAP_VIDEO_M2M; video_set_drvdata(vfd, dev); platform_set_drvdata(pdev, dev); dev->m2m_dev = v4l2_m2m_init(&m2m_ops); if (IS_ERR(dev->m2m_dev)) { v4l2_err(&dev->v4l2_dev, "Failed to init mem2mem device\n"); ret = PTR_ERR(dev->m2m_dev); dev->m2m_dev = NULL; goto error_dev; } dev->mdev.dev = &pdev->dev; strscpy(dev->mdev.model, "vim2m", sizeof(dev->mdev.model)); strscpy(dev->mdev.bus_info, "platform:vim2m", sizeof(dev->mdev.bus_info)); media_device_init(&dev->mdev); dev->mdev.ops = &m2m_media_ops; dev->v4l2_dev.mdev = &dev->mdev; ret = video_register_device(vfd, VFL_TYPE_VIDEO, 0); if (ret) { v4l2_err(&dev->v4l2_dev, "Failed to register video device\n"); goto error_m2m; } v4l2_info(&dev->v4l2_dev, "Device registered as /dev/video%d\n", vfd->num); ret = v4l2_m2m_register_media_controller(dev->m2m_dev, vfd, MEDIA_ENT_F_PROC_VIDEO_SCALER); if (ret) { v4l2_err(&dev->v4l2_dev, "Failed to init mem2mem media controller\n"); goto error_v4l2; } ret = media_device_register(&dev->mdev); if (ret) { v4l2_err(&dev->v4l2_dev, "Failed to register mem2mem media device\n"); goto error_m2m_mc; } return 0; error_m2m_mc: v4l2_m2m_unregister_media_controller(dev->m2m_dev); error_v4l2: video_unregister_device(&dev->vfd); /* vim2m_device_release called by video_unregister_device to release various objects */ return ret; error_m2m: v4l2_m2m_release(dev->m2m_dev); error_dev: v4l2_device_unregister(&dev->v4l2_dev); error_free: kfree(dev); return ret; } static void vim2m_remove(struct platform_device *pdev) { struct vim2m_dev *dev = platform_get_drvdata(pdev); v4l2_info(&dev->v4l2_dev, "Removing " MEM2MEM_NAME); media_device_unregister(&dev->mdev); v4l2_m2m_unregister_media_controller(dev->m2m_dev); video_unregister_device(&dev->vfd); } static struct platform_driver vim2m_pdrv = { .probe = vim2m_probe, .remove = vim2m_remove, .driver = { .name = MEM2MEM_NAME, }, }; static void __exit vim2m_exit(void) { platform_driver_unregister(&vim2m_pdrv); platform_device_unregister(&vim2m_pdev); } static int __init vim2m_init(void) { int ret; ret = platform_device_register(&vim2m_pdev); if (ret) return ret; ret = platform_driver_register(&vim2m_pdrv); if (ret) platform_device_unregister(&vim2m_pdev); return ret; } module_init(vim2m_init); module_exit(vim2m_exit);
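For reference, copy_two_pixels() above converts RGB to YUYV with fixed-point BT.601-style coefficients scaled by 1 << 15; the +16 luma and +128 chroma offsets plus rounding are folded into the additive constants (524288 = 16 * 32768, 4210688 = 128.5 * 32768). The standalone sketch below is not part of the driver and uses invented names; it only replays that arithmetic so the constants can be sanity-checked in user space.

#include <stdint.h>
#include <stdio.h>

/* Same fixed-point math as the driver's copy_two_pixels() YUYV case. */
static void rgb_to_yuv601(uint8_t r, uint8_t g, uint8_t b,
			  uint8_t *y, uint8_t *u, uint8_t *v)
{
	*y = (uint8_t)((8453 * r + 16594 * g + 3223 * b + 524288) >> 15);
	*u = (uint8_t)((-4878 * r - 9578 * g + 14456 * b + 4210688) >> 15);
	*v = (uint8_t)((14456 * r - 12105 * g - 2351 * b + 4210688) >> 15);
}

int main(void)
{
	uint8_t y, u, v;

	rgb_to_yuv601(255, 255, 255, &y, &u, &v);
	printf("white: y=%u u=%u v=%u\n", y, u, v);	/* 235 128 128 */
	rgb_to_yuv601(0, 0, 0, &y, &u, &v);
	printf("black: y=%u u=%u v=%u\n", y, u, v);	/* 16 128 128 */
	return 0;
}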
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* I/O iterator iteration building functions. * * Copyright (C) 2023 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) */ #ifndef _LINUX_IOV_ITER_H #define _LINUX_IOV_ITER_H #include <linux/uio.h> #include <linux/bvec.h> #include <linux/folio_queue.h> typedef size_t (*iov_step_f)(void *iter_base, size_t progress, size_t len, void *priv, void *priv2); typedef size_t (*iov_ustep_f)(void __user *iter_base, size_t progress, size_t len, void *priv, void *priv2); /* * Handle ITER_UBUF. */ static __always_inline size_t iterate_ubuf(struct iov_iter *iter, size_t len, void *priv, void *priv2, iov_ustep_f step) { void __user *base = iter->ubuf; size_t progress = 0, remain; remain = step(base + iter->iov_offset, 0, len, priv, priv2); progress = len - remain; iter->iov_offset += progress; iter->count -= progress; return progress; } /* * Handle ITER_IOVEC. */ static __always_inline size_t iterate_iovec(struct iov_iter *iter, size_t len, void *priv, void *priv2, iov_ustep_f step) { const struct iovec *p = iter->__iov; size_t progress = 0, skip = iter->iov_offset; do { size_t remain, consumed; size_t part = min(len, p->iov_len - skip); if (likely(part)) { remain = step(p->iov_base + skip, progress, part, priv, priv2); consumed = part - remain; progress += consumed; skip += consumed; len -= consumed; if (skip < p->iov_len) break; } p++; skip = 0; } while (len); iter->nr_segs -= p - iter->__iov; iter->__iov = p; iter->iov_offset = skip; iter->count -= progress; return progress; } /* * Handle ITER_KVEC.
*/ static __always_inline size_t iterate_kvec(struct iov_iter *iter, size_t len, void *priv, void *priv2, iov_step_f step) { const struct kvec *p = iter->kvec; size_t progress = 0, skip = iter->iov_offset; do { size_t remain, consumed; size_t part = min(len, p->iov_len - skip); if (likely(part)) { remain = step(p->iov_base + skip, progress, part, priv, priv2); consumed = part - remain; progress += consumed; skip += consumed; len -= consumed; if (skip < p->iov_len) break; } p++; skip = 0; } while (len); iter->nr_segs -= p - iter->kvec; iter->kvec = p; iter->iov_offset = skip; iter->count -= progress; return progress; } /* * Handle ITER_BVEC. */ static __always_inline size_t iterate_bvec(struct iov_iter *iter, size_t len, void *priv, void *priv2, iov_step_f step) { const struct bio_vec *p = iter->bvec; size_t progress = 0, skip = iter->iov_offset; do { size_t remain, consumed; size_t offset = p->bv_offset + skip, part; void *kaddr = kmap_local_page(p->bv_page + offset / PAGE_SIZE); part = min3(len, (size_t)(p->bv_len - skip), (size_t)(PAGE_SIZE - offset % PAGE_SIZE)); remain = step(kaddr + offset % PAGE_SIZE, progress, part, priv, priv2); kunmap_local(kaddr); consumed = part - remain; len -= consumed; progress += consumed; skip += consumed; if (skip >= p->bv_len) { skip = 0; p++; } if (remain) break; } while (len); iter->nr_segs -= p - iter->bvec; iter->bvec = p; iter->iov_offset = skip; iter->count -= progress; return progress; } /* * Handle ITER_FOLIOQ. */ static __always_inline size_t iterate_folioq(struct iov_iter *iter, size_t len, void *priv, void *priv2, iov_step_f step) { const struct folio_queue *folioq = iter->folioq; unsigned int slot = iter->folioq_slot; size_t progress = 0, skip = iter->iov_offset; if (slot == folioq_nr_slots(folioq)) { /* The iterator may have been extended. */ folioq = folioq->next; slot = 0; } do { struct folio *folio = folioq_folio(folioq, slot); size_t part, remain = 0, consumed; size_t fsize; void *base; if (!folio) break; fsize = folioq_folio_size(folioq, slot); if (skip < fsize) { base = kmap_local_folio(folio, skip); part = umin(len, PAGE_SIZE - skip % PAGE_SIZE); remain = step(base, progress, part, priv, priv2); kunmap_local(base); consumed = part - remain; len -= consumed; progress += consumed; skip += consumed; } if (skip >= fsize) { skip = 0; slot++; if (slot == folioq_nr_slots(folioq) && folioq->next) { folioq = folioq->next; slot = 0; } } if (remain) break; } while (len); iter->folioq_slot = slot; iter->folioq = folioq; iter->iov_offset = skip; iter->count -= progress; return progress; } /* * Handle ITER_XARRAY. 
*/ static __always_inline size_t iterate_xarray(struct iov_iter *iter, size_t len, void *priv, void *priv2, iov_step_f step) { struct folio *folio; size_t progress = 0; loff_t start = iter->xarray_start + iter->iov_offset; pgoff_t index = start / PAGE_SIZE; XA_STATE(xas, iter->xarray, index); rcu_read_lock(); xas_for_each(&xas, folio, ULONG_MAX) { size_t remain, consumed, offset, part, flen; if (xas_retry(&xas, folio)) continue; if (WARN_ON(xa_is_value(folio))) break; if (WARN_ON(folio_test_hugetlb(folio))) break; offset = offset_in_folio(folio, start + progress); flen = min(folio_size(folio) - offset, len); while (flen) { void *base = kmap_local_folio(folio, offset); part = min_t(size_t, flen, PAGE_SIZE - offset_in_page(offset)); remain = step(base, progress, part, priv, priv2); kunmap_local(base); consumed = part - remain; progress += consumed; len -= consumed; if (remain || len == 0) goto out; flen -= consumed; offset += consumed; } } out: rcu_read_unlock(); iter->iov_offset += progress; iter->count -= progress; return progress; } /* * Handle ITER_DISCARD. */ static __always_inline size_t iterate_discard(struct iov_iter *iter, size_t len, void *priv, void *priv2, iov_step_f step) { size_t progress = len; iter->count -= progress; return progress; } /** * iterate_and_advance2 - Iterate over an iterator * @iter: The iterator to iterate over. * @len: The amount to iterate over. * @priv: Data for the step functions. * @priv2: More data for the step functions. * @ustep: Function for UBUF/IOVEC iterators; given __user addresses. * @step: Function for other iterators; given kernel addresses. * * Iterate over the next part of an iterator, up to the specified length. The * buffer is presented in segments, which for kernel iteration are broken up by * physical pages and mapped, with the mapped address being presented. * * Two step functions, @step and @ustep, must be provided, one for handling * mapped kernel addresses and the other is given user addresses which have the * potential to fault since no pinning is performed. * * The step functions are passed the address and length of the segment, @priv, * @priv2 and the amount of data so far iterated over (which can, for example, * be added to @priv to point to the right part of a second buffer). The step * functions should return the amount of the segment they didn't process (ie. 0 * indicates complete processsing). * * This function returns the amount of data processed (ie. 0 means nothing was * processed and the value of @len means processes to completion). */ static __always_inline size_t iterate_and_advance2(struct iov_iter *iter, size_t len, void *priv, void *priv2, iov_ustep_f ustep, iov_step_f step) { if (unlikely(iter->count < len)) len = iter->count; if (unlikely(!len)) return 0; if (likely(iter_is_ubuf(iter))) return iterate_ubuf(iter, len, priv, priv2, ustep); if (likely(iter_is_iovec(iter))) return iterate_iovec(iter, len, priv, priv2, ustep); if (iov_iter_is_bvec(iter)) return iterate_bvec(iter, len, priv, priv2, step); if (iov_iter_is_kvec(iter)) return iterate_kvec(iter, len, priv, priv2, step); if (iov_iter_is_folioq(iter)) return iterate_folioq(iter, len, priv, priv2, step); if (iov_iter_is_xarray(iter)) return iterate_xarray(iter, len, priv, priv2, step); return iterate_discard(iter, len, priv, priv2, step); } /** * iterate_and_advance - Iterate over an iterator * @iter: The iterator to iterate over. * @len: The amount to iterate over. * @priv: Data for the step functions. 
* @ustep: Function for UBUF/IOVEC iterators; given __user addresses. * @step: Function for other iterators; given kernel addresses. * * As iterate_and_advance2(), but priv2 is always NULL. */ static __always_inline size_t iterate_and_advance(struct iov_iter *iter, size_t len, void *priv, iov_ustep_f ustep, iov_step_f step) { return iterate_and_advance2(iter, len, priv, NULL, ustep, step); } /** * iterate_and_advance_kernel - Iterate over a kernel-internal iterator * @iter: The iterator to iterate over. * @len: The amount to iterate over. * @priv: Data for the step function. * @priv2: More data for the step function. * @step: Processing function; given kernel addresses. * * Iterate over the next part of an iterator, up to the specified length. The * buffer is presented in segments, which for kernel iteration are broken up by * physical pages and mapped, with the mapped address being presented. * * [!] Note: This will only handle BVEC, KVEC, FOLIOQ, XARRAY and DISCARD-type * iterators; it will not handle UBUF or IOVEC-type iterators. * * A single step function, @step, must be provided; it is given the mapped * kernel address and length of each segment. Since no user addresses are * involved, no faulting can occur and no pinning is performed. * * The step function is passed the address and length of the segment, @priv, * @priv2 and the amount of data so far iterated over (which can, for example, * be added to @priv to point to the right part of a second buffer). The step * function should return the amount of the segment it didn't process (ie. 0 * indicates complete processing). * * This function returns the amount of data processed (ie. 0 means nothing was * processed and a value of @len means it was processed to completion). */ static __always_inline size_t iterate_and_advance_kernel(struct iov_iter *iter, size_t len, void *priv, void *priv2, iov_step_f step) { if (unlikely(iter->count < len)) len = iter->count; if (unlikely(!len)) return 0; if (iov_iter_is_bvec(iter)) return iterate_bvec(iter, len, priv, priv2, step); if (iov_iter_is_kvec(iter)) return iterate_kvec(iter, len, priv, priv2, step); if (iov_iter_is_folioq(iter)) return iterate_folioq(iter, len, priv, priv2, step); if (iov_iter_is_xarray(iter)) return iterate_xarray(iter, len, priv, priv2, step); return iterate_discard(iter, len, priv, priv2, step); } #endif /* _LINUX_IOV_ITER_H */
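A brief usage sketch, not taken from the kernel sources (example_copy_step() and example_flatten_kvec() are invented names): a kernel-address step function receives each mapped segment together with the running progress and returns how much of the segment it left unprocessed, and iterate_and_advance_kernel() drives it over a KVEC iterator built with iov_iter_kvec().

/* Illustrative only: flatten a kvec array into a contiguous buffer. */
static size_t example_copy_step(void *iter_base, size_t progress, size_t len,
				void *priv, void *priv2)
{
	/* @progress doubles as the offset into the destination buffer. */
	memcpy((u8 *)priv + progress, iter_base, len);
	return 0;	/* nothing left over: whole segment consumed */
}

static size_t example_flatten_kvec(const struct kvec *vec, unsigned long nr_segs,
				   size_t total, void *dst)
{
	struct iov_iter iter;

	iov_iter_kvec(&iter, ITER_SOURCE, vec, nr_segs, total);
	return iterate_and_advance_kernel(&iter, total, dst, NULL,
					  example_copy_step);
}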
/* SPDX-License-Identifier: GPL-2.0-only */ /* * V9FS definitions. * * Copyright (C) 2004-2008 by Eric Van Hensbergen <ericvh@gmail.com> * Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov> */ #ifndef FS_9P_V9FS_H #define FS_9P_V9FS_H #include <linux/backing-dev.h> #include <linux/netfs.h> #include <linux/fs_parser.h> #include <net/9p/client.h> #include <net/9p/transport.h> /** * enum p9_session_flags - option flags for each 9P session * @V9FS_PROTO_2000U: whether or not to use 9P2000.u extensions * @V9FS_PROTO_2000L: whether or not to use 9P2000.l extensions * @V9FS_ACCESS_SINGLE: only the mounting user can access the hierarchy * @V9FS_ACCESS_USER: a new attach will be issued for every user (default) * @V9FS_ACCESS_CLIENT: Just like user, but access check is performed on client. * @V9FS_ACCESS_ANY: use a single attach for all users * @V9FS_ACCESS_MASK: bit mask of different ACCESS options * @V9FS_POSIX_ACL: POSIX ACLs are enforced * * Session flags reflect options selected by users at mount time */ #define V9FS_ACCESS_ANY (V9FS_ACCESS_SINGLE | \ V9FS_ACCESS_USER | \ V9FS_ACCESS_CLIENT) #define V9FS_ACCESS_MASK V9FS_ACCESS_ANY #define V9FS_ACL_MASK V9FS_POSIX_ACL enum p9_session_flags { V9FS_PROTO_2000U = 0x01, V9FS_PROTO_2000L = 0x02, V9FS_ACCESS_SINGLE = 0x04, V9FS_ACCESS_USER = 0x08, V9FS_ACCESS_CLIENT = 0x10, V9FS_POSIX_ACL = 0x20, V9FS_NO_XATTR = 0x40, V9FS_IGNORE_QV = 0x80, /* ignore qid.version for cache hints */ V9FS_DIRECT_IO = 0x100, V9FS_SYNC = 0x200 }; /** * enum p9_cache_shortcuts - human readable cache preferences * @CACHE_SC_NONE: disable all caches * @CACHE_SC_READAHEAD: only provide caching for readahead * @CACHE_SC_MMAP: provide caching to enable mmap * @CACHE_SC_LOOSE: non-coherent caching for files and meta data * @CACHE_SC_FSCACHE: persistent non-coherent caching for files and meta-data * */ enum p9_cache_shortcuts { CACHE_SC_NONE = 0b00000000, CACHE_SC_READAHEAD = 0b00000001, CACHE_SC_MMAP = 0b00000101, CACHE_SC_LOOSE = 0b00001111, CACHE_SC_FSCACHE = 0b10001111, }; /** * enum p9_cache_bits - possible values of ->cache * @CACHE_NONE: caches disabled * @CACHE_FILE: file caching (open to close) * @CACHE_META: meta-data and directory caching * @CACHE_WRITEBACK: write-back caching for files * @CACHE_LOOSE: don't check cache consistency * @CACHE_FSCACHE: local persistent caches * */ enum p9_cache_bits { CACHE_NONE = 0b00000000, CACHE_FILE = 0b00000001, CACHE_META = 0b00000010, CACHE_WRITEBACK = 0b00000100, CACHE_LOOSE = 0b00001000, CACHE_FSCACHE = 0b10000000, }; /** * struct v9fs_session_info - per-instance session
information * @flags: session options of type &p9_session_flags * @nodev: set to 1 to disable device mapping * @debug: debug level * @afid: authentication handle * @cache: cache mode of type &p9_cache_bits * @cachetag: the tag of the cache associated with this session * @fscache: session cookie associated with FS-Cache * @uname: string user name to mount hierarchy as * @aname: mount specifier for remote hierarchy * @maxdata: maximum data to be sent/recvd per protocol message * @dfltuid: default numeric userid to mount hierarchy as * @dfltgid: default numeric groupid to mount hierarchy as * @uid: if %V9FS_ACCESS_SINGLE, the numeric uid which mounted the hierarchy * @clnt: reference to 9P network client instantiated for this session * @slist: reference to list of registered 9p sessions * * This structure holds state for each session instance established during * a sys_mount() . * * Bugs: there seems to be a lot of state which could be condensed and/or * removed. */ struct v9fs_session_info { /* options */ unsigned int flags; unsigned char nodev; unsigned short debug; unsigned int afid; unsigned int cache; #ifdef CONFIG_9P_FSCACHE char *cachetag; struct fscache_volume *fscache; #endif char *uname; /* user name to mount as */ char *aname; /* name of remote hierarchy being mounted */ unsigned int maxdata; /* max data for client interface */ kuid_t dfltuid; /* default uid/muid for legacy support */ kgid_t dfltgid; /* default gid for legacy support */ kuid_t uid; /* if ACCESS_SINGLE, the uid that has access */ struct p9_client *clnt; /* 9p client */ struct list_head slist; /* list of sessions registered with v9fs */ struct rw_semaphore rename_sem; long session_lock_timeout; /* retry interval for blocking locks */ }; /* cache_validity flags */ #define V9FS_INO_INVALID_ATTR 0x01 struct v9fs_inode { struct netfs_inode netfs; /* Netfslib context and vfs inode */ struct p9_qid qid; unsigned int cache_validity; struct mutex v_mutex; }; static inline struct v9fs_inode *V9FS_I(const struct inode *inode) { return container_of(inode, struct v9fs_inode, netfs.inode); } static inline struct fscache_cookie *v9fs_inode_cookie(struct v9fs_inode *v9inode) { #ifdef CONFIG_9P_FSCACHE return netfs_i_cookie(&v9inode->netfs); #else return NULL; #endif } static inline struct fscache_volume *v9fs_session_cache(struct v9fs_session_info *v9ses) { #ifdef CONFIG_9P_FSCACHE return v9ses->fscache; #else return NULL; #endif } extern const struct fs_parameter_spec v9fs_param_spec[]; extern int v9fs_parse_param(struct fs_context *fc, struct fs_parameter *param); extern int v9fs_show_options(struct seq_file *m, struct dentry *root); struct p9_fid *v9fs_session_init(struct v9fs_session_info *v9ses, struct fs_context *fc); extern void v9fs_session_close(struct v9fs_session_info *v9ses); extern void v9fs_session_cancel(struct v9fs_session_info *v9ses); extern void v9fs_session_begin_cancel(struct v9fs_session_info *v9ses); extern struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags); extern int v9fs_vfs_unlink(struct inode *i, struct dentry *d); extern int v9fs_vfs_rmdir(struct inode *i, struct dentry *d); extern int v9fs_vfs_rename(struct mnt_idmap *idmap, struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags); extern struct inode *v9fs_inode_from_fid(struct v9fs_session_info *v9ses, struct p9_fid *fid, struct super_block *sb, int new); extern const struct inode_operations v9fs_dir_inode_operations_dotl; extern const struct 
inode_operations v9fs_file_inode_operations_dotl; extern const struct inode_operations v9fs_symlink_inode_operations_dotl; extern const struct netfs_request_ops v9fs_req_ops; extern struct inode *v9fs_inode_from_fid_dotl(struct v9fs_session_info *v9ses, struct p9_fid *fid, struct super_block *sb, int new); /* other default globals */ #define V9FS_PORT 564 #define V9FS_DEFUSER "nobody" #define V9FS_DEFANAME "" #define V9FS_DEFUID KUIDT_INIT(-2) #define V9FS_DEFGID KGIDT_INIT(-2) static inline struct v9fs_session_info *v9fs_inode2v9ses(struct inode *inode) { return inode->i_sb->s_fs_info; } static inline struct v9fs_session_info *v9fs_dentry2v9ses(const struct dentry *dentry) { return dentry->d_sb->s_fs_info; } static inline int v9fs_proto_dotu(struct v9fs_session_info *v9ses) { return v9ses->flags & V9FS_PROTO_2000U; } static inline int v9fs_proto_dotl(struct v9fs_session_info *v9ses) { return v9ses->flags & V9FS_PROTO_2000L; } /** * v9fs_get_inode_from_fid - Helper routine to populate an inode by * issuing a attribute request * @v9ses: session information * @fid: fid to issue attribute request for * @sb: superblock on which to create inode * */ static inline struct inode * v9fs_get_inode_from_fid(struct v9fs_session_info *v9ses, struct p9_fid *fid, struct super_block *sb) { if (v9fs_proto_dotl(v9ses)) return v9fs_inode_from_fid_dotl(v9ses, fid, sb, 0); else return v9fs_inode_from_fid(v9ses, fid, sb, 0); } /** * v9fs_get_new_inode_from_fid - Helper routine to populate an inode by * issuing a attribute request * @v9ses: session information * @fid: fid to issue attribute request for * @sb: superblock on which to create inode * */ static inline struct inode * v9fs_get_new_inode_from_fid(struct v9fs_session_info *v9ses, struct p9_fid *fid, struct super_block *sb) { if (v9fs_proto_dotl(v9ses)) return v9fs_inode_from_fid_dotl(v9ses, fid, sb, 1); else return v9fs_inode_from_fid(v9ses, fid, sb, 1); } #endif
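As a small illustration of how the two cache enums above relate (hypothetical helper, not part of this header): each CACHE_SC_* shortcut is a pre-combined mask of p9_cache_bits, e.g. CACHE_SC_MMAP (0b00000101) is CACHE_FILE | CACHE_WRITEBACK, so a session's ->cache value can be tested bit by bit.

/* Hypothetical helper, for illustration only. */
static inline bool v9fs_example_cache_has(const struct v9fs_session_info *v9ses,
					  unsigned int bits)
{
	return (v9ses->cache & bits) == bits;
}
/* e.g. with cache == CACHE_SC_MMAP this is true for CACHE_FILE and
 * CACHE_WRITEBACK but false for CACHE_META or CACHE_FSCACHE.
 */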
/* * Copyright (c) 2018 Cumulus Networks. All rights reserved. * Copyright (c) 2018 David Ahern <dsa@cumulusnetworks.com> * * This software is licensed under the GNU General License Version 2, * June 1991 as shown in the file COPYING in the top-level directory of this * source tree. * * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. */ #include <linux/bitmap.h> #include <linux/in6.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/rhashtable.h> #include <linux/spinlock_types.h> #include <linux/types.h> #include <net/fib_notifier.h> #include <net/inet_dscp.h> #include <net/ip_fib.h> #include <net/ip6_fib.h> #include <net/fib_rules.h> #include <net/net_namespace.h> #include <net/nexthop.h> #include <linux/debugfs.h> #include "netdevsim.h" struct nsim_fib_entry { u64 max; atomic64_t num; }; struct nsim_per_fib_data { struct nsim_fib_entry fib; struct nsim_fib_entry rules; }; struct nsim_fib_data { struct notifier_block fib_nb; struct nsim_per_fib_data ipv4; struct nsim_per_fib_data ipv6; struct nsim_fib_entry nexthops; struct rhashtable fib_rt_ht; struct list_head fib_rt_list; struct mutex fib_lock; /* Protects FIB HT and list */ struct notifier_block nexthop_nb; struct rhashtable nexthop_ht; struct devlink *devlink; struct work_struct fib_event_work; struct work_struct fib_flush_work; struct list_head fib_event_queue; spinlock_t fib_event_queue_lock; /* Protects fib event queue list */ struct mutex nh_lock; /* Protects NH HT */ struct dentry *ddir; bool fail_route_offload; bool fail_res_nexthop_group_replace; bool fail_nexthop_bucket_replace; bool fail_route_delete; }; struct nsim_fib_rt_key { unsigned char addr[sizeof(struct in6_addr)]; unsigned char prefix_len; int family; u32 tb_id; }; struct nsim_fib_rt { struct nsim_fib_rt_key key; struct rhash_head ht_node; struct list_head list; /* Member of fib_rt_list */ }; struct nsim_fib4_rt { struct nsim_fib_rt common; struct fib_info *fi; dscp_t dscp; u8 type; }; struct nsim_fib6_rt { struct nsim_fib_rt common; struct list_head nh_list; unsigned int nhs; }; struct nsim_fib6_rt_nh { struct list_head list; /* Member of nh_list */ struct fib6_info *rt; }; struct nsim_fib6_event { struct fib6_info **rt_arr; unsigned int nrt6; }; struct nsim_fib_event { struct list_head list; /* node in fib queue */ union { struct fib_entry_notifier_info fen_info; struct nsim_fib6_event fib6_event; }; struct nsim_fib_data *data; unsigned long event; int family; }; static const struct rhashtable_params nsim_fib_rt_ht_params = { .key_offset = offsetof(struct nsim_fib_rt, key), .head_offset = offsetof(struct nsim_fib_rt, ht_node), .key_len = sizeof(struct nsim_fib_rt_key), .automatic_shrinking = true, }; struct nsim_nexthop { struct
rhash_head ht_node; u64 occ; u32 id; bool is_resilient; }; static const struct rhashtable_params nsim_nexthop_ht_params = { .key_offset = offsetof(struct nsim_nexthop, id), .head_offset = offsetof(struct nsim_nexthop, ht_node), .key_len = sizeof(u32), .automatic_shrinking = true, }; u64 nsim_fib_get_val(struct nsim_fib_data *fib_data, enum nsim_resource_id res_id, bool max) { struct nsim_fib_entry *entry; switch (res_id) { case NSIM_RESOURCE_IPV4_FIB: entry = &fib_data->ipv4.fib; break; case NSIM_RESOURCE_IPV4_FIB_RULES: entry = &fib_data->ipv4.rules; break; case NSIM_RESOURCE_IPV6_FIB: entry = &fib_data->ipv6.fib; break; case NSIM_RESOURCE_IPV6_FIB_RULES: entry = &fib_data->ipv6.rules; break; case NSIM_RESOURCE_NEXTHOPS: entry = &fib_data->nexthops; break; default: return 0; } return max ? entry->max : atomic64_read(&entry->num); } static void nsim_fib_set_max(struct nsim_fib_data *fib_data, enum nsim_resource_id res_id, u64 val) { struct nsim_fib_entry *entry; switch (res_id) { case NSIM_RESOURCE_IPV4_FIB: entry = &fib_data->ipv4.fib; break; case NSIM_RESOURCE_IPV4_FIB_RULES: entry = &fib_data->ipv4.rules; break; case NSIM_RESOURCE_IPV6_FIB: entry = &fib_data->ipv6.fib; break; case NSIM_RESOURCE_IPV6_FIB_RULES: entry = &fib_data->ipv6.rules; break; case NSIM_RESOURCE_NEXTHOPS: entry = &fib_data->nexthops; break; default: WARN_ON(1); return; } entry->max = val; } static int nsim_fib_rule_account(struct nsim_fib_entry *entry, bool add, struct netlink_ext_ack *extack) { int err = 0; if (add) { if (!atomic64_add_unless(&entry->num, 1, entry->max)) { err = -ENOSPC; NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported fib rule entries"); } } else { atomic64_dec_if_positive(&entry->num); } return err; } static int nsim_fib_rule_event(struct nsim_fib_data *data, struct fib_notifier_info *info, bool add) { struct netlink_ext_ack *extack = info->extack; int err = 0; switch (info->family) { case AF_INET: err = nsim_fib_rule_account(&data->ipv4.rules, add, extack); break; case AF_INET6: err = nsim_fib_rule_account(&data->ipv6.rules, add, extack); break; } return err; } static int nsim_fib_account(struct nsim_fib_entry *entry, bool add) { int err = 0; if (add) { if (!atomic64_add_unless(&entry->num, 1, entry->max)) err = -ENOSPC; } else { atomic64_dec_if_positive(&entry->num); } return err; } static void nsim_fib_rt_init(struct nsim_fib_data *data, struct nsim_fib_rt *fib_rt, const void *addr, size_t addr_len, unsigned int prefix_len, int family, u32 tb_id) { memcpy(fib_rt->key.addr, addr, addr_len); fib_rt->key.prefix_len = prefix_len; fib_rt->key.family = family; fib_rt->key.tb_id = tb_id; list_add(&fib_rt->list, &data->fib_rt_list); } static void nsim_fib_rt_fini(struct nsim_fib_rt *fib_rt) { list_del(&fib_rt->list); } static struct nsim_fib_rt *nsim_fib_rt_lookup(struct rhashtable *fib_rt_ht, const void *addr, size_t addr_len, unsigned int prefix_len, int family, u32 tb_id) { struct nsim_fib_rt_key key; memset(&key, 0, sizeof(key)); memcpy(key.addr, addr, addr_len); key.prefix_len = prefix_len; key.family = family; key.tb_id = tb_id; return rhashtable_lookup_fast(fib_rt_ht, &key, nsim_fib_rt_ht_params); } static struct nsim_fib4_rt * nsim_fib4_rt_create(struct nsim_fib_data *data, struct fib_entry_notifier_info *fen_info) { struct nsim_fib4_rt *fib4_rt; fib4_rt = kzalloc(sizeof(*fib4_rt), GFP_KERNEL); if (!fib4_rt) return NULL; nsim_fib_rt_init(data, &fib4_rt->common, &fen_info->dst, sizeof(u32), fen_info->dst_len, AF_INET, fen_info->tb_id); fib4_rt->fi = fen_info->fi; 
fib_info_hold(fib4_rt->fi); fib4_rt->dscp = fen_info->dscp; fib4_rt->type = fen_info->type; return fib4_rt; } static void nsim_fib4_rt_destroy(struct nsim_fib4_rt *fib4_rt) { fib_info_put(fib4_rt->fi); nsim_fib_rt_fini(&fib4_rt->common); kfree(fib4_rt); } static struct nsim_fib4_rt * nsim_fib4_rt_lookup(struct rhashtable *fib_rt_ht, const struct fib_entry_notifier_info *fen_info) { struct nsim_fib_rt *fib_rt; fib_rt = nsim_fib_rt_lookup(fib_rt_ht, &fen_info->dst, sizeof(u32), fen_info->dst_len, AF_INET, fen_info->tb_id); if (!fib_rt) return NULL; return container_of(fib_rt, struct nsim_fib4_rt, common); } static void nsim_fib4_rt_offload_failed_flag_set(struct net *net, struct fib_entry_notifier_info *fen_info) { u32 *p_dst = (u32 *)&fen_info->dst; struct fib_rt_info fri; fri.fi = fen_info->fi; fri.tb_id = fen_info->tb_id; fri.dst = cpu_to_be32(*p_dst); fri.dst_len = fen_info->dst_len; fri.dscp = fen_info->dscp; fri.type = fen_info->type; fri.offload = false; fri.trap = false; fri.offload_failed = true; fib_alias_hw_flags_set(net, &fri); } static void nsim_fib4_rt_hw_flags_set(struct net *net, const struct nsim_fib4_rt *fib4_rt, bool trap) { u32 *p_dst = (u32 *) fib4_rt->common.key.addr; int dst_len = fib4_rt->common.key.prefix_len; struct fib_rt_info fri; fri.fi = fib4_rt->fi; fri.tb_id = fib4_rt->common.key.tb_id; fri.dst = cpu_to_be32(*p_dst); fri.dst_len = dst_len; fri.dscp = fib4_rt->dscp; fri.type = fib4_rt->type; fri.offload = false; fri.trap = trap; fri.offload_failed = false; fib_alias_hw_flags_set(net, &fri); } static int nsim_fib4_rt_add(struct nsim_fib_data *data, struct nsim_fib4_rt *fib4_rt) { struct net *net = devlink_net(data->devlink); int err; err = rhashtable_insert_fast(&data->fib_rt_ht, &fib4_rt->common.ht_node, nsim_fib_rt_ht_params); if (err) goto err_fib_dismiss; /* Simulate hardware programming latency. */ msleep(1); nsim_fib4_rt_hw_flags_set(net, fib4_rt, true); return 0; err_fib_dismiss: /* Drop the accounting that was increased from the notification * context when FIB_EVENT_ENTRY_REPLACE was triggered. */ nsim_fib_account(&data->ipv4.fib, false); return err; } static int nsim_fib4_rt_replace(struct nsim_fib_data *data, struct nsim_fib4_rt *fib4_rt, struct nsim_fib4_rt *fib4_rt_old) { struct net *net = devlink_net(data->devlink); int err; /* We are replacing a route, so need to remove the accounting which * was increased when FIB_EVENT_ENTRY_REPLACE was triggered. */ err = nsim_fib_account(&data->ipv4.fib, false); if (err) return err; err = rhashtable_replace_fast(&data->fib_rt_ht, &fib4_rt_old->common.ht_node, &fib4_rt->common.ht_node, nsim_fib_rt_ht_params); if (err) return err; msleep(1); nsim_fib4_rt_hw_flags_set(net, fib4_rt, true); nsim_fib4_rt_hw_flags_set(net, fib4_rt_old, false); nsim_fib4_rt_destroy(fib4_rt_old); return 0; } static int nsim_fib4_rt_insert(struct nsim_fib_data *data, struct fib_entry_notifier_info *fen_info) { struct nsim_fib4_rt *fib4_rt, *fib4_rt_old; int err; if (data->fail_route_offload) { /* For testing purposes, user set debugfs fail_route_offload * value to true. Simulate hardware programming latency and then * fail. 
*/ msleep(1); return -EINVAL; } fib4_rt = nsim_fib4_rt_create(data, fen_info); if (!fib4_rt) return -ENOMEM; fib4_rt_old = nsim_fib4_rt_lookup(&data->fib_rt_ht, fen_info); if (!fib4_rt_old) err = nsim_fib4_rt_add(data, fib4_rt); else err = nsim_fib4_rt_replace(data, fib4_rt, fib4_rt_old); if (err) nsim_fib4_rt_destroy(fib4_rt); return err; } static void nsim_fib4_rt_remove(struct nsim_fib_data *data, const struct fib_entry_notifier_info *fen_info) { struct nsim_fib4_rt *fib4_rt; fib4_rt = nsim_fib4_rt_lookup(&data->fib_rt_ht, fen_info); if (!fib4_rt) return; rhashtable_remove_fast(&data->fib_rt_ht, &fib4_rt->common.ht_node, nsim_fib_rt_ht_params); nsim_fib4_rt_destroy(fib4_rt); } static int nsim_fib4_event(struct nsim_fib_data *data, struct fib_entry_notifier_info *fen_info, unsigned long event) { int err = 0; switch (event) { case FIB_EVENT_ENTRY_REPLACE: err = nsim_fib4_rt_insert(data, fen_info); if (err) { struct net *net = devlink_net(data->devlink); nsim_fib4_rt_offload_failed_flag_set(net, fen_info); } break; case FIB_EVENT_ENTRY_DEL: nsim_fib4_rt_remove(data, fen_info); break; default: break; } return err; } static struct nsim_fib6_rt_nh * nsim_fib6_rt_nh_find(const struct nsim_fib6_rt *fib6_rt, const struct fib6_info *rt) { struct nsim_fib6_rt_nh *fib6_rt_nh; list_for_each_entry(fib6_rt_nh, &fib6_rt->nh_list, list) { if (fib6_rt_nh->rt == rt) return fib6_rt_nh; } return NULL; } static int nsim_fib6_rt_nh_add(struct nsim_fib6_rt *fib6_rt, struct fib6_info *rt) { struct nsim_fib6_rt_nh *fib6_rt_nh; fib6_rt_nh = kzalloc(sizeof(*fib6_rt_nh), GFP_KERNEL); if (!fib6_rt_nh) return -ENOMEM; fib6_info_hold(rt); fib6_rt_nh->rt = rt; list_add_tail(&fib6_rt_nh->list, &fib6_rt->nh_list); fib6_rt->nhs++; return 0; } #if IS_ENABLED(CONFIG_IPV6) static void nsim_rt6_release(struct fib6_info *rt) { fib6_info_release(rt); } #else static void nsim_rt6_release(struct fib6_info *rt) { } #endif static void nsim_fib6_rt_nh_del(struct nsim_fib6_rt *fib6_rt, const struct fib6_info *rt) { struct nsim_fib6_rt_nh *fib6_rt_nh; fib6_rt_nh = nsim_fib6_rt_nh_find(fib6_rt, rt); if (!fib6_rt_nh) return; fib6_rt->nhs--; list_del(&fib6_rt_nh->list); nsim_rt6_release(fib6_rt_nh->rt); kfree(fib6_rt_nh); } static struct nsim_fib6_rt * nsim_fib6_rt_create(struct nsim_fib_data *data, struct fib6_info **rt_arr, unsigned int nrt6) { struct fib6_info *rt = rt_arr[0]; struct nsim_fib6_rt *fib6_rt; int i = 0; int err; fib6_rt = kzalloc(sizeof(*fib6_rt), GFP_KERNEL); if (!fib6_rt) return ERR_PTR(-ENOMEM); nsim_fib_rt_init(data, &fib6_rt->common, &rt->fib6_dst.addr, sizeof(rt->fib6_dst.addr), rt->fib6_dst.plen, AF_INET6, rt->fib6_table->tb6_id); /* We consider a multipath IPv6 route as one entry, but it can be made * up from several fib6_info structs (one for each nexthop), so we * add them all to the same list under the entry. 
*/ INIT_LIST_HEAD(&fib6_rt->nh_list); for (i = 0; i < nrt6; i++) { err = nsim_fib6_rt_nh_add(fib6_rt, rt_arr[i]); if (err) goto err_fib6_rt_nh_del; } return fib6_rt; err_fib6_rt_nh_del: for (i--; i >= 0; i--) { nsim_fib6_rt_nh_del(fib6_rt, rt_arr[i]); } nsim_fib_rt_fini(&fib6_rt->common); kfree(fib6_rt); return ERR_PTR(err); } static void nsim_fib6_rt_destroy(struct nsim_fib6_rt *fib6_rt) { struct nsim_fib6_rt_nh *iter, *tmp; list_for_each_entry_safe(iter, tmp, &fib6_rt->nh_list, list) nsim_fib6_rt_nh_del(fib6_rt, iter->rt); WARN_ON_ONCE(!list_empty(&fib6_rt->nh_list)); nsim_fib_rt_fini(&fib6_rt->common); kfree(fib6_rt); } static struct nsim_fib6_rt * nsim_fib6_rt_lookup(struct rhashtable *fib_rt_ht, const struct fib6_info *rt) { struct nsim_fib_rt *fib_rt; fib_rt = nsim_fib_rt_lookup(fib_rt_ht, &rt->fib6_dst.addr, sizeof(rt->fib6_dst.addr), rt->fib6_dst.plen, AF_INET6, rt->fib6_table->tb6_id); if (!fib_rt) return NULL; return container_of(fib_rt, struct nsim_fib6_rt, common); } static int nsim_fib6_rt_append(struct nsim_fib_data *data, struct nsim_fib6_event *fib6_event) { struct fib6_info *rt = fib6_event->rt_arr[0]; struct nsim_fib6_rt *fib6_rt; int i, err; if (data->fail_route_offload) { /* For testing purposes, user set debugfs fail_route_offload * value to true. Simulate hardware programming latency and then * fail. */ msleep(1); return -EINVAL; } fib6_rt = nsim_fib6_rt_lookup(&data->fib_rt_ht, rt); if (!fib6_rt) return -EINVAL; for (i = 0; i < fib6_event->nrt6; i++) { err = nsim_fib6_rt_nh_add(fib6_rt, fib6_event->rt_arr[i]); if (err) goto err_fib6_rt_nh_del; WRITE_ONCE(fib6_event->rt_arr[i]->trap, true); } return 0; err_fib6_rt_nh_del: for (i--; i >= 0; i--) { WRITE_ONCE(fib6_event->rt_arr[i]->trap, false); nsim_fib6_rt_nh_del(fib6_rt, fib6_event->rt_arr[i]); } return err; } #if IS_ENABLED(CONFIG_IPV6) static void nsim_fib6_rt_offload_failed_flag_set(struct nsim_fib_data *data, struct fib6_info **rt_arr, unsigned int nrt6) { struct net *net = devlink_net(data->devlink); int i; for (i = 0; i < nrt6; i++) fib6_info_hw_flags_set(net, rt_arr[i], false, false, true); } #else static void nsim_fib6_rt_offload_failed_flag_set(struct nsim_fib_data *data, struct fib6_info **rt_arr, unsigned int nrt6) { } #endif #if IS_ENABLED(CONFIG_IPV6) static void nsim_fib6_rt_hw_flags_set(struct nsim_fib_data *data, const struct nsim_fib6_rt *fib6_rt, bool trap) { struct net *net = devlink_net(data->devlink); struct nsim_fib6_rt_nh *fib6_rt_nh; list_for_each_entry(fib6_rt_nh, &fib6_rt->nh_list, list) fib6_info_hw_flags_set(net, fib6_rt_nh->rt, false, trap, false); } #else static void nsim_fib6_rt_hw_flags_set(struct nsim_fib_data *data, const struct nsim_fib6_rt *fib6_rt, bool trap) { } #endif static int nsim_fib6_rt_add(struct nsim_fib_data *data, struct nsim_fib6_rt *fib6_rt) { int err; err = rhashtable_insert_fast(&data->fib_rt_ht, &fib6_rt->common.ht_node, nsim_fib_rt_ht_params); if (err) goto err_fib_dismiss; msleep(1); nsim_fib6_rt_hw_flags_set(data, fib6_rt, true); return 0; err_fib_dismiss: /* Drop the accounting that was increased from the notification * context when FIB_EVENT_ENTRY_REPLACE was triggered. */ nsim_fib_account(&data->ipv6.fib, false); return err; } static int nsim_fib6_rt_replace(struct nsim_fib_data *data, struct nsim_fib6_rt *fib6_rt, struct nsim_fib6_rt *fib6_rt_old) { int err; /* We are replacing a route, so need to remove the accounting which * was increased when FIB_EVENT_ENTRY_REPLACE was triggered. 
*/ err = nsim_fib_account(&data->ipv6.fib, false); if (err) return err; err = rhashtable_replace_fast(&data->fib_rt_ht, &fib6_rt_old->common.ht_node, &fib6_rt->common.ht_node, nsim_fib_rt_ht_params); if (err) return err; msleep(1); nsim_fib6_rt_hw_flags_set(data, fib6_rt, true); nsim_fib6_rt_hw_flags_set(data, fib6_rt_old, false); nsim_fib6_rt_destroy(fib6_rt_old); return 0; } static int nsim_fib6_rt_insert(struct nsim_fib_data *data, struct nsim_fib6_event *fib6_event) { struct fib6_info *rt = fib6_event->rt_arr[0]; struct nsim_fib6_rt *fib6_rt, *fib6_rt_old; int err; if (data->fail_route_offload) { /* For testing purposes, user set debugfs fail_route_offload * value to true. Simulate hardware programming latency and then * fail. */ msleep(1); return -EINVAL; } fib6_rt = nsim_fib6_rt_create(data, fib6_event->rt_arr, fib6_event->nrt6); if (IS_ERR(fib6_rt)) return PTR_ERR(fib6_rt); fib6_rt_old = nsim_fib6_rt_lookup(&data->fib_rt_ht, rt); if (!fib6_rt_old) err = nsim_fib6_rt_add(data, fib6_rt); else err = nsim_fib6_rt_replace(data, fib6_rt, fib6_rt_old); if (err) nsim_fib6_rt_destroy(fib6_rt); return err; } static void nsim_fib6_rt_remove(struct nsim_fib_data *data, struct nsim_fib6_event *fib6_event) { struct fib6_info *rt = fib6_event->rt_arr[0]; struct nsim_fib6_rt *fib6_rt; int i; /* Multipath routes are first added to the FIB trie and only then * notified. If we vetoed the addition, we will get a delete * notification for a route we do not have. Therefore, do not warn if * route was not found. */ fib6_rt = nsim_fib6_rt_lookup(&data->fib_rt_ht, rt); if (!fib6_rt) return; /* If not all the nexthops are deleted, then only reduce the nexthop * group. */ if (fib6_event->nrt6 != fib6_rt->nhs) { for (i = 0; i < fib6_event->nrt6; i++) nsim_fib6_rt_nh_del(fib6_rt, fib6_event->rt_arr[i]); return; } rhashtable_remove_fast(&data->fib_rt_ht, &fib6_rt->common.ht_node, nsim_fib_rt_ht_params); nsim_fib6_rt_destroy(fib6_rt); } static int nsim_fib6_event_init(struct nsim_fib6_event *fib6_event, struct fib6_entry_notifier_info *fen6_info) { struct fib6_info *rt = fen6_info->rt; struct fib6_info **rt_arr; struct fib6_info *iter; unsigned int nrt6; int i = 0; nrt6 = fen6_info->nsiblings + 1; rt_arr = kcalloc(nrt6, sizeof(struct fib6_info *), GFP_ATOMIC); if (!rt_arr) return -ENOMEM; fib6_event->rt_arr = rt_arr; fib6_event->nrt6 = nrt6; rt_arr[0] = rt; fib6_info_hold(rt); if (!fen6_info->nsiblings) return 0; list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) { if (i == fen6_info->nsiblings) break; rt_arr[i + 1] = iter; fib6_info_hold(iter); i++; } WARN_ON_ONCE(i != fen6_info->nsiblings); return 0; } static void nsim_fib6_event_fini(struct nsim_fib6_event *fib6_event) { int i; for (i = 0; i < fib6_event->nrt6; i++) nsim_rt6_release(fib6_event->rt_arr[i]); kfree(fib6_event->rt_arr); } static int nsim_fib6_event(struct nsim_fib_data *data, struct nsim_fib6_event *fib6_event, unsigned long event) { int err; if (fib6_event->rt_arr[0]->fib6_src.plen) return 0; switch (event) { case FIB_EVENT_ENTRY_REPLACE: err = nsim_fib6_rt_insert(data, fib6_event); if (err) goto err_rt_offload_failed_flag_set; break; case FIB_EVENT_ENTRY_APPEND: err = nsim_fib6_rt_append(data, fib6_event); if (err) goto err_rt_offload_failed_flag_set; break; case FIB_EVENT_ENTRY_DEL: nsim_fib6_rt_remove(data, fib6_event); break; default: break; } return 0; err_rt_offload_failed_flag_set: nsim_fib6_rt_offload_failed_flag_set(data, fib6_event->rt_arr, fib6_event->nrt6); return err; } static void nsim_fib_event(struct nsim_fib_event 
*fib_event) { switch (fib_event->family) { case AF_INET: nsim_fib4_event(fib_event->data, &fib_event->fen_info, fib_event->event); fib_info_put(fib_event->fen_info.fi); break; case AF_INET6: nsim_fib6_event(fib_event->data, &fib_event->fib6_event, fib_event->event); nsim_fib6_event_fini(&fib_event->fib6_event); break; } } static int nsim_fib4_prepare_event(struct fib_notifier_info *info, struct nsim_fib_event *fib_event, unsigned long event) { struct nsim_fib_data *data = fib_event->data; struct fib_entry_notifier_info *fen_info; struct netlink_ext_ack *extack; int err = 0; fen_info = container_of(info, struct fib_entry_notifier_info, info); fib_event->fen_info = *fen_info; extack = info->extack; switch (event) { case FIB_EVENT_ENTRY_REPLACE: err = nsim_fib_account(&data->ipv4.fib, true); if (err) { NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported fib entries"); return err; } break; case FIB_EVENT_ENTRY_DEL: if (data->fail_route_delete) { NL_SET_ERR_MSG_MOD(extack, "Failed to process route deletion"); return -EINVAL; } nsim_fib_account(&data->ipv4.fib, false); break; } /* Take reference on fib_info to prevent it from being * freed while event is queued. Release it afterwards. */ fib_info_hold(fib_event->fen_info.fi); return 0; } static int nsim_fib6_prepare_event(struct fib_notifier_info *info, struct nsim_fib_event *fib_event, unsigned long event) { struct nsim_fib_data *data = fib_event->data; struct fib6_entry_notifier_info *fen6_info; struct netlink_ext_ack *extack; int err = 0; fen6_info = container_of(info, struct fib6_entry_notifier_info, info); err = nsim_fib6_event_init(&fib_event->fib6_event, fen6_info); if (err) return err; extack = info->extack; switch (event) { case FIB_EVENT_ENTRY_REPLACE: err = nsim_fib_account(&data->ipv6.fib, true); if (err) { NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported fib entries"); goto err_fib6_event_fini; } break; case FIB_EVENT_ENTRY_DEL: if (data->fail_route_delete) { err = -EINVAL; NL_SET_ERR_MSG_MOD(extack, "Failed to process route deletion"); goto err_fib6_event_fini; } nsim_fib_account(&data->ipv6.fib, false); break; } return 0; err_fib6_event_fini: nsim_fib6_event_fini(&fib_event->fib6_event); return err; } static int nsim_fib_event_schedule_work(struct nsim_fib_data *data, struct fib_notifier_info *info, unsigned long event) { struct nsim_fib_event *fib_event; int err; if (info->family != AF_INET && info->family != AF_INET6) /* netdevsim does not support 'RTNL_FAMILY_IP6MR' and * 'RTNL_FAMILY_IPMR' and should ignore them. 
*/ return NOTIFY_DONE; fib_event = kzalloc(sizeof(*fib_event), GFP_ATOMIC); if (!fib_event) goto err_fib_event_alloc; fib_event->data = data; fib_event->event = event; fib_event->family = info->family; switch (info->family) { case AF_INET: err = nsim_fib4_prepare_event(info, fib_event, event); break; case AF_INET6: err = nsim_fib6_prepare_event(info, fib_event, event); break; } if (err) goto err_fib_prepare_event; /* Enqueue the event and trigger the work */ spin_lock_bh(&data->fib_event_queue_lock); list_add_tail(&fib_event->list, &data->fib_event_queue); spin_unlock_bh(&data->fib_event_queue_lock); schedule_work(&data->fib_event_work); return NOTIFY_DONE; err_fib_prepare_event: kfree(fib_event); err_fib_event_alloc: if (event == FIB_EVENT_ENTRY_DEL) schedule_work(&data->fib_flush_work); return NOTIFY_BAD; } static int nsim_fib_event_nb(struct notifier_block *nb, unsigned long event, void *ptr) { struct nsim_fib_data *data = container_of(nb, struct nsim_fib_data, fib_nb); struct fib_notifier_info *info = ptr; int err; switch (event) { case FIB_EVENT_RULE_ADD: case FIB_EVENT_RULE_DEL: err = nsim_fib_rule_event(data, info, event == FIB_EVENT_RULE_ADD); return notifier_from_errno(err); case FIB_EVENT_ENTRY_REPLACE: case FIB_EVENT_ENTRY_APPEND: case FIB_EVENT_ENTRY_DEL: return nsim_fib_event_schedule_work(data, info, event); } return NOTIFY_DONE; } static void nsim_fib4_rt_free(struct nsim_fib_rt *fib_rt, struct nsim_fib_data *data) { struct devlink *devlink = data->devlink; struct nsim_fib4_rt *fib4_rt; fib4_rt = container_of(fib_rt, struct nsim_fib4_rt, common); nsim_fib4_rt_hw_flags_set(devlink_net(devlink), fib4_rt, false); nsim_fib_account(&data->ipv4.fib, false); nsim_fib4_rt_destroy(fib4_rt); } static void nsim_fib6_rt_free(struct nsim_fib_rt *fib_rt, struct nsim_fib_data *data) { struct nsim_fib6_rt *fib6_rt; fib6_rt = container_of(fib_rt, struct nsim_fib6_rt, common); nsim_fib6_rt_hw_flags_set(data, fib6_rt, false); nsim_fib_account(&data->ipv6.fib, false); nsim_fib6_rt_destroy(fib6_rt); } static void nsim_fib_rt_free(void *ptr, void *arg) { struct nsim_fib_rt *fib_rt = ptr; struct nsim_fib_data *data = arg; switch (fib_rt->key.family) { case AF_INET: nsim_fib4_rt_free(fib_rt, data); break; case AF_INET6: nsim_fib6_rt_free(fib_rt, data); break; default: WARN_ON_ONCE(1); } } /* inconsistent dump, trying again */ static void nsim_fib_dump_inconsistent(struct notifier_block *nb) { struct nsim_fib_data *data = container_of(nb, struct nsim_fib_data, fib_nb); struct nsim_fib_rt *fib_rt, *fib_rt_tmp; /* Flush the work to make sure there is no race with notifications. */ flush_work(&data->fib_event_work); /* The notifier block is still not registered, so we do not need to * take any locks here. */ list_for_each_entry_safe(fib_rt, fib_rt_tmp, &data->fib_rt_list, list) { rhashtable_remove_fast(&data->fib_rt_ht, &fib_rt->ht_node, nsim_fib_rt_ht_params); nsim_fib_rt_free(fib_rt, data); } atomic64_set(&data->ipv4.rules.num, 0ULL); atomic64_set(&data->ipv6.rules.num, 0ULL); } static struct nsim_nexthop *nsim_nexthop_create(struct nsim_fib_data *data, struct nh_notifier_info *info) { struct nsim_nexthop *nexthop; u64 occ = 0; int i; nexthop = kzalloc(sizeof(*nexthop), GFP_KERNEL); if (!nexthop) return ERR_PTR(-ENOMEM); nexthop->id = info->id; /* Determine the number of nexthop entries the new nexthop will * occupy. 
*/ switch (info->type) { case NH_NOTIFIER_INFO_TYPE_SINGLE: occ = 1; break; case NH_NOTIFIER_INFO_TYPE_GRP: for (i = 0; i < info->nh_grp->num_nh; i++) occ += info->nh_grp->nh_entries[i].weight; break; case NH_NOTIFIER_INFO_TYPE_RES_TABLE: occ = info->nh_res_table->num_nh_buckets; nexthop->is_resilient = true; break; default: NL_SET_ERR_MSG_MOD(info->extack, "Unsupported nexthop type"); kfree(nexthop); return ERR_PTR(-EOPNOTSUPP); } nexthop->occ = occ; return nexthop; } static void nsim_nexthop_destroy(struct nsim_nexthop *nexthop) { kfree(nexthop); } static int nsim_nexthop_account(struct nsim_fib_data *data, u64 occ, bool add, struct netlink_ext_ack *extack) { int i, err = 0; if (add) { for (i = 0; i < occ; i++) if (!atomic64_add_unless(&data->nexthops.num, 1, data->nexthops.max)) { err = -ENOSPC; NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported nexthops"); goto err_num_decrease; } } else { if (WARN_ON(occ > atomic64_read(&data->nexthops.num))) return -EINVAL; atomic64_sub(occ, &data->nexthops.num); } return err; err_num_decrease: atomic64_sub(i, &data->nexthops.num); return err; } static void nsim_nexthop_hw_flags_set(struct net *net, const struct nsim_nexthop *nexthop, bool trap) { int i; nexthop_set_hw_flags(net, nexthop->id, false, trap); if (!nexthop->is_resilient) return; for (i = 0; i < nexthop->occ; i++) nexthop_bucket_set_hw_flags(net, nexthop->id, i, false, trap); } static int nsim_nexthop_add(struct nsim_fib_data *data, struct nsim_nexthop *nexthop, struct netlink_ext_ack *extack) { struct net *net = devlink_net(data->devlink); int err; err = nsim_nexthop_account(data, nexthop->occ, true, extack); if (err) return err; err = rhashtable_insert_fast(&data->nexthop_ht, &nexthop->ht_node, nsim_nexthop_ht_params); if (err) { NL_SET_ERR_MSG_MOD(extack, "Failed to insert nexthop"); goto err_nexthop_dismiss; } nsim_nexthop_hw_flags_set(net, nexthop, true); return 0; err_nexthop_dismiss: nsim_nexthop_account(data, nexthop->occ, false, extack); return err; } static int nsim_nexthop_replace(struct nsim_fib_data *data, struct nsim_nexthop *nexthop, struct nsim_nexthop *nexthop_old, struct netlink_ext_ack *extack) { struct net *net = devlink_net(data->devlink); int err; err = nsim_nexthop_account(data, nexthop->occ, true, extack); if (err) return err; err = rhashtable_replace_fast(&data->nexthop_ht, &nexthop_old->ht_node, &nexthop->ht_node, nsim_nexthop_ht_params); if (err) { NL_SET_ERR_MSG_MOD(extack, "Failed to replace nexthop"); goto err_nexthop_dismiss; } nsim_nexthop_hw_flags_set(net, nexthop, true); nsim_nexthop_account(data, nexthop_old->occ, false, extack); nsim_nexthop_destroy(nexthop_old); return 0; err_nexthop_dismiss: nsim_nexthop_account(data, nexthop->occ, false, extack); return err; } static int nsim_nexthop_insert(struct nsim_fib_data *data, struct nh_notifier_info *info) { struct nsim_nexthop *nexthop, *nexthop_old; int err; nexthop = nsim_nexthop_create(data, info); if (IS_ERR(nexthop)) return PTR_ERR(nexthop); nexthop_old = rhashtable_lookup_fast(&data->nexthop_ht, &info->id, nsim_nexthop_ht_params); if (!nexthop_old) err = nsim_nexthop_add(data, nexthop, info->extack); else err = nsim_nexthop_replace(data, nexthop, nexthop_old, info->extack); if (err) nsim_nexthop_destroy(nexthop); return err; } static void nsim_nexthop_remove(struct nsim_fib_data *data, struct nh_notifier_info *info) { struct nsim_nexthop *nexthop; nexthop = rhashtable_lookup_fast(&data->nexthop_ht, &info->id, nsim_nexthop_ht_params); if (!nexthop) return; 
rhashtable_remove_fast(&data->nexthop_ht, &nexthop->ht_node, nsim_nexthop_ht_params); nsim_nexthop_account(data, nexthop->occ, false, info->extack); nsim_nexthop_destroy(nexthop); } static int nsim_nexthop_res_table_pre_replace(struct nsim_fib_data *data, struct nh_notifier_info *info) { if (data->fail_res_nexthop_group_replace) { NL_SET_ERR_MSG_MOD(info->extack, "Failed to replace a resilient nexthop group"); return -EINVAL; } return 0; } static int nsim_nexthop_bucket_replace(struct nsim_fib_data *data, struct nh_notifier_info *info) { if (data->fail_nexthop_bucket_replace) { NL_SET_ERR_MSG_MOD(info->extack, "Failed to replace nexthop bucket"); return -EINVAL; } nexthop_bucket_set_hw_flags(info->net, info->id, info->nh_res_bucket->bucket_index, false, true); return 0; } static int nsim_nexthop_event_nb(struct notifier_block *nb, unsigned long event, void *ptr) { struct nsim_fib_data *data = container_of(nb, struct nsim_fib_data, nexthop_nb); struct nh_notifier_info *info = ptr; int err = 0; mutex_lock(&data->nh_lock); switch (event) { case NEXTHOP_EVENT_REPLACE: err = nsim_nexthop_insert(data, info); break; case NEXTHOP_EVENT_DEL: nsim_nexthop_remove(data, info); break; case NEXTHOP_EVENT_RES_TABLE_PRE_REPLACE: err = nsim_nexthop_res_table_pre_replace(data, info); break; case NEXTHOP_EVENT_BUCKET_REPLACE: err = nsim_nexthop_bucket_replace(data, info); break; default: break; } mutex_unlock(&data->nh_lock); return notifier_from_errno(err); } static void nsim_nexthop_free(void *ptr, void *arg) { struct nsim_nexthop *nexthop = ptr; struct nsim_fib_data *data = arg; struct net *net; net = devlink_net(data->devlink); nsim_nexthop_hw_flags_set(net, nexthop, false); nsim_nexthop_account(data, nexthop->occ, false, NULL); nsim_nexthop_destroy(nexthop); } static ssize_t nsim_nexthop_bucket_activity_write(struct file *file, const char __user *user_buf, size_t size, loff_t *ppos) { struct nsim_fib_data *data = file->private_data; struct net *net = devlink_net(data->devlink); struct nsim_nexthop *nexthop; unsigned long *activity; loff_t pos = *ppos; u16 bucket_index; char buf[128]; int err = 0; u32 nhid; if (pos != 0) return -EINVAL; if (size > sizeof(buf) - 1) return -EINVAL; if (copy_from_user(buf, user_buf, size)) return -EFAULT; buf[size] = 0; if (sscanf(buf, "%u %hu", &nhid, &bucket_index) != 2) return -EINVAL; rtnl_lock(); nexthop = rhashtable_lookup_fast(&data->nexthop_ht, &nhid, nsim_nexthop_ht_params); if (!nexthop || !nexthop->is_resilient || bucket_index >= nexthop->occ) { err = -EINVAL; goto out; } activity = bitmap_zalloc(nexthop->occ, GFP_KERNEL); if (!activity) { err = -ENOMEM; goto out; } bitmap_set(activity, bucket_index, 1); nexthop_res_grp_activity_update(net, nhid, nexthop->occ, activity); bitmap_free(activity); out: rtnl_unlock(); *ppos = size; return err ?: size; } static const struct file_operations nsim_nexthop_bucket_activity_fops = { .open = simple_open, .write = nsim_nexthop_bucket_activity_write, .owner = THIS_MODULE, }; static u64 nsim_fib_ipv4_resource_occ_get(void *priv) { struct nsim_fib_data *data = priv; return nsim_fib_get_val(data, NSIM_RESOURCE_IPV4_FIB, false); } static u64 nsim_fib_ipv4_rules_res_occ_get(void *priv) { struct nsim_fib_data *data = priv; return nsim_fib_get_val(data, NSIM_RESOURCE_IPV4_FIB_RULES, false); } static u64 nsim_fib_ipv6_resource_occ_get(void *priv) { struct nsim_fib_data *data = priv; return nsim_fib_get_val(data, NSIM_RESOURCE_IPV6_FIB, false); } static u64 nsim_fib_ipv6_rules_res_occ_get(void *priv) { struct nsim_fib_data *data = priv; 
return nsim_fib_get_val(data, NSIM_RESOURCE_IPV6_FIB_RULES, false); } static u64 nsim_fib_nexthops_res_occ_get(void *priv) { struct nsim_fib_data *data = priv; return nsim_fib_get_val(data, NSIM_RESOURCE_NEXTHOPS, false); } static void nsim_fib_set_max_all(struct nsim_fib_data *data, struct devlink *devlink) { static const enum nsim_resource_id res_ids[] = { NSIM_RESOURCE_IPV4_FIB, NSIM_RESOURCE_IPV4_FIB_RULES, NSIM_RESOURCE_IPV6_FIB, NSIM_RESOURCE_IPV6_FIB_RULES, NSIM_RESOURCE_NEXTHOPS, }; int i; for (i = 0; i < ARRAY_SIZE(res_ids); i++) { int err; u64 val; err = devl_resource_size_get(devlink, res_ids[i], &val); if (err) val = (u64) -1; nsim_fib_set_max(data, res_ids[i], val); } } static void nsim_fib_event_work(struct work_struct *work) { struct nsim_fib_data *data = container_of(work, struct nsim_fib_data, fib_event_work); struct nsim_fib_event *fib_event, *next_fib_event; LIST_HEAD(fib_event_queue); spin_lock_bh(&data->fib_event_queue_lock); list_splice_init(&data->fib_event_queue, &fib_event_queue); spin_unlock_bh(&data->fib_event_queue_lock); mutex_lock(&data->fib_lock); list_for_each_entry_safe(fib_event, next_fib_event, &fib_event_queue, list) { nsim_fib_event(fib_event); list_del(&fib_event->list); kfree(fib_event); cond_resched(); } mutex_unlock(&data->fib_lock); } static void nsim_fib_flush_work(struct work_struct *work) { struct nsim_fib_data *data = container_of(work, struct nsim_fib_data, fib_flush_work); struct nsim_fib_rt *fib_rt, *fib_rt_tmp; /* Process pending work. */ flush_work(&data->fib_event_work); mutex_lock(&data->fib_lock); list_for_each_entry_safe(fib_rt, fib_rt_tmp, &data->fib_rt_list, list) { rhashtable_remove_fast(&data->fib_rt_ht, &fib_rt->ht_node, nsim_fib_rt_ht_params); nsim_fib_rt_free(fib_rt, data); } mutex_unlock(&data->fib_lock); } static int nsim_fib_debugfs_init(struct nsim_fib_data *data, struct nsim_dev *nsim_dev) { data->ddir = debugfs_create_dir("fib", nsim_dev->ddir); if (IS_ERR(data->ddir)) return PTR_ERR(data->ddir); data->fail_route_offload = false; debugfs_create_bool("fail_route_offload", 0600, data->ddir, &data->fail_route_offload); data->fail_res_nexthop_group_replace = false; debugfs_create_bool("fail_res_nexthop_group_replace", 0600, data->ddir, &data->fail_res_nexthop_group_replace); data->fail_nexthop_bucket_replace = false; debugfs_create_bool("fail_nexthop_bucket_replace", 0600, data->ddir, &data->fail_nexthop_bucket_replace); debugfs_create_file("nexthop_bucket_activity", 0200, data->ddir, data, &nsim_nexthop_bucket_activity_fops); data->fail_route_delete = false; debugfs_create_bool("fail_route_delete", 0600, data->ddir, &data->fail_route_delete); return 0; } static void nsim_fib_debugfs_exit(struct nsim_fib_data *data) { debugfs_remove_recursive(data->ddir); } struct nsim_fib_data *nsim_fib_create(struct devlink *devlink, struct netlink_ext_ack *extack) { struct nsim_fib_data *data; struct nsim_dev *nsim_dev; int err; data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) return ERR_PTR(-ENOMEM); data->devlink = devlink; nsim_dev = devlink_priv(devlink); err = nsim_fib_debugfs_init(data, nsim_dev); if (err) goto err_data_free; mutex_init(&data->nh_lock); err = rhashtable_init(&data->nexthop_ht, &nsim_nexthop_ht_params); if (err) goto err_debugfs_exit; mutex_init(&data->fib_lock); INIT_LIST_HEAD(&data->fib_rt_list); err = rhashtable_init(&data->fib_rt_ht, &nsim_fib_rt_ht_params); if (err) goto err_rhashtable_nexthop_destroy; INIT_WORK(&data->fib_event_work, nsim_fib_event_work); INIT_WORK(&data->fib_flush_work, 
nsim_fib_flush_work); INIT_LIST_HEAD(&data->fib_event_queue); spin_lock_init(&data->fib_event_queue_lock); nsim_fib_set_max_all(data, devlink); data->nexthop_nb.notifier_call = nsim_nexthop_event_nb; err = register_nexthop_notifier(devlink_net(devlink), &data->nexthop_nb, extack); if (err) { pr_err("Failed to register nexthop notifier\n"); goto err_rhashtable_fib_destroy; } data->fib_nb.notifier_call = nsim_fib_event_nb; err = register_fib_notifier(devlink_net(devlink), &data->fib_nb, nsim_fib_dump_inconsistent, extack); if (err) { pr_err("Failed to register fib notifier\n"); goto err_nexthop_nb_unregister; } devl_resource_occ_get_register(devlink, NSIM_RESOURCE_IPV4_FIB, nsim_fib_ipv4_resource_occ_get, data); devl_resource_occ_get_register(devlink, NSIM_RESOURCE_IPV4_FIB_RULES, nsim_fib_ipv4_rules_res_occ_get, data); devl_resource_occ_get_register(devlink, NSIM_RESOURCE_IPV6_FIB, nsim_fib_ipv6_resource_occ_get, data); devl_resource_occ_get_register(devlink, NSIM_RESOURCE_IPV6_FIB_RULES, nsim_fib_ipv6_rules_res_occ_get, data); devl_resource_occ_get_register(devlink, NSIM_RESOURCE_NEXTHOPS, nsim_fib_nexthops_res_occ_get, data); return data; err_nexthop_nb_unregister: unregister_nexthop_notifier(devlink_net(devlink), &data->nexthop_nb); err_rhashtable_fib_destroy: cancel_work_sync(&data->fib_flush_work); flush_work(&data->fib_event_work); rhashtable_free_and_destroy(&data->fib_rt_ht, nsim_fib_rt_free, data); err_rhashtable_nexthop_destroy: rhashtable_free_and_destroy(&data->nexthop_ht, nsim_nexthop_free, data); mutex_destroy(&data->fib_lock); err_debugfs_exit: mutex_destroy(&data->nh_lock); nsim_fib_debugfs_exit(data); err_data_free: kfree(data); return ERR_PTR(err); } void nsim_fib_destroy(struct devlink *devlink, struct nsim_fib_data *data) { devl_resource_occ_get_unregister(devlink, NSIM_RESOURCE_NEXTHOPS); devl_resource_occ_get_unregister(devlink, NSIM_RESOURCE_IPV6_FIB_RULES); devl_resource_occ_get_unregister(devlink, NSIM_RESOURCE_IPV6_FIB); devl_resource_occ_get_unregister(devlink, NSIM_RESOURCE_IPV4_FIB_RULES); devl_resource_occ_get_unregister(devlink, NSIM_RESOURCE_IPV4_FIB); unregister_fib_notifier(devlink_net(devlink), &data->fib_nb); unregister_nexthop_notifier(devlink_net(devlink), &data->nexthop_nb); cancel_work_sync(&data->fib_flush_work); flush_work(&data->fib_event_work); rhashtable_free_and_destroy(&data->fib_rt_ht, nsim_fib_rt_free, data); rhashtable_free_and_destroy(&data->nexthop_ht, nsim_nexthop_free, data); WARN_ON_ONCE(!list_empty(&data->fib_event_queue)); WARN_ON_ONCE(!list_empty(&data->fib_rt_list)); mutex_destroy(&data->fib_lock); mutex_destroy(&data->nh_lock); nsim_fib_debugfs_exit(data); kfree(data); }
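/*
 * Editorial sketch (not part of the driver above): a minimal example of how
 * the two module-facing entry points defined in this file, nsim_fib_create()
 * and nsim_fib_destroy(), are meant to be driven by a netdevsim device
 * instance. The wrapper names below are hypothetical; the real call sites
 * live elsewhere in netdevsim.
 */
static int example_nsim_fib_attach(struct devlink *devlink,
				   struct netlink_ext_ack *extack,
				   struct nsim_fib_data **fib_data)
{
	struct nsim_fib_data *data;

	/* Registers the FIB and nexthop notifiers, the debugfs knobs and the
	 * devlink resource occupancy getters; returns ERR_PTR() on failure.
	 */
	data = nsim_fib_create(devlink, extack);
	if (IS_ERR(data))
		return PTR_ERR(data);

	*fib_data = data;
	return 0;
}

static void example_nsim_fib_detach(struct devlink *devlink,
				    struct nsim_fib_data *fib_data)
{
	/* Unregisters the notifiers, flushes any pending FIB work and frees
	 * all simulated routes and nexthops.
	 */
	nsim_fib_destroy(devlink, fib_data);
}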
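/*
 * Editorial reading aid for the sunrpc cache code that follows (a minimal
 * sketch under assumed names, not taken from the kernel): a consumer embeds
 * a struct cache_head in its own entry type, looks an entry up with
 * sunrpc_cache_lookup_rcu(), then calls cache_check() to learn whether the
 * entry is usable or whether an upcall/deferral was started. "my_cache",
 * "struct my_entry" and my_lookup() are hypothetical; hash_32() comes from
 * <linux/hash.h> and 8 assumes a 256-bucket hash table.
 */
struct my_entry {
	struct cache_head h;
	u32 key;	/* compared by the cache_detail's ->match() */
	u32 value;	/* filled in by ->update() from an upcall reply */
};

static int my_lookup(struct cache_detail *my_cache, struct cache_req *rq,
		     u32 key, u32 *value)
{
	struct my_entry query = { .key = key };
	struct cache_head *ch;
	struct my_entry *e;
	int err;

	/* Returns an existing entry or inserts a not-yet-valid one. */
	ch = sunrpc_cache_lookup_rcu(my_cache, &query.h, hash_32(key, 8));
	if (!ch)
		return -ENOMEM;
	e = container_of(ch, struct my_entry, h);

	/* 0: entry valid and usable; -ENOENT: negative entry; -EAGAIN:
	 * upcall pending and request deferred; -ETIMEDOUT: upcall failed or
	 * the request could not be deferred. On any non-zero return
	 * cache_check() has already dropped our reference.
	 */
	err = cache_check(my_cache, &e->h, rq);
	if (err)
		return err;

	*value = e->value;
	cache_put(&e->h, my_cache);
	return 0;
}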
// SPDX-License-Identifier: GPL-2.0-only /* * net/sunrpc/cache.c * * Generic code for various authentication-related caches * used by sunrpc clients and servers. * * Copyright (C) 2002 Neil Brown <neilb@cse.unsw.edu.au> */ #include <linux/types.h> #include <linux/fs.h> #include <linux/file.h> #include <linux/slab.h> #include <linux/signal.h> #include <linux/sched.h> #include <linux/kmod.h> #include <linux/list.h> #include <linux/module.h> #include <linux/ctype.h> #include <linux/string_helpers.h> #include <linux/uaccess.h> #include <linux/poll.h> #include <linux/seq_file.h> #include <linux/proc_fs.h> #include <linux/net.h> #include <linux/workqueue.h> #include <linux/mutex.h> #include <linux/pagemap.h> #include <asm/ioctls.h> #include <linux/sunrpc/types.h> #include <linux/sunrpc/cache.h> #include <linux/sunrpc/stats.h> #include <linux/sunrpc/rpc_pipe_fs.h> #include <trace/events/sunrpc.h> #include "netns.h" #include "fail.h" #define RPCDBG_FACILITY RPCDBG_CACHE static bool cache_defer_req(struct cache_req *req, struct cache_head *item); static void cache_revisit_request(struct cache_head *item); static void cache_init(struct cache_head *h, struct cache_detail *detail) { time64_t now = seconds_since_boot(); INIT_HLIST_NODE(&h->cache_list); h->flags = 0; kref_init(&h->ref); h->expiry_time = now + CACHE_NEW_EXPIRY; if (now <= detail->flush_time) /* ensure it isn't already expired */ now = detail->flush_time + 1; h->last_refresh = now; } static void cache_fresh_unlocked(struct cache_head *head, struct cache_detail *detail); static struct cache_head *sunrpc_cache_find_rcu(struct cache_detail *detail, struct cache_head *key, int hash) { struct hlist_head *head = &detail->hash_table[hash]; struct cache_head *tmp; rcu_read_lock(); hlist_for_each_entry_rcu(tmp, head, cache_list) { if (!detail->match(tmp, key)) continue; if (test_bit(CACHE_VALID, &tmp->flags) && cache_is_expired(detail, tmp)) continue; tmp = cache_get_rcu(tmp); rcu_read_unlock(); return tmp; } rcu_read_unlock(); return NULL; } static void sunrpc_begin_cache_remove_entry(struct cache_head *ch, struct cache_detail *cd) { /* Must be called under cd->hash_lock */
hlist_del_init_rcu(&ch->cache_list); set_bit(CACHE_CLEANED, &ch->flags); cd->entries --; } static void sunrpc_end_cache_remove_entry(struct cache_head *ch, struct cache_detail *cd) { cache_fresh_unlocked(ch, cd); cache_put(ch, cd); } static struct cache_head *sunrpc_cache_add_entry(struct cache_detail *detail, struct cache_head *key, int hash) { struct cache_head *new, *tmp, *freeme = NULL; struct hlist_head *head = &detail->hash_table[hash]; new = detail->alloc(); if (!new) return NULL; /* must fully initialise 'new', else * we might get lose if we need to * cache_put it soon. */ cache_init(new, detail); detail->init(new, key); spin_lock(&detail->hash_lock); /* check if entry appeared while we slept */ hlist_for_each_entry_rcu(tmp, head, cache_list, lockdep_is_held(&detail->hash_lock)) { if (!detail->match(tmp, key)) continue; if (test_bit(CACHE_VALID, &tmp->flags) && cache_is_expired(detail, tmp)) { sunrpc_begin_cache_remove_entry(tmp, detail); trace_cache_entry_expired(detail, tmp); freeme = tmp; break; } cache_get(tmp); spin_unlock(&detail->hash_lock); cache_put(new, detail); return tmp; } hlist_add_head_rcu(&new->cache_list, head); detail->entries++; if (detail->nextcheck > new->expiry_time) detail->nextcheck = new->expiry_time + 1; cache_get(new); spin_unlock(&detail->hash_lock); if (freeme) sunrpc_end_cache_remove_entry(freeme, detail); return new; } struct cache_head *sunrpc_cache_lookup_rcu(struct cache_detail *detail, struct cache_head *key, int hash) { struct cache_head *ret; ret = sunrpc_cache_find_rcu(detail, key, hash); if (ret) return ret; /* Didn't find anything, insert an empty entry */ return sunrpc_cache_add_entry(detail, key, hash); } EXPORT_SYMBOL_GPL(sunrpc_cache_lookup_rcu); static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch); static void cache_fresh_locked(struct cache_head *head, time64_t expiry, struct cache_detail *detail) { time64_t now = seconds_since_boot(); if (now <= detail->flush_time) /* ensure it isn't immediately treated as expired */ now = detail->flush_time + 1; head->expiry_time = expiry; head->last_refresh = now; smp_wmb(); /* paired with smp_rmb() in cache_is_valid() */ set_bit(CACHE_VALID, &head->flags); } static void cache_fresh_unlocked(struct cache_head *head, struct cache_detail *detail) { if (test_and_clear_bit(CACHE_PENDING, &head->flags)) { cache_revisit_request(head); cache_dequeue(detail, head); } } static void cache_make_negative(struct cache_detail *detail, struct cache_head *h) { set_bit(CACHE_NEGATIVE, &h->flags); trace_cache_entry_make_negative(detail, h); } static void cache_entry_update(struct cache_detail *detail, struct cache_head *h, struct cache_head *new) { if (!test_bit(CACHE_NEGATIVE, &new->flags)) { detail->update(h, new); trace_cache_entry_update(detail, h); } else { cache_make_negative(detail, h); } } struct cache_head *sunrpc_cache_update(struct cache_detail *detail, struct cache_head *new, struct cache_head *old, int hash) { /* The 'old' entry is to be replaced by 'new'. 
* If 'old' is not VALID, we update it directly, * otherwise we need to replace it */ struct cache_head *tmp; if (!test_bit(CACHE_VALID, &old->flags)) { spin_lock(&detail->hash_lock); if (!test_bit(CACHE_VALID, &old->flags)) { cache_entry_update(detail, old, new); cache_fresh_locked(old, new->expiry_time, detail); spin_unlock(&detail->hash_lock); cache_fresh_unlocked(old, detail); return old; } spin_unlock(&detail->hash_lock); } /* We need to insert a new entry */ tmp = detail->alloc(); if (!tmp) { cache_put(old, detail); return NULL; } cache_init(tmp, detail); detail->init(tmp, old); spin_lock(&detail->hash_lock); cache_entry_update(detail, tmp, new); hlist_add_head(&tmp->cache_list, &detail->hash_table[hash]); detail->entries++; cache_get(tmp); cache_fresh_locked(tmp, new->expiry_time, detail); cache_fresh_locked(old, 0, detail); spin_unlock(&detail->hash_lock); cache_fresh_unlocked(tmp, detail); cache_fresh_unlocked(old, detail); cache_put(old, detail); return tmp; } EXPORT_SYMBOL_GPL(sunrpc_cache_update); static inline int cache_is_valid(struct cache_head *h) { if (!test_bit(CACHE_VALID, &h->flags)) return -EAGAIN; else { /* entry is valid */ if (test_bit(CACHE_NEGATIVE, &h->flags)) return -ENOENT; else { /* * In combination with write barrier in * sunrpc_cache_update, ensures that anyone * using the cache entry after this sees the * updated contents: */ smp_rmb(); return 0; } } } static int try_to_negate_entry(struct cache_detail *detail, struct cache_head *h) { int rv; spin_lock(&detail->hash_lock); rv = cache_is_valid(h); if (rv == -EAGAIN) { cache_make_negative(detail, h); cache_fresh_locked(h, seconds_since_boot()+CACHE_NEW_EXPIRY, detail); rv = -ENOENT; } spin_unlock(&detail->hash_lock); cache_fresh_unlocked(h, detail); return rv; } int cache_check_rcu(struct cache_detail *detail, struct cache_head *h, struct cache_req *rqstp) { int rv; time64_t refresh_age, age; /* First decide return status as best we can */ rv = cache_is_valid(h); /* now see if we want to start an upcall */ refresh_age = (h->expiry_time - h->last_refresh); age = seconds_since_boot() - h->last_refresh; if (rqstp == NULL) { if (rv == -EAGAIN) rv = -ENOENT; } else if (rv == -EAGAIN || (h->expiry_time != 0 && age > refresh_age/2)) { dprintk("RPC: Want update, refage=%lld, age=%lld\n", refresh_age, age); switch (detail->cache_upcall(detail, h)) { case -EINVAL: rv = try_to_negate_entry(detail, h); break; case -EAGAIN: cache_fresh_unlocked(h, detail); break; } } if (rv == -EAGAIN) { if (!cache_defer_req(rqstp, h)) { /* * Request was not deferred; handle it as best * we can ourselves: */ rv = cache_is_valid(h); if (rv == -EAGAIN) rv = -ETIMEDOUT; } } return rv; } EXPORT_SYMBOL_GPL(cache_check_rcu); /* * This is the generic cache management routine for all * the authentication caches. * It checks the currency of a cache item and will (later) * initiate an upcall to fill it if needed. * * * Returns 0 if the cache_head can be used, or cache_puts it and returns * -EAGAIN if upcall is pending and request has been queued * -ETIMEDOUT if upcall failed or request could not be queue or * upcall completed but item is still invalid (implying that * the cache item has been replaced with a newer one). * -ENOENT if cache entry was negative */ int cache_check(struct cache_detail *detail, struct cache_head *h, struct cache_req *rqstp) { int rv; rv = cache_check_rcu(detail, h, rqstp); if (rv) cache_put(h, detail); return rv; } EXPORT_SYMBOL_GPL(cache_check); /* * caches need to be periodically cleaned. 
* For this we maintain a list of cache_detail and * a current pointer into that list and into the table * for that entry. * * Each time cache_clean is called it finds the next non-empty entry * in the current table and walks the list in that entry * looking for entries that can be removed. * * An entry gets removed if: * - The expiry is before current time * - The last_refresh time is before the flush_time for that cache * * later we might drop old entries with non-NEVER expiry if that table * is getting 'full' for some definition of 'full' * * The question of "how often to scan a table" is an interesting one * and is answered in part by the use of the "nextcheck" field in the * cache_detail. * When a scan of a table begins, the nextcheck field is set to a time * that is well into the future. * While scanning, if an expiry time is found that is earlier than the * current nextcheck time, nextcheck is set to that expiry time. * If the flush_time is ever set to a time earlier than the nextcheck * time, the nextcheck time is then set to that flush_time. * * A table is then only scanned if the current time is at least * the nextcheck time. * */ static LIST_HEAD(cache_list); static DEFINE_SPINLOCK(cache_list_lock); static struct cache_detail *current_detail; static int current_index; static void do_cache_clean(struct work_struct *work); static struct delayed_work cache_cleaner; void sunrpc_init_cache_detail(struct cache_detail *cd) { spin_lock_init(&cd->hash_lock); INIT_LIST_HEAD(&cd->queue); spin_lock(&cache_list_lock); cd->nextcheck = 0; cd->entries = 0; atomic_set(&cd->writers, 0); cd->last_close = 0; cd->last_warn = -1; list_add(&cd->others, &cache_list); spin_unlock(&cache_list_lock); /* start the cleaning process */ queue_delayed_work(system_power_efficient_wq, &cache_cleaner, 0); } EXPORT_SYMBOL_GPL(sunrpc_init_cache_detail); void sunrpc_destroy_cache_detail(struct cache_detail *cd) { cache_purge(cd); spin_lock(&cache_list_lock); spin_lock(&cd->hash_lock); if (current_detail == cd) current_detail = NULL; list_del_init(&cd->others); spin_unlock(&cd->hash_lock); spin_unlock(&cache_list_lock); if (list_empty(&cache_list)) { /* module must be being unloaded so its safe to kill the worker */ cancel_delayed_work_sync(&cache_cleaner); } } EXPORT_SYMBOL_GPL(sunrpc_destroy_cache_detail); /* clean cache tries to find something to clean * and cleans it. * It returns 1 if it cleaned something, * 0 if it didn't find anything this time * -1 if it fell off the end of the list. 
*/ static int cache_clean(void) { int rv = 0; struct list_head *next; spin_lock(&cache_list_lock); /* find a suitable table if we don't already have one */ while (current_detail == NULL || current_index >= current_detail->hash_size) { if (current_detail) next = current_detail->others.next; else next = cache_list.next; if (next == &cache_list) { current_detail = NULL; spin_unlock(&cache_list_lock); return -1; } current_detail = list_entry(next, struct cache_detail, others); if (current_detail->nextcheck > seconds_since_boot()) current_index = current_detail->hash_size; else { current_index = 0; current_detail->nextcheck = seconds_since_boot()+30*60; } } spin_lock(&current_detail->hash_lock); /* find a non-empty bucket in the table */ while (current_index < current_detail->hash_size && hlist_empty(&current_detail->hash_table[current_index])) current_index++; /* find a cleanable entry in the bucket and clean it, or set to next bucket */ if (current_index < current_detail->hash_size) { struct cache_head *ch = NULL; struct cache_detail *d; struct hlist_head *head; struct hlist_node *tmp; /* Ok, now to clean this strand */ head = &current_detail->hash_table[current_index]; hlist_for_each_entry_safe(ch, tmp, head, cache_list) { if (current_detail->nextcheck > ch->expiry_time) current_detail->nextcheck = ch->expiry_time+1; if (!cache_is_expired(current_detail, ch)) continue; sunrpc_begin_cache_remove_entry(ch, current_detail); trace_cache_entry_expired(current_detail, ch); rv = 1; break; } spin_unlock(&current_detail->hash_lock); d = current_detail; if (!ch) current_index ++; spin_unlock(&cache_list_lock); if (ch) sunrpc_end_cache_remove_entry(ch, d); } else { spin_unlock(&current_detail->hash_lock); spin_unlock(&cache_list_lock); } return rv; } /* * We want to regularly clean the cache, so we need to schedule some work ... */ static void do_cache_clean(struct work_struct *work) { int delay; if (list_empty(&cache_list)) return; if (cache_clean() == -1) delay = round_jiffies_relative(30*HZ); else delay = 5; queue_delayed_work(system_power_efficient_wq, &cache_cleaner, delay); } /* * Clean all caches promptly. This just calls cache_clean * repeatedly until we are sure that every cache has had a chance to * be fully cleaned */ void cache_flush(void) { while (cache_clean() != -1) cond_resched(); while (cache_clean() != -1) cond_resched(); } EXPORT_SYMBOL_GPL(cache_flush); void cache_purge(struct cache_detail *detail) { struct cache_head *ch = NULL; struct hlist_head *head = NULL; int i = 0; spin_lock(&detail->hash_lock); if (!detail->entries) { spin_unlock(&detail->hash_lock); return; } dprintk("RPC: %d entries in %s cache\n", detail->entries, detail->name); for (i = 0; i < detail->hash_size; i++) { head = &detail->hash_table[i]; while (!hlist_empty(head)) { ch = hlist_entry(head->first, struct cache_head, cache_list); sunrpc_begin_cache_remove_entry(ch, detail); spin_unlock(&detail->hash_lock); sunrpc_end_cache_remove_entry(ch, detail); spin_lock(&detail->hash_lock); } } spin_unlock(&detail->hash_lock); } EXPORT_SYMBOL_GPL(cache_purge); /* * Deferral and Revisiting of Requests. * * If a cache lookup finds a pending entry, we * need to defer the request and revisit it later. * All deferred requests are stored in a hash table, * indexed by "struct cache_head *". 
* As it may be wasteful to store a whole request * structure, we allow the request to provide a * deferred form, which must contain a * 'struct cache_deferred_req' * This cache_deferred_req contains a method to allow * it to be revisited when cache info is available */ #define DFR_HASHSIZE (PAGE_SIZE/sizeof(struct list_head)) #define DFR_HASH(item) ((((long)item)>>4 ^ (((long)item)>>13)) % DFR_HASHSIZE) #define DFR_MAX 300 /* ??? */ static DEFINE_SPINLOCK(cache_defer_lock); static LIST_HEAD(cache_defer_list); static struct hlist_head cache_defer_hash[DFR_HASHSIZE]; static int cache_defer_cnt; static void __unhash_deferred_req(struct cache_deferred_req *dreq) { hlist_del_init(&dreq->hash); if (!list_empty(&dreq->recent)) { list_del_init(&dreq->recent); cache_defer_cnt--; } } static void __hash_deferred_req(struct cache_deferred_req *dreq, struct cache_head *item) { int hash = DFR_HASH(item); INIT_LIST_HEAD(&dreq->recent); hlist_add_head(&dreq->hash, &cache_defer_hash[hash]); } static void setup_deferral(struct cache_deferred_req *dreq, struct cache_head *item, int count_me) { dreq->item = item; spin_lock(&cache_defer_lock); __hash_deferred_req(dreq, item); if (count_me) { cache_defer_cnt++; list_add(&dreq->recent, &cache_defer_list); } spin_unlock(&cache_defer_lock); } struct thread_deferred_req { struct cache_deferred_req handle; struct completion completion; }; static void cache_restart_thread(struct cache_deferred_req *dreq, int too_many) { struct thread_deferred_req *dr = container_of(dreq, struct thread_deferred_req, handle); complete(&dr->completion); } static void cache_wait_req(struct cache_req *req, struct cache_head *item) { struct thread_deferred_req sleeper; struct cache_deferred_req *dreq = &sleeper.handle; sleeper.completion = COMPLETION_INITIALIZER_ONSTACK(sleeper.completion); dreq->revisit = cache_restart_thread; setup_deferral(dreq, item, 0); if (!test_bit(CACHE_PENDING, &item->flags) || wait_for_completion_interruptible_timeout( &sleeper.completion, req->thread_wait) <= 0) { /* The completion wasn't completed, so we need * to clean up */ spin_lock(&cache_defer_lock); if (!hlist_unhashed(&sleeper.handle.hash)) { __unhash_deferred_req(&sleeper.handle); spin_unlock(&cache_defer_lock); } else { /* cache_revisit_request already removed * this from the hash table, but hasn't * called ->revisit yet. It will very soon * and we need to wait for it. */ spin_unlock(&cache_defer_lock); wait_for_completion(&sleeper.completion); } } } static void cache_limit_defers(void) { /* Make sure we haven't exceed the limit of allowed deferred * requests. */ struct cache_deferred_req *discard = NULL; if (cache_defer_cnt <= DFR_MAX) return; spin_lock(&cache_defer_lock); /* Consider removing either the first or the last */ if (cache_defer_cnt > DFR_MAX) { if (get_random_u32_below(2)) discard = list_entry(cache_defer_list.next, struct cache_deferred_req, recent); else discard = list_entry(cache_defer_list.prev, struct cache_deferred_req, recent); __unhash_deferred_req(discard); } spin_unlock(&cache_defer_lock); if (discard) discard->revisit(discard, 1); } #if IS_ENABLED(CONFIG_FAIL_SUNRPC) static inline bool cache_defer_immediately(void) { return !fail_sunrpc.ignore_cache_wait && should_fail(&fail_sunrpc.attr, 1); } #else static inline bool cache_defer_immediately(void) { return false; } #endif /* Return true if and only if a deferred request is queued. 
*/ static bool cache_defer_req(struct cache_req *req, struct cache_head *item) { struct cache_deferred_req *dreq; if (!cache_defer_immediately()) { cache_wait_req(req, item); if (!test_bit(CACHE_PENDING, &item->flags)) return false; } dreq = req->defer(req); if (dreq == NULL) return false; setup_deferral(dreq, item, 1); if (!test_bit(CACHE_PENDING, &item->flags)) /* Bit could have been cleared before we managed to * set up the deferral, so need to revisit just in case */ cache_revisit_request(item); cache_limit_defers(); return true; } static void cache_revisit_request(struct cache_head *item) { struct cache_deferred_req *dreq; struct hlist_node *tmp; int hash = DFR_HASH(item); LIST_HEAD(pending); spin_lock(&cache_defer_lock); hlist_for_each_entry_safe(dreq, tmp, &cache_defer_hash[hash], hash) if (dreq->item == item) { __unhash_deferred_req(dreq); list_add(&dreq->recent, &pending); } spin_unlock(&cache_defer_lock); while (!list_empty(&pending)) { dreq = list_entry(pending.next, struct cache_deferred_req, recent); list_del_init(&dreq->recent); dreq->revisit(dreq, 0); } } void cache_clean_deferred(void *owner) { struct cache_deferred_req *dreq, *tmp; LIST_HEAD(pending); spin_lock(&cache_defer_lock); list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) { if (dreq->owner == owner) { __unhash_deferred_req(dreq); list_add(&dreq->recent, &pending); } } spin_unlock(&cache_defer_lock); while (!list_empty(&pending)) { dreq = list_entry(pending.next, struct cache_deferred_req, recent); list_del_init(&dreq->recent); dreq->revisit(dreq, 1); } } /* * communicate with user-space * * We have a magic /proc file - /proc/net/rpc/<cachename>/channel. * On read, you get a full request, or block. * On write, an update request is processed. * Poll works if anything to read, and always allows write. * * Implemented by linked list of requests. Each open file has * a ->private that also exists in this list. New requests are added * to the end and may wakeup and preceding readers. * New readers are added to the head. If, on read, an item is found with * CACHE_UPCALLING clear, we free it from the list. 
* */ static DEFINE_SPINLOCK(queue_lock); struct cache_queue { struct list_head list; int reader; /* if 0, then request */ }; struct cache_request { struct cache_queue q; struct cache_head *item; char * buf; int len; int readers; }; struct cache_reader { struct cache_queue q; int offset; /* if non-0, we have a refcnt on next request */ }; static int cache_request(struct cache_detail *detail, struct cache_request *crq) { char *bp = crq->buf; int len = PAGE_SIZE; detail->cache_request(detail, crq->item, &bp, &len); if (len < 0) return -E2BIG; return PAGE_SIZE - len; } static ssize_t cache_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos, struct cache_detail *cd) { struct cache_reader *rp = filp->private_data; struct cache_request *rq; struct inode *inode = file_inode(filp); int err; if (count == 0) return 0; inode_lock(inode); /* protect against multiple concurrent * readers on this file */ again: spin_lock(&queue_lock); /* need to find next request */ while (rp->q.list.next != &cd->queue && list_entry(rp->q.list.next, struct cache_queue, list) ->reader) { struct list_head *next = rp->q.list.next; list_move(&rp->q.list, next); } if (rp->q.list.next == &cd->queue) { spin_unlock(&queue_lock); inode_unlock(inode); WARN_ON_ONCE(rp->offset); return 0; } rq = container_of(rp->q.list.next, struct cache_request, q.list); WARN_ON_ONCE(rq->q.reader); if (rp->offset == 0) rq->readers++; spin_unlock(&queue_lock); if (rq->len == 0) { err = cache_request(cd, rq); if (err < 0) goto out; rq->len = err; } if (rp->offset == 0 && !test_bit(CACHE_PENDING, &rq->item->flags)) { err = -EAGAIN; spin_lock(&queue_lock); list_move(&rp->q.list, &rq->q.list); spin_unlock(&queue_lock); } else { if (rp->offset + count > rq->len) count = rq->len - rp->offset; err = -EFAULT; if (copy_to_user(buf, rq->buf + rp->offset, count)) goto out; rp->offset += count; if (rp->offset >= rq->len) { rp->offset = 0; spin_lock(&queue_lock); list_move(&rp->q.list, &rq->q.list); spin_unlock(&queue_lock); } err = 0; } out: if (rp->offset == 0) { /* need to release rq */ spin_lock(&queue_lock); rq->readers--; if (rq->readers == 0 && !test_bit(CACHE_PENDING, &rq->item->flags)) { list_del(&rq->q.list); spin_unlock(&queue_lock); cache_put(rq->item, cd); kfree(rq->buf); kfree(rq); } else spin_unlock(&queue_lock); } if (err == -EAGAIN) goto again; inode_unlock(inode); return err ? 
err : count; } static ssize_t cache_do_downcall(char *kaddr, const char __user *buf, size_t count, struct cache_detail *cd) { ssize_t ret; if (count == 0) return -EINVAL; if (copy_from_user(kaddr, buf, count)) return -EFAULT; kaddr[count] = '\0'; ret = cd->cache_parse(cd, kaddr, count); if (!ret) ret = count; return ret; } static ssize_t cache_downcall(struct address_space *mapping, const char __user *buf, size_t count, struct cache_detail *cd) { char *write_buf; ssize_t ret = -ENOMEM; if (count >= 32768) { /* 32k is max userland buffer, lets check anyway */ ret = -EINVAL; goto out; } write_buf = kvmalloc(count + 1, GFP_KERNEL); if (!write_buf) goto out; ret = cache_do_downcall(write_buf, buf, count, cd); kvfree(write_buf); out: return ret; } static ssize_t cache_write(struct file *filp, const char __user *buf, size_t count, loff_t *ppos, struct cache_detail *cd) { struct address_space *mapping = filp->f_mapping; struct inode *inode = file_inode(filp); ssize_t ret = -EINVAL; if (!cd->cache_parse) goto out; inode_lock(inode); ret = cache_downcall(mapping, buf, count, cd); inode_unlock(inode); out: return ret; } static DECLARE_WAIT_QUEUE_HEAD(queue_wait); static __poll_t cache_poll(struct file *filp, poll_table *wait, struct cache_detail *cd) { __poll_t mask; struct cache_reader *rp = filp->private_data; struct cache_queue *cq; poll_wait(filp, &queue_wait, wait); /* alway allow write */ mask = EPOLLOUT | EPOLLWRNORM; if (!rp) return mask; spin_lock(&queue_lock); for (cq= &rp->q; &cq->list != &cd->queue; cq = list_entry(cq->list.next, struct cache_queue, list)) if (!cq->reader) { mask |= EPOLLIN | EPOLLRDNORM; break; } spin_unlock(&queue_lock); return mask; } static int cache_ioctl(struct inode *ino, struct file *filp, unsigned int cmd, unsigned long arg, struct cache_detail *cd) { int len = 0; struct cache_reader *rp = filp->private_data; struct cache_queue *cq; if (cmd != FIONREAD || !rp) return -EINVAL; spin_lock(&queue_lock); /* only find the length remaining in current request, * or the length of the next request */ for (cq= &rp->q; &cq->list != &cd->queue; cq = list_entry(cq->list.next, struct cache_queue, list)) if (!cq->reader) { struct cache_request *cr = container_of(cq, struct cache_request, q); len = cr->len - rp->offset; break; } spin_unlock(&queue_lock); return put_user(len, (int __user *)arg); } static int cache_open(struct inode *inode, struct file *filp, struct cache_detail *cd) { struct cache_reader *rp = NULL; if (!cd || !try_module_get(cd->owner)) return -EACCES; nonseekable_open(inode, filp); if (filp->f_mode & FMODE_READ) { rp = kmalloc(sizeof(*rp), GFP_KERNEL); if (!rp) { module_put(cd->owner); return -ENOMEM; } rp->offset = 0; rp->q.reader = 1; spin_lock(&queue_lock); list_add(&rp->q.list, &cd->queue); spin_unlock(&queue_lock); } if (filp->f_mode & FMODE_WRITE) atomic_inc(&cd->writers); filp->private_data = rp; return 0; } static int cache_release(struct inode *inode, struct file *filp, struct cache_detail *cd) { struct cache_reader *rp = filp->private_data; if (rp) { spin_lock(&queue_lock); if (rp->offset) { struct cache_queue *cq; for (cq= &rp->q; &cq->list != &cd->queue; cq = list_entry(cq->list.next, struct cache_queue, list)) if (!cq->reader) { container_of(cq, struct cache_request, q) ->readers--; break; } rp->offset = 0; } list_del(&rp->q.list); spin_unlock(&queue_lock); filp->private_data = NULL; kfree(rp); } if (filp->f_mode & FMODE_WRITE) { atomic_dec(&cd->writers); cd->last_close = seconds_since_boot(); } module_put(cd->owner); return 0; } static void 
cache_dequeue(struct cache_detail *detail, struct cache_head *ch) { struct cache_queue *cq, *tmp; struct cache_request *cr; LIST_HEAD(dequeued); spin_lock(&queue_lock); list_for_each_entry_safe(cq, tmp, &detail->queue, list) if (!cq->reader) { cr = container_of(cq, struct cache_request, q); if (cr->item != ch) continue; if (test_bit(CACHE_PENDING, &ch->flags)) /* Lost a race and it is pending again */ break; if (cr->readers != 0) continue; list_move(&cr->q.list, &dequeued); } spin_unlock(&queue_lock); while (!list_empty(&dequeued)) { cr = list_entry(dequeued.next, struct cache_request, q.list); list_del(&cr->q.list); cache_put(cr->item, detail); kfree(cr->buf); kfree(cr); } } /* * Support routines for text-based upcalls. * Fields are separated by spaces. * Fields are either mangled to quote space tab newline slosh with slosh * or a hexified with a leading \x * Record is terminated with newline. * */ void qword_add(char **bpp, int *lp, char *str) { char *bp = *bpp; int len = *lp; int ret; if (len < 0) return; ret = string_escape_str(str, bp, len, ESCAPE_OCTAL, "\\ \n\t"); if (ret >= len) { bp += len; len = -1; } else { bp += ret; len -= ret; *bp++ = ' '; len--; } *bpp = bp; *lp = len; } EXPORT_SYMBOL_GPL(qword_add); void qword_addhex(char **bpp, int *lp, char *buf, int blen) { char *bp = *bpp; int len = *lp; if (len < 0) return; if (len > 2) { *bp++ = '\\'; *bp++ = 'x'; len -= 2; while (blen && len >= 2) { bp = hex_byte_pack(bp, *buf++); len -= 2; blen--; } } if (blen || len<1) len = -1; else { *bp++ = ' '; len--; } *bpp = bp; *lp = len; } EXPORT_SYMBOL_GPL(qword_addhex); static void warn_no_listener(struct cache_detail *detail) { if (detail->last_warn != detail->last_close) { detail->last_warn = detail->last_close; if (detail->warn_no_listener) detail->warn_no_listener(detail, detail->last_close != 0); } } static bool cache_listeners_exist(struct cache_detail *detail) { if (atomic_read(&detail->writers)) return true; if (detail->last_close == 0) /* This cache was never opened */ return false; if (detail->last_close < seconds_since_boot() - 30) /* * We allow for the possibility that someone might * restart a userspace daemon without restarting the * server; but after 30 seconds, we give up. */ return false; return true; } /* * register an upcall request to user-space and queue it up for read() by the * upcall daemon. * * Each request is at most one page long. 
*/ static int cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h) { char *buf; struct cache_request *crq; int ret = 0; if (test_bit(CACHE_CLEANED, &h->flags)) /* Too late to make an upcall */ return -EAGAIN; buf = kmalloc(PAGE_SIZE, GFP_KERNEL); if (!buf) return -EAGAIN; crq = kmalloc(sizeof (*crq), GFP_KERNEL); if (!crq) { kfree(buf); return -EAGAIN; } crq->q.reader = 0; crq->buf = buf; crq->len = 0; crq->readers = 0; spin_lock(&queue_lock); if (test_bit(CACHE_PENDING, &h->flags)) { crq->item = cache_get(h); list_add_tail(&crq->q.list, &detail->queue); trace_cache_entry_upcall(detail, h); } else /* Lost a race, no longer PENDING, so don't enqueue */ ret = -EAGAIN; spin_unlock(&queue_lock); wake_up(&queue_wait); if (ret == -EAGAIN) { kfree(buf); kfree(crq); } return ret; } int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h) { if (test_and_set_bit(CACHE_PENDING, &h->flags)) return 0; return cache_pipe_upcall(detail, h); } EXPORT_SYMBOL_GPL(sunrpc_cache_pipe_upcall); int sunrpc_cache_pipe_upcall_timeout(struct cache_detail *detail, struct cache_head *h) { if (!cache_listeners_exist(detail)) { warn_no_listener(detail); trace_cache_entry_no_listener(detail, h); return -EINVAL; } return sunrpc_cache_pipe_upcall(detail, h); } EXPORT_SYMBOL_GPL(sunrpc_cache_pipe_upcall_timeout); /* * parse a message from user-space and pass it * to an appropriate cache * Messages are, like requests, separated into fields by * spaces and dequotes as \xHEXSTRING or embedded \nnn octal * * Message is * reply cachename expiry key ... content.... * * key and content are both parsed by cache */ int qword_get(char **bpp, char *dest, int bufsize) { /* return bytes copied, or -1 on error */ char *bp = *bpp; int len = 0; while (*bp == ' ') bp++; if (bp[0] == '\\' && bp[1] == 'x') { /* HEX STRING */ bp += 2; while (len < bufsize - 1) { int h, l; h = hex_to_bin(bp[0]); if (h < 0) break; l = hex_to_bin(bp[1]); if (l < 0) break; *dest++ = (h << 4) | l; bp += 2; len++; } } else { /* text with \nnn octal quoting */ while (*bp != ' ' && *bp != '\n' && *bp && len < bufsize-1) { if (*bp == '\\' && isodigit(bp[1]) && (bp[1] <= '3') && isodigit(bp[2]) && isodigit(bp[3])) { int byte = (*++bp -'0'); bp++; byte = (byte << 3) | (*bp++ - '0'); byte = (byte << 3) | (*bp++ - '0'); *dest++ = byte; len++; } else { *dest++ = *bp++; len++; } } } if (*bp != ' ' && *bp != '\n' && *bp != '\0') return -1; while (*bp == ' ') bp++; *bpp = bp; *dest = '\0'; return len; } EXPORT_SYMBOL_GPL(qword_get); /* * support /proc/net/rpc/$CACHENAME/content * as a seqfile. 
* We call ->cache_show passing NULL for the item to * get a header, then pass each real item in the cache */ static void *__cache_seq_start(struct seq_file *m, loff_t *pos) { loff_t n = *pos; unsigned int hash, entry; struct cache_head *ch; struct cache_detail *cd = m->private; if (!n--) return SEQ_START_TOKEN; hash = n >> 32; entry = n & ((1LL<<32) - 1); hlist_for_each_entry_rcu(ch, &cd->hash_table[hash], cache_list) if (!entry--) return ch; n &= ~((1LL<<32) - 1); do { hash++; n += 1LL<<32; } while(hash < cd->hash_size && hlist_empty(&cd->hash_table[hash])); if (hash >= cd->hash_size) return NULL; *pos = n+1; return hlist_entry_safe(rcu_dereference_raw( hlist_first_rcu(&cd->hash_table[hash])), struct cache_head, cache_list); } static void *cache_seq_next(struct seq_file *m, void *p, loff_t *pos) { struct cache_head *ch = p; int hash = (*pos >> 32); struct cache_detail *cd = m->private; if (p == SEQ_START_TOKEN) hash = 0; else if (ch->cache_list.next == NULL) { hash++; *pos += 1LL<<32; } else { ++*pos; return hlist_entry_safe(rcu_dereference_raw( hlist_next_rcu(&ch->cache_list)), struct cache_head, cache_list); } *pos &= ~((1LL<<32) - 1); while (hash < cd->hash_size && hlist_empty(&cd->hash_table[hash])) { hash++; *pos += 1LL<<32; } if (hash >= cd->hash_size) return NULL; ++*pos; return hlist_entry_safe(rcu_dereference_raw( hlist_first_rcu(&cd->hash_table[hash])), struct cache_head, cache_list); } void *cache_seq_start_rcu(struct seq_file *m, loff_t *pos) __acquires(RCU) { rcu_read_lock(); return __cache_seq_start(m, pos); } EXPORT_SYMBOL_GPL(cache_seq_start_rcu); void *cache_seq_next_rcu(struct seq_file *file, void *p, loff_t *pos) { return cache_seq_next(file, p, pos); } EXPORT_SYMBOL_GPL(cache_seq_next_rcu); void cache_seq_stop_rcu(struct seq_file *m, void *p) __releases(RCU) { rcu_read_unlock(); } EXPORT_SYMBOL_GPL(cache_seq_stop_rcu); static int c_show(struct seq_file *m, void *p) { struct cache_head *cp = p; struct cache_detail *cd = m->private; if (p == SEQ_START_TOKEN) return cd->cache_show(m, cd, NULL); ifdebug(CACHE) seq_printf(m, "# expiry=%lld refcnt=%d flags=%lx\n", convert_to_wallclock(cp->expiry_time), kref_read(&cp->ref), cp->flags); if (cache_check_rcu(cd, cp, NULL)) seq_puts(m, "# "); else if (cache_is_expired(cd, cp)) seq_puts(m, "# "); return cd->cache_show(m, cd, cp); } static const struct seq_operations cache_content_op = { .start = cache_seq_start_rcu, .next = cache_seq_next_rcu, .stop = cache_seq_stop_rcu, .show = c_show, }; static int content_open(struct inode *inode, struct file *file, struct cache_detail *cd) { struct seq_file *seq; int err; if (!cd || !try_module_get(cd->owner)) return -EACCES; err = seq_open(file, &cache_content_op); if (err) { module_put(cd->owner); return err; } seq = file->private_data; seq->private = cd; return 0; } static int content_release(struct inode *inode, struct file *file, struct cache_detail *cd) { int ret = seq_release(inode, file); module_put(cd->owner); return ret; } static int open_flush(struct inode *inode, struct file *file, struct cache_detail *cd) { if (!cd || !try_module_get(cd->owner)) return -EACCES; return nonseekable_open(inode, file); } static int release_flush(struct inode *inode, struct file *file, struct cache_detail *cd) { module_put(cd->owner); return 0; } static ssize_t read_flush(struct file *file, char __user *buf, size_t count, loff_t *ppos, struct cache_detail *cd) { char tbuf[22]; size_t len; len = snprintf(tbuf, sizeof(tbuf), "%llu\n", convert_to_wallclock(cd->flush_time)); return 
simple_read_from_buffer(buf, count, ppos, tbuf, len); } static ssize_t write_flush(struct file *file, const char __user *buf, size_t count, loff_t *ppos, struct cache_detail *cd) { char tbuf[20]; char *ep; time64_t now; if (*ppos || count > sizeof(tbuf)-1) return -EINVAL; if (copy_from_user(tbuf, buf, count)) return -EFAULT; tbuf[count] = 0; simple_strtoul(tbuf, &ep, 0); if (*ep && *ep != '\n') return -EINVAL; /* Note that while we check that 'buf' holds a valid number, * we always ignore the value and just flush everything. * Making use of the number leads to races. */ now = seconds_since_boot(); /* Always flush everything, so behave like cache_purge() * Do this by advancing flush_time to the current time, * or by one second if it has already reached the current time. * Newly added cache entries will always have ->last_refresh greater * that ->flush_time, so they don't get flushed prematurely. */ if (cd->flush_time >= now) now = cd->flush_time + 1; cd->flush_time = now; cd->nextcheck = now; cache_flush(); if (cd->flush) cd->flush(); *ppos += count; return count; } static ssize_t cache_read_procfs(struct file *filp, char __user *buf, size_t count, loff_t *ppos) { struct cache_detail *cd = pde_data(file_inode(filp)); return cache_read(filp, buf, count, ppos, cd); } static ssize_t cache_write_procfs(struct file *filp, const char __user *buf, size_t count, loff_t *ppos) { struct cache_detail *cd = pde_data(file_inode(filp)); return cache_write(filp, buf, count, ppos, cd); } static __poll_t cache_poll_procfs(struct file *filp, poll_table *wait) { struct cache_detail *cd = pde_data(file_inode(filp)); return cache_poll(filp, wait, cd); } static long cache_ioctl_procfs(struct file *filp, unsigned int cmd, unsigned long arg) { struct inode *inode = file_inode(filp); struct cache_detail *cd = pde_data(inode); return cache_ioctl(inode, filp, cmd, arg, cd); } static int cache_open_procfs(struct inode *inode, struct file *filp) { struct cache_detail *cd = pde_data(inode); return cache_open(inode, filp, cd); } static int cache_release_procfs(struct inode *inode, struct file *filp) { struct cache_detail *cd = pde_data(inode); return cache_release(inode, filp, cd); } static const struct proc_ops cache_channel_proc_ops = { .proc_read = cache_read_procfs, .proc_write = cache_write_procfs, .proc_poll = cache_poll_procfs, .proc_ioctl = cache_ioctl_procfs, /* for FIONREAD */ .proc_open = cache_open_procfs, .proc_release = cache_release_procfs, }; static int content_open_procfs(struct inode *inode, struct file *filp) { struct cache_detail *cd = pde_data(inode); return content_open(inode, filp, cd); } static int content_release_procfs(struct inode *inode, struct file *filp) { struct cache_detail *cd = pde_data(inode); return content_release(inode, filp, cd); } static const struct proc_ops content_proc_ops = { .proc_open = content_open_procfs, .proc_read = seq_read, .proc_lseek = seq_lseek, .proc_release = content_release_procfs, }; static int open_flush_procfs(struct inode *inode, struct file *filp) { struct cache_detail *cd = pde_data(inode); return open_flush(inode, filp, cd); } static int release_flush_procfs(struct inode *inode, struct file *filp) { struct cache_detail *cd = pde_data(inode); return release_flush(inode, filp, cd); } static ssize_t read_flush_procfs(struct file *filp, char __user *buf, size_t count, loff_t *ppos) { struct cache_detail *cd = pde_data(file_inode(filp)); return read_flush(filp, buf, count, ppos, cd); } static ssize_t write_flush_procfs(struct file *filp, const char __user *buf, 
size_t count, loff_t *ppos) { struct cache_detail *cd = pde_data(file_inode(filp)); return write_flush(filp, buf, count, ppos, cd); } static const struct proc_ops cache_flush_proc_ops = { .proc_open = open_flush_procfs, .proc_read = read_flush_procfs, .proc_write = write_flush_procfs, .proc_release = release_flush_procfs, }; static void remove_cache_proc_entries(struct cache_detail *cd) { if (cd->procfs) { proc_remove(cd->procfs); cd->procfs = NULL; } } static int create_cache_proc_entries(struct cache_detail *cd, struct net *net) { struct proc_dir_entry *p; struct sunrpc_net *sn; if (!IS_ENABLED(CONFIG_PROC_FS)) return 0; sn = net_generic(net, sunrpc_net_id); cd->procfs = proc_mkdir(cd->name, sn->proc_net_rpc); if (cd->procfs == NULL) goto out_nomem; p = proc_create_data("flush", S_IFREG | 0600, cd->procfs, &cache_flush_proc_ops, cd); if (p == NULL) goto out_nomem; if (cd->cache_request || cd->cache_parse) { p = proc_create_data("channel", S_IFREG | 0600, cd->procfs, &cache_channel_proc_ops, cd); if (p == NULL) goto out_nomem; } if (cd->cache_show) { p = proc_create_data("content", S_IFREG | 0400, cd->procfs, &content_proc_ops, cd); if (p == NULL) goto out_nomem; } return 0; out_nomem: remove_cache_proc_entries(cd); return -ENOMEM; } void __init cache_initialize(void) { INIT_DEFERRABLE_WORK(&cache_cleaner, do_cache_clean); } int cache_register_net(struct cache_detail *cd, struct net *net) { int ret; sunrpc_init_cache_detail(cd); ret = create_cache_proc_entries(cd, net); if (ret) sunrpc_destroy_cache_detail(cd); return ret; } EXPORT_SYMBOL_GPL(cache_register_net); void cache_unregister_net(struct cache_detail *cd, struct net *net) { remove_cache_proc_entries(cd); sunrpc_destroy_cache_detail(cd); } EXPORT_SYMBOL_GPL(cache_unregister_net); struct cache_detail *cache_create_net(const struct cache_detail *tmpl, struct net *net) { struct cache_detail *cd; int i; cd = kmemdup(tmpl, sizeof(struct cache_detail), GFP_KERNEL); if (cd == NULL) return ERR_PTR(-ENOMEM); cd->hash_table = kcalloc(cd->hash_size, sizeof(struct hlist_head), GFP_KERNEL); if (cd->hash_table == NULL) { kfree(cd); return ERR_PTR(-ENOMEM); } for (i = 0; i < cd->hash_size; i++) INIT_HLIST_HEAD(&cd->hash_table[i]); cd->net = net; return cd; } EXPORT_SYMBOL_GPL(cache_create_net); void cache_destroy_net(struct cache_detail *cd, struct net *net) { kfree(cd->hash_table); kfree(cd); } EXPORT_SYMBOL_GPL(cache_destroy_net); static ssize_t cache_read_pipefs(struct file *filp, char __user *buf, size_t count, loff_t *ppos) { struct cache_detail *cd = RPC_I(file_inode(filp))->private; return cache_read(filp, buf, count, ppos, cd); } static ssize_t cache_write_pipefs(struct file *filp, const char __user *buf, size_t count, loff_t *ppos) { struct cache_detail *cd = RPC_I(file_inode(filp))->private; return cache_write(filp, buf, count, ppos, cd); } static __poll_t cache_poll_pipefs(struct file *filp, poll_table *wait) { struct cache_detail *cd = RPC_I(file_inode(filp))->private; return cache_poll(filp, wait, cd); } static long cache_ioctl_pipefs(struct file *filp, unsigned int cmd, unsigned long arg) { struct inode *inode = file_inode(filp); struct cache_detail *cd = RPC_I(inode)->private; return cache_ioctl(inode, filp, cmd, arg, cd); } static int cache_open_pipefs(struct inode *inode, struct file *filp) { struct cache_detail *cd = RPC_I(inode)->private; return cache_open(inode, filp, cd); } static int cache_release_pipefs(struct inode *inode, struct file *filp) { struct cache_detail *cd = RPC_I(inode)->private; return cache_release(inode, 
filp, cd); } const struct file_operations cache_file_operations_pipefs = { .owner = THIS_MODULE, .read = cache_read_pipefs, .write = cache_write_pipefs, .poll = cache_poll_pipefs, .unlocked_ioctl = cache_ioctl_pipefs, /* for FIONREAD */ .open = cache_open_pipefs, .release = cache_release_pipefs, }; static int content_open_pipefs(struct inode *inode, struct file *filp) { struct cache_detail *cd = RPC_I(inode)->private; return content_open(inode, filp, cd); } static int content_release_pipefs(struct inode *inode, struct file *filp) { struct cache_detail *cd = RPC_I(inode)->private; return content_release(inode, filp, cd); } const struct file_operations content_file_operations_pipefs = { .open = content_open_pipefs, .read = seq_read, .llseek = seq_lseek, .release = content_release_pipefs, }; static int open_flush_pipefs(struct inode *inode, struct file *filp) { struct cache_detail *cd = RPC_I(inode)->private; return open_flush(inode, filp, cd); } static int release_flush_pipefs(struct inode *inode, struct file *filp) { struct cache_detail *cd = RPC_I(inode)->private; return release_flush(inode, filp, cd); } static ssize_t read_flush_pipefs(struct file *filp, char __user *buf, size_t count, loff_t *ppos) { struct cache_detail *cd = RPC_I(file_inode(filp))->private; return read_flush(filp, buf, count, ppos, cd); } static ssize_t write_flush_pipefs(struct file *filp, const char __user *buf, size_t count, loff_t *ppos) { struct cache_detail *cd = RPC_I(file_inode(filp))->private; return write_flush(filp, buf, count, ppos, cd); } const struct file_operations cache_flush_operations_pipefs = { .open = open_flush_pipefs, .read = read_flush_pipefs, .write = write_flush_pipefs, .release = release_flush_pipefs, }; int sunrpc_cache_register_pipefs(struct dentry *parent, const char *name, umode_t umode, struct cache_detail *cd) { struct dentry *dir = rpc_create_cache_dir(parent, name, umode, cd); if (IS_ERR(dir)) return PTR_ERR(dir); cd->pipefs = dir; return 0; } EXPORT_SYMBOL_GPL(sunrpc_cache_register_pipefs); void sunrpc_cache_unregister_pipefs(struct cache_detail *cd) { if (cd->pipefs) { rpc_remove_cache_dir(cd->pipefs); cd->pipefs = NULL; } } EXPORT_SYMBOL_GPL(sunrpc_cache_unregister_pipefs); void sunrpc_cache_unhash(struct cache_detail *cd, struct cache_head *h) { spin_lock(&cd->hash_lock); if (!hlist_unhashed(&h->cache_list)){ sunrpc_begin_cache_remove_entry(h, cd); spin_unlock(&cd->hash_lock); sunrpc_end_cache_remove_entry(h, cd); } else spin_unlock(&cd->hash_lock); } EXPORT_SYMBOL_GPL(sunrpc_cache_unhash);
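/*
 * Illustrative sketch (not part of the kernel source above): a minimal
 * user-space style encoder for one field of a cache channel record,
 * mirroring the quoting rules used by qword_add()/qword_get() above:
 * backslash, space, tab and newline are written as three-digit octal
 * escapes, fields are separated by a single space, and the record ends
 * with a newline. The helper name emit_qword() is hypothetical and exists
 * only for this example.
 */
#include <stdio.h>

static void emit_qword(FILE *out, const char *str)
{
	for (; *str; str++) {
		unsigned char c = (unsigned char)*str;

		if (c == '\\' || c == ' ' || c == '\t' || c == '\n')
			fprintf(out, "\\%03o", c);	/* e.g. space -> \040 */
		else
			fputc(c, out);
	}
	fputc(' ', out);	/* field separator */
}

int main(void)
{
	/*
	 * Prints "key\040one value \n"; qword_get() above would decode the
	 * escaped field back to the original string "key one".
	 */
	emit_qword(stdout, "key one");
	emit_qword(stdout, "value");
	fputc('\n', stdout);
	return 0;
}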
/* * net/tipc/bcast.c: TIPC
broadcast code * * Copyright (c) 2004-2006, 2014-2017, Ericsson AB * Copyright (c) 2004, Intel Corporation. * Copyright (c) 2005, 2010-2011, Wind River Systems * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the names of the copyright holders nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #include <linux/tipc_config.h> #include "socket.h" #include "msg.h" #include "bcast.h" #include "link.h" #include "name_table.h" #define BCLINK_WIN_DEFAULT 50 /* bcast link window size (default) */ #define BCLINK_WIN_MIN 32 /* bcast minimum link window size */ const char tipc_bclink_name[] = "broadcast-link"; unsigned long sysctl_tipc_bc_retruni __read_mostly; /** * struct tipc_bc_base - base structure for keeping broadcast send state * @link: broadcast send link structure * @inputq: data input queue; will only carry SOCK_WAKEUP messages * @dests: array keeping number of reachable destinations per bearer * @primary_bearer: a bearer having links to all broadcast destinations, if any * @bcast_support: indicates if primary bearer, if any, supports broadcast * @force_bcast: forces broadcast for multicast traffic * @rcast_support: indicates if all peer nodes support replicast * @force_rcast: forces replicast for multicast traffic * @rc_ratio: dest count as percentage of cluster size where send method changes * @bc_threshold: calculated from rc_ratio; if dests > threshold use broadcast */ struct tipc_bc_base { struct tipc_link *link; struct sk_buff_head inputq; int dests[MAX_BEARERS]; int primary_bearer; bool bcast_support; bool force_bcast; bool rcast_support; bool force_rcast; int rc_ratio; int bc_threshold; }; static struct tipc_bc_base *tipc_bc_base(struct net *net) { return tipc_net(net)->bcbase; } /* tipc_bcast_get_mtu(): -get the MTU currently used by broadcast link * Note: the MTU is decremented to give room for a tunnel header, in * case the message needs to be sent as replicast */ int tipc_bcast_get_mtu(struct net *net) { return tipc_link_mss(tipc_bc_sndlink(net)); } void tipc_bcast_toggle_rcast(struct net *net, bool supp) { tipc_bc_base(net)->rcast_support = supp; } static void tipc_bcbase_calc_bc_threshold(struct net *net) { struct tipc_bc_base *bb = tipc_bc_base(net); int cluster_size = tipc_link_bc_peers(tipc_bc_sndlink(net)); bb->bc_threshold = 1 + (cluster_size * bb->rc_ratio / 100); } /* tipc_bcbase_select_primary(): find a bearer with links to all destinations, * if any, and make it primary bearer */ static void tipc_bcbase_select_primary(struct net *net) { struct tipc_bc_base *bb = tipc_bc_base(net); int all_dests = tipc_link_bc_peers(bb->link); int max_win = tipc_link_max_win(bb->link); int min_win = tipc_link_min_win(bb->link); int i, mtu, prim; bb->primary_bearer = INVALID_BEARER_ID; bb->bcast_support = true; if (!all_dests) return; for (i = 0; i < MAX_BEARERS; i++) { if (!bb->dests[i]) continue; mtu = tipc_bearer_mtu(net, i); if (mtu < tipc_link_mtu(bb->link)) { tipc_link_set_mtu(bb->link, mtu); tipc_link_set_queue_limits(bb->link, min_win, max_win); } bb->bcast_support &= tipc_bearer_bcast_support(net, i); if (bb->dests[i] < all_dests) continue; bb->primary_bearer = i; /* Reduce risk that all nodes select same primary */ if ((i ^ tipc_own_addr(net)) & 1) break; } prim = bb->primary_bearer; if (prim != INVALID_BEARER_ID) bb->bcast_support = tipc_bearer_bcast_support(net, prim); } void tipc_bcast_inc_bearer_dst_cnt(struct net *net, int bearer_id) { struct tipc_bc_base *bb = tipc_bc_base(net); tipc_bcast_lock(net); bb->dests[bearer_id]++; tipc_bcbase_select_primary(net); tipc_bcast_unlock(net); } void tipc_bcast_dec_bearer_dst_cnt(struct net *net, int bearer_id) { struct tipc_bc_base *bb = tipc_bc_base(net); tipc_bcast_lock(net); bb->dests[bearer_id]--; tipc_bcbase_select_primary(net); tipc_bcast_unlock(net); } /* tipc_bcbase_xmit - broadcast a packet queue across one or 
more bearers * * Note that number of reachable destinations, as indicated in the dests[] * array, may transitionally differ from the number of destinations indicated * in each sent buffer. We can sustain this. Excess destination nodes will * drop and never acknowledge the unexpected packets, and missing destinations * will either require retransmission (if they are just about to be added to * the bearer), or be removed from the buffer's 'ackers' counter (if they * just went down) */ static void tipc_bcbase_xmit(struct net *net, struct sk_buff_head *xmitq) { int bearer_id; struct tipc_bc_base *bb = tipc_bc_base(net); struct sk_buff *skb, *_skb; struct sk_buff_head _xmitq; if (skb_queue_empty(xmitq)) return; /* The typical case: at least one bearer has links to all nodes */ bearer_id = bb->primary_bearer; if (bearer_id >= 0) { tipc_bearer_bc_xmit(net, bearer_id, xmitq); return; } /* We have to transmit across all bearers */ __skb_queue_head_init(&_xmitq); for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) { if (!bb->dests[bearer_id]) continue; skb_queue_walk(xmitq, skb) { _skb = pskb_copy_for_clone(skb, GFP_ATOMIC); if (!_skb) break; __skb_queue_tail(&_xmitq, _skb); } tipc_bearer_bc_xmit(net, bearer_id, &_xmitq); } __skb_queue_purge(xmitq); __skb_queue_purge(&_xmitq); } static void tipc_bcast_select_xmit_method(struct net *net, int dests, struct tipc_mc_method *method) { struct tipc_bc_base *bb = tipc_bc_base(net); unsigned long exp = method->expires; /* Broadcast supported by used bearer/bearers? */ if (!bb->bcast_support) { method->rcast = true; return; } /* Any destinations which don't support replicast ? */ if (!bb->rcast_support) { method->rcast = false; return; } /* Can current method be changed ? */ method->expires = jiffies + TIPC_METHOD_EXPIRE; if (method->mandatory) return; if (!(tipc_net(net)->capabilities & TIPC_MCAST_RBCTL) && time_before(jiffies, exp)) return; /* Configuration as force 'broadcast' method */ if (bb->force_bcast) { method->rcast = false; return; } /* Configuration as force 'replicast' method */ if (bb->force_rcast) { method->rcast = true; return; } /* Configuration as 'autoselect' or default method */ /* Determine method to use now */ method->rcast = dests <= bb->bc_threshold; } /* tipc_bcast_xmit - broadcast the buffer chain to all external nodes * @net: the applicable net namespace * @pkts: chain of buffers containing message * @cong_link_cnt: set to 1 if broadcast link is congested, otherwise 0 * Consumes the buffer chain. 
* Returns 0 if success, otherwise errno: -EHOSTUNREACH,-EMSGSIZE */ int tipc_bcast_xmit(struct net *net, struct sk_buff_head *pkts, u16 *cong_link_cnt) { struct tipc_link *l = tipc_bc_sndlink(net); struct sk_buff_head xmitq; int rc = 0; __skb_queue_head_init(&xmitq); tipc_bcast_lock(net); if (tipc_link_bc_peers(l)) rc = tipc_link_xmit(l, pkts, &xmitq); tipc_bcast_unlock(net); tipc_bcbase_xmit(net, &xmitq); __skb_queue_purge(pkts); if (rc == -ELINKCONG) { *cong_link_cnt = 1; rc = 0; } return rc; } /* tipc_rcast_xmit - replicate and send a message to given destination nodes * @net: the applicable net namespace * @pkts: chain of buffers containing message * @dests: list of destination nodes * @cong_link_cnt: returns number of congested links * @cong_links: returns identities of congested links * Returns 0 if success, otherwise errno */ static int tipc_rcast_xmit(struct net *net, struct sk_buff_head *pkts, struct tipc_nlist *dests, u16 *cong_link_cnt) { struct tipc_dest *dst, *tmp; struct sk_buff_head _pkts; u32 dnode, selector; selector = msg_link_selector(buf_msg(skb_peek(pkts))); __skb_queue_head_init(&_pkts); list_for_each_entry_safe(dst, tmp, &dests->list, list) { dnode = dst->node; if (!tipc_msg_pskb_copy(dnode, pkts, &_pkts)) return -ENOMEM; /* Any other return value than -ELINKCONG is ignored */ if (tipc_node_xmit(net, &_pkts, dnode, selector) == -ELINKCONG) (*cong_link_cnt)++; } return 0; } /* tipc_mcast_send_sync - deliver a dummy message with SYN bit * @net: the applicable net namespace * @skb: socket buffer to copy * @method: send method to be used * @dests: destination nodes for message. * Returns 0 if success, otherwise errno */ static int tipc_mcast_send_sync(struct net *net, struct sk_buff *skb, struct tipc_mc_method *method, struct tipc_nlist *dests) { struct tipc_msg *hdr, *_hdr; struct sk_buff_head tmpq; u16 cong_link_cnt = 0; struct sk_buff *_skb; int rc = 0; /* Is a cluster supporting with new capabilities ? */ if (!(tipc_net(net)->capabilities & TIPC_MCAST_RBCTL)) return 0; hdr = buf_msg(skb); if (msg_user(hdr) == MSG_FRAGMENTER) hdr = msg_inner_hdr(hdr); if (msg_type(hdr) != TIPC_MCAST_MSG) return 0; /* Allocate dummy message */ _skb = tipc_buf_acquire(MCAST_H_SIZE, GFP_KERNEL); if (!_skb) return -ENOMEM; /* Preparing for 'synching' header */ msg_set_syn(hdr, 1); /* Copy skb's header into a dummy header */ skb_copy_to_linear_data(_skb, hdr, MCAST_H_SIZE); skb_orphan(_skb); /* Reverse method for dummy message */ _hdr = buf_msg(_skb); msg_set_size(_hdr, MCAST_H_SIZE); msg_set_is_rcast(_hdr, !msg_is_rcast(hdr)); msg_set_errcode(_hdr, TIPC_ERR_NO_PORT); __skb_queue_head_init(&tmpq); __skb_queue_tail(&tmpq, _skb); if (method->rcast) rc = tipc_bcast_xmit(net, &tmpq, &cong_link_cnt); else rc = tipc_rcast_xmit(net, &tmpq, dests, &cong_link_cnt); /* This queue should normally be empty by now */ __skb_queue_purge(&tmpq); return rc; } /* tipc_mcast_xmit - deliver message to indicated destination nodes * and to identified node local sockets * @net: the applicable net namespace * @pkts: chain of buffers containing message * @method: send method to be used * @dests: destination nodes for message. * @cong_link_cnt: returns number of encountered congested destination links * Consumes buffer chain. 
* Returns 0 if success, otherwise errno */ int tipc_mcast_xmit(struct net *net, struct sk_buff_head *pkts, struct tipc_mc_method *method, struct tipc_nlist *dests, u16 *cong_link_cnt) { struct sk_buff_head inputq, localq; bool rcast = method->rcast; struct tipc_msg *hdr; struct sk_buff *skb; int rc = 0; skb_queue_head_init(&inputq); __skb_queue_head_init(&localq); /* Clone packets before they are consumed by next call */ if (dests->local && !tipc_msg_reassemble(pkts, &localq)) { rc = -ENOMEM; goto exit; } /* Send according to determined transmit method */ if (dests->remote) { tipc_bcast_select_xmit_method(net, dests->remote, method); skb = skb_peek(pkts); hdr = buf_msg(skb); if (msg_user(hdr) == MSG_FRAGMENTER) hdr = msg_inner_hdr(hdr); msg_set_is_rcast(hdr, method->rcast); /* Switch method ? */ if (rcast != method->rcast) { rc = tipc_mcast_send_sync(net, skb, method, dests); if (unlikely(rc)) { pr_err("Unable to send SYN: method %d, rc %d\n", rcast, rc); goto exit; } } if (method->rcast) rc = tipc_rcast_xmit(net, pkts, dests, cong_link_cnt); else rc = tipc_bcast_xmit(net, pkts, cong_link_cnt); } if (dests->local) { tipc_loopback_trace(net, &localq); tipc_sk_mcast_rcv(net, &localq, &inputq); } exit: /* This queue should normally be empty by now */ __skb_queue_purge(pkts); return rc; } /* tipc_bcast_rcv - receive a broadcast packet, and deliver to rcv link * * RCU is locked, no other locks set */ int tipc_bcast_rcv(struct net *net, struct tipc_link *l, struct sk_buff *skb) { struct tipc_msg *hdr = buf_msg(skb); struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq; struct sk_buff_head xmitq; int rc; __skb_queue_head_init(&xmitq); if (msg_mc_netid(hdr) != tipc_netid(net) || !tipc_link_is_up(l)) { kfree_skb(skb); return 0; } tipc_bcast_lock(net); if (msg_user(hdr) == BCAST_PROTOCOL) rc = tipc_link_bc_nack_rcv(l, skb, &xmitq); else rc = tipc_link_rcv(l, skb, NULL); tipc_bcast_unlock(net); tipc_bcbase_xmit(net, &xmitq); /* Any socket wakeup messages ? */ if (!skb_queue_empty(inputq)) tipc_sk_rcv(net, inputq); return rc; } /* tipc_bcast_ack_rcv - receive and handle a broadcast acknowledge * * RCU is locked, no other locks set */ void tipc_bcast_ack_rcv(struct net *net, struct tipc_link *l, struct tipc_msg *hdr) { struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq; u16 acked = msg_bcast_ack(hdr); struct sk_buff_head xmitq; /* Ignore bc acks sent by peer before bcast synch point was received */ if (msg_bc_ack_invalid(hdr)) return; __skb_queue_head_init(&xmitq); tipc_bcast_lock(net); tipc_link_bc_ack_rcv(l, acked, 0, NULL, &xmitq, NULL); tipc_bcast_unlock(net); tipc_bcbase_xmit(net, &xmitq); /* Any socket wakeup messages ? */ if (!skb_queue_empty(inputq)) tipc_sk_rcv(net, inputq); } /* tipc_bcast_synch_rcv - check and update rcv link with peer's send state * * RCU is locked, no other locks set */ int tipc_bcast_sync_rcv(struct net *net, struct tipc_link *l, struct tipc_msg *hdr, struct sk_buff_head *retrq) { struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq; struct tipc_gap_ack_blks *ga; struct sk_buff_head xmitq; int rc = 0; __skb_queue_head_init(&xmitq); tipc_bcast_lock(net); if (msg_type(hdr) != STATE_MSG) { tipc_link_bc_init_rcv(l, hdr); } else if (!msg_bc_ack_invalid(hdr)) { tipc_get_gap_ack_blks(&ga, l, hdr, false); if (!sysctl_tipc_bc_retruni) retrq = &xmitq; rc = tipc_link_bc_ack_rcv(l, msg_bcast_ack(hdr), msg_bc_gap(hdr), ga, &xmitq, retrq); rc |= tipc_link_bc_sync_rcv(l, hdr, &xmitq); } tipc_bcast_unlock(net); tipc_bcbase_xmit(net, &xmitq); /* Any socket wakeup messages ? 
*/ if (!skb_queue_empty(inputq)) tipc_sk_rcv(net, inputq); return rc; } /* tipc_bcast_add_peer - add a peer node to broadcast link and bearer * * RCU is locked, node lock is set */ void tipc_bcast_add_peer(struct net *net, struct tipc_link *uc_l, struct sk_buff_head *xmitq) { struct tipc_link *snd_l = tipc_bc_sndlink(net); tipc_bcast_lock(net); tipc_link_add_bc_peer(snd_l, uc_l, xmitq); tipc_bcbase_select_primary(net); tipc_bcbase_calc_bc_threshold(net); tipc_bcast_unlock(net); } /* tipc_bcast_remove_peer - remove a peer node from broadcast link and bearer * * RCU is locked, node lock is set */ void tipc_bcast_remove_peer(struct net *net, struct tipc_link *rcv_l) { struct tipc_link *snd_l = tipc_bc_sndlink(net); struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq; struct sk_buff_head xmitq; __skb_queue_head_init(&xmitq); tipc_bcast_lock(net); tipc_link_remove_bc_peer(snd_l, rcv_l, &xmitq); tipc_bcbase_select_primary(net); tipc_bcbase_calc_bc_threshold(net); tipc_bcast_unlock(net); tipc_bcbase_xmit(net, &xmitq); /* Any socket wakeup messages ? */ if (!skb_queue_empty(inputq)) tipc_sk_rcv(net, inputq); } int tipc_bclink_reset_stats(struct net *net, struct tipc_link *l) { if (!l) return -ENOPROTOOPT; tipc_bcast_lock(net); tipc_link_reset_stats(l); tipc_bcast_unlock(net); return 0; } static int tipc_bc_link_set_queue_limits(struct net *net, u32 max_win) { struct tipc_link *l = tipc_bc_sndlink(net); if (!l) return -ENOPROTOOPT; if (max_win < BCLINK_WIN_MIN) max_win = BCLINK_WIN_MIN; if (max_win > TIPC_MAX_LINK_WIN) return -EINVAL; tipc_bcast_lock(net); tipc_link_set_queue_limits(l, tipc_link_min_win(l), max_win); tipc_bcast_unlock(net); return 0; } static int tipc_bc_link_set_broadcast_mode(struct net *net, u32 bc_mode) { struct tipc_bc_base *bb = tipc_bc_base(net); switch (bc_mode) { case BCLINK_MODE_BCAST: if (!bb->bcast_support) return -ENOPROTOOPT; bb->force_bcast = true; bb->force_rcast = false; break; case BCLINK_MODE_RCAST: if (!bb->rcast_support) return -ENOPROTOOPT; bb->force_bcast = false; bb->force_rcast = true; break; case BCLINK_MODE_SEL: if (!bb->bcast_support || !bb->rcast_support) return -ENOPROTOOPT; bb->force_bcast = false; bb->force_rcast = false; break; default: return -EINVAL; } return 0; } static int tipc_bc_link_set_broadcast_ratio(struct net *net, u32 bc_ratio) { struct tipc_bc_base *bb = tipc_bc_base(net); if (!bb->bcast_support || !bb->rcast_support) return -ENOPROTOOPT; if (bc_ratio > 100 || bc_ratio <= 0) return -EINVAL; bb->rc_ratio = bc_ratio; tipc_bcast_lock(net); tipc_bcbase_calc_bc_threshold(net); tipc_bcast_unlock(net); return 0; } int tipc_nl_bc_link_set(struct net *net, struct nlattr *attrs[]) { int err; u32 win; u32 bc_mode; u32 bc_ratio; struct nlattr *props[TIPC_NLA_PROP_MAX + 1]; if (!attrs[TIPC_NLA_LINK_PROP]) return -EINVAL; err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP], props); if (err) return err; if (!props[TIPC_NLA_PROP_WIN] && !props[TIPC_NLA_PROP_BROADCAST] && !props[TIPC_NLA_PROP_BROADCAST_RATIO]) { return -EOPNOTSUPP; } if (props[TIPC_NLA_PROP_BROADCAST]) { bc_mode = nla_get_u32(props[TIPC_NLA_PROP_BROADCAST]); err = tipc_bc_link_set_broadcast_mode(net, bc_mode); } if (!err && props[TIPC_NLA_PROP_BROADCAST_RATIO]) { bc_ratio = nla_get_u32(props[TIPC_NLA_PROP_BROADCAST_RATIO]); err = tipc_bc_link_set_broadcast_ratio(net, bc_ratio); } if (!err && props[TIPC_NLA_PROP_WIN]) { win = nla_get_u32(props[TIPC_NLA_PROP_WIN]); err = tipc_bc_link_set_queue_limits(net, win); } return err; } int tipc_bcast_init(struct net *net) { struct 
tipc_net *tn = tipc_net(net); struct tipc_bc_base *bb = NULL; struct tipc_link *l = NULL; bb = kzalloc(sizeof(*bb), GFP_KERNEL); if (!bb) goto enomem; tn->bcbase = bb; spin_lock_init(&tipc_net(net)->bclock); if (!tipc_link_bc_create(net, 0, 0, NULL, one_page_mtu, BCLINK_WIN_DEFAULT, BCLINK_WIN_DEFAULT, 0, &bb->inputq, NULL, NULL, &l)) goto enomem; bb->link = l; tn->bcl = l; bb->rc_ratio = 10; bb->rcast_support = true; return 0; enomem: kfree(bb); kfree(l); return -ENOMEM; } void tipc_bcast_stop(struct net *net) { struct tipc_net *tn = net_generic(net, tipc_net_id); synchronize_net(); kfree(tn->bcbase); kfree(tn->bcl); } void tipc_nlist_init(struct tipc_nlist *nl, u32 self) { memset(nl, 0, sizeof(*nl)); INIT_LIST_HEAD(&nl->list); nl->self = self; } void tipc_nlist_add(struct tipc_nlist *nl, u32 node) { if (node == nl->self) nl->local = true; else if (tipc_dest_push(&nl->list, node, 0)) nl->remote++; } void tipc_nlist_del(struct tipc_nlist *nl, u32 node) { if (node == nl->self) nl->local = false; else if (tipc_dest_del(&nl->list, node, 0)) nl->remote--; } void tipc_nlist_purge(struct tipc_nlist *nl) { tipc_dest_list_purge(&nl->list); nl->remote = 0; nl->local = false; } u32 tipc_bcast_get_mode(struct net *net) { struct tipc_bc_base *bb = tipc_bc_base(net); if (bb->force_bcast) return BCLINK_MODE_BCAST; if (bb->force_rcast) return BCLINK_MODE_RCAST; if (bb->bcast_support && bb->rcast_support) return BCLINK_MODE_SEL; return 0; } u32 tipc_bcast_get_broadcast_ratio(struct net *net) { struct tipc_bc_base *bb = tipc_bc_base(net); return bb->rc_ratio; } void tipc_mcast_filter_msg(struct net *net, struct sk_buff_head *defq, struct sk_buff_head *inputq) { struct sk_buff *skb, *_skb, *tmp; struct tipc_msg *hdr, *_hdr; bool match = false; u32 node, port; skb = skb_peek(inputq); if (!skb) return; hdr = buf_msg(skb); if (likely(!msg_is_syn(hdr) && skb_queue_empty(defq))) return; node = msg_orignode(hdr); if (node == tipc_own_addr(net)) return; port = msg_origport(hdr); /* Has the twin SYN message already arrived ? */ skb_queue_walk(defq, _skb) { _hdr = buf_msg(_skb); if (msg_orignode(_hdr) != node) continue; if (msg_origport(_hdr) != port) continue; match = true; break; } if (!match) { if (!msg_is_syn(hdr)) return; __skb_dequeue(inputq); __skb_queue_tail(defq, skb); return; } /* Deliver non-SYN message from other link, otherwise queue it */ if (!msg_is_syn(hdr)) { if (msg_is_rcast(hdr) != msg_is_rcast(_hdr)) return; __skb_dequeue(inputq); __skb_queue_tail(defq, skb); return; } /* Queue non-SYN/SYN message from same link */ if (msg_is_rcast(hdr) == msg_is_rcast(_hdr)) { __skb_dequeue(inputq); __skb_queue_tail(defq, skb); return; } /* Matching SYN messages => return the one with data, if any */ __skb_unlink(_skb, defq); if (msg_data_sz(hdr)) { kfree_skb(_skb); } else { __skb_dequeue(inputq); kfree_skb(skb); __skb_queue_tail(inputq, _skb); } /* Deliver subsequent non-SYN messages from same peer */ skb_queue_walk_safe(defq, _skb, tmp) { _hdr = buf_msg(_skb); if (msg_orignode(_hdr) != node) continue; if (msg_origport(_hdr) != port) continue; if (msg_is_syn(_hdr)) break; __skb_unlink(_skb, defq); __skb_queue_tail(inputq, _skb); } }
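/*
 * Illustrative sketch (not part of the kernel source above): the arithmetic
 * behind TIPC's broadcast/replicast autoselection, as implemented in
 * tipc_bcbase_calc_bc_threshold() and tipc_bcast_select_xmit_method() and
 * ignoring the forced-mode and "mandatory" overrides handled there. With the
 * default rc_ratio of 10 (percent) set in tipc_bcast_init(), a cluster of 40
 * peers gets a threshold of 1 + 40 * 10 / 100 = 5, so a multicast reaching 5
 * or fewer destinations is replicated per node ("replicast") and anything
 * larger is broadcast. The function name use_replicast() is hypothetical and
 * exists only for this example.
 */
#include <stdbool.h>
#include <stdio.h>

static bool use_replicast(int cluster_size, int rc_ratio, int dests)
{
	int bc_threshold = 1 + (cluster_size * rc_ratio / 100);

	return dests <= bc_threshold;	/* few destinations -> replicast */
}

int main(void)
{
	/* 40-node cluster, default 10% ratio: threshold is 5 */
	printf("3 dests:  %s\n", use_replicast(40, 10, 3) ? "replicast" : "broadcast");
	printf("20 dests: %s\n", use_replicast(40, 10, 20) ? "replicast" : "broadcast");
	return 0;
}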
// SPDX-License-Identifier: GPL-2.0-or-later /** -*- linux-c -*- *********************************************************** * Linux PPP over Ethernet (PPPoX/PPPoE) Sockets * * PPPoX --- Generic PPP encapsulation socket family * PPPoE --- PPP over Ethernet (RFC 2516) * * Version: 0.7.0 * * 070228 : Fix to allow multiple sessions with same remote MAC and same * session id by including the local device ifindex in the * tuple identifying a session. This also ensures packets can't * be injected into a session from interfaces other than the one * specified by userspace. Florian Zumbiehl <florz@florz.de> * (Oh, BTW, this one is YYMMDD, in case you were wondering ...) * 220102 : Fix module use count on failure in pppoe_create, pppox_sk -acme * 030700 : Fixed connect logic to allow for disconnect. * 270700 : Fixed potential SMP problems; we must protect against * simultaneous invocation of ppp_input * and ppp_unregister_channel. * 040800 : Respect reference count mechanisms on net-devices. * 200800 : fix kfree(skb) in pppoe_rcv (acme) * Module reference count is decremented in the right spot now, * guards against sock_put not actually freeing the sk * in pppoe_release. * 051000 : Initialization cleanup. * 111100 : Fix recvmsg. * 050101 : Fix PADT processing. * 140501 : Use pppoe_rcv_core to handle all backlog. (Alexey) * 170701 : Do not lock_sock with rwlock held. (DaveM) * Ignore discovery frames if user has socket * locked. (DaveM) * Ignore return value of dev_queue_xmit in __pppoe_xmit * or else we may kfree an SKB twice. (DaveM) * 190701 : When doing copies of skb's in __pppoe_xmit, always delete * the original skb that was passed in on success, never on * failure. Delete the copy of the skb on failure to avoid * a memory leak. * 081001 : Misc.
cleanup (licence string, non-blocking, prevent * reference of device on close). * 121301 : New ppp channels interface; cannot unregister a channel * from interrupts. Thus, we mark the socket as a ZOMBIE * and do the unregistration later. * 081002 : seq_file support for proc stuff -acme * 111602 : Merge all 2.4 fixes into 2.5/2.6 tree. Label 2.5/2.6 * as version 0.7. Spacing cleanup. * Author: Michal Ostrowski <mostrows@speakeasy.net> * Contributors: * Arnaldo Carvalho de Melo <acme@conectiva.com.br> * David S. Miller (davem@redhat.com) * * License: */ #include <linux/string.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/errno.h> #include <linux/netdevice.h> #include <linux/net.h> #include <linux/inetdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/init.h> #include <linux/if_ether.h> #include <linux/if_pppox.h> #include <linux/ppp_channel.h> #include <linux/ppp_defs.h> #include <linux/ppp-ioctl.h> #include <linux/notifier.h> #include <linux/file.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/nsproxy.h> #include <net/net_namespace.h> #include <net/netns/generic.h> #include <net/sock.h> #include <linux/uaccess.h> #define PPPOE_HASH_BITS CONFIG_PPPOE_HASH_BITS #define PPPOE_HASH_SIZE (1 << PPPOE_HASH_BITS) #define PPPOE_HASH_MASK (PPPOE_HASH_SIZE - 1) static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb); static const struct proto_ops pppoe_ops; static const struct ppp_channel_ops pppoe_chan_ops; /* per-net private data for this module */ static unsigned int pppoe_net_id __read_mostly; struct pppoe_net { /* * we could use _single_ hash table for all * nets by injecting net id into the hash but * it would increase hash chains and add * a few additional math comparisons messy * as well, moreover in case of SMP less locking * controversy here */ struct pppox_sock __rcu *hash_table[PPPOE_HASH_SIZE]; spinlock_t hash_lock; }; /* * PPPoE could be in the following stages: * 1) Discovery stage (to obtain remote MAC and Session ID) * 2) Session stage (MAC and SID are known) * * Ethernet frames have a special tag for this but * we use simpler approach based on session id */ static inline bool stage_session(__be16 sid) { return sid != 0; } static inline struct pppoe_net *pppoe_pernet(struct net *net) { return net_generic(net, pppoe_net_id); } static inline int cmp_2_addr(struct pppoe_addr *a, struct pppoe_addr *b) { return a->sid == b->sid && ether_addr_equal(a->remote, b->remote); } static inline int cmp_addr(struct pppoe_addr *a, __be16 sid, char *addr) { return a->sid == sid && ether_addr_equal(a->remote, addr); } #if 8 % PPPOE_HASH_BITS #error 8 must be a multiple of PPPOE_HASH_BITS #endif static int hash_item(__be16 sid, unsigned char *addr) { unsigned char hash = 0; unsigned int i; for (i = 0; i < ETH_ALEN; i++) hash ^= addr[i]; for (i = 0; i < sizeof(sid_t) * 8; i += 8) hash ^= (__force __u32)sid >> i; for (i = 8; (i >>= 1) >= PPPOE_HASH_BITS;) hash ^= hash >> i; return hash & PPPOE_HASH_MASK; } /********************************************************************** * * Set/get/delete/rehash items (internal versions) * **********************************************************************/ static struct pppox_sock *__get_item(struct pppoe_net *pn, __be16 sid, unsigned char *addr, int ifindex) { int hash = hash_item(sid, addr); struct pppox_sock *ret; ret = rcu_dereference(pn->hash_table[hash]); while (ret) { if (cmp_addr(&ret->pppoe_pa, sid, addr) && ret->pppoe_ifindex == ifindex) 
return ret; ret = rcu_dereference(ret->next); } return NULL; } static int __set_item(struct pppoe_net *pn, struct pppox_sock *po) { int hash = hash_item(po->pppoe_pa.sid, po->pppoe_pa.remote); struct pppox_sock *ret, *first; first = rcu_dereference_protected(pn->hash_table[hash], lockdep_is_held(&pn->hash_lock)); ret = first; while (ret) { if (cmp_2_addr(&ret->pppoe_pa, &po->pppoe_pa) && ret->pppoe_ifindex == po->pppoe_ifindex) return -EALREADY; ret = rcu_dereference_protected(ret->next, lockdep_is_held(&pn->hash_lock)); } RCU_INIT_POINTER(po->next, first); rcu_assign_pointer(pn->hash_table[hash], po); return 0; } static void __delete_item(struct pppoe_net *pn, __be16 sid, char *addr, int ifindex) { int hash = hash_item(sid, addr); struct pppox_sock *ret, __rcu **src; ret = rcu_dereference_protected(pn->hash_table[hash], lockdep_is_held(&pn->hash_lock)); src = &pn->hash_table[hash]; while (ret) { if (cmp_addr(&ret->pppoe_pa, sid, addr) && ret->pppoe_ifindex == ifindex) { struct pppox_sock *next; next = rcu_dereference_protected(ret->next, lockdep_is_held(&pn->hash_lock)); rcu_assign_pointer(*src, next); break; } src = &ret->next; ret = rcu_dereference_protected(ret->next, lockdep_is_held(&pn->hash_lock)); } } /********************************************************************** * * Set/get/delete/rehash items * **********************************************************************/ static inline struct pppox_sock *get_item(struct pppoe_net *pn, __be16 sid, unsigned char *addr, int ifindex) { struct pppox_sock *po; po = __get_item(pn, sid, addr, ifindex); if (po && !refcount_inc_not_zero(&sk_pppox(po)->sk_refcnt)) po = NULL; return po; } static inline struct pppox_sock *__get_item_by_addr(struct net *net, struct sockaddr_pppox *sp) { struct net_device *dev; struct pppoe_net *pn; struct pppox_sock *pppox_sock = NULL; int ifindex; dev = dev_get_by_name_rcu(net, sp->sa_addr.pppoe.dev); if (dev) { ifindex = dev->ifindex; pn = pppoe_pernet(net); pppox_sock = __get_item(pn, sp->sa_addr.pppoe.sid, sp->sa_addr.pppoe.remote, ifindex); } return pppox_sock; } static inline void delete_item(struct pppoe_net *pn, __be16 sid, char *addr, int ifindex) { spin_lock(&pn->hash_lock); __delete_item(pn, sid, addr, ifindex); spin_unlock(&pn->hash_lock); } /*************************************************************************** * * Handler for device events. * Certain device events require that sockets be unconnected. * **************************************************************************/ static void pppoe_flush_dev(struct net_device *dev) { struct pppoe_net *pn; int i; pn = pppoe_pernet(dev_net(dev)); spin_lock(&pn->hash_lock); for (i = 0; i < PPPOE_HASH_SIZE; i++) { struct pppox_sock *po = rcu_dereference_protected(pn->hash_table[i], lockdep_is_held(&pn->hash_lock)); struct sock *sk; while (po) { while (po && po->pppoe_dev != dev) { po = rcu_dereference_protected(po->next, lockdep_is_held(&pn->hash_lock)); } if (!po) break; sk = sk_pppox(po); /* We always grab the socket lock, followed by the * hash_lock, in that order. Since we should hold the * sock lock while doing any unbinding, we need to * release the lock we're holding. Hold a reference to * the sock so it doesn't disappear as we're jumping * between locks. 
*/ sock_hold(sk); spin_unlock(&pn->hash_lock); lock_sock(sk); if (po->pppoe_dev == dev && sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) { pppox_unbind_sock(sk); sk->sk_state_change(sk); po->pppoe_dev = NULL; dev_put(dev); } release_sock(sk); sock_put(sk); /* Restart the process from the start of the current * hash chain. We dropped locks so the world may have * change from underneath us. */ BUG_ON(pppoe_pernet(dev_net(dev)) == NULL); spin_lock(&pn->hash_lock); po = rcu_dereference_protected(pn->hash_table[i], lockdep_is_held(&pn->hash_lock)); } } spin_unlock(&pn->hash_lock); } static int pppoe_device_event(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); /* Only look at sockets that are using this specific device. */ switch (event) { case NETDEV_CHANGEADDR: case NETDEV_CHANGEMTU: /* A change in mtu or address is a bad thing, requiring * LCP re-negotiation. */ case NETDEV_GOING_DOWN: case NETDEV_DOWN: /* Find every socket on this device and kill it. */ pppoe_flush_dev(dev); break; default: break; } return NOTIFY_DONE; } static struct notifier_block pppoe_notifier = { .notifier_call = pppoe_device_event, }; /************************************************************************ * * Do the real work of receiving a PPPoE Session frame. * ***********************************************************************/ static int pppoe_rcv_core(struct sock *sk, struct sk_buff *skb) { struct pppox_sock *po = pppox_sk(sk); struct pppox_sock *relay_po; /* Backlog receive. Semantics of backlog rcv preclude any code from * executing in lock_sock()/release_sock() bounds; meaning sk->sk_state * can't change. */ if (sk->sk_state & PPPOX_BOUND) { ppp_input(&po->chan, skb); } else if (sk->sk_state & PPPOX_RELAY) { relay_po = __get_item_by_addr(sock_net(sk), &po->pppoe_relay); if (relay_po == NULL) goto abort_kfree; if ((sk_pppox(relay_po)->sk_state & PPPOX_CONNECTED) == 0) goto abort_kfree; if (!__pppoe_xmit(sk_pppox(relay_po), skb)) goto abort_kfree; } else { if (sock_queue_rcv_skb(sk, skb)) goto abort_kfree; } return NET_RX_SUCCESS; abort_kfree: kfree_skb(skb); return NET_RX_DROP; } /************************************************************************ * * Receive wrapper called in BH context. 
* ***********************************************************************/ static int pppoe_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) { struct pppoe_hdr *ph; struct pppox_sock *po; struct pppoe_net *pn; int len; if (skb->pkt_type == PACKET_OTHERHOST) goto drop; skb = skb_share_check(skb, GFP_ATOMIC); if (!skb) goto out; if (skb_mac_header_len(skb) < ETH_HLEN) goto drop; if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr))) goto drop; ph = pppoe_hdr(skb); len = ntohs(ph->length); skb_pull_rcsum(skb, sizeof(*ph)); if (skb->len < len) goto drop; if (pskb_trim_rcsum(skb, len)) goto drop; ph = pppoe_hdr(skb); pn = pppoe_pernet(dev_net(dev)); po = __get_item(pn, ph->sid, eth_hdr(skb)->h_source, dev->ifindex); if (!po) goto drop; return __sk_receive_skb(sk_pppox(po), skb, 0, 1, false); drop: kfree_skb(skb); out: return NET_RX_DROP; } static void pppoe_unbind_sock_work(struct work_struct *work) { struct pppox_sock *po = container_of(work, struct pppox_sock, proto.pppoe.padt_work); struct sock *sk = sk_pppox(po); lock_sock(sk); if (po->pppoe_dev) { dev_put(po->pppoe_dev); po->pppoe_dev = NULL; } pppox_unbind_sock(sk); release_sock(sk); sock_put(sk); } /************************************************************************ * * Receive a PPPoE Discovery frame. * This is solely for detection of PADT frames * ***********************************************************************/ static int pppoe_disc_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) { struct pppoe_hdr *ph; struct pppox_sock *po; struct pppoe_net *pn; skb = skb_share_check(skb, GFP_ATOMIC); if (!skb) goto out; if (skb->pkt_type != PACKET_HOST) goto abort; if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr))) goto abort; ph = pppoe_hdr(skb); if (ph->code != PADT_CODE) goto abort; pn = pppoe_pernet(dev_net(dev)); po = get_item(pn, ph->sid, eth_hdr(skb)->h_source, dev->ifindex); if (po) if (!schedule_work(&po->proto.pppoe.padt_work)) sock_put(sk_pppox(po)); abort: kfree_skb(skb); out: return NET_RX_SUCCESS; /* Lies... :-) */ } static struct packet_type pppoes_ptype __read_mostly = { .type = cpu_to_be16(ETH_P_PPP_SES), .func = pppoe_rcv, }; static struct packet_type pppoed_ptype __read_mostly = { .type = cpu_to_be16(ETH_P_PPP_DISC), .func = pppoe_disc_rcv, }; static struct proto pppoe_sk_proto __read_mostly = { .name = "PPPOE", .owner = THIS_MODULE, .obj_size = sizeof(struct pppox_sock), }; static void pppoe_destruct(struct sock *sk) { skb_queue_purge(&sk->sk_receive_queue); } /*********************************************************************** * * Initialize a new struct sock. 
* **********************************************************************/ static int pppoe_create(struct net *net, struct socket *sock, int kern) { struct sock *sk; sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppoe_sk_proto, kern); if (!sk) return -ENOMEM; sock_init_data(sock, sk); sock_set_flag(sk, SOCK_RCU_FREE); sock->state = SS_UNCONNECTED; sock->ops = &pppoe_ops; sk->sk_backlog_rcv = pppoe_rcv_core; sk->sk_destruct = pppoe_destruct; sk->sk_state = PPPOX_NONE; sk->sk_type = SOCK_STREAM; sk->sk_family = PF_PPPOX; sk->sk_protocol = PX_PROTO_OE; INIT_WORK(&pppox_sk(sk)->proto.pppoe.padt_work, pppoe_unbind_sock_work); return 0; } static int pppoe_release(struct socket *sock) { struct sock *sk = sock->sk; struct pppox_sock *po; struct pppoe_net *pn; struct net *net = NULL; if (!sk) return 0; lock_sock(sk); if (sock_flag(sk, SOCK_DEAD)) { release_sock(sk); return -EBADF; } po = pppox_sk(sk); if (po->pppoe_dev) { dev_put(po->pppoe_dev); po->pppoe_dev = NULL; } pppox_unbind_sock(sk); /* Signal the death of the socket. */ sk->sk_state = PPPOX_DEAD; net = sock_net(sk); pn = pppoe_pernet(net); /* * protect "po" from concurrent updates * on pppoe_flush_dev */ delete_item(pn, po->pppoe_pa.sid, po->pppoe_pa.remote, po->pppoe_ifindex); sock_orphan(sk); sock->sk = NULL; release_sock(sk); sock_put(sk); return 0; } static int pppoe_connect(struct socket *sock, struct sockaddr_unsized *uservaddr, int sockaddr_len, int flags) { struct sock *sk = sock->sk; struct sockaddr_pppox *sp = (struct sockaddr_pppox *)uservaddr; struct pppox_sock *po = pppox_sk(sk); struct net_device *dev = NULL; struct pppoe_net *pn; struct net *net = NULL; int error; lock_sock(sk); error = -EINVAL; if (sockaddr_len != sizeof(struct sockaddr_pppox)) goto end; if (sp->sa_protocol != PX_PROTO_OE) goto end; /* Check for already bound sockets */ error = -EBUSY; if ((sk->sk_state & PPPOX_CONNECTED) && stage_session(sp->sa_addr.pppoe.sid)) goto end; /* Check for already disconnected sockets, on attempts to disconnect */ error = -EALREADY; if ((sk->sk_state & PPPOX_DEAD) && !stage_session(sp->sa_addr.pppoe.sid)) goto end; error = 0; /* Delete the old binding */ if (stage_session(po->pppoe_pa.sid)) { pppox_unbind_sock(sk); pn = pppoe_pernet(sock_net(sk)); delete_item(pn, po->pppoe_pa.sid, po->pppoe_pa.remote, po->pppoe_ifindex); if (po->pppoe_dev) { dev_put(po->pppoe_dev); po->pppoe_dev = NULL; } po->pppoe_ifindex = 0; memset(&po->pppoe_pa, 0, sizeof(po->pppoe_pa)); memset(&po->pppoe_relay, 0, sizeof(po->pppoe_relay)); memset(&po->chan, 0, sizeof(po->chan)); po->next = NULL; po->num = 0; sk->sk_state = PPPOX_NONE; } /* Re-bind in session stage only */ if (stage_session(sp->sa_addr.pppoe.sid)) { error = -ENODEV; net = sock_net(sk); dev = dev_get_by_name(net, sp->sa_addr.pppoe.dev); if (!dev) goto err_put; po->pppoe_dev = dev; po->pppoe_ifindex = dev->ifindex; pn = pppoe_pernet(net); if (!(dev->flags & IFF_UP)) { goto err_put; } memcpy(&po->pppoe_pa, &sp->sa_addr.pppoe, sizeof(struct pppoe_addr)); spin_lock(&pn->hash_lock); error = __set_item(pn, po); spin_unlock(&pn->hash_lock); if (error < 0) goto err_put; po->chan.hdrlen = (sizeof(struct pppoe_hdr) + dev->hard_header_len); po->chan.mtu = dev->mtu - sizeof(struct pppoe_hdr) - 2; po->chan.private = sk; po->chan.ops = &pppoe_chan_ops; po->chan.direct_xmit = true; error = ppp_register_net_channel(dev_net(dev), &po->chan); if (error) { delete_item(pn, po->pppoe_pa.sid, po->pppoe_pa.remote, po->pppoe_ifindex); goto err_put; } sk->sk_state = PPPOX_CONNECTED; } po->num = sp->sa_addr.pppoe.sid; end: 
release_sock(sk); return error; err_put: if (po->pppoe_dev) { dev_put(po->pppoe_dev); po->pppoe_dev = NULL; } goto end; } static int pppoe_getname(struct socket *sock, struct sockaddr *uaddr, int peer) { int len = sizeof(struct sockaddr_pppox); struct sockaddr_pppox sp; sp.sa_family = AF_PPPOX; sp.sa_protocol = PX_PROTO_OE; memcpy(&sp.sa_addr.pppoe, &pppox_sk(sock->sk)->pppoe_pa, sizeof(struct pppoe_addr)); memcpy(uaddr, &sp, len); return len; } static int pppoe_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { struct sock *sk = sock->sk; struct pppox_sock *po = pppox_sk(sk); int val; int err; switch (cmd) { case PPPIOCGMRU: err = -ENXIO; if (!(sk->sk_state & PPPOX_CONNECTED)) break; err = -EFAULT; if (put_user(po->pppoe_dev->mtu - sizeof(struct pppoe_hdr) - PPP_HDRLEN, (int __user *)arg)) break; err = 0; break; case PPPIOCSMRU: err = -ENXIO; if (!(sk->sk_state & PPPOX_CONNECTED)) break; err = -EFAULT; if (get_user(val, (int __user *)arg)) break; if (val < (po->pppoe_dev->mtu - sizeof(struct pppoe_hdr) - PPP_HDRLEN)) err = 0; else err = -EINVAL; break; case PPPIOCSFLAGS: err = -EFAULT; if (get_user(val, (int __user *)arg)) break; err = 0; break; case PPPOEIOCSFWD: { struct pppox_sock *relay_po; err = -EBUSY; if (sk->sk_state & (PPPOX_BOUND | PPPOX_DEAD)) break; err = -ENOTCONN; if (!(sk->sk_state & PPPOX_CONNECTED)) break; /* PPPoE address from the user specifies an outbound PPPoE address which frames are forwarded to */ err = -EFAULT; if (copy_from_user(&po->pppoe_relay, (void __user *)arg, sizeof(struct sockaddr_pppox))) break; err = -EINVAL; if (po->pppoe_relay.sa_family != AF_PPPOX || po->pppoe_relay.sa_protocol != PX_PROTO_OE) break; /* Check that the socket referenced by the address actually exists. */ rcu_read_lock(); relay_po = __get_item_by_addr(sock_net(sk), &po->pppoe_relay); rcu_read_unlock(); if (!relay_po) break; sk->sk_state |= PPPOX_RELAY; err = 0; break; } case PPPOEIOCDFWD: err = -EALREADY; if (!(sk->sk_state & PPPOX_RELAY)) break; sk->sk_state &= ~PPPOX_RELAY; err = 0; break; default: err = -ENOTTY; } return err; } static int pppoe_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len) { struct sk_buff *skb; struct sock *sk = sock->sk; struct pppox_sock *po = pppox_sk(sk); int error; struct pppoe_hdr hdr; struct pppoe_hdr *ph; struct net_device *dev; char *start; int hlen; lock_sock(sk); if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) { error = -ENOTCONN; goto end; } hdr.ver = 1; hdr.type = 1; hdr.code = 0; hdr.sid = po->num; dev = po->pppoe_dev; error = -EMSGSIZE; if (total_len > (dev->mtu + dev->hard_header_len)) goto end; hlen = LL_RESERVED_SPACE(dev); skb = sock_wmalloc(sk, hlen + sizeof(*ph) + total_len + dev->needed_tailroom, 0, GFP_KERNEL); if (!skb) { error = -ENOMEM; goto end; } /* Reserve space for headers. */ skb_reserve(skb, hlen); skb_reset_network_header(skb); skb->dev = dev; skb->priority = READ_ONCE(sk->sk_priority); skb->protocol = cpu_to_be16(ETH_P_PPP_SES); ph = skb_put(skb, total_len + sizeof(struct pppoe_hdr)); start = (char *)&ph->tag[0]; error = memcpy_from_msg(start, m, total_len); if (error < 0) { kfree_skb(skb); goto end; } error = total_len; dev_hard_header(skb, dev, ETH_P_PPP_SES, po->pppoe_pa.remote, NULL, total_len); memcpy(ph, &hdr, sizeof(struct pppoe_hdr)); ph->length = htons(total_len); dev_queue_xmit(skb); end: release_sock(sk); return error; } /************************************************************************ * * xmit function for internal use. 
* ***********************************************************************/ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb) { struct pppox_sock *po = pppox_sk(sk); struct net_device *dev = po->pppoe_dev; struct pppoe_hdr *ph; int data_len = skb->len; /* The higher-level PPP code (ppp_unregister_channel()) ensures the PPP * xmit operations conclude prior to an unregistration call. Thus * sk->sk_state cannot change, so we don't need to do lock_sock(). * But, we also can't do a lock_sock since that introduces a potential * deadlock as we'd reverse the lock ordering used when calling * ppp_unregister_channel(). */ if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) goto abort; if (!dev) goto abort; /* Copy the data if there is no space for the header or if it's * read-only. */ if (skb_cow_head(skb, LL_RESERVED_SPACE(dev) + sizeof(*ph))) goto abort; __skb_push(skb, sizeof(*ph)); skb_reset_network_header(skb); ph = pppoe_hdr(skb); ph->ver = 1; ph->type = 1; ph->code = 0; ph->sid = po->num; ph->length = htons(data_len); skb->protocol = cpu_to_be16(ETH_P_PPP_SES); skb->dev = dev; dev_hard_header(skb, dev, ETH_P_PPP_SES, po->pppoe_pa.remote, NULL, data_len); dev_queue_xmit(skb); return 1; abort: kfree_skb(skb); return 1; } /************************************************************************ * * xmit function called by generic PPP driver * sends PPP frame over PPPoE socket * ***********************************************************************/ static int pppoe_xmit(struct ppp_channel *chan, struct sk_buff *skb) { struct sock *sk = chan->private; return __pppoe_xmit(sk, skb); } static int pppoe_fill_forward_path(struct net_device_path_ctx *ctx, struct net_device_path *path, const struct ppp_channel *chan) { struct sock *sk = chan->private; struct pppox_sock *po = pppox_sk(sk); struct net_device *dev = po->pppoe_dev; if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED) || !dev) return -1; path->type = DEV_PATH_PPPOE; path->encap.proto = htons(ETH_P_PPP_SES); path->encap.id = be16_to_cpu(po->num); memcpy(path->encap.h_dest, po->pppoe_pa.remote, ETH_ALEN); memcpy(ctx->daddr, po->pppoe_pa.remote, ETH_ALEN); path->dev = ctx->dev; ctx->dev = dev; return 0; } static const struct ppp_channel_ops pppoe_chan_ops = { .start_xmit = pppoe_xmit, .fill_forward_path = pppoe_fill_forward_path, }; static int pppoe_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len, int flags) { struct sock *sk = sock->sk; struct sk_buff *skb; int error = 0; if (sk->sk_state & PPPOX_BOUND) return -EIO; skb = skb_recv_datagram(sk, flags, &error); if (!skb) return error; total_len = min_t(size_t, total_len, skb->len); error = skb_copy_datagram_msg(skb, 0, m, total_len); if (error == 0) { consume_skb(skb); return total_len; } kfree_skb(skb); return error; } #ifdef CONFIG_PROC_FS static int pppoe_seq_show(struct seq_file *seq, void *v) { struct pppox_sock *po; char *dev_name; if (v == SEQ_START_TOKEN) { seq_puts(seq, "Id Address Device\n"); goto out; } po = v; dev_name = po->pppoe_pa.dev; seq_printf(seq, "%08X %pM %8s\n", po->pppoe_pa.sid, po->pppoe_pa.remote, dev_name); out: return 0; } static inline struct pppox_sock *pppoe_get_idx(struct pppoe_net *pn, loff_t pos) { struct pppox_sock *po; int i; for (i = 0; i < PPPOE_HASH_SIZE; i++) { po = rcu_dereference(pn->hash_table[i]); while (po) { if (!pos--) goto out; po = rcu_dereference(po->next); } } out: return po; } static void *pppoe_seq_start(struct seq_file *seq, loff_t *pos) __acquires(RCU) { struct pppoe_net *pn = 
pppoe_pernet(seq_file_net(seq)); loff_t l = *pos; rcu_read_lock(); return l ? pppoe_get_idx(pn, --l) : SEQ_START_TOKEN; } static void *pppoe_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct pppoe_net *pn = pppoe_pernet(seq_file_net(seq)); struct pppox_sock *po, *next; ++*pos; if (v == SEQ_START_TOKEN) { po = pppoe_get_idx(pn, 0); goto out; } po = v; next = rcu_dereference(po->next); if (next) po = next; else { int hash = hash_item(po->pppoe_pa.sid, po->pppoe_pa.remote); po = NULL; while (++hash < PPPOE_HASH_SIZE) { po = rcu_dereference(pn->hash_table[hash]); if (po) break; } } out: return po; } static void pppoe_seq_stop(struct seq_file *seq, void *v) __releases(RCU) { rcu_read_unlock(); } static const struct seq_operations pppoe_seq_ops = { .start = pppoe_seq_start, .next = pppoe_seq_next, .stop = pppoe_seq_stop, .show = pppoe_seq_show, }; #endif /* CONFIG_PROC_FS */ static const struct proto_ops pppoe_ops = { .family = AF_PPPOX, .owner = THIS_MODULE, .release = pppoe_release, .bind = sock_no_bind, .connect = pppoe_connect, .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = pppoe_getname, .poll = datagram_poll, .listen = sock_no_listen, .shutdown = sock_no_shutdown, .sendmsg = pppoe_sendmsg, .recvmsg = pppoe_recvmsg, .mmap = sock_no_mmap, .ioctl = pppox_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = pppox_compat_ioctl, #endif }; static const struct pppox_proto pppoe_proto = { .create = pppoe_create, .ioctl = pppoe_ioctl, .owner = THIS_MODULE, }; static __net_init int pppoe_init_net(struct net *net) { struct pppoe_net *pn = pppoe_pernet(net); struct proc_dir_entry *pde; spin_lock_init(&pn->hash_lock); pde = proc_create_net("pppoe", 0444, net->proc_net, &pppoe_seq_ops, sizeof(struct seq_net_private)); #ifdef CONFIG_PROC_FS if (!pde) return -ENOMEM; #endif return 0; } static __net_exit void pppoe_exit_net(struct net *net) { remove_proc_entry("pppoe", net->proc_net); } static struct pernet_operations pppoe_net_ops = { .init = pppoe_init_net, .exit = pppoe_exit_net, .id = &pppoe_net_id, .size = sizeof(struct pppoe_net), }; static int __init pppoe_init(void) { int err; err = register_pernet_device(&pppoe_net_ops); if (err) goto out; err = proto_register(&pppoe_sk_proto, 0); if (err) goto out_unregister_net_ops; err = register_pppox_proto(PX_PROTO_OE, &pppoe_proto); if (err) goto out_unregister_pppoe_proto; dev_add_pack(&pppoes_ptype); dev_add_pack(&pppoed_ptype); register_netdevice_notifier(&pppoe_notifier); return 0; out_unregister_pppoe_proto: proto_unregister(&pppoe_sk_proto); out_unregister_net_ops: unregister_pernet_device(&pppoe_net_ops); out: return err; } static void __exit pppoe_exit(void) { unregister_netdevice_notifier(&pppoe_notifier); dev_remove_pack(&pppoed_ptype); dev_remove_pack(&pppoes_ptype); unregister_pppox_proto(PX_PROTO_OE); proto_unregister(&pppoe_sk_proto); unregister_pernet_device(&pppoe_net_ops); } module_init(pppoe_init); module_exit(pppoe_exit); MODULE_AUTHOR("Michal Ostrowski <mostrows@speakeasy.net>"); MODULE_DESCRIPTION("PPP over Ethernet driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS_NET_PF_PROTO(PF_PPPOX, PX_PROTO_OE);
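For orientation, here is a minimal user-space sketch (not part of the driver) of how a session socket reaches pppoe_create() and pppoe_connect() above; the interface name, session id, and peer MAC below are placeholder values, and PPPoE discovery (PADI/PADO/PADR) is assumed to have been completed separately over an ETH_P_PPP_DISC packet socket.

/*
 * Illustrative user-space sketch only. The session id, access concentrator
 * MAC and device name are placeholders obtained from a prior discovery
 * exchange; they are not defined anywhere in this driver.
 */
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <linux/if_ether.h>
#include <linux/if_pppox.h>

static int pppoe_session_open(void)
{
	struct sockaddr_pppox sp;
	int fd = socket(AF_PPPOX, SOCK_STREAM, PX_PROTO_OE);

	if (fd < 0)
		return -1;

	memset(&sp, 0, sizeof(sp));
	sp.sa_family = AF_PPPOX;
	sp.sa_protocol = PX_PROTO_OE;
	sp.sa_addr.pppoe.sid = htons(0x0001);		/* placeholder session id */
	memcpy(sp.sa_addr.pppoe.remote,
	       "\x00\x11\x22\x33\x44\x55", ETH_ALEN);	/* placeholder AC MAC */
	strncpy(sp.sa_addr.pppoe.dev, "eth0",
		sizeof(sp.sa_addr.pppoe.dev));		/* placeholder device */

	/* Ends up in pppoe_connect(): the socket is inserted into the per-net
	 * hash table and a PPP channel is registered; the fd can then be
	 * attached to a PPP unit (e.g. via the PPPIOCATTCHCHAN ioctl). */
	if (connect(fd, (struct sockaddr *)&sp, sizeof(sp)) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}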
/* SPDX-License-Identifier: GPL-2.0 */ #undef TRACE_SYSTEM #define TRACE_SYSTEM tcp #if !defined(_TRACE_TCP_H) || defined(TRACE_HEADER_MULTI_READ) #define _TRACE_TCP_H #include <linux/ipv6.h> #include <linux/tcp.h> #include <linux/tracepoint.h> #include <net/ipv6.h>
#include <net/tcp.h> #include <linux/sock_diag.h> #include <net/rstreason.h> TRACE_EVENT(tcp_retransmit_skb, TP_PROTO(const struct sock *sk, const struct sk_buff *skb, int err), TP_ARGS(sk, skb, err), TP_STRUCT__entry( __field(const void *, skbaddr) __field(const void *, skaddr) __field(int, state) __field(__u16, sport) __field(__u16, dport) __field(__u16, family) __array(__u8, saddr, 4) __array(__u8, daddr, 4) __array(__u8, saddr_v6, 16) __array(__u8, daddr_v6, 16) __field(int, err) ), TP_fast_assign( const struct inet_sock *inet = inet_sk(sk); __be32 *p32; __entry->skbaddr = skb; __entry->skaddr = sk; __entry->state = sk->sk_state; __entry->sport = ntohs(inet->inet_sport); __entry->dport = ntohs(inet->inet_dport); __entry->family = sk->sk_family; p32 = (__be32 *) __entry->saddr; *p32 = inet->inet_saddr; p32 = (__be32 *) __entry->daddr; *p32 = inet->inet_daddr; TP_STORE_ADDRS(__entry, inet->inet_saddr, inet->inet_daddr, sk->sk_v6_rcv_saddr, sk->sk_v6_daddr); __entry->err = err; ), TP_printk("skbaddr=%p skaddr=%p family=%s sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c state=%s err=%d", __entry->skbaddr, __entry->skaddr, show_family_name(__entry->family), __entry->sport, __entry->dport, __entry->saddr, __entry->daddr, __entry->saddr_v6, __entry->daddr_v6, show_tcp_state_name(__entry->state), __entry->err) ); #undef FN #define FN(reason) TRACE_DEFINE_ENUM(SK_RST_REASON_##reason); DEFINE_RST_REASON(FN, FN) #undef FN #undef FNe #define FN(reason) { SK_RST_REASON_##reason, #reason }, #define FNe(reason) { SK_RST_REASON_##reason, #reason } /* * skb of trace_tcp_send_reset is the skb that caused RST. In case of * active reset, skb should be NULL */ TRACE_EVENT(tcp_send_reset, TP_PROTO(const struct sock *sk, const struct sk_buff *skb__nullable, const enum sk_rst_reason reason), TP_ARGS(sk, skb__nullable, reason), TP_STRUCT__entry( __field(const void *, skbaddr) __field(const void *, skaddr) __field(int, state) __field(enum sk_rst_reason, reason) __array(__u8, saddr, sizeof(struct sockaddr_in6)) __array(__u8, daddr, sizeof(struct sockaddr_in6)) ), TP_fast_assign( __entry->skbaddr = skb__nullable; __entry->skaddr = sk; /* Zero means unknown state. */ __entry->state = sk ? sk->sk_state : 0; memset(__entry->saddr, 0, sizeof(struct sockaddr_in6)); memset(__entry->daddr, 0, sizeof(struct sockaddr_in6)); if (sk && sk_fullsock(sk)) { const struct inet_sock *inet = inet_sk(sk); TP_STORE_ADDR_PORTS(__entry, inet, sk); } else if (skb__nullable) { const struct tcphdr *th = (const struct tcphdr *)skb__nullable->data; /* * We should reverse the 4-tuple of skb, so later * it can print the right flow direction of rst. */ TP_STORE_ADDR_PORTS_SKB(skb__nullable, th, entry->daddr, entry->saddr); } __entry->reason = reason; ), TP_printk("skbaddr=%p skaddr=%p src=%pISpc dest=%pISpc state=%s reason=%s", __entry->skbaddr, __entry->skaddr, __entry->saddr, __entry->daddr, __entry->state ? show_tcp_state_name(__entry->state) : "UNKNOWN", __print_symbolic(__entry->reason, DEFINE_RST_REASON(FN, FNe))) ); #undef FN #undef FNe /* * tcp event with arguments sk * * Note: this class requires a valid sk pointer. 
*/ DECLARE_EVENT_CLASS(tcp_event_sk, TP_PROTO(struct sock *sk), TP_ARGS(sk), TP_STRUCT__entry( __field(const void *, skaddr) __field(__u16, sport) __field(__u16, dport) __field(__u16, family) __array(__u8, saddr, 4) __array(__u8, daddr, 4) __array(__u8, saddr_v6, 16) __array(__u8, daddr_v6, 16) __field(__u64, sock_cookie) ), TP_fast_assign( struct inet_sock *inet = inet_sk(sk); __be32 *p32; __entry->skaddr = sk; __entry->sport = ntohs(inet->inet_sport); __entry->dport = ntohs(inet->inet_dport); __entry->family = sk->sk_family; p32 = (__be32 *) __entry->saddr; *p32 = inet->inet_saddr; p32 = (__be32 *) __entry->daddr; *p32 = inet->inet_daddr; TP_STORE_ADDRS(__entry, inet->inet_saddr, inet->inet_daddr, sk->sk_v6_rcv_saddr, sk->sk_v6_daddr); __entry->sock_cookie = sock_gen_cookie(sk); ), TP_printk("family=%s sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c sock_cookie=%llx", show_family_name(__entry->family), __entry->sport, __entry->dport, __entry->saddr, __entry->daddr, __entry->saddr_v6, __entry->daddr_v6, __entry->sock_cookie) ); DEFINE_EVENT(tcp_event_sk, tcp_receive_reset, TP_PROTO(struct sock *sk), TP_ARGS(sk) ); DEFINE_EVENT(tcp_event_sk, tcp_destroy_sock, TP_PROTO(struct sock *sk), TP_ARGS(sk) ); DEFINE_EVENT(tcp_event_sk, tcp_rcv_space_adjust, TP_PROTO(struct sock *sk), TP_ARGS(sk) ); TRACE_EVENT(tcp_rcvbuf_grow, TP_PROTO(struct sock *sk, int time), TP_ARGS(sk, time), TP_STRUCT__entry( __field(int, time) __field(__u32, rtt_us) __field(__u32, copied) __field(__u32, inq) __field(__u32, space) __field(__u32, ooo_space) __field(__u32, rcvbuf) __field(__u32, rcv_ssthresh) __field(__u32, window_clamp) __field(__u32, rcv_wnd) __field(__u8, scaling_ratio) __field(__u16, sport) __field(__u16, dport) __field(__u16, family) __array(__u8, saddr, 4) __array(__u8, daddr, 4) __array(__u8, saddr_v6, 16) __array(__u8, daddr_v6, 16) __field(const void *, skaddr) __field(__u64, sock_cookie) ), TP_fast_assign( struct inet_sock *inet = inet_sk(sk); struct tcp_sock *tp = tcp_sk(sk); __be32 *p32; __entry->time = time; __entry->rtt_us = tp->rcv_rtt_est.rtt_us >> 3; __entry->copied = tp->copied_seq - tp->rcvq_space.seq; __entry->inq = tp->rcv_nxt - tp->copied_seq; __entry->space = tp->rcvq_space.space; __entry->ooo_space = RB_EMPTY_ROOT(&tp->out_of_order_queue) ? 
0 : TCP_SKB_CB(tp->ooo_last_skb)->end_seq - tp->rcv_nxt; __entry->rcvbuf = sk->sk_rcvbuf; __entry->rcv_ssthresh = tp->rcv_ssthresh; __entry->window_clamp = tp->window_clamp; __entry->rcv_wnd = tp->rcv_wnd; __entry->scaling_ratio = tp->scaling_ratio; __entry->sport = ntohs(inet->inet_sport); __entry->dport = ntohs(inet->inet_dport); __entry->family = sk->sk_family; p32 = (__be32 *) __entry->saddr; *p32 = inet->inet_saddr; p32 = (__be32 *) __entry->daddr; *p32 = inet->inet_daddr; TP_STORE_ADDRS(__entry, inet->inet_saddr, inet->inet_daddr, sk->sk_v6_rcv_saddr, sk->sk_v6_daddr); __entry->skaddr = sk; __entry->sock_cookie = sock_gen_cookie(sk); ), TP_printk("time=%u rtt_us=%u copied=%u inq=%u space=%u ooo=%u scaling_ratio=%u rcvbuf=%u " "rcv_ssthresh=%u window_clamp=%u rcv_wnd=%u " "family=%s sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 " "saddrv6=%pI6c daddrv6=%pI6c skaddr=%p sock_cookie=%llx", __entry->time, __entry->rtt_us, __entry->copied, __entry->inq, __entry->space, __entry->ooo_space, __entry->scaling_ratio, __entry->rcvbuf, __entry->rcv_ssthresh, __entry->window_clamp, __entry->rcv_wnd, show_family_name(__entry->family), __entry->sport, __entry->dport, __entry->saddr, __entry->daddr, __entry->saddr_v6, __entry->daddr_v6, __entry->skaddr, __entry->sock_cookie) ); TRACE_EVENT(tcp_retransmit_synack, TP_PROTO(const struct sock *sk, const struct request_sock *req), TP_ARGS(sk, req), TP_STRUCT__entry( __field(const void *, skaddr) __field(const void *, req) __field(__u16, sport) __field(__u16, dport) __field(__u16, family) __array(__u8, saddr, 4) __array(__u8, daddr, 4) __array(__u8, saddr_v6, 16) __array(__u8, daddr_v6, 16) ), TP_fast_assign( struct inet_request_sock *ireq = inet_rsk(req); __be32 *p32; __entry->skaddr = sk; __entry->req = req; __entry->sport = ireq->ir_num; __entry->dport = ntohs(ireq->ir_rmt_port); __entry->family = sk->sk_family; p32 = (__be32 *) __entry->saddr; *p32 = ireq->ir_loc_addr; p32 = (__be32 *) __entry->daddr; *p32 = ireq->ir_rmt_addr; TP_STORE_ADDRS(__entry, ireq->ir_loc_addr, ireq->ir_rmt_addr, ireq->ir_v6_loc_addr, ireq->ir_v6_rmt_addr); ), TP_printk("family=%s sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c", show_family_name(__entry->family), __entry->sport, __entry->dport, __entry->saddr, __entry->daddr, __entry->saddr_v6, __entry->daddr_v6) ); TRACE_EVENT(tcp_sendmsg_locked, TP_PROTO(const struct sock *sk, const struct msghdr *msg, const struct sk_buff *skb, int size_goal), TP_ARGS(sk, msg, skb, size_goal), TP_STRUCT__entry( __field(const void *, skb_addr) __field(int, skb_len) __field(int, msg_left) __field(int, size_goal) ), TP_fast_assign( __entry->skb_addr = skb; __entry->skb_len = skb ? 
skb->len : 0; __entry->msg_left = msg_data_left(msg); __entry->size_goal = size_goal; ), TP_printk("skb_addr %p skb_len %d msg_left %d size_goal %d", __entry->skb_addr, __entry->skb_len, __entry->msg_left, __entry->size_goal)); DECLARE_TRACE(tcp_cwnd_reduction, TP_PROTO(const struct sock *sk, int newly_acked_sacked, int newly_lost, int flag), TP_ARGS(sk, newly_acked_sacked, newly_lost, flag) ); #include <trace/events/net_probe_common.h> TRACE_EVENT(tcp_probe, TP_PROTO(struct sock *sk, const struct sk_buff *skb), TP_ARGS(sk, skb), TP_STRUCT__entry( /* sockaddr_in6 is always bigger than sockaddr_in */ __array(__u8, saddr, sizeof(struct sockaddr_in6)) __array(__u8, daddr, sizeof(struct sockaddr_in6)) __field(__u16, sport) __field(__u16, dport) __field(__u16, family) __field(__u32, mark) __field(__u16, data_len) __field(__u32, snd_nxt) __field(__u32, snd_una) __field(__u32, snd_cwnd) __field(__u32, ssthresh) __field(__u32, snd_wnd) __field(__u32, srtt) __field(__u32, rcv_wnd) __field(__u64, sock_cookie) __field(const void *, skbaddr) __field(const void *, skaddr) ), TP_fast_assign( const struct tcphdr *th = (const struct tcphdr *)skb->data; const struct inet_sock *inet = inet_sk(sk); const struct tcp_sock *tp = tcp_sk(sk); memset(__entry->saddr, 0, sizeof(struct sockaddr_in6)); memset(__entry->daddr, 0, sizeof(struct sockaddr_in6)); TP_STORE_ADDR_PORTS(__entry, inet, sk); /* For filtering use */ __entry->sport = ntohs(inet->inet_sport); __entry->dport = ntohs(inet->inet_dport); __entry->mark = skb->mark; __entry->family = sk->sk_family; __entry->data_len = skb->len - __tcp_hdrlen(th); __entry->snd_nxt = tp->snd_nxt; __entry->snd_una = tp->snd_una; __entry->snd_cwnd = tcp_snd_cwnd(tp); __entry->snd_wnd = tp->snd_wnd; __entry->rcv_wnd = tp->rcv_wnd; __entry->ssthresh = tcp_current_ssthresh(sk); __entry->srtt = tp->srtt_us >> 3; __entry->sock_cookie = sock_gen_cookie(sk); __entry->skbaddr = skb; __entry->skaddr = sk; ), TP_printk("family=%s src=%pISpc dest=%pISpc mark=%#x data_len=%d snd_nxt=%#x snd_una=%#x snd_cwnd=%u ssthresh=%u snd_wnd=%u srtt=%u rcv_wnd=%u sock_cookie=%llx skbaddr=%p skaddr=%p", show_family_name(__entry->family), __entry->saddr, __entry->daddr, __entry->mark, __entry->data_len, __entry->snd_nxt, __entry->snd_una, __entry->snd_cwnd, __entry->ssthresh, __entry->snd_wnd, __entry->srtt, __entry->rcv_wnd, __entry->sock_cookie, __entry->skbaddr, __entry->skaddr) ); /* * tcp event with only skb */ DECLARE_EVENT_CLASS(tcp_event_skb, TP_PROTO(const struct sk_buff *skb), TP_ARGS(skb), TP_STRUCT__entry( __field(const void *, skbaddr) __array(__u8, saddr, sizeof(struct sockaddr_in6)) __array(__u8, daddr, sizeof(struct sockaddr_in6)) ), TP_fast_assign( const struct tcphdr *th = (const struct tcphdr *)skb->data; __entry->skbaddr = skb; memset(__entry->saddr, 0, sizeof(struct sockaddr_in6)); memset(__entry->daddr, 0, sizeof(struct sockaddr_in6)); TP_STORE_ADDR_PORTS_SKB(skb, th, __entry->saddr, __entry->daddr); ), TP_printk("skbaddr=%p src=%pISpc dest=%pISpc", __entry->skbaddr, __entry->saddr, __entry->daddr) ); DEFINE_EVENT(tcp_event_skb, tcp_bad_csum, TP_PROTO(const struct sk_buff *skb), TP_ARGS(skb) ); TRACE_EVENT(tcp_cong_state_set, TP_PROTO(struct sock *sk, const u8 ca_state), TP_ARGS(sk, ca_state), TP_STRUCT__entry( __field(const void *, skaddr) __field(__u16, sport) __field(__u16, dport) __field(__u16, family) __array(__u8, saddr, 4) __array(__u8, daddr, 4) __array(__u8, saddr_v6, 16) __array(__u8, daddr_v6, 16) __field(__u8, cong_state) ), TP_fast_assign( struct inet_sock *inet = 
inet_sk(sk); __be32 *p32; __entry->skaddr = sk; __entry->sport = ntohs(inet->inet_sport); __entry->dport = ntohs(inet->inet_dport); __entry->family = sk->sk_family; p32 = (__be32 *) __entry->saddr; *p32 = inet->inet_saddr; p32 = (__be32 *) __entry->daddr; *p32 = inet->inet_daddr; TP_STORE_ADDRS(__entry, inet->inet_saddr, inet->inet_daddr, sk->sk_v6_rcv_saddr, sk->sk_v6_daddr); __entry->cong_state = ca_state; ), TP_printk("family=%s sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c cong_state=%u", show_family_name(__entry->family), __entry->sport, __entry->dport, __entry->saddr, __entry->daddr, __entry->saddr_v6, __entry->daddr_v6, __entry->cong_state) ); DECLARE_EVENT_CLASS(tcp_hash_event, TP_PROTO(const struct sock *sk, const struct sk_buff *skb), TP_ARGS(sk, skb), TP_STRUCT__entry( __field(__u64, net_cookie) __field(const void *, skbaddr) __field(const void *, skaddr) __field(int, state) /* sockaddr_in6 is always bigger than sockaddr_in */ __array(__u8, saddr, sizeof(struct sockaddr_in6)) __array(__u8, daddr, sizeof(struct sockaddr_in6)) __field(int, l3index) __field(__u16, sport) __field(__u16, dport) __field(__u16, family) __field(bool, fin) __field(bool, syn) __field(bool, rst) __field(bool, psh) __field(bool, ack) ), TP_fast_assign( const struct tcphdr *th = (const struct tcphdr *)skb->data; __entry->net_cookie = sock_net(sk)->net_cookie; __entry->skbaddr = skb; __entry->skaddr = sk; __entry->state = sk->sk_state; memset(__entry->saddr, 0, sizeof(struct sockaddr_in6)); memset(__entry->daddr, 0, sizeof(struct sockaddr_in6)); TP_STORE_ADDR_PORTS_SKB(skb, th, __entry->saddr, __entry->daddr); __entry->l3index = inet_sdif(skb) ? inet_iif(skb) : 0; /* For filtering use */ __entry->sport = ntohs(th->source); __entry->dport = ntohs(th->dest); __entry->family = sk->sk_family; __entry->fin = th->fin; __entry->syn = th->syn; __entry->rst = th->rst; __entry->psh = th->psh; __entry->ack = th->ack; ), TP_printk("net=%llu state=%s family=%s src=%pISpc dest=%pISpc L3index=%d [%c%c%c%c%c]", __entry->net_cookie, show_tcp_state_name(__entry->state), show_family_name(__entry->family), __entry->saddr, __entry->daddr, __entry->l3index, __entry->fin ? 'F' : ' ', __entry->syn ? 'S' : ' ', __entry->rst ? 'R' : ' ', __entry->psh ? 'P' : ' ', __entry->ack ? '.' 
: ' ') ); DEFINE_EVENT(tcp_hash_event, tcp_hash_bad_header, TP_PROTO(const struct sock *sk, const struct sk_buff *skb), TP_ARGS(sk, skb) ); DEFINE_EVENT(tcp_hash_event, tcp_hash_md5_required, TP_PROTO(const struct sock *sk, const struct sk_buff *skb), TP_ARGS(sk, skb) ); DEFINE_EVENT(tcp_hash_event, tcp_hash_md5_unexpected, TP_PROTO(const struct sock *sk, const struct sk_buff *skb), TP_ARGS(sk, skb) ); DEFINE_EVENT(tcp_hash_event, tcp_hash_md5_mismatch, TP_PROTO(const struct sock *sk, const struct sk_buff *skb), TP_ARGS(sk, skb) ); DEFINE_EVENT(tcp_hash_event, tcp_hash_ao_required, TP_PROTO(const struct sock *sk, const struct sk_buff *skb), TP_ARGS(sk, skb) ); DECLARE_EVENT_CLASS(tcp_ao_event, TP_PROTO(const struct sock *sk, const struct sk_buff *skb, const __u8 keyid, const __u8 rnext, const __u8 maclen), TP_ARGS(sk, skb, keyid, rnext, maclen), TP_STRUCT__entry( __field(__u64, net_cookie) __field(const void *, skbaddr) __field(const void *, skaddr) __field(int, state) /* sockaddr_in6 is always bigger than sockaddr_in */ __array(__u8, saddr, sizeof(struct sockaddr_in6)) __array(__u8, daddr, sizeof(struct sockaddr_in6)) __field(int, l3index) __field(__u16, sport) __field(__u16, dport) __field(__u16, family) __field(bool, fin) __field(bool, syn) __field(bool, rst) __field(bool, psh) __field(bool, ack) __field(__u8, keyid) __field(__u8, rnext) __field(__u8, maclen) ), TP_fast_assign( const struct tcphdr *th = (const struct tcphdr *)skb->data; __entry->net_cookie = sock_net(sk)->net_cookie; __entry->skbaddr = skb; __entry->skaddr = sk; __entry->state = sk->sk_state; memset(__entry->saddr, 0, sizeof(struct sockaddr_in6)); memset(__entry->daddr, 0, sizeof(struct sockaddr_in6)); TP_STORE_ADDR_PORTS_SKB(skb, th, __entry->saddr, __entry->daddr); __entry->l3index = inet_sdif(skb) ? inet_iif(skb) : 0; /* For filtering use */ __entry->sport = ntohs(th->source); __entry->dport = ntohs(th->dest); __entry->family = sk->sk_family; __entry->fin = th->fin; __entry->syn = th->syn; __entry->rst = th->rst; __entry->psh = th->psh; __entry->ack = th->ack; __entry->keyid = keyid; __entry->rnext = rnext; __entry->maclen = maclen; ), TP_printk("net=%llu state=%s family=%s src=%pISpc dest=%pISpc L3index=%d [%c%c%c%c%c] keyid=%u rnext=%u maclen=%u", __entry->net_cookie, show_tcp_state_name(__entry->state), show_family_name(__entry->family), __entry->saddr, __entry->daddr, __entry->l3index, __entry->fin ? 'F' : ' ', __entry->syn ? 'S' : ' ', __entry->rst ? 'R' : ' ', __entry->psh ? 'P' : ' ', __entry->ack ? '.' 
: ' ', __entry->keyid, __entry->rnext, __entry->maclen) ); DEFINE_EVENT(tcp_ao_event, tcp_ao_handshake_failure, TP_PROTO(const struct sock *sk, const struct sk_buff *skb, const __u8 keyid, const __u8 rnext, const __u8 maclen), TP_ARGS(sk, skb, keyid, rnext, maclen) ); #ifdef CONFIG_TCP_AO DEFINE_EVENT(tcp_ao_event, tcp_ao_wrong_maclen, TP_PROTO(const struct sock *sk, const struct sk_buff *skb, const __u8 keyid, const __u8 rnext, const __u8 maclen), TP_ARGS(sk, skb, keyid, rnext, maclen) ); DEFINE_EVENT(tcp_ao_event, tcp_ao_mismatch, TP_PROTO(const struct sock *sk, const struct sk_buff *skb, const __u8 keyid, const __u8 rnext, const __u8 maclen), TP_ARGS(sk, skb, keyid, rnext, maclen) ); DEFINE_EVENT(tcp_ao_event, tcp_ao_key_not_found, TP_PROTO(const struct sock *sk, const struct sk_buff *skb, const __u8 keyid, const __u8 rnext, const __u8 maclen), TP_ARGS(sk, skb, keyid, rnext, maclen) ); DEFINE_EVENT(tcp_ao_event, tcp_ao_rnext_request, TP_PROTO(const struct sock *sk, const struct sk_buff *skb, const __u8 keyid, const __u8 rnext, const __u8 maclen), TP_ARGS(sk, skb, keyid, rnext, maclen) ); DECLARE_EVENT_CLASS(tcp_ao_event_sk, TP_PROTO(const struct sock *sk, const __u8 keyid, const __u8 rnext), TP_ARGS(sk, keyid, rnext), TP_STRUCT__entry( __field(__u64, net_cookie) __field(const void *, skaddr) __field(int, state) /* sockaddr_in6 is always bigger than sockaddr_in */ __array(__u8, saddr, sizeof(struct sockaddr_in6)) __array(__u8, daddr, sizeof(struct sockaddr_in6)) __field(__u16, sport) __field(__u16, dport) __field(__u16, family) __field(__u8, keyid) __field(__u8, rnext) ), TP_fast_assign( const struct inet_sock *inet = inet_sk(sk); __entry->net_cookie = sock_net(sk)->net_cookie; __entry->skaddr = sk; __entry->state = sk->sk_state; memset(__entry->saddr, 0, sizeof(struct sockaddr_in6)); memset(__entry->daddr, 0, sizeof(struct sockaddr_in6)); TP_STORE_ADDR_PORTS(__entry, inet, sk); /* For filtering use */ __entry->sport = ntohs(inet->inet_sport); __entry->dport = ntohs(inet->inet_dport); __entry->family = sk->sk_family; __entry->keyid = keyid; __entry->rnext = rnext; ), TP_printk("net=%llu state=%s family=%s src=%pISpc dest=%pISpc keyid=%u rnext=%u", __entry->net_cookie, show_tcp_state_name(__entry->state), show_family_name(__entry->family), __entry->saddr, __entry->daddr, __entry->keyid, __entry->rnext) ); DEFINE_EVENT(tcp_ao_event_sk, tcp_ao_synack_no_key, TP_PROTO(const struct sock *sk, const __u8 keyid, const __u8 rnext), TP_ARGS(sk, keyid, rnext) ); DECLARE_EVENT_CLASS(tcp_ao_event_sne, TP_PROTO(const struct sock *sk, __u32 new_sne), TP_ARGS(sk, new_sne), TP_STRUCT__entry( __field(__u64, net_cookie) __field(const void *, skaddr) __field(int, state) /* sockaddr_in6 is always bigger than sockaddr_in */ __array(__u8, saddr, sizeof(struct sockaddr_in6)) __array(__u8, daddr, sizeof(struct sockaddr_in6)) __field(__u16, sport) __field(__u16, dport) __field(__u16, family) __field(__u32, new_sne) ), TP_fast_assign( const struct inet_sock *inet = inet_sk(sk); __entry->net_cookie = sock_net(sk)->net_cookie; __entry->skaddr = sk; __entry->state = sk->sk_state; memset(__entry->saddr, 0, sizeof(struct sockaddr_in6)); memset(__entry->daddr, 0, sizeof(struct sockaddr_in6)); TP_STORE_ADDR_PORTS(__entry, inet, sk); /* For filtering use */ __entry->sport = ntohs(inet->inet_sport); __entry->dport = ntohs(inet->inet_dport); __entry->family = sk->sk_family; __entry->new_sne = new_sne; ), TP_printk("net=%llu state=%s family=%s src=%pISpc dest=%pISpc sne=%u", __entry->net_cookie, 
show_tcp_state_name(__entry->state), show_family_name(__entry->family), __entry->saddr, __entry->daddr, __entry->new_sne) ); DEFINE_EVENT(tcp_ao_event_sne, tcp_ao_snd_sne_update, TP_PROTO(const struct sock *sk, __u32 new_sne), TP_ARGS(sk, new_sne) ); DEFINE_EVENT(tcp_ao_event_sne, tcp_ao_rcv_sne_update, TP_PROTO(const struct sock *sk, __u32 new_sne), TP_ARGS(sk, new_sne) ); #endif /* CONFIG_TCP_AO */ #endif /* _TRACE_TCP_H */ /* This part must be outside protection */ #include <trace/define_trace.h>
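The tracepoints above are consumed through the normal tracing interfaces. As a hedged sketch (not part of this header), the snippet below enables tcp:tcp_retransmit_skb via tracefs, assuming the default /sys/kernel/tracing mount point, and streams the formatted records.

/*
 * Illustrative user-space sketch only: enable the tcp_retransmit_skb event
 * defined above and print each record as formatted by its TP_printk().
 * Requires root and a mounted tracefs; error handling is kept minimal.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/tracing/events/tcp/tcp_retransmit_skb/enable", "w");
	char line[4096];

	if (!f)
		return 1;
	fputs("1", f);
	fclose(f);

	/* Each line carries the TP_printk() output: skbaddr/skaddr, the
	 * 4-tuple, socket state and the retransmit error code. */
	f = fopen("/sys/kernel/tracing/trace_pipe", "r");
	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}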
// SPDX-License-Identifier: GPL-2.0-or-later /* * IPV6 GSO/GRO offload support * Linux INET6 implementation * * UDPv6 GSO support */ #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/indirect_call_wrapper.h> #include <net/protocol.h> #include <net/ipv6.h> #include <net/udp.h> #include <net/ip6_checksum.h> #include "ip6_offload.h" #include <net/gro.h> #include <net/gso.h> static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, netdev_features_t features) { struct sk_buff *segs = ERR_PTR(-EINVAL); unsigned int mss; unsigned int unfrag_ip6hlen, unfrag_len; struct frag_hdr *fptr; u8 *packet_start, *prevhdr; u8 nexthdr; u8 frag_hdr_sz = sizeof(struct frag_hdr); __wsum csum; int tnl_hlen; int err; if (skb->encapsulation && skb_shinfo(skb)->gso_type & (SKB_GSO_UDP_TUNNEL|SKB_GSO_UDP_TUNNEL_CSUM)) segs = skb_udp_tunnel_segment(skb, features, true); else { const struct ipv6hdr *ipv6h; struct udphdr *uh; if (!(skb_shinfo(skb)->gso_type & (SKB_GSO_UDP | SKB_GSO_UDP_L4))) goto out; if (!pskb_may_pull(skb, sizeof(struct udphdr))) goto out; if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) return __udp_gso_segment(skb, features, true); mss = skb_shinfo(skb)->gso_size; if (unlikely(skb->len <= mss)) goto out; /* Do software UFO. Complete and fill in the UDP checksum as HW cannot * do checksum of UDP packets sent as multiple IP fragments. */ uh = udp_hdr(skb); ipv6h = ipv6_hdr(skb); uh->check = 0; csum = skb_checksum(skb, 0, skb->len, 0); uh->check = udp_v6_check(skb->len, &ipv6h->saddr, &ipv6h->daddr, csum); if (uh->check == 0) uh->check = CSUM_MANGLED_0; skb->ip_summed = CHECKSUM_UNNECESSARY; /* If there is no outer header we can fake a checksum offload * due to the fact that we have already done the checksum in * software prior to segmenting the frame. */ if (!skb->encap_hdr_csum) features |= NETIF_F_HW_CSUM; /* Check if there is enough headroom to insert fragment header. */ tnl_hlen = skb_tnl_header_len(skb); if (skb->mac_header < (tnl_hlen + frag_hdr_sz)) { if (gso_pskb_expand_head(skb, tnl_hlen + frag_hdr_sz)) goto out; } /* Find the unfragmentable header and shift it left by frag_hdr_sz * bytes to insert fragment header.
*/ err = ip6_find_1stfragopt(skb, &prevhdr); if (err < 0) return ERR_PTR(err); unfrag_ip6hlen = err; nexthdr = *prevhdr; *prevhdr = NEXTHDR_FRAGMENT; unfrag_len = (skb_network_header(skb) - skb_mac_header(skb)) + unfrag_ip6hlen + tnl_hlen; packet_start = (u8 *) skb->head + SKB_GSO_CB(skb)->mac_offset; memmove(packet_start-frag_hdr_sz, packet_start, unfrag_len); SKB_GSO_CB(skb)->mac_offset -= frag_hdr_sz; skb->mac_header -= frag_hdr_sz; skb->network_header -= frag_hdr_sz; fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen); fptr->nexthdr = nexthdr; fptr->reserved = 0; fptr->identification = ipv6_proxy_select_ident(dev_net(skb->dev), skb); /* Fragment the skb. ipv6 header and the remaining fields of the * fragment header are updated in ipv6_gso_segment() */ segs = skb_segment(skb, features); } out: return segs; } static struct sock *udp6_gro_lookup_skb(struct sk_buff *skb, __be16 sport, __be16 dport) { const struct ipv6hdr *iph = skb_gro_network_header(skb); struct net *net = dev_net_rcu(skb->dev); struct sock *sk; int iif, sdif; sk = udp_tunnel_sk(net, true); if (sk && dport == htons(sk->sk_num)) return sk; inet6_get_iif_sdif(skb, &iif, &sdif); return __udp6_lib_lookup(net, &iph->saddr, sport, &iph->daddr, dport, iif, sdif, net->ipv4.udp_table, NULL); } INDIRECT_CALLABLE_SCOPE struct sk_buff *udp6_gro_receive(struct list_head *head, struct sk_buff *skb) { struct udphdr *uh = udp_gro_udphdr(skb); struct sock *sk = NULL; struct sk_buff *pp; if (unlikely(!uh)) goto flush; /* Don't bother verifying checksum if we're going to flush anyway. */ if (NAPI_GRO_CB(skb)->flush) goto skip; if (skb_gro_checksum_validate_zero_check(skb, IPPROTO_UDP, uh->check, ip6_gro_compute_pseudo)) goto flush; else if (uh->check) skb_gro_checksum_try_convert(skb, IPPROTO_UDP, ip6_gro_compute_pseudo); skip: if (static_branch_unlikely(&udpv6_encap_needed_key)) sk = udp6_gro_lookup_skb(skb, uh->source, uh->dest); pp = udp_gro_receive(head, skb, uh, sk); return pp; flush: NAPI_GRO_CB(skb)->flush = 1; return NULL; } INDIRECT_CALLABLE_SCOPE int udp6_gro_complete(struct sk_buff *skb, int nhoff) { const u16 offset = NAPI_GRO_CB(skb)->network_offsets[skb->encapsulation]; const struct ipv6hdr *ipv6h = (struct ipv6hdr *)(skb->data + offset); struct udphdr *uh = (struct udphdr *)(skb->data + nhoff); /* do fraglist only if there is no outer UDP encap (or we already processed it) */ if (NAPI_GRO_CB(skb)->is_flist && !NAPI_GRO_CB(skb)->encap_mark) { uh->len = htons(skb->len - nhoff); skb_shinfo(skb)->gso_type |= (SKB_GSO_FRAGLIST|SKB_GSO_UDP_L4); skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count; __skb_incr_checksum_unnecessary(skb); return 0; } if (uh->check) uh->check = ~udp_v6_check(skb->len - nhoff, &ipv6h->saddr, &ipv6h->daddr, 0); return udp_gro_complete(skb, nhoff, udp6_lib_lookup_skb); } int __init udpv6_offload_init(void) { net_hotdata.udpv6_offload = (struct net_offload) { .callbacks = { .gso_segment = udp6_ufo_fragment, .gro_receive = udp6_gro_receive, .gro_complete = udp6_gro_complete, }, }; return inet6_add_offload(&net_hotdata.udpv6_offload, IPPROTO_UDP); } int udpv6_offload_exit(void) { return inet6_del_offload(&net_hotdata.udpv6_offload, IPPROTO_UDP); }
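For context, the SKB_GSO_UDP_L4 path segmented above is what a user-space sender reaches with the UDP_SEGMENT socket option. The sketch below is illustrative only; the fallback define and the helper name are assumptions, not part of this file.

/*
 * Illustrative user-space sketch only: request UDP GSO so one large send is
 * split into gso_size-sized datagrams by the stack (the SKB_GSO_UDP_L4 case
 * handled by __udp_gso_segment() above). Destination and sizes come from the
 * caller; SOL_UDP has the same value as IPPROTO_UDP.
 */
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>

#ifndef UDP_SEGMENT
#define UDP_SEGMENT 103		/* from include/uapi/linux/udp.h */
#endif

static ssize_t send_gso_burst(int fd, const struct sockaddr_in6 *dst,
			      const void *buf, size_t len, int gso_size)
{
	/* Each resulting datagram carries at most gso_size bytes of payload. */
	if (setsockopt(fd, IPPROTO_UDP, UDP_SEGMENT, &gso_size, sizeof(gso_size)) < 0)
		return -1;
	return sendto(fd, buf, len, 0,
		      (const struct sockaddr *)dst, sizeof(*dst));
}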
/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 2018 - 2021, 2023 - 2024 Intel Corporation */ #include <net/cfg80211.h> #include "core.h" #include "nl80211.h" #include "rdev-ops.h" static int pmsr_parse_ftm(struct cfg80211_registered_device *rdev, struct nlattr *ftmreq, struct cfg80211_pmsr_request_peer *out, struct genl_info *info) { const struct cfg80211_pmsr_capabilities *capa = rdev->wiphy.pmsr_capa; struct nlattr *tb[NL80211_PMSR_FTM_REQ_ATTR_MAX + 1]; u32 preamble = NL80211_PREAMBLE_DMG; /* only optional in DMG */ /* validate existing data */ if (!(rdev->wiphy.pmsr_capa->ftm.bandwidths & BIT(out->chandef.width))) { NL_SET_ERR_MSG(info->extack, "FTM: unsupported bandwidth"); return -EINVAL; } /* no validation needed - was already done via nested policy */ nla_parse_nested_deprecated(tb, NL80211_PMSR_FTM_REQ_ATTR_MAX, ftmreq, NULL, NULL); if (tb[NL80211_PMSR_FTM_REQ_ATTR_PREAMBLE]) preamble =
nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_PREAMBLE]); /* set up values - struct is 0-initialized */ out->ftm.requested = true; switch (out->chandef.chan->band) { case NL80211_BAND_60GHZ: /* optional */ break; default: if (!tb[NL80211_PMSR_FTM_REQ_ATTR_PREAMBLE]) { NL_SET_ERR_MSG(info->extack, "FTM: must specify preamble"); return -EINVAL; } } if (!(capa->ftm.preambles & BIT(preamble))) { NL_SET_ERR_MSG_ATTR(info->extack, tb[NL80211_PMSR_FTM_REQ_ATTR_PREAMBLE], "FTM: invalid preamble"); return -EINVAL; } out->ftm.preamble = preamble; out->ftm.burst_period = 0; if (tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD]) out->ftm.burst_period = nla_get_u16(tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD]); out->ftm.asap = !!tb[NL80211_PMSR_FTM_REQ_ATTR_ASAP]; if (out->ftm.asap && !capa->ftm.asap) { NL_SET_ERR_MSG_ATTR(info->extack, tb[NL80211_PMSR_FTM_REQ_ATTR_ASAP], "FTM: ASAP mode not supported"); return -EINVAL; } if (!out->ftm.asap && !capa->ftm.non_asap) { NL_SET_ERR_MSG(info->extack, "FTM: non-ASAP mode not supported"); return -EINVAL; } out->ftm.num_bursts_exp = 0; if (tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP]) out->ftm.num_bursts_exp = nla_get_u8(tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP]); if (capa->ftm.max_bursts_exponent >= 0 && out->ftm.num_bursts_exp > capa->ftm.max_bursts_exponent) { NL_SET_ERR_MSG_ATTR(info->extack, tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP], "FTM: max NUM_BURSTS_EXP must be set lower than the device limit"); return -EINVAL; } out->ftm.burst_duration = 15; if (tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION]) out->ftm.burst_duration = nla_get_u8(tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION]); out->ftm.ftms_per_burst = 0; if (tb[NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST]) out->ftm.ftms_per_burst = nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST]); if (capa->ftm.max_ftms_per_burst && (out->ftm.ftms_per_burst > capa->ftm.max_ftms_per_burst || out->ftm.ftms_per_burst == 0)) { NL_SET_ERR_MSG_ATTR(info->extack, tb[NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST], "FTM: FTMs per burst must be set lower than the device limit but non-zero"); return -EINVAL; } out->ftm.ftmr_retries = 3; if (tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES]) out->ftm.ftmr_retries = nla_get_u8(tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES]); out->ftm.request_lci = !!tb[NL80211_PMSR_FTM_REQ_ATTR_REQUEST_LCI]; if (out->ftm.request_lci && !capa->ftm.request_lci) { NL_SET_ERR_MSG_ATTR(info->extack, tb[NL80211_PMSR_FTM_REQ_ATTR_REQUEST_LCI], "FTM: LCI request not supported"); } out->ftm.request_civicloc = !!tb[NL80211_PMSR_FTM_REQ_ATTR_REQUEST_CIVICLOC]; if (out->ftm.request_civicloc && !capa->ftm.request_civicloc) { NL_SET_ERR_MSG_ATTR(info->extack, tb[NL80211_PMSR_FTM_REQ_ATTR_REQUEST_CIVICLOC], "FTM: civic location request not supported"); } out->ftm.trigger_based = !!tb[NL80211_PMSR_FTM_REQ_ATTR_TRIGGER_BASED]; if (out->ftm.trigger_based && !capa->ftm.trigger_based) { NL_SET_ERR_MSG_ATTR(info->extack, tb[NL80211_PMSR_FTM_REQ_ATTR_TRIGGER_BASED], "FTM: trigger based ranging is not supported"); return -EINVAL; } out->ftm.non_trigger_based = !!tb[NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED]; if (out->ftm.non_trigger_based && !capa->ftm.non_trigger_based) { NL_SET_ERR_MSG_ATTR(info->extack, tb[NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED], "FTM: trigger based ranging is not supported"); return -EINVAL; } if (out->ftm.trigger_based && out->ftm.non_trigger_based) { NL_SET_ERR_MSG(info->extack, "FTM: can't set both trigger based and non trigger based"); return -EINVAL; } if (out->ftm.ftms_per_burst > 31 && 
!out->ftm.non_trigger_based && !out->ftm.trigger_based) { NL_SET_ERR_MSG_ATTR(info->extack, tb[NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST], "FTM: FTMs per burst must be set lower than 31"); return -ERANGE; } if ((out->ftm.trigger_based || out->ftm.non_trigger_based) && out->ftm.preamble != NL80211_PREAMBLE_HE) { NL_SET_ERR_MSG_ATTR(info->extack, tb[NL80211_PMSR_FTM_REQ_ATTR_PREAMBLE], "FTM: non EDCA based ranging must use HE preamble"); return -EINVAL; } out->ftm.lmr_feedback = !!tb[NL80211_PMSR_FTM_REQ_ATTR_LMR_FEEDBACK]; if (!out->ftm.trigger_based && !out->ftm.non_trigger_based && out->ftm.lmr_feedback) { NL_SET_ERR_MSG_ATTR(info->extack, tb[NL80211_PMSR_FTM_REQ_ATTR_LMR_FEEDBACK], "FTM: LMR feedback set for EDCA based ranging"); return -EINVAL; } if (tb[NL80211_PMSR_FTM_REQ_ATTR_BSS_COLOR]) { if (!out->ftm.non_trigger_based && !out->ftm.trigger_based) { NL_SET_ERR_MSG_ATTR(info->extack, tb[NL80211_PMSR_FTM_REQ_ATTR_BSS_COLOR], "FTM: BSS color set for EDCA based ranging"); return -EINVAL; } out->ftm.bss_color = nla_get_u8(tb[NL80211_PMSR_FTM_REQ_ATTR_BSS_COLOR]); } return 0; } static int pmsr_parse_peer(struct cfg80211_registered_device *rdev, struct nlattr *peer, struct cfg80211_pmsr_request_peer *out, struct genl_info *info) { struct nlattr *tb[NL80211_PMSR_PEER_ATTR_MAX + 1]; struct nlattr *req[NL80211_PMSR_REQ_ATTR_MAX + 1]; struct nlattr *treq; int err, rem; /* no validation needed - was already done via nested policy */ nla_parse_nested_deprecated(tb, NL80211_PMSR_PEER_ATTR_MAX, peer, NULL, NULL); if (!tb[NL80211_PMSR_PEER_ATTR_ADDR] || !tb[NL80211_PMSR_PEER_ATTR_CHAN] || !tb[NL80211_PMSR_PEER_ATTR_REQ]) { NL_SET_ERR_MSG_ATTR(info->extack, peer, "insufficient peer data"); return -EINVAL; } memcpy(out->addr, nla_data(tb[NL80211_PMSR_PEER_ATTR_ADDR]), ETH_ALEN); /* reuse info->attrs */ memset(info->attrs, 0, sizeof(*info->attrs) * (NL80211_ATTR_MAX + 1)); err = nla_parse_nested_deprecated(info->attrs, NL80211_ATTR_MAX, tb[NL80211_PMSR_PEER_ATTR_CHAN], NULL, info->extack); if (err) return err; err = nl80211_parse_chandef(rdev, info, &out->chandef); if (err) return err; /* no validation needed - was already done via nested policy */ nla_parse_nested_deprecated(req, NL80211_PMSR_REQ_ATTR_MAX, tb[NL80211_PMSR_PEER_ATTR_REQ], NULL, NULL); if (!req[NL80211_PMSR_REQ_ATTR_DATA]) { NL_SET_ERR_MSG_ATTR(info->extack, tb[NL80211_PMSR_PEER_ATTR_REQ], "missing request type/data"); return -EINVAL; } if (req[NL80211_PMSR_REQ_ATTR_GET_AP_TSF]) out->report_ap_tsf = true; if (out->report_ap_tsf && !rdev->wiphy.pmsr_capa->report_ap_tsf) { NL_SET_ERR_MSG_ATTR(info->extack, req[NL80211_PMSR_REQ_ATTR_GET_AP_TSF], "reporting AP TSF is not supported"); return -EINVAL; } nla_for_each_nested(treq, req[NL80211_PMSR_REQ_ATTR_DATA], rem) { switch (nla_type(treq)) { case NL80211_PMSR_TYPE_FTM: err = pmsr_parse_ftm(rdev, treq, out, info); break; default: NL_SET_ERR_MSG_ATTR(info->extack, treq, "unsupported measurement type"); err = -EINVAL; } } if (err) return err; return 0; } int nl80211_pmsr_start(struct sk_buff *skb, struct genl_info *info) { struct nlattr *reqattr = info->attrs[NL80211_ATTR_PEER_MEASUREMENTS]; struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct wireless_dev *wdev = info->user_ptr[1]; struct cfg80211_pmsr_request *req; struct nlattr *peers, *peer; int count, rem, err, idx; if (!rdev->wiphy.pmsr_capa) return -EOPNOTSUPP; if (!reqattr) return -EINVAL; peers = nla_find(nla_data(reqattr), nla_len(reqattr), NL80211_PMSR_ATTR_PEERS); if (!peers) return -EINVAL; count = 0; 
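/* First pass over the nested PEERS attribute: just count the peers and
 * reject the request if it exceeds the device's advertised max_peers,
 * before any allocation is done. */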
nla_for_each_nested(peer, peers, rem) { count++; if (count > rdev->wiphy.pmsr_capa->max_peers) { NL_SET_ERR_MSG_ATTR(info->extack, peer, "Too many peers used"); return -EINVAL; } } req = kzalloc(struct_size(req, peers, count), GFP_KERNEL); if (!req) return -ENOMEM; req->n_peers = count; if (info->attrs[NL80211_ATTR_TIMEOUT]) req->timeout = nla_get_u32(info->attrs[NL80211_ATTR_TIMEOUT]); if (info->attrs[NL80211_ATTR_MAC]) { if (!rdev->wiphy.pmsr_capa->randomize_mac_addr) { NL_SET_ERR_MSG_ATTR(info->extack, info->attrs[NL80211_ATTR_MAC], "device cannot randomize MAC address"); err = -EINVAL; goto out_err; } err = nl80211_parse_random_mac(info->attrs, req->mac_addr, req->mac_addr_mask); if (err) goto out_err; } else { memcpy(req->mac_addr, wdev_address(wdev), ETH_ALEN); eth_broadcast_addr(req->mac_addr_mask); } idx = 0; nla_for_each_nested(peer, peers, rem) { /* NB: this reuses info->attrs, but we no longer need it */ err = pmsr_parse_peer(rdev, peer, &req->peers[idx], info); if (err) goto out_err; idx++; } req->cookie = cfg80211_assign_cookie(rdev); req->nl_portid = info->snd_portid; err = rdev_start_pmsr(rdev, wdev, req); if (err) goto out_err; list_add_tail(&req->list, &wdev->pmsr_list); nl_set_extack_cookie_u64(info->extack, req->cookie); return 0; out_err: kfree(req); return err; } void cfg80211_pmsr_complete(struct wireless_dev *wdev, struct cfg80211_pmsr_request *req, gfp_t gfp) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); struct cfg80211_pmsr_request *tmp, *prev, *to_free = NULL; struct sk_buff *msg; void *hdr; trace_cfg80211_pmsr_complete(wdev->wiphy, wdev, req->cookie); msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); if (!msg) goto free_request; hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_PEER_MEASUREMENT_COMPLETE); if (!hdr) goto free_msg; if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev), NL80211_ATTR_PAD)) goto free_msg; if (nla_put_u64_64bit(msg, NL80211_ATTR_COOKIE, req->cookie, NL80211_ATTR_PAD)) goto free_msg; genlmsg_end(msg, hdr); genlmsg_unicast(wiphy_net(wdev->wiphy), msg, req->nl_portid); goto free_request; free_msg: nlmsg_free(msg); free_request: spin_lock_bh(&wdev->pmsr_lock); /* * cfg80211_pmsr_process_abort() may have already moved this request * to the free list, and will free it later. In this case, don't free * it here. 
*/ list_for_each_entry_safe(tmp, prev, &wdev->pmsr_list, list) { if (tmp == req) { list_del(&req->list); to_free = req; break; } } spin_unlock_bh(&wdev->pmsr_lock); kfree(to_free); } EXPORT_SYMBOL_GPL(cfg80211_pmsr_complete); static int nl80211_pmsr_send_ftm_res(struct sk_buff *msg, struct cfg80211_pmsr_result *res) { if (res->status == NL80211_PMSR_STATUS_FAILURE) { if (nla_put_u32(msg, NL80211_PMSR_FTM_RESP_ATTR_FAIL_REASON, res->ftm.failure_reason)) goto error; if (res->ftm.failure_reason == NL80211_PMSR_FTM_FAILURE_PEER_BUSY && res->ftm.busy_retry_time && nla_put_u32(msg, NL80211_PMSR_FTM_RESP_ATTR_BUSY_RETRY_TIME, res->ftm.busy_retry_time)) goto error; return 0; } #define PUT(tp, attr, val) \ do { \ if (nla_put_##tp(msg, \ NL80211_PMSR_FTM_RESP_ATTR_##attr, \ res->ftm.val)) \ goto error; \ } while (0) #define PUTOPT(tp, attr, val) \ do { \ if (res->ftm.val##_valid) \ PUT(tp, attr, val); \ } while (0) #define PUT_U64(attr, val) \ do { \ if (nla_put_u64_64bit(msg, \ NL80211_PMSR_FTM_RESP_ATTR_##attr,\ res->ftm.val, \ NL80211_PMSR_FTM_RESP_ATTR_PAD)) \ goto error; \ } while (0) #define PUTOPT_U64(attr, val) \ do { \ if (res->ftm.val##_valid) \ PUT_U64(attr, val); \ } while (0) if (res->ftm.burst_index >= 0) PUT(u32, BURST_INDEX, burst_index); PUTOPT(u32, NUM_FTMR_ATTEMPTS, num_ftmr_attempts); PUTOPT(u32, NUM_FTMR_SUCCESSES, num_ftmr_successes); PUT(u8, NUM_BURSTS_EXP, num_bursts_exp); PUT(u8, BURST_DURATION, burst_duration); PUT(u8, FTMS_PER_BURST, ftms_per_burst); PUTOPT(s32, RSSI_AVG, rssi_avg); PUTOPT(s32, RSSI_SPREAD, rssi_spread); if (res->ftm.tx_rate_valid && !nl80211_put_sta_rate(msg, &res->ftm.tx_rate, NL80211_PMSR_FTM_RESP_ATTR_TX_RATE)) goto error; if (res->ftm.rx_rate_valid && !nl80211_put_sta_rate(msg, &res->ftm.rx_rate, NL80211_PMSR_FTM_RESP_ATTR_RX_RATE)) goto error; PUTOPT_U64(RTT_AVG, rtt_avg); PUTOPT_U64(RTT_VARIANCE, rtt_variance); PUTOPT_U64(RTT_SPREAD, rtt_spread); PUTOPT_U64(DIST_AVG, dist_avg); PUTOPT_U64(DIST_VARIANCE, dist_variance); PUTOPT_U64(DIST_SPREAD, dist_spread); if (res->ftm.lci && res->ftm.lci_len && nla_put(msg, NL80211_PMSR_FTM_RESP_ATTR_LCI, res->ftm.lci_len, res->ftm.lci)) goto error; if (res->ftm.civicloc && res->ftm.civicloc_len && nla_put(msg, NL80211_PMSR_FTM_RESP_ATTR_CIVICLOC, res->ftm.civicloc_len, res->ftm.civicloc)) goto error; #undef PUT #undef PUTOPT #undef PUT_U64 #undef PUTOPT_U64 return 0; error: return -ENOSPC; } static int nl80211_pmsr_send_result(struct sk_buff *msg, struct cfg80211_pmsr_result *res) { struct nlattr *pmsr, *peers, *peer, *resp, *data, *typedata; pmsr = nla_nest_start_noflag(msg, NL80211_ATTR_PEER_MEASUREMENTS); if (!pmsr) goto error; peers = nla_nest_start_noflag(msg, NL80211_PMSR_ATTR_PEERS); if (!peers) goto error; peer = nla_nest_start_noflag(msg, 1); if (!peer) goto error; if (nla_put(msg, NL80211_PMSR_PEER_ATTR_ADDR, ETH_ALEN, res->addr)) goto error; resp = nla_nest_start_noflag(msg, NL80211_PMSR_PEER_ATTR_RESP); if (!resp) goto error; if (nla_put_u32(msg, NL80211_PMSR_RESP_ATTR_STATUS, res->status) || nla_put_u64_64bit(msg, NL80211_PMSR_RESP_ATTR_HOST_TIME, res->host_time, NL80211_PMSR_RESP_ATTR_PAD)) goto error; if (res->ap_tsf_valid && nla_put_u64_64bit(msg, NL80211_PMSR_RESP_ATTR_AP_TSF, res->ap_tsf, NL80211_PMSR_RESP_ATTR_PAD)) goto error; if (res->final && nla_put_flag(msg, NL80211_PMSR_RESP_ATTR_FINAL)) goto error; data = nla_nest_start_noflag(msg, NL80211_PMSR_RESP_ATTR_DATA); if (!data) goto error; typedata = nla_nest_start_noflag(msg, res->type); if (!typedata) goto error; switch (res->type) { case 
NL80211_PMSR_TYPE_FTM: if (nl80211_pmsr_send_ftm_res(msg, res)) goto error; break; default: WARN_ON(1); } nla_nest_end(msg, typedata); nla_nest_end(msg, data); nla_nest_end(msg, resp); nla_nest_end(msg, peer); nla_nest_end(msg, peers); nla_nest_end(msg, pmsr); return 0; error: return -ENOSPC; } void cfg80211_pmsr_report(struct wireless_dev *wdev, struct cfg80211_pmsr_request *req, struct cfg80211_pmsr_result *result, gfp_t gfp) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); struct sk_buff *msg; void *hdr; int err; trace_cfg80211_pmsr_report(wdev->wiphy, wdev, req->cookie, result->addr); /* * Currently, only variable items are LCI and civic location, * both of which are reasonably short so we don't need to * worry about them here for the allocation. */ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); if (!msg) return; hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_PEER_MEASUREMENT_RESULT); if (!hdr) goto free; if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev), NL80211_ATTR_PAD)) goto free; if (nla_put_u64_64bit(msg, NL80211_ATTR_COOKIE, req->cookie, NL80211_ATTR_PAD)) goto free; err = nl80211_pmsr_send_result(msg, result); if (err) { pr_err_ratelimited("peer measurement result: message didn't fit!"); goto free; } genlmsg_end(msg, hdr); genlmsg_unicast(wiphy_net(wdev->wiphy), msg, req->nl_portid); return; free: nlmsg_free(msg); } EXPORT_SYMBOL_GPL(cfg80211_pmsr_report); static void cfg80211_pmsr_process_abort(struct wireless_dev *wdev) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); struct cfg80211_pmsr_request *req, *tmp; LIST_HEAD(free_list); lockdep_assert_wiphy(wdev->wiphy); spin_lock_bh(&wdev->pmsr_lock); list_for_each_entry_safe(req, tmp, &wdev->pmsr_list, list) { if (req->nl_portid) continue; list_move_tail(&req->list, &free_list); } spin_unlock_bh(&wdev->pmsr_lock); list_for_each_entry_safe(req, tmp, &free_list, list) { rdev_abort_pmsr(rdev, wdev, req); kfree(req); } } void cfg80211_pmsr_free_wk(struct work_struct *work) { struct wireless_dev *wdev = container_of(work, struct wireless_dev, pmsr_free_wk); guard(wiphy)(wdev->wiphy); cfg80211_pmsr_process_abort(wdev); } void cfg80211_pmsr_wdev_down(struct wireless_dev *wdev) { struct cfg80211_pmsr_request *req; bool found = false; spin_lock_bh(&wdev->pmsr_lock); list_for_each_entry(req, &wdev->pmsr_list, list) { found = true; req->nl_portid = 0; } spin_unlock_bh(&wdev->pmsr_lock); if (found) cfg80211_pmsr_process_abort(wdev); WARN_ON(!list_empty(&wdev->pmsr_list)); } void cfg80211_release_pmsr(struct wireless_dev *wdev, u32 portid) { struct cfg80211_pmsr_request *req; spin_lock_bh(&wdev->pmsr_lock); list_for_each_entry(req, &wdev->pmsr_list, list) { if (req->nl_portid == portid) { req->nl_portid = 0; schedule_work(&wdev->pmsr_free_wk); } } spin_unlock_bh(&wdev->pmsr_lock); }
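/*
 * Editor's sketch (hypothetical driver-side code, not part of pmsr.c):
 * cfg80211_pmsr_report() and cfg80211_pmsr_complete() above are what a
 * low-level driver calls once its ->start_pmsr() request has produced
 * results. The helper name and the measured values below are invented;
 * the structure fields and the two exported calls are taken from the
 * code above.
 */
#include <net/cfg80211.h>

static void foo_drv_report_one_ftm(struct wireless_dev *wdev,
				   struct cfg80211_pmsr_request *req,
				   const u8 *peer_addr)
{
	struct cfg80211_pmsr_result res = {};

	memcpy(res.addr, peer_addr, ETH_ALEN);
	res.host_time = ktime_get_boottime_ns();
	res.type = NL80211_PMSR_TYPE_FTM;
	res.status = NL80211_PMSR_STATUS_SUCCESS;
	res.final = 1;				/* last result for this peer */

	res.ftm.burst_index = 0;
	res.ftm.rtt_avg = 42000;		/* invented example value */
	res.ftm.rtt_avg_valid = 1;

	/* Emits NL80211_CMD_PEER_MEASUREMENT_RESULT to the requester... */
	cfg80211_pmsr_report(wdev, req, &res, GFP_KERNEL);
	/* ...then NL80211_CMD_PEER_MEASUREMENT_COMPLETE, which also drops the request. */
	cfg80211_pmsr_complete(wdev, req, GFP_KERNEL);
}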
// SPDX-License-Identifier: GPL-2.0 /* dvb-usb-urb.c is part of the DVB USB library. * * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@posteo.de) * see dvb-usb-init.c for copyright information. * * This file keeps functions for initializing and handling the * USB and URB stuff. */ #include "dvb-usb-common.h" int dvb_usb_generic_rw(struct dvb_usb_device *d, u8 *wbuf, u16 wlen, u8 *rbuf, u16 rlen, int delay_ms) { int actlen = 0, ret = -ENOMEM; if (!d || wbuf == NULL || wlen == 0) return -EINVAL; if (d->props.generic_bulk_ctrl_endpoint == 0) { err("endpoint for generic control not specified."); return -EINVAL; } if ((ret = mutex_lock_interruptible(&d->usb_mutex))) return ret; deb_xfer(">>> "); debug_dump(wbuf,wlen,deb_xfer); ret = usb_bulk_msg(d->udev,usb_sndbulkpipe(d->udev, d->props.generic_bulk_ctrl_endpoint), wbuf,wlen,&actlen, 2000); if (ret) err("bulk message failed: %d (%d/%d)",ret,wlen,actlen); else ret = actlen != wlen ? -1 : 0; /* an answer is expected, and no error before */ if (!ret && rbuf && rlen) { if (delay_ms) msleep(delay_ms); ret = usb_bulk_msg(d->udev,usb_rcvbulkpipe(d->udev, d->props.generic_bulk_ctrl_endpoint_response ? d->props.generic_bulk_ctrl_endpoint_response : d->props.generic_bulk_ctrl_endpoint),rbuf,rlen,&actlen, 2000); if (ret) err("recv bulk message failed: %d",ret); else { deb_xfer("<<< "); debug_dump(rbuf,actlen,deb_xfer); } } mutex_unlock(&d->usb_mutex); return ret; } EXPORT_SYMBOL(dvb_usb_generic_rw); int dvb_usb_generic_write(struct dvb_usb_device *d, u8 *buf, u16 len) { return dvb_usb_generic_rw(d,buf,len,NULL,0,0); } EXPORT_SYMBOL(dvb_usb_generic_write); static void dvb_usb_data_complete(struct usb_data_stream *stream, u8 *buffer, size_t length) { struct dvb_usb_adapter *adap = stream->user_priv; if (adap->feedcount > 0 && adap->state & DVB_USB_ADAP_STATE_DVB) dvb_dmx_swfilter(&adap->demux, buffer, length); } static void dvb_usb_data_complete_204(struct usb_data_stream *stream, u8 *buffer, size_t length) { struct dvb_usb_adapter *adap = stream->user_priv; if (adap->feedcount > 0 && adap->state & DVB_USB_ADAP_STATE_DVB) dvb_dmx_swfilter_204(&adap->demux, buffer, length); } static void dvb_usb_data_complete_raw(struct usb_data_stream *stream, u8 *buffer, size_t length) { struct dvb_usb_adapter *adap = stream->user_priv; if (adap->feedcount > 0 && adap->state & DVB_USB_ADAP_STATE_DVB) dvb_dmx_swfilter_raw(&adap->demux, buffer, length); } int dvb_usb_adapter_stream_init(struct dvb_usb_adapter *adap) { int i, ret = 0; for (i = 0; i < adap->props.num_frontends; i++) { adap->fe_adap[i].stream.udev = adap->dev->udev; if (adap->props.fe[i].caps & DVB_USB_ADAP_RECEIVES_204_BYTE_TS) adap->fe_adap[i].stream.complete = dvb_usb_data_complete_204; else if (adap->props.fe[i].caps & DVB_USB_ADAP_RECEIVES_RAW_PAYLOAD) adap->fe_adap[i].stream.complete = dvb_usb_data_complete_raw; else adap->fe_adap[i].stream.complete = dvb_usb_data_complete; adap->fe_adap[i].stream.user_priv = adap; ret = usb_urb_init(&adap->fe_adap[i].stream, &adap->props.fe[i].stream); if (ret < 0) break; } return ret; } int dvb_usb_adapter_stream_exit(struct dvb_usb_adapter *adap) { 
int i; for (i = 0; i < adap->props.num_frontends; i++) usb_urb_exit(&adap->fe_adap[i].stream); return 0; }
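/*
 * Editor's sketch (hypothetical, not part of dvb-usb-urb.c): most device
 * drivers wrap dvb_usb_generic_rw() above in small helpers like this one.
 * The 0x02 "read register" opcode and the framing are invented for
 * illustration; only the dvb_usb_generic_rw() signature is taken from the
 * code above.
 */
static int foo_read_reg(struct dvb_usb_device *d, u8 reg, u8 *val)
{
	u8 wbuf[2] = { 0x02 /* hypothetical READ_REG opcode */, reg };
	u8 rbuf[1];
	int ret;

	/* Write the 2-byte command, then read a 1-byte reply, no delay. */
	ret = dvb_usb_generic_rw(d, wbuf, sizeof(wbuf), rbuf, sizeof(rbuf), 0);
	if (ret)
		return ret;

	*val = rbuf[0];
	return 0;
}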
// SPDX-License-Identifier: GPL-2.0-or-later /* * VMA-specific functions. */ #include "vma_internal.h" #include "vma.h" struct mmap_state { struct mm_struct *mm; struct vma_iterator *vmi; unsigned long addr; unsigned long end; pgoff_t pgoff; unsigned long pglen; vm_flags_t vm_flags; struct file *file; pgprot_t page_prot; /* User-defined fields, perhaps updated by .mmap_prepare(). */ const struct vm_operations_struct *vm_ops; void *vm_private_data; unsigned long charged; struct vm_area_struct *prev; struct vm_area_struct *next; /* Unmapping state. */ struct vma_munmap_struct vms; struct ma_state mas_detach; struct maple_tree mt_detach; /* Determine if we can check KSM flags early in mmap() logic. 
*/ bool check_ksm_early :1; /* If we map new, hold the file rmap lock on mapping. */ bool hold_file_rmap_lock :1; }; #define MMAP_STATE(name, mm_, vmi_, addr_, len_, pgoff_, vm_flags_, file_) \ struct mmap_state name = { \ .mm = mm_, \ .vmi = vmi_, \ .addr = addr_, \ .end = (addr_) + (len_), \ .pgoff = pgoff_, \ .pglen = PHYS_PFN(len_), \ .vm_flags = vm_flags_, \ .file = file_, \ .page_prot = vm_get_page_prot(vm_flags_), \ } #define VMG_MMAP_STATE(name, map_, vma_) \ struct vma_merge_struct name = { \ .mm = (map_)->mm, \ .vmi = (map_)->vmi, \ .start = (map_)->addr, \ .end = (map_)->end, \ .vm_flags = (map_)->vm_flags, \ .pgoff = (map_)->pgoff, \ .file = (map_)->file, \ .prev = (map_)->prev, \ .middle = vma_, \ .next = (vma_) ? NULL : (map_)->next, \ .state = VMA_MERGE_START, \ } /* * If, at any point, the VMA had unCoW'd mappings from parents, it will maintain * more than one anon_vma_chain connecting it to more than one anon_vma. A merge * would mean a wider range of folios sharing the root anon_vma lock, and thus * potential lock contention, we do not wish to encourage merging such that this * scales to a problem. */ static bool vma_had_uncowed_parents(struct vm_area_struct *vma) { /* * The list_is_singular() test is to avoid merging VMA cloned from * parents. This can improve scalability caused by anon_vma lock. */ return vma && vma->anon_vma && !list_is_singular(&vma->anon_vma_chain); } static inline bool is_mergeable_vma(struct vma_merge_struct *vmg, bool merge_next) { struct vm_area_struct *vma = merge_next ? vmg->next : vmg->prev; if (!mpol_equal(vmg->policy, vma_policy(vma))) return false; if ((vma->vm_flags ^ vmg->vm_flags) & ~VM_IGNORE_MERGE) return false; if (vma->vm_file != vmg->file) return false; if (!is_mergeable_vm_userfaultfd_ctx(vma, vmg->uffd_ctx)) return false; if (!anon_vma_name_eq(anon_vma_name(vma), vmg->anon_name)) return false; return true; } static bool is_mergeable_anon_vma(struct vma_merge_struct *vmg, bool merge_next) { struct vm_area_struct *tgt = merge_next ? vmg->next : vmg->prev; struct vm_area_struct *src = vmg->middle; /* existing merge case. */ struct anon_vma *tgt_anon = tgt->anon_vma; struct anon_vma *src_anon = vmg->anon_vma; /* * We _can_ have !src, vmg->anon_vma via copy_vma(). In this instance we * will remove the existing VMA's anon_vma's so there's no scalability * concerns. */ VM_WARN_ON(src && src_anon != src->anon_vma); /* Case 1 - we will dup_anon_vma() from src into tgt. */ if (!tgt_anon && src_anon) return !vma_had_uncowed_parents(src); /* Case 2 - we will simply use tgt's anon_vma. */ if (tgt_anon && !src_anon) return !vma_had_uncowed_parents(tgt); /* Case 3 - the anon_vma's are already shared. */ return src_anon == tgt_anon; } /* * init_multi_vma_prep() - Initializer for struct vma_prepare * @vp: The vma_prepare struct * @vma: The vma that will be altered once locked * @vmg: The merge state that will be used to determine adjustment and VMA * removal. 
*/ static void init_multi_vma_prep(struct vma_prepare *vp, struct vm_area_struct *vma, struct vma_merge_struct *vmg) { struct vm_area_struct *adjust; struct vm_area_struct **remove = &vp->remove; memset(vp, 0, sizeof(struct vma_prepare)); vp->vma = vma; vp->anon_vma = vma->anon_vma; if (vmg && vmg->__remove_middle) { *remove = vmg->middle; remove = &vp->remove2; } if (vmg && vmg->__remove_next) *remove = vmg->next; if (vmg && vmg->__adjust_middle_start) adjust = vmg->middle; else if (vmg && vmg->__adjust_next_start) adjust = vmg->next; else adjust = NULL; vp->adj_next = adjust; if (!vp->anon_vma && adjust) vp->anon_vma = adjust->anon_vma; VM_WARN_ON(vp->anon_vma && adjust && adjust->anon_vma && vp->anon_vma != adjust->anon_vma); vp->file = vma->vm_file; if (vp->file) vp->mapping = vma->vm_file->f_mapping; if (vmg && vmg->skip_vma_uprobe) vp->skip_vma_uprobe = true; } /* * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff) * in front of (at a lower virtual address and file offset than) the vma. * * We cannot merge two vmas if they have differently assigned (non-NULL) * anon_vmas, nor if same anon_vma is assigned but offsets incompatible. * * We don't check here for the merged mmap wrapping around the end of pagecache * indices (16TB on ia32) because do_mmap() does not permit mmap's which * wrap, nor mmaps which cover the final page at index -1UL. * * We assume the vma may be removed as part of the merge. */ static bool can_vma_merge_before(struct vma_merge_struct *vmg) { pgoff_t pglen = PHYS_PFN(vmg->end - vmg->start); if (is_mergeable_vma(vmg, /* merge_next = */ true) && is_mergeable_anon_vma(vmg, /* merge_next = */ true)) { if (vmg->next->vm_pgoff == vmg->pgoff + pglen) return true; } return false; } /* * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff) * beyond (at a higher virtual address and file offset than) the vma. * * We cannot merge two vmas if they have differently assigned (non-NULL) * anon_vmas, nor if same anon_vma is assigned but offsets incompatible. * * We assume that vma is not removed as part of the merge. */ static bool can_vma_merge_after(struct vma_merge_struct *vmg) { if (is_mergeable_vma(vmg, /* merge_next = */ false) && is_mergeable_anon_vma(vmg, /* merge_next = */ false)) { if (vmg->prev->vm_pgoff + vma_pages(vmg->prev) == vmg->pgoff) return true; } return false; } static void __vma_link_file(struct vm_area_struct *vma, struct address_space *mapping) { if (vma_is_shared_maywrite(vma)) mapping_allow_writable(mapping); flush_dcache_mmap_lock(mapping); vma_interval_tree_insert(vma, &mapping->i_mmap); flush_dcache_mmap_unlock(mapping); } /* * Requires inode->i_mapping->i_mmap_rwsem */ static void __remove_shared_vm_struct(struct vm_area_struct *vma, struct address_space *mapping) { if (vma_is_shared_maywrite(vma)) mapping_unmap_writable(mapping); flush_dcache_mmap_lock(mapping); vma_interval_tree_remove(vma, &mapping->i_mmap); flush_dcache_mmap_unlock(mapping); } /* * vma has some anon_vma assigned, and is already inserted on that * anon_vma's interval trees. * * Before updating the vma's vm_start / vm_end / vm_pgoff fields, the * vma must be removed from the anon_vma's interval trees using * anon_vma_interval_tree_pre_update_vma(). * * After the update, the vma will be reinserted using * anon_vma_interval_tree_post_update_vma(). * * The entire update must be protected by exclusive mmap_lock and by * the root anon_vma's mutex. 
*/ static void anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma) { struct anon_vma_chain *avc; list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root); } static void anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma) { struct anon_vma_chain *avc; list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root); } /* * vma_prepare() - Helper function for handling locking VMAs prior to altering * @vp: The initialized vma_prepare struct */ static void vma_prepare(struct vma_prepare *vp) { if (vp->file) { uprobe_munmap(vp->vma, vp->vma->vm_start, vp->vma->vm_end); if (vp->adj_next) uprobe_munmap(vp->adj_next, vp->adj_next->vm_start, vp->adj_next->vm_end); i_mmap_lock_write(vp->mapping); if (vp->insert && vp->insert->vm_file) { /* * Put into interval tree now, so instantiated pages * are visible to arm/parisc __flush_dcache_page * throughout; but we cannot insert into address * space until vma start or end is updated. */ __vma_link_file(vp->insert, vp->insert->vm_file->f_mapping); } } if (vp->anon_vma) { anon_vma_lock_write(vp->anon_vma); anon_vma_interval_tree_pre_update_vma(vp->vma); if (vp->adj_next) anon_vma_interval_tree_pre_update_vma(vp->adj_next); } if (vp->file) { flush_dcache_mmap_lock(vp->mapping); vma_interval_tree_remove(vp->vma, &vp->mapping->i_mmap); if (vp->adj_next) vma_interval_tree_remove(vp->adj_next, &vp->mapping->i_mmap); } } /* * vma_complete- Helper function for handling the unlocking after altering VMAs, * or for inserting a VMA. * * @vp: The vma_prepare struct * @vmi: The vma iterator * @mm: The mm_struct */ static void vma_complete(struct vma_prepare *vp, struct vma_iterator *vmi, struct mm_struct *mm) { if (vp->file) { if (vp->adj_next) vma_interval_tree_insert(vp->adj_next, &vp->mapping->i_mmap); vma_interval_tree_insert(vp->vma, &vp->mapping->i_mmap); flush_dcache_mmap_unlock(vp->mapping); } if (vp->remove && vp->file) { __remove_shared_vm_struct(vp->remove, vp->mapping); if (vp->remove2) __remove_shared_vm_struct(vp->remove2, vp->mapping); } else if (vp->insert) { /* * split_vma has split insert from vma, and needs * us to insert it before dropping the locks * (it may either follow vma or precede it). 
*/ vma_iter_store_new(vmi, vp->insert); mm->map_count++; } if (vp->anon_vma) { anon_vma_interval_tree_post_update_vma(vp->vma); if (vp->adj_next) anon_vma_interval_tree_post_update_vma(vp->adj_next); anon_vma_unlock_write(vp->anon_vma); } if (vp->file) { i_mmap_unlock_write(vp->mapping); if (!vp->skip_vma_uprobe) { uprobe_mmap(vp->vma); if (vp->adj_next) uprobe_mmap(vp->adj_next); } } if (vp->remove) { again: vma_mark_detached(vp->remove); if (vp->file) { uprobe_munmap(vp->remove, vp->remove->vm_start, vp->remove->vm_end); fput(vp->file); } if (vp->remove->anon_vma) anon_vma_merge(vp->vma, vp->remove); mm->map_count--; mpol_put(vma_policy(vp->remove)); if (!vp->remove2) WARN_ON_ONCE(vp->vma->vm_end < vp->remove->vm_end); vm_area_free(vp->remove); /* * In mprotect's case 6 (see comments on vma_merge), * we are removing both mid and next vmas */ if (vp->remove2) { vp->remove = vp->remove2; vp->remove2 = NULL; goto again; } } if (vp->insert && vp->file) uprobe_mmap(vp->insert); } /* * init_vma_prep() - Initializer wrapper for vma_prepare struct * @vp: The vma_prepare struct * @vma: The vma that will be altered once locked */ static void init_vma_prep(struct vma_prepare *vp, struct vm_area_struct *vma) { init_multi_vma_prep(vp, vma, NULL); } /* * Can the proposed VMA be merged with the left (previous) VMA taking into * account the start position of the proposed range. */ static bool can_vma_merge_left(struct vma_merge_struct *vmg) { return vmg->prev && vmg->prev->vm_end == vmg->start && can_vma_merge_after(vmg); } /* * Can the proposed VMA be merged with the right (next) VMA taking into * account the end position of the proposed range. * * In addition, if we can merge with the left VMA, ensure that left and right * anon_vma's are also compatible. */ static bool can_vma_merge_right(struct vma_merge_struct *vmg, bool can_merge_left) { struct vm_area_struct *next = vmg->next; struct vm_area_struct *prev; if (!next || vmg->end != next->vm_start || !can_vma_merge_before(vmg)) return false; if (!can_merge_left) return true; /* * If we can merge with prev (left) and next (right), indicating that * each VMA's anon_vma is compatible with the proposed anon_vma, this * does not mean prev and next are compatible with EACH OTHER. * * We therefore check this in addition to mergeability to either side. */ prev = vmg->prev; return !prev->anon_vma || !next->anon_vma || prev->anon_vma == next->anon_vma; } /* * Close a vm structure and free it. */ void remove_vma(struct vm_area_struct *vma) { might_sleep(); vma_close(vma); if (vma->vm_file) fput(vma->vm_file); mpol_put(vma_policy(vma)); vm_area_free(vma); } /* * Get rid of page table information in the indicated region. * * Called with the mm semaphore held. */ void unmap_region(struct ma_state *mas, struct vm_area_struct *vma, struct vm_area_struct *prev, struct vm_area_struct *next) { struct mm_struct *mm = vma->vm_mm; struct mmu_gather tlb; tlb_gather_mmu(&tlb, mm); update_hiwater_rss(mm); unmap_vmas(&tlb, mas, vma, vma->vm_start, vma->vm_end, vma->vm_end); mas_set(mas, vma->vm_end); free_pgtables(&tlb, mas, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS, next ? next->vm_start : USER_PGTABLES_CEILING, /* mm_wr_locked = */ true); tlb_finish_mmu(&tlb); } /* * __split_vma() bypasses sysctl_max_map_count checking. We use this where it * has already been checked or doesn't make sense to fail. * VMA Iterator will point to the original VMA. 
*/ static __must_check int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma, unsigned long addr, int new_below) { struct vma_prepare vp; struct vm_area_struct *new; int err; WARN_ON(vma->vm_start >= addr); WARN_ON(vma->vm_end <= addr); if (vma->vm_ops && vma->vm_ops->may_split) { err = vma->vm_ops->may_split(vma, addr); if (err) return err; } new = vm_area_dup(vma); if (!new) return -ENOMEM; if (new_below) { new->vm_end = addr; } else { new->vm_start = addr; new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT); } err = -ENOMEM; vma_iter_config(vmi, new->vm_start, new->vm_end); if (vma_iter_prealloc(vmi, new)) goto out_free_vma; err = vma_dup_policy(vma, new); if (err) goto out_free_vmi; err = anon_vma_clone(new, vma); if (err) goto out_free_mpol; if (new->vm_file) get_file(new->vm_file); if (new->vm_ops && new->vm_ops->open) new->vm_ops->open(new); vma_start_write(vma); vma_start_write(new); init_vma_prep(&vp, vma); vp.insert = new; vma_prepare(&vp); /* * Get rid of huge pages and shared page tables straddling the split * boundary. */ vma_adjust_trans_huge(vma, vma->vm_start, addr, NULL); if (is_vm_hugetlb_page(vma)) hugetlb_split(vma, addr); if (new_below) { vma->vm_start = addr; vma->vm_pgoff += (addr - new->vm_start) >> PAGE_SHIFT; } else { vma->vm_end = addr; } /* vma_complete stores the new vma */ vma_complete(&vp, vmi, vma->vm_mm); validate_mm(vma->vm_mm); /* Success. */ if (new_below) vma_next(vmi); else vma_prev(vmi); return 0; out_free_mpol: mpol_put(vma_policy(new)); out_free_vmi: vma_iter_free(vmi); out_free_vma: vm_area_free(new); return err; } /* * Split a vma into two pieces at address 'addr', a new vma is allocated * either for the first part or the tail. */ static int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma, unsigned long addr, int new_below) { if (vma->vm_mm->map_count >= sysctl_max_map_count) return -ENOMEM; return __split_vma(vmi, vma, addr, new_below); } /* * dup_anon_vma() - Helper function to duplicate anon_vma on VMA merge in the * instance that the destination VMA has no anon_vma but the source does. * * @dst: The destination VMA * @src: The source VMA * @dup: Pointer to the destination VMA when successful. * * Returns: 0 on success. */ static int dup_anon_vma(struct vm_area_struct *dst, struct vm_area_struct *src, struct vm_area_struct **dup) { /* * There are three cases to consider for correctly propagating * anon_vma's on merge. * * The first is trivial - neither VMA has anon_vma, we need not do * anything. * * The second where both have anon_vma is also a no-op, as they must * then be the same, so there is simply nothing to copy. * * Here we cover the third - if the destination VMA has no anon_vma, * that is it is unfaulted, we need to ensure that the newly merged * range is referenced by the anon_vma's of the source. 
*/ if (src->anon_vma && !dst->anon_vma) { int ret; vma_assert_write_locked(dst); dst->anon_vma = src->anon_vma; ret = anon_vma_clone(dst, src); if (ret) return ret; *dup = dst; } return 0; } #ifdef CONFIG_DEBUG_VM_MAPLE_TREE void validate_mm(struct mm_struct *mm) { int bug = 0; int i = 0; struct vm_area_struct *vma; VMA_ITERATOR(vmi, mm, 0); mt_validate(&mm->mm_mt); for_each_vma(vmi, vma) { #ifdef CONFIG_DEBUG_VM_RB struct anon_vma *anon_vma = vma->anon_vma; struct anon_vma_chain *avc; #endif unsigned long vmi_start, vmi_end; bool warn = 0; vmi_start = vma_iter_addr(&vmi); vmi_end = vma_iter_end(&vmi); if (VM_WARN_ON_ONCE_MM(vma->vm_end != vmi_end, mm)) warn = 1; if (VM_WARN_ON_ONCE_MM(vma->vm_start != vmi_start, mm)) warn = 1; if (warn) { pr_emerg("issue in %s\n", current->comm); dump_stack(); dump_vma(vma); pr_emerg("tree range: %px start %lx end %lx\n", vma, vmi_start, vmi_end - 1); vma_iter_dump_tree(&vmi); } #ifdef CONFIG_DEBUG_VM_RB if (anon_vma) { anon_vma_lock_read(anon_vma); list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) anon_vma_interval_tree_verify(avc); anon_vma_unlock_read(anon_vma); } #endif /* Check for a infinite loop */ if (++i > mm->map_count + 10) { i = -1; break; } } if (i != mm->map_count) { pr_emerg("map_count %d vma iterator %d\n", mm->map_count, i); bug = 1; } VM_BUG_ON_MM(bug, mm); } #endif /* CONFIG_DEBUG_VM_MAPLE_TREE */ /* * Based on the vmg flag indicating whether we need to adjust the vm_start field * for the middle or next VMA, we calculate what the range of the newly adjusted * VMA ought to be, and set the VMA's range accordingly. */ static void vmg_adjust_set_range(struct vma_merge_struct *vmg) { struct vm_area_struct *adjust; pgoff_t pgoff; if (vmg->__adjust_middle_start) { adjust = vmg->middle; pgoff = adjust->vm_pgoff + PHYS_PFN(vmg->end - adjust->vm_start); } else if (vmg->__adjust_next_start) { adjust = vmg->next; pgoff = adjust->vm_pgoff - PHYS_PFN(adjust->vm_start - vmg->end); } else { return; } vma_set_range(adjust, vmg->end, adjust->vm_end, pgoff); } /* * Actually perform the VMA merge operation. * * IMPORTANT: We guarantee that, should vmg->give_up_on_oom is set, to not * modify any VMAs or cause inconsistent state should an OOM condition arise. * * Returns 0 on success, or an error value on failure. */ static int commit_merge(struct vma_merge_struct *vmg) { struct vm_area_struct *vma; struct vma_prepare vp; if (vmg->__adjust_next_start) { /* We manipulate middle and adjust next, which is the target. */ vma = vmg->middle; vma_iter_config(vmg->vmi, vmg->end, vmg->next->vm_end); } else { vma = vmg->target; /* Note: vma iterator must be pointing to 'start'. */ vma_iter_config(vmg->vmi, vmg->start, vmg->end); } init_multi_vma_prep(&vp, vma, vmg); /* * If vmg->give_up_on_oom is set, we're safe, because we don't actually * manipulate any VMAs until we succeed at preallocation. * * Past this point, we will not return an error. */ if (vma_iter_prealloc(vmg->vmi, vma)) return -ENOMEM; vma_prepare(&vp); /* * THP pages may need to do additional splits if we increase * middle->vm_start. */ vma_adjust_trans_huge(vma, vmg->start, vmg->end, vmg->__adjust_middle_start ? vmg->middle : NULL); vma_set_range(vma, vmg->start, vmg->end, vmg->pgoff); vmg_adjust_set_range(vmg); vma_iter_store_overwrite(vmg->vmi, vmg->target); vma_complete(&vp, vmg->vmi, vma->vm_mm); return 0; } /* We can only remove VMAs when merging if they do not have a close hook. 
*/ static bool can_merge_remove_vma(struct vm_area_struct *vma) { return !vma->vm_ops || !vma->vm_ops->close; } /* * vma_merge_existing_range - Attempt to merge VMAs based on a VMA having its * attributes modified. * * @vmg: Describes the modifications being made to a VMA and associated * metadata. * * When the attributes of a range within a VMA change, then it might be possible * for immediately adjacent VMAs to be merged into that VMA due to having * identical properties. * * This function checks for the existence of any such mergeable VMAs and updates * the maple tree describing the @vmg->middle->vm_mm address space to account * for this, as well as any VMAs shrunk/expanded/deleted as a result of this * merge. * * As part of this operation, if a merge occurs, the @vmg object will have its * vma, start, end, and pgoff fields modified to execute the merge. Subsequent * calls to this function should reset these fields. * * Returns: The merged VMA if merge succeeds, or NULL otherwise. * * ASSUMPTIONS: * - The caller must assign the VMA to be modified to @vmg->middle. * - The caller must have set @vmg->prev to the previous VMA, if there is one. * - The caller must not set @vmg->next, as we determine this. * - The caller must hold a WRITE lock on the mm_struct->mmap_lock. * - vmi must be positioned within [@vmg->middle->vm_start, @vmg->middle->vm_end). */ static __must_check struct vm_area_struct *vma_merge_existing_range( struct vma_merge_struct *vmg) { vm_flags_t sticky_flags = vmg->vm_flags & VM_STICKY; struct vm_area_struct *middle = vmg->middle; struct vm_area_struct *prev = vmg->prev; struct vm_area_struct *next; struct vm_area_struct *anon_dup = NULL; unsigned long start = vmg->start; unsigned long end = vmg->end; bool left_side = middle && start == middle->vm_start; bool right_side = middle && end == middle->vm_end; int err = 0; bool merge_left, merge_right, merge_both; mmap_assert_write_locked(vmg->mm); VM_WARN_ON_VMG(!middle, vmg); /* We are modifying a VMA, so caller must specify. */ VM_WARN_ON_VMG(vmg->next, vmg); /* We set this. */ VM_WARN_ON_VMG(prev && start <= prev->vm_start, vmg); VM_WARN_ON_VMG(start >= end, vmg); /* * If middle == prev, then we are offset into a VMA. Otherwise, if we are * not, we must span a portion of the VMA. */ VM_WARN_ON_VMG(middle && ((middle != prev && vmg->start != middle->vm_start) || vmg->end > middle->vm_end), vmg); /* The vmi must be positioned within vmg->middle. */ VM_WARN_ON_VMG(middle && !(vma_iter_addr(vmg->vmi) >= middle->vm_start && vma_iter_addr(vmg->vmi) < middle->vm_end), vmg); vmg->state = VMA_MERGE_NOMERGE; /* * If a special mapping or if the range being modified is neither at the * furthermost left or right side of the VMA, then we have no chance of * merging and should abort. */ if (vmg->vm_flags & VM_SPECIAL || (!left_side && !right_side)) return NULL; if (left_side) merge_left = can_vma_merge_left(vmg); else merge_left = false; if (right_side) { next = vmg->next = vma_iter_next_range(vmg->vmi); vma_iter_prev_range(vmg->vmi); merge_right = can_vma_merge_right(vmg, merge_left); } else { merge_right = false; next = NULL; } if (merge_left) /* If merging prev, position iterator there. */ vma_prev(vmg->vmi); else if (!merge_right) /* If we have nothing to merge, abort. */ return NULL; merge_both = merge_left && merge_right; /* If we span the entire VMA, a merge implies it will be deleted. 
*/ vmg->__remove_middle = left_side && right_side; /* * If we need to remove middle in its entirety but are unable to do so, * we have no sensible recourse but to abort the merge. */ if (vmg->__remove_middle && !can_merge_remove_vma(middle)) return NULL; /* * If we merge both VMAs, then next is also deleted. This implies * merge_will_delete_vma also. */ vmg->__remove_next = merge_both; /* * If we cannot delete next, then we can reduce the operation to merging * prev and middle (thereby deleting middle). */ if (vmg->__remove_next && !can_merge_remove_vma(next)) { vmg->__remove_next = false; merge_right = false; merge_both = false; } /* No matter what happens, we will be adjusting middle. */ vma_start_write(middle); if (merge_right) { vma_start_write(next); vmg->target = next; sticky_flags |= (next->vm_flags & VM_STICKY); } if (merge_left) { vma_start_write(prev); vmg->target = prev; sticky_flags |= (prev->vm_flags & VM_STICKY); } if (merge_both) { /* * |<-------------------->| * |-------********-------| * prev middle next * extend delete delete */ vmg->start = prev->vm_start; vmg->end = next->vm_end; vmg->pgoff = prev->vm_pgoff; /* * We already ensured anon_vma compatibility above, so now it's * simply a case of, if prev has no anon_vma object, which of * next or middle contains the anon_vma we must duplicate. */ err = dup_anon_vma(prev, next->anon_vma ? next : middle, &anon_dup); } else if (merge_left) { /* * |<------------>| OR * |<----------------->| * |-------************* * prev middle * extend shrink/delete */ vmg->start = prev->vm_start; vmg->pgoff = prev->vm_pgoff; if (!vmg->__remove_middle) vmg->__adjust_middle_start = true; err = dup_anon_vma(prev, middle, &anon_dup); } else { /* merge_right */ /* * |<------------->| OR * |<----------------->| * *************-------| * middle next * shrink/delete extend */ pgoff_t pglen = PHYS_PFN(vmg->end - vmg->start); VM_WARN_ON_VMG(!merge_right, vmg); /* If we are offset into a VMA, then prev must be middle. */ VM_WARN_ON_VMG(vmg->start > middle->vm_start && prev && middle != prev, vmg); if (vmg->__remove_middle) { vmg->end = next->vm_end; vmg->pgoff = next->vm_pgoff - pglen; } else { /* We shrink middle and expand next. */ vmg->__adjust_next_start = true; vmg->start = middle->vm_start; vmg->end = start; vmg->pgoff = middle->vm_pgoff; } err = dup_anon_vma(next, middle, &anon_dup); } if (err || commit_merge(vmg)) goto abort; vm_flags_set(vmg->target, sticky_flags); khugepaged_enter_vma(vmg->target, vmg->vm_flags); vmg->state = VMA_MERGE_SUCCESS; return vmg->target; abort: vma_iter_set(vmg->vmi, start); vma_iter_load(vmg->vmi); if (anon_dup) unlink_anon_vmas(anon_dup); /* * This means we have failed to clone anon_vma's correctly, but no * actual changes to VMAs have occurred, so no harm no foul - if the * user doesn't want this reported and instead just wants to give up on * the merge, allow it. */ if (!vmg->give_up_on_oom) vmg->state = VMA_MERGE_ERROR_NOMEM; return NULL; } /* * vma_merge_new_range - Attempt to merge a new VMA into address space * * @vmg: Describes the VMA we are adding, in the range @vmg->start to @vmg->end * (exclusive), which we try to merge with any adjacent VMAs if possible. * * We are about to add a VMA to the address space starting at @vmg->start and * ending at @vmg->end. There are three different possible scenarios: * * 1. 
There is a VMA with identical properties immediately adjacent to the * proposed new VMA [@vmg->start, @vmg->end) either before or after it - * EXPAND that VMA: * * Proposed: |-----| or |-----| * Existing: |----| |----| * * 2. There are VMAs with identical properties immediately adjacent to the * proposed new VMA [@vmg->start, @vmg->end) both before AND after it - * EXPAND the former and REMOVE the latter: * * Proposed: |-----| * Existing: |----| |----| * * 3. There are no VMAs immediately adjacent to the proposed new VMA or those * VMAs do not have identical attributes - NO MERGE POSSIBLE. * * In instances where we can merge, this function returns the expanded VMA which * will have its range adjusted accordingly and the underlying maple tree also * adjusted. * * Returns: In instances where no merge was possible, NULL. Otherwise, a pointer * to the VMA we expanded. * * This function adjusts @vmg to provide @vmg->next if not already specified, * and adjusts [@vmg->start, @vmg->end) to span the expanded range. * * ASSUMPTIONS: * - The caller must hold a WRITE lock on the mm_struct->mmap_lock. * - The caller must have determined that [@vmg->start, @vmg->end) is empty, other than VMAs that will be unmapped should the operation succeed. * - The caller must have specified the previous vma in @vmg->prev. * - The caller must have specified the next vma in @vmg->next. * - The caller must have positioned the vmi at or before the gap. */ struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg) { struct vm_area_struct *prev = vmg->prev; struct vm_area_struct *next = vmg->next; unsigned long end = vmg->end; bool can_merge_left, can_merge_right; mmap_assert_write_locked(vmg->mm); VM_WARN_ON_VMG(vmg->middle, vmg); VM_WARN_ON_VMG(vmg->target, vmg); /* vmi must point at or before the gap. */ VM_WARN_ON_VMG(vma_iter_addr(vmg->vmi) > end, vmg); vmg->state = VMA_MERGE_NOMERGE; /* Special VMAs are unmergeable, also if no prev/next. */ if ((vmg->vm_flags & VM_SPECIAL) || (!prev && !next)) return NULL; can_merge_left = can_vma_merge_left(vmg); can_merge_right = !vmg->just_expand && can_vma_merge_right(vmg, can_merge_left); /* If we can merge with the next VMA, adjust vmg accordingly. */ if (can_merge_right) { vmg->end = next->vm_end; vmg->target = next; } /* If we can merge with the previous VMA, adjust vmg accordingly. */ if (can_merge_left) { vmg->start = prev->vm_start; vmg->target = prev; vmg->pgoff = prev->vm_pgoff; /* * If this merge would result in removal of the next VMA but we * are not permitted to do so, reduce the operation to merging * prev and vma. */ if (can_merge_right && !can_merge_remove_vma(next)) vmg->end = end; /* In expand-only case we are already positioned at prev. */ if (!vmg->just_expand) { /* Equivalent to going to the previous range. */ vma_prev(vmg->vmi); } } /* * Now try to expand adjacent VMA(s). This takes care of removing the * following VMA if we have VMAs on both sides. */ if (vmg->target && !vma_expand(vmg)) { khugepaged_enter_vma(vmg->target, vmg->vm_flags); vmg->state = VMA_MERGE_SUCCESS; return vmg->target; } return NULL; } /* * vma_expand - Expand an existing VMA * * @vmg: Describes a VMA expansion operation. * * Expand @vma to vmg->start and vmg->end. Can expand off the start and end. * Will expand over vmg->next if it's different from vmg->target and vmg->end == * vmg->next->vm_end. Checking if the vmg->target can expand and merge with * vmg->next needs to be handled by the caller. * * Returns: 0 on success. 
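 *
 * Illustrative call pattern (a sketch modelled on vma_merge_new_range()
 * above, not a separate API):
 *
 *	if (vmg->target && !vma_expand(vmg)) {
 *		khugepaged_enter_vma(vmg->target, vmg->vm_flags);
 *		vmg->state = VMA_MERGE_SUCCESS;
 *		return vmg->target;
 *	}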
* * ASSUMPTIONS: * - The caller must hold a WRITE lock on the mm_struct->mmap_lock. * - The caller must have set @vmg->target and @vmg->next. */ int vma_expand(struct vma_merge_struct *vmg) { struct vm_area_struct *anon_dup = NULL; bool remove_next = false; struct vm_area_struct *target = vmg->target; struct vm_area_struct *next = vmg->next; vm_flags_t sticky_flags; sticky_flags = vmg->vm_flags & VM_STICKY; sticky_flags |= target->vm_flags & VM_STICKY; VM_WARN_ON_VMG(!target, vmg); mmap_assert_write_locked(vmg->mm); vma_start_write(target); if (next && (target != next) && (vmg->end == next->vm_end)) { int ret; sticky_flags |= next->vm_flags & VM_STICKY; remove_next = true; /* This should already have been checked by this point. */ VM_WARN_ON_VMG(!can_merge_remove_vma(next), vmg); vma_start_write(next); /* * In this case we don't report OOM, so vmg->give_up_on_oom is * safe. */ ret = dup_anon_vma(target, next, &anon_dup); if (ret) return ret; } /* Not merging but overwriting any part of next is not handled. */ VM_WARN_ON_VMG(next && !remove_next && next != target && vmg->end > next->vm_start, vmg); /* Only handles expanding */ VM_WARN_ON_VMG(target->vm_start < vmg->start || target->vm_end > vmg->end, vmg); if (remove_next) vmg->__remove_next = true; if (commit_merge(vmg)) goto nomem; vm_flags_set(target, sticky_flags); return 0; nomem: if (anon_dup) unlink_anon_vmas(anon_dup); /* * If the user requests that we just give up on OOM, we are safe to do so * here, as commit_merge() provides this contract to us. Nothing has been * changed - no harm no foul, just don't report it. */ if (!vmg->give_up_on_oom) vmg->state = VMA_MERGE_ERROR_NOMEM; return -ENOMEM; } /* * vma_shrink() - Reduce an existing VMA's memory area * @vmi: The vma iterator * @vma: The VMA to modify * @start: The new start * @end: The new end * @pgoff: The new page offset * * Returns: 0 on success, -ENOMEM otherwise */ int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma, unsigned long start, unsigned long end, pgoff_t pgoff) { struct vma_prepare vp; WARN_ON((vma->vm_start != start) && (vma->vm_end != end)); if (vma->vm_start < start) vma_iter_config(vmi, vma->vm_start, start); else vma_iter_config(vmi, end, vma->vm_end); if (vma_iter_prealloc(vmi, NULL)) return -ENOMEM; vma_start_write(vma); init_vma_prep(&vp, vma); vma_prepare(&vp); vma_adjust_trans_huge(vma, start, end, NULL); vma_iter_clear(vmi); vma_set_range(vma, start, end, pgoff); vma_complete(&vp, vmi, vma->vm_mm); validate_mm(vma->vm_mm); return 0; } static inline void vms_clear_ptes(struct vma_munmap_struct *vms, struct ma_state *mas_detach, bool mm_wr_locked) { struct mmu_gather tlb; if (!vms->clear_ptes) /* Nothing to do */ return; /* * We can free page tables without write-locking mmap_lock because VMAs * were isolated before we downgraded mmap_lock. */ mas_set(mas_detach, 1); tlb_gather_mmu(&tlb, vms->vma->vm_mm); update_hiwater_rss(vms->vma->vm_mm); unmap_vmas(&tlb, mas_detach, vms->vma, vms->start, vms->end, vms->vma_count); mas_set(mas_detach, 1); /* start and end may be different if there is no prev or next vma.
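 * In that case vms->unmap_start / vms->unmap_end keep the defaults assigned by
 * init_vma_munmap() (FIRST_USER_ADDRESS and USER_PGTABLES_CEILING), so the
 * page table free below covers the whole neighbouring gap.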
*/ free_pgtables(&tlb, mas_detach, vms->vma, vms->unmap_start, vms->unmap_end, mm_wr_locked); tlb_finish_mmu(&tlb); vms->clear_ptes = false; } static void vms_clean_up_area(struct vma_munmap_struct *vms, struct ma_state *mas_detach) { struct vm_area_struct *vma; if (!vms->nr_pages) return; vms_clear_ptes(vms, mas_detach, true); mas_set(mas_detach, 0); mas_for_each(mas_detach, vma, ULONG_MAX) vma_close(vma); } /* * vms_complete_munmap_vmas() - Finish the munmap() operation * @vms: The vma munmap struct * @mas_detach: The maple state of the detached vmas * * This updates the mm_struct, unmaps the region, frees the resources * used for the munmap() and may downgrade the lock - if requested. Everything * needed to be done once the vma maple tree is updated. */ static void vms_complete_munmap_vmas(struct vma_munmap_struct *vms, struct ma_state *mas_detach) { struct vm_area_struct *vma; struct mm_struct *mm; mm = current->mm; mm->map_count -= vms->vma_count; mm->locked_vm -= vms->locked_vm; if (vms->unlock) mmap_write_downgrade(mm); if (!vms->nr_pages) return; vms_clear_ptes(vms, mas_detach, !vms->unlock); /* Update high watermark before we lower total_vm */ update_hiwater_vm(mm); /* Stat accounting */ WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm) - vms->nr_pages); /* Paranoid bookkeeping */ VM_WARN_ON(vms->exec_vm > mm->exec_vm); VM_WARN_ON(vms->stack_vm > mm->stack_vm); VM_WARN_ON(vms->data_vm > mm->data_vm); mm->exec_vm -= vms->exec_vm; mm->stack_vm -= vms->stack_vm; mm->data_vm -= vms->data_vm; /* Remove and clean up vmas */ mas_set(mas_detach, 0); mas_for_each(mas_detach, vma, ULONG_MAX) remove_vma(vma); vm_unacct_memory(vms->nr_accounted); validate_mm(mm); if (vms->unlock) mmap_read_unlock(mm); __mt_destroy(mas_detach->tree); } /* * reattach_vmas() - Undo any munmap work and free resources * @mas_detach: The maple state with the detached maple tree * * Reattach any detached vmas and free up the maple tree used to track the vmas. */ static void reattach_vmas(struct ma_state *mas_detach) { struct vm_area_struct *vma; mas_set(mas_detach, 0); mas_for_each(mas_detach, vma, ULONG_MAX) vma_mark_attached(vma); __mt_destroy(mas_detach->tree); } /* * vms_gather_munmap_vmas() - Put all VMAs within a range into a maple tree * for removal at a later date. Handles splitting first and last if necessary * and marking the vmas as isolated. * * @vms: The vma munmap struct * @mas_detach: The maple state tracking the detached tree * * Return: 0 on success, error otherwise */ static int vms_gather_munmap_vmas(struct vma_munmap_struct *vms, struct ma_state *mas_detach) { struct vm_area_struct *next = NULL; int error; /* * If we need to split any vma, do it now to save pain later. * Does it split the first one? */ if (vms->start > vms->vma->vm_start) { /* * Make sure that map_count on return from munmap() will * not exceed its limit; but let map_count go just above * its limit temporarily, to help free resources as expected. */ if (vms->end < vms->vma->vm_end && vms->vma->vm_mm->map_count >= sysctl_max_map_count) { error = -ENOMEM; goto map_count_exceeded; } /* Don't bother splitting the VMA if we can't unmap it anyway */ if (vma_is_sealed(vms->vma)) { error = -EPERM; goto start_split_failed; } error = __split_vma(vms->vmi, vms->vma, vms->start, 1); if (error) goto start_split_failed; } vms->prev = vma_prev(vms->vmi); if (vms->prev) vms->unmap_start = vms->prev->vm_end; /* * Detach a range of VMAs from the mm. Using next as a temp variable as * it is always overwritten. 
*/ for_each_vma_range(*(vms->vmi), next, vms->end) { long nrpages; if (vma_is_sealed(next)) { error = -EPERM; goto modify_vma_failed; } /* Does it split the end? */ if (next->vm_end > vms->end) { error = __split_vma(vms->vmi, next, vms->end, 0); if (error) goto end_split_failed; } vma_start_write(next); mas_set(mas_detach, vms->vma_count++); error = mas_store_gfp(mas_detach, next, GFP_KERNEL); if (error) goto munmap_gather_failed; vma_mark_detached(next); nrpages = vma_pages(next); vms->nr_pages += nrpages; if (next->vm_flags & VM_LOCKED) vms->locked_vm += nrpages; if (next->vm_flags & VM_ACCOUNT) vms->nr_accounted += nrpages; if (is_exec_mapping(next->vm_flags)) vms->exec_vm += nrpages; else if (is_stack_mapping(next->vm_flags)) vms->stack_vm += nrpages; else if (is_data_mapping(next->vm_flags)) vms->data_vm += nrpages; if (vms->uf) { /* * If userfaultfd_unmap_prep returns an error the vmas * will remain split, but userland will get a * highly unexpected error anyway. This is no * different than the case where the first of the two * __split_vma fails, but we don't undo the first * split, despite we could. This is unlikely enough * failure that it's not worth optimizing it for. */ error = userfaultfd_unmap_prep(next, vms->start, vms->end, vms->uf); if (error) goto userfaultfd_error; } #ifdef CONFIG_DEBUG_VM_MAPLE_TREE BUG_ON(next->vm_start < vms->start); BUG_ON(next->vm_start > vms->end); #endif } vms->next = vma_next(vms->vmi); if (vms->next) vms->unmap_end = vms->next->vm_start; #if defined(CONFIG_DEBUG_VM_MAPLE_TREE) /* Make sure no VMAs are about to be lost. */ { MA_STATE(test, mas_detach->tree, 0, 0); struct vm_area_struct *vma_mas, *vma_test; int test_count = 0; vma_iter_set(vms->vmi, vms->start); rcu_read_lock(); vma_test = mas_find(&test, vms->vma_count - 1); for_each_vma_range(*(vms->vmi), vma_mas, vms->end) { BUG_ON(vma_mas != vma_test); test_count++; vma_test = mas_next(&test, vms->vma_count - 1); } rcu_read_unlock(); BUG_ON(vms->vma_count != test_count); } #endif while (vma_iter_addr(vms->vmi) > vms->start) vma_iter_prev_range(vms->vmi); vms->clear_ptes = true; return 0; userfaultfd_error: munmap_gather_failed: end_split_failed: modify_vma_failed: reattach_vmas(mas_detach); start_split_failed: map_count_exceeded: return error; } /* * init_vma_munmap() - Initializer wrapper for vma_munmap_struct * @vms: The vma munmap struct * @vmi: The vma iterator * @vma: The first vm_area_struct to munmap * @start: The aligned start address to munmap * @end: The aligned end address to munmap * @uf: The userfaultfd list_head * @unlock: Unlock after the operation. Only unlocked on success */ static void init_vma_munmap(struct vma_munmap_struct *vms, struct vma_iterator *vmi, struct vm_area_struct *vma, unsigned long start, unsigned long end, struct list_head *uf, bool unlock) { vms->vmi = vmi; vms->vma = vma; if (vma) { vms->start = start; vms->end = end; } else { vms->start = vms->end = 0; } vms->unlock = unlock; vms->uf = uf; vms->vma_count = 0; vms->nr_pages = vms->locked_vm = vms->nr_accounted = 0; vms->exec_vm = vms->stack_vm = vms->data_vm = 0; vms->unmap_start = FIRST_USER_ADDRESS; vms->unmap_end = USER_PGTABLES_CEILING; vms->clear_ptes = false; } /* * do_vmi_align_munmap() - munmap the aligned region from @start to @end. * @vmi: The vma iterator * @vma: The starting vm_area_struct * @mm: The mm_struct * @start: The aligned start address to munmap. * @end: The aligned end address to munmap. * @uf: The userfaultfd list_head * @unlock: Set to true to drop the mmap_lock. 
unlocking only happens on * success. * * Return: 0 on success and drops the lock if so directed, error and leaves the * lock held otherwise. */ int do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma, struct mm_struct *mm, unsigned long start, unsigned long end, struct list_head *uf, bool unlock) { struct maple_tree mt_detach; MA_STATE(mas_detach, &mt_detach, 0, 0); mt_init_flags(&mt_detach, vmi->mas.tree->ma_flags & MT_FLAGS_LOCK_MASK); mt_on_stack(mt_detach); struct vma_munmap_struct vms; int error; init_vma_munmap(&vms, vmi, vma, start, end, uf, unlock); error = vms_gather_munmap_vmas(&vms, &mas_detach); if (error) goto gather_failed; error = vma_iter_clear_gfp(vmi, start, end, GFP_KERNEL); if (error) goto clear_tree_failed; /* Point of no return */ vms_complete_munmap_vmas(&vms, &mas_detach); return 0; clear_tree_failed: reattach_vmas(&mas_detach); gather_failed: validate_mm(mm); return error; } /* * do_vmi_munmap() - munmap a given range. * @vmi: The vma iterator * @mm: The mm_struct * @start: The start address to munmap * @len: The length of the range to munmap * @uf: The userfaultfd list_head * @unlock: set to true if the user wants to drop the mmap_lock on success * * This function takes a @mas that is either pointing to the previous VMA or set * to MA_START and sets it up to remove the mapping(s). The @len will be * aligned. * * Return: 0 on success and drops the lock if so directed, error and leaves the * lock held otherwise. */ int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm, unsigned long start, size_t len, struct list_head *uf, bool unlock) { unsigned long end; struct vm_area_struct *vma; if ((offset_in_page(start)) || start > TASK_SIZE || len > TASK_SIZE-start) return -EINVAL; end = start + PAGE_ALIGN(len); if (end == start) return -EINVAL; /* Find the first overlapping VMA */ vma = vma_find(vmi, end); if (!vma) { if (unlock) mmap_write_unlock(mm); return 0; } return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock); } /* * We are about to modify one or multiple of a VMA's flags, policy, userfaultfd * context and anonymous VMA name within the range [start, end). * * As a result, we might be able to merge the newly modified VMA range with an * adjacent VMA with identical properties. * * If no merge is possible and the range does not span the entirety of the VMA, * we then need to split the VMA to accommodate the change. * * The function returns either the merged VMA, the original VMA if a split was * required instead, or an error if the split failed. */ static struct vm_area_struct *vma_modify(struct vma_merge_struct *vmg) { struct vm_area_struct *vma = vmg->middle; unsigned long start = vmg->start; unsigned long end = vmg->end; struct vm_area_struct *merged; /* First, try to merge. */ merged = vma_merge_existing_range(vmg); if (merged) return merged; if (vmg_nomem(vmg)) return ERR_PTR(-ENOMEM); /* * Split can fail for reasons other than OOM, so if the user requests * this it's probably a mistake. */ VM_WARN_ON(vmg->give_up_on_oom && (vma->vm_start != start || vma->vm_end != end)); /* Split any preceding portion of the VMA. */ if (vma->vm_start < start) { int err = split_vma(vmg->vmi, vma, start, 1); if (err) return ERR_PTR(err); } /* Split any trailing portion of the VMA. 
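 * E.g. modifying only the middle page of a three-page VMA requires both the
 * preceding and the trailing split, leaving three separate VMAs behind.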
*/ if (vma->vm_end > end) { int err = split_vma(vmg->vmi, vma, end, 0); if (err) return ERR_PTR(err); } return vma; } struct vm_area_struct *vma_modify_flags(struct vma_iterator *vmi, struct vm_area_struct *prev, struct vm_area_struct *vma, unsigned long start, unsigned long end, vm_flags_t *vm_flags_ptr) { VMG_VMA_STATE(vmg, vmi, prev, vma, start, end); const vm_flags_t vm_flags = *vm_flags_ptr; struct vm_area_struct *ret; vmg.vm_flags = vm_flags; ret = vma_modify(&vmg); if (IS_ERR(ret)) return ret; /* * For a merge to succeed, the flags must match those * requested. However, sticky flags may have been retained, so propagate * them to the caller. */ if (vmg.state == VMA_MERGE_SUCCESS) *vm_flags_ptr = ret->vm_flags; return ret; } struct vm_area_struct *vma_modify_name(struct vma_iterator *vmi, struct vm_area_struct *prev, struct vm_area_struct *vma, unsigned long start, unsigned long end, struct anon_vma_name *new_name) { VMG_VMA_STATE(vmg, vmi, prev, vma, start, end); vmg.anon_name = new_name; return vma_modify(&vmg); } struct vm_area_struct *vma_modify_policy(struct vma_iterator *vmi, struct vm_area_struct *prev, struct vm_area_struct *vma, unsigned long start, unsigned long end, struct mempolicy *new_pol) { VMG_VMA_STATE(vmg, vmi, prev, vma, start, end); vmg.policy = new_pol; return vma_modify(&vmg); } struct vm_area_struct *vma_modify_flags_uffd(struct vma_iterator *vmi, struct vm_area_struct *prev, struct vm_area_struct *vma, unsigned long start, unsigned long end, vm_flags_t vm_flags, struct vm_userfaultfd_ctx new_ctx, bool give_up_on_oom) { VMG_VMA_STATE(vmg, vmi, prev, vma, start, end); vmg.vm_flags = vm_flags; vmg.uffd_ctx = new_ctx; if (give_up_on_oom) vmg.give_up_on_oom = true; return vma_modify(&vmg); } /* * Expand vma by delta bytes, potentially merging with an immediately adjacent * VMA with identical properties. */ struct vm_area_struct *vma_merge_extend(struct vma_iterator *vmi, struct vm_area_struct *vma, unsigned long delta) { VMG_VMA_STATE(vmg, vmi, vma, vma, vma->vm_end, vma->vm_end + delta); vmg.next = vma_iter_next_rewind(vmi, NULL); vmg.middle = NULL; /* We use the VMA to populate VMG fields only. 
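 * Clearing vmg.middle makes this a "new range" merge: the existing VMA is
 * passed as vmg.prev via VMG_VMA_STATE() above, and [vma->vm_end,
 * vma->vm_end + delta) is the gap we attempt to merge into it.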
*/ return vma_merge_new_range(&vmg); } void unlink_file_vma_batch_init(struct unlink_vma_file_batch *vb) { vb->count = 0; } static void unlink_file_vma_batch_process(struct unlink_vma_file_batch *vb) { struct address_space *mapping; int i; mapping = vb->vmas[0]->vm_file->f_mapping; i_mmap_lock_write(mapping); for (i = 0; i < vb->count; i++) { VM_WARN_ON_ONCE(vb->vmas[i]->vm_file->f_mapping != mapping); __remove_shared_vm_struct(vb->vmas[i], mapping); } i_mmap_unlock_write(mapping); unlink_file_vma_batch_init(vb); } void unlink_file_vma_batch_add(struct unlink_vma_file_batch *vb, struct vm_area_struct *vma) { if (vma->vm_file == NULL) return; if ((vb->count > 0 && vb->vmas[0]->vm_file != vma->vm_file) || vb->count == ARRAY_SIZE(vb->vmas)) unlink_file_vma_batch_process(vb); vb->vmas[vb->count] = vma; vb->count++; } void unlink_file_vma_batch_final(struct unlink_vma_file_batch *vb) { if (vb->count > 0) unlink_file_vma_batch_process(vb); } static void vma_link_file(struct vm_area_struct *vma, bool hold_rmap_lock) { struct file *file = vma->vm_file; struct address_space *mapping; if (file) { mapping = file->f_mapping; i_mmap_lock_write(mapping); __vma_link_file(vma, mapping); if (!hold_rmap_lock) i_mmap_unlock_write(mapping); } } static int vma_link(struct mm_struct *mm, struct vm_area_struct *vma) { VMA_ITERATOR(vmi, mm, 0); vma_iter_config(&vmi, vma->vm_start, vma->vm_end); if (vma_iter_prealloc(&vmi, vma)) return -ENOMEM; vma_start_write(vma); vma_iter_store_new(&vmi, vma); vma_link_file(vma, /* hold_rmap_lock= */false); mm->map_count++; validate_mm(mm); return 0; } /* * Copy the vma structure to a new location in the same mm, * prior to moving page table entries, to effect an mremap move. */ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, unsigned long addr, unsigned long len, pgoff_t pgoff, bool *need_rmap_locks) { struct vm_area_struct *vma = *vmap; unsigned long vma_start = vma->vm_start; struct mm_struct *mm = vma->vm_mm; struct vm_area_struct *new_vma; bool faulted_in_anon_vma = true; VMA_ITERATOR(vmi, mm, addr); VMG_VMA_STATE(vmg, &vmi, NULL, vma, addr, addr + len); /* * If anonymous vma has not yet been faulted, update new pgoff * to match new location, to increase its chance of merging. */ if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) { pgoff = addr >> PAGE_SHIFT; faulted_in_anon_vma = false; } /* * If the VMA we are copying might contain a uprobe PTE, ensure * that we do not establish one upon merge. Otherwise, when mremap() * moves page tables, it will orphan the newly created PTE. */ if (vma->vm_file) vmg.skip_vma_uprobe = true; new_vma = find_vma_prev(mm, addr, &vmg.prev); if (new_vma && new_vma->vm_start < addr + len) return NULL; /* should never get here */ vmg.middle = NULL; /* New VMA range. */ vmg.pgoff = pgoff; vmg.next = vma_iter_next_rewind(&vmi, NULL); new_vma = vma_merge_new_range(&vmg); if (new_vma) { /* * Source vma may have been merged into new_vma */ if (unlikely(vma_start >= new_vma->vm_start && vma_start < new_vma->vm_end)) { /* * The only way we can get a vma_merge with * self during an mremap is if the vma hasn't * been faulted in yet and we were allowed to * reset the dst vma->vm_pgoff to the * destination address of the mremap to allow * the merge to happen. mremap must change the * vm_pgoff linearity between src and dst vmas * (in turn preventing a vma_merge) to be * safe. It is only safe to keep the vm_pgoff * linear if there are no pages mapped yet. 
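 * Hence the VM_BUG_ON below: if the source VMA had already been faulted in
 * (faulted_in_anon_vma), vm_pgoff was never reset, so a merge back onto the
 * source VMA must be impossible.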
*/ VM_BUG_ON_VMA(faulted_in_anon_vma, new_vma); *vmap = vma = new_vma; } *need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff); } else { new_vma = vm_area_dup(vma); if (!new_vma) goto out; vma_set_range(new_vma, addr, addr + len, pgoff); if (vma_dup_policy(vma, new_vma)) goto out_free_vma; if (anon_vma_clone(new_vma, vma)) goto out_free_mempol; if (new_vma->vm_file) get_file(new_vma->vm_file); if (new_vma->vm_ops && new_vma->vm_ops->open) new_vma->vm_ops->open(new_vma); if (vma_link(mm, new_vma)) goto out_vma_link; *need_rmap_locks = false; } return new_vma; out_vma_link: fixup_hugetlb_reservations(new_vma); vma_close(new_vma); if (new_vma->vm_file) fput(new_vma->vm_file); unlink_anon_vmas(new_vma); out_free_mempol: mpol_put(vma_policy(new_vma)); out_free_vma: vm_area_free(new_vma); out: return NULL; } /* * Rough compatibility check to quickly see if it's even worth looking * at sharing an anon_vma. * * They need to have the same vm_file, and the flags can only differ * in things that mprotect may change. * * NOTE! The fact that we share an anon_vma doesn't _have_ to mean that * we can merge the two vma's. For example, we refuse to merge a vma if * there is a vm_ops->close() function, because that indicates that the * driver is doing some kind of reference counting. But that doesn't * really matter for the anon_vma sharing case. */ static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *b) { return a->vm_end == b->vm_start && mpol_equal(vma_policy(a), vma_policy(b)) && a->vm_file == b->vm_file && !((a->vm_flags ^ b->vm_flags) & ~(VM_ACCESS_FLAGS | VM_IGNORE_MERGE)) && b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT); } /* * Do some basic sanity checking to see if we can re-use the anon_vma * from 'old'. The 'a'/'b' vma's are in VM order - one of them will be * the same as 'old', the other will be the new one that is trying * to share the anon_vma. * * NOTE! This runs with mmap_lock held for reading, so it is possible that * the anon_vma of 'old' is concurrently in the process of being set up * by another page fault trying to merge _that_. But that's ok: if it * is being set up, that automatically means that it will be a singleton * acceptable for merging, so we can do all of this optimistically. But * we do that READ_ONCE() to make sure that we never re-load the pointer. * * IOW: that the "list_is_singular()" test on the anon_vma_chain only * matters for the 'stable anon_vma' case (ie the thing we want to avoid * is to return an anon_vma that is "complex" due to having gone through * a fork). * * We also make sure that the two vma's are compatible (adjacent, * and with the same memory policies). That's all stable, even with just * a read lock on the mmap_lock. */ static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old, struct vm_area_struct *a, struct vm_area_struct *b) { if (anon_vma_compatible(a, b)) { struct anon_vma *anon_vma = READ_ONCE(old->anon_vma); if (anon_vma && list_is_singular(&old->anon_vma_chain)) return anon_vma; } return NULL; } /* * find_mergeable_anon_vma is used by anon_vma_prepare, to check * neighbouring vmas for a suitable anon_vma, before it goes off * to allocate a new anon_vma. It checks because a repetitive * sequence of mprotects and faults may otherwise lead to distinct * anon_vmas being allocated, preventing vma merge in subsequent * mprotect. 
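 * For example, an mprotect() of one page in the middle of a mapping splits it
 * into three VMAs; if each of these later faulted and allocated its own
 * anon_vma, a subsequent mprotect() restoring the original protection could no
 * longer merge the pieces back into a single VMA.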
*/ struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma) { struct anon_vma *anon_vma = NULL; struct vm_area_struct *prev, *next; VMA_ITERATOR(vmi, vma->vm_mm, vma->vm_end); /* Try next first. */ next = vma_iter_load(&vmi); if (next) { anon_vma = reusable_anon_vma(next, vma, next); if (anon_vma) return anon_vma; } prev = vma_prev(&vmi); VM_BUG_ON_VMA(prev != vma, vma); prev = vma_prev(&vmi); /* Try prev next. */ if (prev) anon_vma = reusable_anon_vma(prev, prev, vma); /* * We might reach here with anon_vma == NULL if we can't find * any reusable anon_vma. * There's no absolute need to look only at touching neighbours: * we could search further afield for "compatible" anon_vmas. * But it would probably just be a waste of time searching, * or lead to too many vmas hanging off the same anon_vma. * We're trying to allow mprotect remerging later on, * not trying to minimize memory used for anon_vmas. */ return anon_vma; } static bool vm_ops_needs_writenotify(const struct vm_operations_struct *vm_ops) { return vm_ops && (vm_ops->page_mkwrite || vm_ops->pfn_mkwrite); } static bool vma_is_shared_writable(struct vm_area_struct *vma) { return (vma->vm_flags & (VM_WRITE | VM_SHARED)) == (VM_WRITE | VM_SHARED); } static bool vma_fs_can_writeback(struct vm_area_struct *vma) { /* No managed pages to writeback. */ if (vma->vm_flags & VM_PFNMAP) return false; return vma->vm_file && vma->vm_file->f_mapping && mapping_can_writeback(vma->vm_file->f_mapping); } /* * Does this VMA require the underlying folios to have their dirty state * tracked? */ bool vma_needs_dirty_tracking(struct vm_area_struct *vma) { /* Only shared, writable VMAs require dirty tracking. */ if (!vma_is_shared_writable(vma)) return false; /* Does the filesystem need to be notified? */ if (vm_ops_needs_writenotify(vma->vm_ops)) return true; /* * Even if the filesystem doesn't indicate a need for writenotify, if it * can writeback, dirty tracking is still required. */ return vma_fs_can_writeback(vma); } /* * Some shared mappings will want the pages marked read-only * to track write events. If so, we'll downgrade vm_page_prot * to the private version (using protection_map[] without the * VM_SHARED bit). */ bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot) { /* If it was private or non-writable, the write bit is already clear */ if (!vma_is_shared_writable(vma)) return false; /* The backer wishes to know when pages are first written to? */ if (vm_ops_needs_writenotify(vma->vm_ops)) return true; /* The open routine did something to the protections that pgprot_modify * won't preserve? */ if (pgprot_val(vm_page_prot) != pgprot_val(vm_pgprot_modify(vm_page_prot, vma->vm_flags))) return false; /* * Do we need to track softdirty? hugetlb does not support softdirty * tracking yet. */ if (vma_soft_dirty_enabled(vma) && !is_vm_hugetlb_page(vma)) return true; /* Do we need write faults for uffd-wp tracking? */ if (userfaultfd_wp(vma)) return true; /* Can the mapping track the dirty pages? */ return vma_fs_can_writeback(vma); } static DEFINE_MUTEX(mm_all_locks_mutex); static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma) { if (!test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) { /* * The LSB of head.next can't change from under us * because we hold the mm_all_locks_mutex. */ down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_lock); /* * We can safely modify head.next after taking the * anon_vma->root->rwsem. 
If some other vma in this mm shares * the same anon_vma we won't take it again. * * No need of atomic instructions here, head.next * can't change from under us thanks to the * anon_vma->root->rwsem. */ if (__test_and_set_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) BUG(); } } static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping) { if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) { /* * AS_MM_ALL_LOCKS can't change from under us because * we hold the mm_all_locks_mutex. * * Operations on ->flags have to be atomic because * even if AS_MM_ALL_LOCKS is stable thanks to the * mm_all_locks_mutex, there may be other cpus * changing other bitflags in parallel to us. */ if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags)) BUG(); down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_lock); } } /* * This operation locks against the VM for all pte/vma/mm related * operations that could ever happen on a certain mm. This includes * vmtruncate, try_to_unmap, and all page faults. * * The caller must take the mmap_lock in write mode before calling * mm_take_all_locks(). The caller isn't allowed to release the * mmap_lock until mm_drop_all_locks() returns. * * mmap_lock in write mode is required in order to block all operations * that could modify pagetables and free pages without need of * altering the vma layout. It's also needed in write mode to avoid new * anon_vmas being associated with existing vmas. * * A single task can't take more than one mm_take_all_locks() in a row * or it would deadlock. * * The LSB in anon_vma->rb_root.rb_node and the AS_MM_ALL_LOCKS bitflag in * mapping->flags avoid taking the same lock twice, if more than one * vma in this mm is backed by the same anon_vma or address_space. * * We take locks in the following order, according to the comment at the beginning * of mm/rmap.c: * - all hugetlbfs_i_mmap_rwsem_key locks (aka mapping->i_mmap_rwsem for * hugetlb mapping); * - all vmas marked locked * - all i_mmap_rwsem locks; * - all anon_vma->rwsem locks * * We can take all locks within these types randomly because the VM code * doesn't nest them and we are protected from parallel mm_take_all_locks() by * mm_all_locks_mutex. * * mm_take_all_locks() and mm_drop_all_locks() are expensive operations * that may have to take thousands of locks. * * mm_take_all_locks() can fail if it's interrupted by signals. */ int mm_take_all_locks(struct mm_struct *mm) { struct vm_area_struct *vma; struct anon_vma_chain *avc; VMA_ITERATOR(vmi, mm, 0); mmap_assert_write_locked(mm); mutex_lock(&mm_all_locks_mutex); /* * vma_start_write() does not have a complement in mm_drop_all_locks() * because vma_start_write() is always asymmetrical; it marks a VMA as * being written to until mmap_write_unlock() or mmap_write_downgrade() * is reached.
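 * mm_drop_all_locks() therefore only undoes vm_lock_anon_vma() and
 * vm_lock_mapping(); the per-VMA write marks are dropped implicitly when the
 * caller releases or downgrades the mmap_lock.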
*/ for_each_vma(vmi, vma) { if (signal_pending(current)) goto out_unlock; vma_start_write(vma); } vma_iter_init(&vmi, mm, 0); for_each_vma(vmi, vma) { if (signal_pending(current)) goto out_unlock; if (vma->vm_file && vma->vm_file->f_mapping && is_vm_hugetlb_page(vma)) vm_lock_mapping(mm, vma->vm_file->f_mapping); } vma_iter_init(&vmi, mm, 0); for_each_vma(vmi, vma) { if (signal_pending(current)) goto out_unlock; if (vma->vm_file && vma->vm_file->f_mapping && !is_vm_hugetlb_page(vma)) vm_lock_mapping(mm, vma->vm_file->f_mapping); } vma_iter_init(&vmi, mm, 0); for_each_vma(vmi, vma) { if (signal_pending(current)) goto out_unlock; if (vma->anon_vma) list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) vm_lock_anon_vma(mm, avc->anon_vma); } return 0; out_unlock: mm_drop_all_locks(mm); return -EINTR; } static void vm_unlock_anon_vma(struct anon_vma *anon_vma) { if (test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) { /* * The LSB of head.next can't change to 0 from under * us because we hold the mm_all_locks_mutex. * * We must however clear the bitflag before unlocking * the vma so the users using the anon_vma->rb_root will * never see our bitflag. * * No need of atomic instructions here, head.next * can't change from under us until we release the * anon_vma->root->rwsem. */ if (!__test_and_clear_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) BUG(); anon_vma_unlock_write(anon_vma); } } static void vm_unlock_mapping(struct address_space *mapping) { if (test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) { /* * AS_MM_ALL_LOCKS can't change to 0 from under us * because we hold the mm_all_locks_mutex. */ i_mmap_unlock_write(mapping); if (!test_and_clear_bit(AS_MM_ALL_LOCKS, &mapping->flags)) BUG(); } } /* * The mmap_lock cannot be released by the caller until * mm_drop_all_locks() returns. */ void mm_drop_all_locks(struct mm_struct *mm) { struct vm_area_struct *vma; struct anon_vma_chain *avc; VMA_ITERATOR(vmi, mm, 0); mmap_assert_write_locked(mm); BUG_ON(!mutex_is_locked(&mm_all_locks_mutex)); for_each_vma(vmi, vma) { if (vma->anon_vma) list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) vm_unlock_anon_vma(avc->anon_vma); if (vma->vm_file && vma->vm_file->f_mapping) vm_unlock_mapping(vma->vm_file->f_mapping); } mutex_unlock(&mm_all_locks_mutex); } /* * We account for memory if it's a private writeable mapping, * not hugepages and VM_NORESERVE wasn't set. */ static bool accountable_mapping(struct file *file, vm_flags_t vm_flags) { /* * hugetlb has its own accounting separate from the core VM * VM_HUGETLB may not be set yet so we cannot check for that flag. */ if (file && is_file_hugepages(file)) return false; return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE; } /* * vms_abort_munmap_vmas() - Undo as much as possible from an aborted munmap() * operation. * @vms: The vma unmap structure * @mas_detach: The maple state with the detached maple tree * * Reattach any detached vmas, free up the maple tree used to track the vmas. * If that's not possible because the ptes are cleared (and vm_ops->closed() may * have been called), then a NULL is written over the vmas and the vmas are * removed (munmap() completed). 
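 * In other words: if vms->clear_ptes is still set the PTEs were never touched,
 * so reattach_vmas() is enough to undo the gather; otherwise the range is
 * turned into a permanent gap via vms_complete_munmap_vmas() below.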
*/ static void vms_abort_munmap_vmas(struct vma_munmap_struct *vms, struct ma_state *mas_detach) { struct ma_state *mas = &vms->vmi->mas; if (!vms->nr_pages) return; if (vms->clear_ptes) return reattach_vmas(mas_detach); /* * Aborting cannot just call the vm_ops open() because they are often * not symmetrical and state data has been lost. Resort to the old * failure method of leaving a gap where the MAP_FIXED mapping failed. */ mas_set_range(mas, vms->start, vms->end - 1); mas_store_gfp(mas, NULL, GFP_KERNEL|__GFP_NOFAIL); /* Clean up the insertion of the unfortunate gap */ vms_complete_munmap_vmas(vms, mas_detach); } static void update_ksm_flags(struct mmap_state *map) { map->vm_flags = ksm_vma_flags(map->mm, map->file, map->vm_flags); } static void set_desc_from_map(struct vm_area_desc *desc, const struct mmap_state *map) { desc->start = map->addr; desc->end = map->end; desc->pgoff = map->pgoff; desc->vm_file = map->file; desc->vm_flags = map->vm_flags; desc->page_prot = map->page_prot; } /* * __mmap_setup() - Prepare to gather any overlapping VMAs that need to be * unmapped once the map operation is completed, check limits, account mapping * and clean up any pre-existing VMAs. * * As a result it sets up the @map and @desc objects. * * @map: Mapping state. * @desc: VMA descriptor * @uf: Userfaultfd context list. * * Returns: 0 on success, error code otherwise. */ static int __mmap_setup(struct mmap_state *map, struct vm_area_desc *desc, struct list_head *uf) { int error; struct vma_iterator *vmi = map->vmi; struct vma_munmap_struct *vms = &map->vms; /* Find the first overlapping VMA and initialise unmap state. */ vms->vma = vma_find(vmi, map->end); init_vma_munmap(vms, vmi, vms->vma, map->addr, map->end, uf, /* unlock = */ false); /* OK, we have overlapping VMAs - prepare to unmap them. */ if (vms->vma) { mt_init_flags(&map->mt_detach, vmi->mas.tree->ma_flags & MT_FLAGS_LOCK_MASK); mt_on_stack(map->mt_detach); mas_init(&map->mas_detach, &map->mt_detach, /* addr = */ 0); /* Prepare to unmap any existing mapping in the area */ error = vms_gather_munmap_vmas(vms, &map->mas_detach); if (error) { /* On error VMAs will already have been reattached. */ vms->nr_pages = 0; return error; } map->next = vms->next; map->prev = vms->prev; } else { map->next = vma_iter_next_rewind(vmi, &map->prev); } /* Check against address space limit. */ if (!may_expand_vm(map->mm, map->vm_flags, map->pglen - vms->nr_pages)) return -ENOMEM; /* Private writable mapping: check memory availability. */ if (accountable_mapping(map->file, map->vm_flags)) { map->charged = map->pglen; map->charged -= vms->nr_accounted; if (map->charged) { error = security_vm_enough_memory_mm(map->mm, map->charged); if (error) return error; } vms->nr_accounted = 0; map->vm_flags |= VM_ACCOUNT; } /* * Clear PTEs while the vma is still in the tree so that rmap * cannot race with the freeing later in the truncate scenario. * This is also needed for mmap_file(), which is why vm_ops * close function is called. */ vms_clean_up_area(vms, &map->mas_detach); set_desc_from_map(desc, map); return 0; } static int __mmap_new_file_vma(struct mmap_state *map, struct vm_area_struct *vma) { struct vma_iterator *vmi = map->vmi; int error; vma->vm_file = get_file(map->file); if (!map->file->f_op->mmap) return 0; error = mmap_file(vma->vm_file, vma); if (error) { fput(vma->vm_file); vma->vm_file = NULL; vma_iter_set(vmi, vma->vm_end); /* Undo any partial mapping done by a device driver. 
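 * The driver's ->mmap() handler may have installed mappings before failing, so
 * everything in [vma->vm_start, vma->vm_end) is torn down again before the
 * error is propagated.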
*/ unmap_region(&vmi->mas, vma, map->prev, map->next); return error; } /* Drivers cannot alter the address of the VMA. */ WARN_ON_ONCE(map->addr != vma->vm_start); /* * Drivers should not permit writability when previously it was * disallowed. */ VM_WARN_ON_ONCE(map->vm_flags != vma->vm_flags && !(map->vm_flags & VM_MAYWRITE) && (vma->vm_flags & VM_MAYWRITE)); map->file = vma->vm_file; map->vm_flags = vma->vm_flags; return 0; } /* * __mmap_new_vma() - Allocate a new VMA for the region, as merging was not * possible. * * @map: Mapping state. * @vmap: Output pointer for the new VMA. * * Returns: Zero on success, or an error. */ static int __mmap_new_vma(struct mmap_state *map, struct vm_area_struct **vmap) { struct vma_iterator *vmi = map->vmi; int error = 0; struct vm_area_struct *vma; /* * Determine the object being mapped and call the appropriate * specific mapper. the address has already been validated, but * not unmapped, but the maps are removed from the list. */ vma = vm_area_alloc(map->mm); if (!vma) return -ENOMEM; vma_iter_config(vmi, map->addr, map->end); vma_set_range(vma, map->addr, map->end, map->pgoff); vm_flags_init(vma, map->vm_flags); vma->vm_page_prot = map->page_prot; if (vma_iter_prealloc(vmi, vma)) { error = -ENOMEM; goto free_vma; } if (map->file) error = __mmap_new_file_vma(map, vma); else if (map->vm_flags & VM_SHARED) error = shmem_zero_setup(vma); else vma_set_anonymous(vma); if (error) goto free_iter_vma; if (!map->check_ksm_early) { update_ksm_flags(map); vm_flags_init(vma, map->vm_flags); } #ifdef CONFIG_SPARC64 /* TODO: Fix SPARC ADI! */ WARN_ON_ONCE(!arch_validate_flags(map->vm_flags)); #endif /* Lock the VMA since it is modified after insertion into VMA tree */ vma_start_write(vma); vma_iter_store_new(vmi, vma); map->mm->map_count++; vma_link_file(vma, map->hold_file_rmap_lock); /* * vma_merge_new_range() calls khugepaged_enter_vma() too, the below * call covers the non-merge case. */ if (!vma_is_anonymous(vma)) khugepaged_enter_vma(vma, map->vm_flags); *vmap = vma; return 0; free_iter_vma: vma_iter_free(vmi); free_vma: vm_area_free(vma); return error; } /* * __mmap_complete() - Unmap any VMAs we overlap, account memory mapping * statistics, handle locking and finalise the VMA. * * @map: Mapping state. * @vma: Merged or newly allocated VMA for the mmap()'d region. */ static void __mmap_complete(struct mmap_state *map, struct vm_area_struct *vma) { struct mm_struct *mm = map->mm; vm_flags_t vm_flags = vma->vm_flags; perf_event_mmap(vma); /* Unmap any existing mapping in the area. */ vms_complete_munmap_vmas(&map->vms, &map->mas_detach); vm_stat_account(mm, vma->vm_flags, map->pglen); if (vm_flags & VM_LOCKED) { if ((vm_flags & VM_SPECIAL) || vma_is_dax(vma) || is_vm_hugetlb_page(vma) || vma == get_gate_vma(mm)) vm_flags_clear(vma, VM_LOCKED_MASK); else mm->locked_vm += map->pglen; } if (vma->vm_file) uprobe_mmap(vma); /* * New (or expanded) vma always get soft dirty status. * Otherwise user-space soft-dirty page tracker won't * be able to distinguish situation when vma area unmapped, * then new mapped in-place (which must be aimed as * a completely new data area). 
*/ if (pgtable_supports_soft_dirty()) vm_flags_set(vma, VM_SOFTDIRTY); vma_set_page_prot(vma); } static void call_action_prepare(struct mmap_state *map, struct vm_area_desc *desc) { struct mmap_action *action = &desc->action; mmap_action_prepare(action, desc); if (action->hide_from_rmap_until_complete) map->hold_file_rmap_lock = true; } /* * Invoke the f_op->mmap_prepare() callback for a file-backed mapping that * specifies it. * * This is called prior to any merge attempt, and updates whitelisted fields * that are permitted to be updated by the caller. * * All but user-defined fields will be pre-populated with original values. * * Returns 0 on success, or an error code otherwise. */ static int call_mmap_prepare(struct mmap_state *map, struct vm_area_desc *desc) { int err; /* Invoke the hook. */ err = vfs_mmap_prepare(map->file, desc); if (err) return err; call_action_prepare(map, desc); /* Update fields permitted to be changed. */ map->pgoff = desc->pgoff; map->file = desc->vm_file; map->vm_flags = desc->vm_flags; map->page_prot = desc->page_prot; /* User-defined fields. */ map->vm_ops = desc->vm_ops; map->vm_private_data = desc->private_data; return 0; } static void set_vma_user_defined_fields(struct vm_area_struct *vma, struct mmap_state *map) { if (map->vm_ops) vma->vm_ops = map->vm_ops; vma->vm_private_data = map->vm_private_data; } /* * Are we guaranteed no driver can change state such as to preclude KSM merging? * If so, let's set the KSM mergeable flag early so we don't break VMA merging. */ static bool can_set_ksm_flags_early(struct mmap_state *map) { struct file *file = map->file; /* Anonymous mappings have no driver which can change them. */ if (!file) return true; /* * If .mmap_prepare() is specified, then the driver will have already * manipulated state prior to updating KSM flags. So no need to worry * about mmap callbacks modifying VMA flags after the KSM flag has been * updated here, which could otherwise affect KSM eligibility. */ if (file->f_op->mmap_prepare) return true; /* shmem is safe. */ if (shmem_file(file)) return true; /* Any other .mmap callback is not safe. */ return false; } static int call_action_complete(struct mmap_state *map, struct vm_area_desc *desc, struct vm_area_struct *vma) { struct mmap_action *action = &desc->action; int ret; ret = mmap_action_complete(action, vma); /* If we held the file rmap we need to release it. */ if (map->hold_file_rmap_lock) { struct file *file = vma->vm_file; i_mmap_unlock_write(file->f_mapping); } return ret; } static unsigned long __mmap_region(struct file *file, unsigned long addr, unsigned long len, vm_flags_t vm_flags, unsigned long pgoff, struct list_head *uf) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma = NULL; bool have_mmap_prepare = file && file->f_op->mmap_prepare; VMA_ITERATOR(vmi, mm, addr); MMAP_STATE(map, mm, &vmi, addr, len, pgoff, vm_flags, file); struct vm_area_desc desc = { .mm = mm, .file = file, .action = { .type = MMAP_NOTHING, /* Default to no further action. */ }, }; bool allocated_new = false; int error; map.check_ksm_early = can_set_ksm_flags_early(&map); error = __mmap_setup(&map, &desc, uf); if (!error && have_mmap_prepare) error = call_mmap_prepare(&map, &desc); if (error) goto abort_munmap; if (map.check_ksm_early) update_ksm_flags(&map); /* Attempt to merge with adjacent VMAs... */ if (map.prev || map.next) { VMG_MMAP_STATE(vmg, &map, /* vma = */ NULL); vma = vma_merge_new_range(&vmg); } /* ...but if we can't, allocate a new VMA. 
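 *
 * For orientation, the overall flow of __mmap_region() is (a sketch of the
 * calls made above and below, not a new interface):
 *
 *	__mmap_setup()          - gather/clean up any overlapping VMAs
 *	call_mmap_prepare()     - optional f_op->mmap_prepare() hook
 *	vma_merge_new_range()   - try to extend an adjacent VMA
 *	__mmap_new_vma()        - otherwise allocate and link a fresh VMA
 *	__mmap_complete()       - unmap gathered VMAs, account, finalise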
*/ if (!vma) { error = __mmap_new_vma(&map, &vma); if (error) goto unacct_error; allocated_new = true; } if (have_mmap_prepare) set_vma_user_defined_fields(vma, &map); __mmap_complete(&map, vma); if (have_mmap_prepare && allocated_new) { error = call_action_complete(&map, &desc, vma); if (error) return error; } return addr; /* Accounting was done by __mmap_setup(). */ unacct_error: if (map.charged) vm_unacct_memory(map.charged); abort_munmap: vms_abort_munmap_vmas(&map.vms, &map.mas_detach); return error; } /** * mmap_region() - Actually perform the userland mapping of a VMA into * current->mm with known, aligned and overflow-checked @addr and @len, and * correctly determined VMA flags @vm_flags and page offset @pgoff. * * This is an internal memory management function, and should not be used * directly. * * The caller must write-lock current->mm->mmap_lock. * * @file: If a file-backed mapping, a pointer to the struct file describing the * file to be mapped, otherwise NULL. * @addr: The page-aligned address at which to perform the mapping. * @len: The page-aligned, non-zero, length of the mapping. * @vm_flags: The VMA flags which should be applied to the mapping. * @pgoff: If @file is specified, the page offset into the file, if not then * the virtual page offset in memory of the anonymous mapping. * @uf: Optionally, a pointer to a list head used for tracking userfaultfd unmap * events. * * Returns: Either an error, or the address at which the requested mapping has * been performed. */ unsigned long mmap_region(struct file *file, unsigned long addr, unsigned long len, vm_flags_t vm_flags, unsigned long pgoff, struct list_head *uf) { unsigned long ret; bool writable_file_mapping = false; mmap_assert_write_locked(current->mm); /* Check to see if MDWE is applicable. */ if (map_deny_write_exec(vm_flags, vm_flags)) return -EACCES; /* Allow architectures to sanity-check the vm_flags. */ if (!arch_validate_flags(vm_flags)) return -EINVAL; /* Map writable and ensure this isn't a sealed memfd. */ if (file && is_shared_maywrite(vm_flags)) { int error = mapping_map_writable(file->f_mapping); if (error) return error; writable_file_mapping = true; } ret = __mmap_region(file, addr, len, vm_flags, pgoff, uf); /* Clear our write mapping regardless of error. */ if (writable_file_mapping) mapping_unmap_writable(file->f_mapping); validate_mm(current->mm); return ret; } /* * do_brk_flags() - Increase the brk vma if the flags match. * @vmi: The vma iterator * @addr: The start address * @len: The length of the increase * @vma: The vma, * @vm_flags: The VMA Flags * * Extend the brk VMA from addr to addr + len. If the VMA is NULL or the flags * do not match then create a new anonymous VMA. Eventually we may be able to * do some brk-specific accounting here. */ int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma, unsigned long addr, unsigned long len, vm_flags_t vm_flags) { struct mm_struct *mm = current->mm; /* * Check against address space limits by the changed size * Note: This happens *after* clearing old mappings in some code paths. */ vm_flags |= VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags; vm_flags = ksm_vma_flags(mm, NULL, vm_flags); if (!may_expand_vm(mm, vm_flags, len >> PAGE_SHIFT)) return -ENOMEM; if (mm->map_count > sysctl_max_map_count) return -ENOMEM; if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT)) return -ENOMEM; /* * Expand the existing vma if possible; Note that singular lists do not * occur after forking, so the expand will only happen on new VMAs. 
*/ if (vma && vma->vm_end == addr) { VMG_STATE(vmg, mm, vmi, addr, addr + len, vm_flags, PHYS_PFN(addr)); vmg.prev = vma; /* vmi is positioned at prev, which this mode expects. */ vmg.just_expand = true; if (vma_merge_new_range(&vmg)) goto out; else if (vmg_nomem(&vmg)) goto unacct_fail; } if (vma) vma_iter_next_range(vmi); /* create a vma struct for an anonymous mapping */ vma = vm_area_alloc(mm); if (!vma) goto unacct_fail; vma_set_anonymous(vma); vma_set_range(vma, addr, addr + len, addr >> PAGE_SHIFT); vm_flags_init(vma, vm_flags); vma->vm_page_prot = vm_get_page_prot(vm_flags); vma_start_write(vma); if (vma_iter_store_gfp(vmi, vma, GFP_KERNEL)) goto mas_store_fail; mm->map_count++; validate_mm(mm); out: perf_event_mmap(vma); mm->total_vm += len >> PAGE_SHIFT; mm->data_vm += len >> PAGE_SHIFT; if (vm_flags & VM_LOCKED) mm->locked_vm += (len >> PAGE_SHIFT); if (pgtable_supports_soft_dirty()) vm_flags_set(vma, VM_SOFTDIRTY); return 0; mas_store_fail: vm_area_free(vma); unacct_fail: vm_unacct_memory(len >> PAGE_SHIFT); return -ENOMEM; } /** * unmapped_area() - Find an area between the low_limit and the high_limit with * the correct alignment and offset, all from @info. Note: current->mm is used * for the search. * * @info: The unmapped area information including the range [low_limit - * high_limit), the alignment offset and mask. * * Return: A memory address or -ENOMEM. */ unsigned long unmapped_area(struct vm_unmapped_area_info *info) { unsigned long length, gap; unsigned long low_limit, high_limit; struct vm_area_struct *tmp; VMA_ITERATOR(vmi, current->mm, 0); /* Adjust search length to account for worst case alignment overhead */ length = info->length + info->align_mask + info->start_gap; if (length < info->length) return -ENOMEM; low_limit = info->low_limit; if (low_limit < mmap_min_addr) low_limit = mmap_min_addr; high_limit = info->high_limit; retry: if (vma_iter_area_lowest(&vmi, low_limit, high_limit, length)) return -ENOMEM; /* * Adjust for the gap first so it doesn't interfere with the * later alignment. The first step is the minimum needed to * fulfill the start gap, the next step is the minimum to align * that. It is the minimum needed to fulfill both. */ gap = vma_iter_addr(&vmi) + info->start_gap; gap += (info->align_offset - gap) & info->align_mask; tmp = vma_next(&vmi); if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */ if (vm_start_gap(tmp) < gap + length - 1) { low_limit = tmp->vm_end; vma_iter_reset(&vmi); goto retry; } } else { tmp = vma_prev(&vmi); if (tmp && vm_end_gap(tmp) > gap) { low_limit = vm_end_gap(tmp); vma_iter_reset(&vmi); goto retry; } } return gap; } /** * unmapped_area_topdown() - Find an area between the low_limit and the * high_limit with the correct alignment and offset at the highest available * address, all from @info. Note: current->mm is used for the search. * * @info: The unmapped area information including the range [low_limit - * high_limit), the alignment offset and mask. * * Return: A memory address or -ENOMEM.
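 *
 * The alignment handling mirrors unmapped_area() above: the bottom-up search
 * rounds the candidate address up to the requested alignment via
 *	gap += (info->align_offset - gap) & info->align_mask;
 * while the top-down variant below rounds the end of the gap down via
 *	gap -= (gap - info->align_offset) & info->align_mask;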
*/ unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info) { unsigned long length, gap, gap_end; unsigned long low_limit, high_limit; struct vm_area_struct *tmp; VMA_ITERATOR(vmi, current->mm, 0); /* Adjust search length to account for worst case alignment overhead */ length = info->length + info->align_mask + info->start_gap; if (length < info->length) return -ENOMEM; low_limit = info->low_limit; if (low_limit < mmap_min_addr) low_limit = mmap_min_addr; high_limit = info->high_limit; retry: if (vma_iter_area_highest(&vmi, low_limit, high_limit, length)) return -ENOMEM; gap = vma_iter_end(&vmi) - info->length; gap -= (gap - info->align_offset) & info->align_mask; gap_end = vma_iter_end(&vmi); tmp = vma_next(&vmi); if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */ if (vm_start_gap(tmp) < gap_end) { high_limit = vm_start_gap(tmp); vma_iter_reset(&vmi); goto retry; } } else { tmp = vma_prev(&vmi); if (tmp && vm_end_gap(tmp) > gap) { high_limit = tmp->vm_start; vma_iter_reset(&vmi); goto retry; } } return gap; } /* * Verify that the stack growth is acceptable and * update accounting. This is shared with both the * grow-up and grow-down cases. */ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow) { struct mm_struct *mm = vma->vm_mm; unsigned long new_start; /* address space limit tests */ if (!may_expand_vm(mm, vma->vm_flags, grow)) return -ENOMEM; /* Stack limit test */ if (size > rlimit(RLIMIT_STACK)) return -ENOMEM; /* mlock limit tests */ if (!mlock_future_ok(mm, vma->vm_flags, grow << PAGE_SHIFT)) return -ENOMEM; /* Check to ensure the stack will not grow into a hugetlb-only region */ new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start : vma->vm_end - size; if (is_hugepage_only_range(vma->vm_mm, new_start, size)) return -EFAULT; /* * Overcommit.. This must be the final test, as it will * update security statistics. */ if (security_vm_enough_memory_mm(mm, grow)) return -ENOMEM; return 0; } #if defined(CONFIG_STACK_GROWSUP) /* * PA-RISC uses this for its stack. * vma is the last one with address > vma->vm_end. Have to extend vma. */ int expand_upwards(struct vm_area_struct *vma, unsigned long address) { struct mm_struct *mm = vma->vm_mm; struct vm_area_struct *next; unsigned long gap_addr; int error = 0; VMA_ITERATOR(vmi, mm, vma->vm_start); if (!(vma->vm_flags & VM_GROWSUP)) return -EFAULT; mmap_assert_write_locked(mm); /* Guard against exceeding limits of the address space. */ address &= PAGE_MASK; if (address >= (TASK_SIZE & PAGE_MASK)) return -ENOMEM; address += PAGE_SIZE; /* Enforce stack_guard_gap */ gap_addr = address + stack_guard_gap; /* Guard against overflow */ if (gap_addr < address || gap_addr > TASK_SIZE) gap_addr = TASK_SIZE; next = find_vma_intersection(mm, vma->vm_end, gap_addr); if (next && vma_is_accessible(next)) { if (!(next->vm_flags & VM_GROWSUP)) return -ENOMEM; /* Check that both stack segments have the same anon_vma? */ } if (next) vma_iter_prev_range_limit(&vmi, address); vma_iter_config(&vmi, vma->vm_start, address); if (vma_iter_prealloc(&vmi, vma)) return -ENOMEM; /* We must make sure the anon_vma is allocated. */ if (unlikely(anon_vma_prepare(vma))) { vma_iter_free(&vmi); return -ENOMEM; } /* Lock the VMA before expanding to prevent concurrent page faults */ vma_start_write(vma); /* We update the anon VMA tree. 
*/ anon_vma_lock_write(vma->anon_vma); /* Somebody else might have raced and expanded it already */ if (address > vma->vm_end) { unsigned long size, grow; size = address - vma->vm_start; grow = (address - vma->vm_end) >> PAGE_SHIFT; error = -ENOMEM; if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) { error = acct_stack_growth(vma, size, grow); if (!error) { if (vma->vm_flags & VM_LOCKED) mm->locked_vm += grow; vm_stat_account(mm, vma->vm_flags, grow); anon_vma_interval_tree_pre_update_vma(vma); vma->vm_end = address; /* Overwrite old entry in mtree. */ vma_iter_store_overwrite(&vmi, vma); anon_vma_interval_tree_post_update_vma(vma); perf_event_mmap(vma); } } } anon_vma_unlock_write(vma->anon_vma); vma_iter_free(&vmi); validate_mm(mm); return error; } #endif /* CONFIG_STACK_GROWSUP */ /* * vma is the first one with address < vma->vm_start. Have to extend vma. * mmap_lock held for writing. */ int expand_downwards(struct vm_area_struct *vma, unsigned long address) { struct mm_struct *mm = vma->vm_mm; struct vm_area_struct *prev; int error = 0; VMA_ITERATOR(vmi, mm, vma->vm_start); if (!(vma->vm_flags & VM_GROWSDOWN)) return -EFAULT; mmap_assert_write_locked(mm); address &= PAGE_MASK; if (address < mmap_min_addr || address < FIRST_USER_ADDRESS) return -EPERM; /* Enforce stack_guard_gap */ prev = vma_prev(&vmi); /* Check that both stack segments have the same anon_vma? */ if (prev) { if (!(prev->vm_flags & VM_GROWSDOWN) && vma_is_accessible(prev) && (address - prev->vm_end < stack_guard_gap)) return -ENOMEM; } if (prev) vma_iter_next_range_limit(&vmi, vma->vm_start); vma_iter_config(&vmi, address, vma->vm_end); if (vma_iter_prealloc(&vmi, vma)) return -ENOMEM; /* We must make sure the anon_vma is allocated. */ if (unlikely(anon_vma_prepare(vma))) { vma_iter_free(&vmi); return -ENOMEM; } /* Lock the VMA before expanding to prevent concurrent page faults */ vma_start_write(vma); /* We update the anon VMA tree. */ anon_vma_lock_write(vma->anon_vma); /* Somebody else might have raced and expanded it already */ if (address < vma->vm_start) { unsigned long size, grow; size = vma->vm_end - address; grow = (vma->vm_start - address) >> PAGE_SHIFT; error = -ENOMEM; if (grow <= vma->vm_pgoff) { error = acct_stack_growth(vma, size, grow); if (!error) { if (vma->vm_flags & VM_LOCKED) mm->locked_vm += grow; vm_stat_account(mm, vma->vm_flags, grow); anon_vma_interval_tree_pre_update_vma(vma); vma->vm_start = address; vma->vm_pgoff -= grow; /* Overwrite old entry in mtree. */ vma_iter_store_overwrite(&vmi, vma); anon_vma_interval_tree_post_update_vma(vma); perf_event_mmap(vma); } } } anon_vma_unlock_write(vma->anon_vma); vma_iter_free(&vmi); validate_mm(mm); return error; } int __vm_munmap(unsigned long start, size_t len, bool unlock) { int ret; struct mm_struct *mm = current->mm; LIST_HEAD(uf); VMA_ITERATOR(vmi, mm, start); if (mmap_write_lock_killable(mm)) return -EINTR; ret = do_vmi_munmap(&vmi, mm, start, len, &uf, unlock); if (ret || !unlock) mmap_write_unlock(mm); userfaultfd_unmap_complete(mm, &uf); return ret; } /* Insert vm structure into process list sorted by address * and into the inode's i_mmap tree. If vm_file is non-NULL * then i_mmap_rwsem is taken here. 
*/ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma) { unsigned long charged = vma_pages(vma); if (find_vma_intersection(mm, vma->vm_start, vma->vm_end)) return -ENOMEM; if ((vma->vm_flags & VM_ACCOUNT) && security_vm_enough_memory_mm(mm, charged)) return -ENOMEM; /* * The vm_pgoff of a purely anonymous vma should be irrelevant * until its first write fault, when page's anon_vma and index * are set. But now set the vm_pgoff it will almost certainly * end up with (unless mremap moves it elsewhere before that * first wfault), so /proc/pid/maps tells a consistent story. * * By setting it to reflect the virtual start address of the * vma, merges and splits can happen in a seamless way, just * using the existing file pgoff checks and manipulations. * Similarly in do_mmap and in do_brk_flags. */ if (vma_is_anonymous(vma)) { BUG_ON(vma->anon_vma); vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT; } if (vma_link(mm, vma)) { if (vma->vm_flags & VM_ACCOUNT) vm_unacct_memory(charged); return -ENOMEM; } return 0; }
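/*
 * Editor's sketch (not part of the kernel sources above): a standalone,
 * userspace illustration of the alignment arithmetic used by unmapped_area()
 * and unmapped_area_topdown(). The step "gap += (align_offset - gap) &
 * align_mask" bumps a candidate address forward to the next value congruent
 * to align_offset modulo (align_mask + 1); the top-down variant rounds down
 * instead. The helper names and sample values below are illustrative only.
 */
#include <assert.h>
#include <stdio.h>

static unsigned long align_up_to_offset(unsigned long gap,
					unsigned long align_mask,
					unsigned long align_offset)
{
	/* Same arithmetic as the bottom-up search in unmapped_area(). */
	gap += (align_offset - gap) & align_mask;
	return gap;
}

static unsigned long align_down_to_offset(unsigned long gap,
					  unsigned long align_mask,
					  unsigned long align_offset)
{
	/* Same arithmetic as unmapped_area_topdown(). */
	gap -= (gap - align_offset) & align_mask;
	return gap;
}

int main(void)
{
	unsigned long mask = 0x1fffffUL;  /* 2 MiB alignment: mask = size - 1 */
	unsigned long offset = 0x1000UL;  /* required offset within that unit */
	unsigned long addr = 0x7f1234567000UL;
	unsigned long up = align_up_to_offset(addr, mask, offset);
	unsigned long down = align_down_to_offset(addr, mask, offset);

	assert((up & mask) == offset && up >= addr);
	assert((down & mask) == offset && down <= addr);
	printf("up=%#lx down=%#lx\n", up, down);
	return 0;
}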
// SPDX-License-Identifier: GPL-2.0 /* * SHA-1 and HMAC-SHA1 library functions */ #include <crypto/hmac.h> #include <crypto/sha1.h> #include <linux/bitops.h> #include <linux/export.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/string.h> #include <linux/unaligned.h> #include <linux/wordpart.h> #include "fips.h" static const struct sha1_block_state sha1_iv = { .h = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 }, }; /* * If you have 32 registers or more, the compiler can (and should) * try to change the array[] accesses into registers. However, on * machines with less than ~25 registers, that won't really work, * and at least gcc will make an unholy mess of it. * * So to avoid that mess which just slows things down, we force * the stores to memory to actually happen (we might be better off * with a 'W(t)=(val);asm("":"+m" (W(t))' there instead, as * suggested by Artur Skawina - that will also make gcc unable to * try to do the silly "optimize away loads" part because it won't * see what the value will be). * * Ben Herrenschmidt reports that on PPC, the C version comes close * to the optimized asm with this (ie on PPC you don't want that * 'volatile', since there are lots of registers). * * On ARM we get the best code generation by forcing a full memory barrier * between each SHA_ROUND, otherwise gcc happily gets wild with spilling and * the stack frame size simply explodes and performance goes down the drain. */ #ifdef CONFIG_X86 #define setW(x, val) (*(volatile __u32 *)&W(x) = (val)) #elif defined(CONFIG_ARM) #define setW(x, val) do { W(x) = (val); __asm__("":::"memory"); } while (0) #else #define setW(x, val) (W(x) = (val)) #endif /* This "rolls" over the 512-bit array */ #define W(x) (array[(x)&15]) /* * Where do we get the source from? The first 16 iterations get it from * the input data, the next mix it from the 512-bit array.
*/ #define SHA_SRC(t) get_unaligned_be32((__u32 *)data + t) #define SHA_MIX(t) rol32(W(t+13) ^ W(t+8) ^ W(t+2) ^ W(t), 1) #define SHA_ROUND(t, input, fn, constant, A, B, C, D, E) do { \ __u32 TEMP = input(t); setW(t, TEMP); \ E += TEMP + rol32(A,5) + (fn) + (constant); \ B = ror32(B, 2); \ TEMP = E; E = D; D = C; C = B; B = A; A = TEMP; } while (0) #define T_0_15(t, A, B, C, D, E) SHA_ROUND(t, SHA_SRC, (((C^D)&B)^D) , 0x5a827999, A, B, C, D, E ) #define T_16_19(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (((C^D)&B)^D) , 0x5a827999, A, B, C, D, E ) #define T_20_39(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (B^C^D) , 0x6ed9eba1, A, B, C, D, E ) #define T_40_59(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, ((B&C)+(D&(B^C))) , 0x8f1bbcdc, A, B, C, D, E ) #define T_60_79(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (B^C^D) , 0xca62c1d6, A, B, C, D, E ) /** * sha1_transform - single block SHA1 transform (deprecated) * * @digest: 160 bit digest to update * @data: 512 bits of data to hash * @array: 16 words of workspace (see note) * * This function executes SHA-1's internal compression function. It updates the * 160-bit internal state (@digest) with a single 512-bit data block (@data). * * Don't use this function. SHA-1 is no longer considered secure. And even if * you do have to use SHA-1, this isn't the correct way to hash something with * SHA-1 as this doesn't handle padding and finalization. * * Note: If the hash is security sensitive, the caller should be sure * to clear the workspace. This is left to the caller to avoid * unnecessary clears between chained hashing operations. */ void sha1_transform(__u32 *digest, const char *data, __u32 *array) { __u32 A, B, C, D, E; unsigned int i = 0; A = digest[0]; B = digest[1]; C = digest[2]; D = digest[3]; E = digest[4]; /* Round 1 - iterations 0-16 take their input from 'data' */ for (; i < 16; ++i) T_0_15(i, A, B, C, D, E); /* Round 1 - tail. 
Input from 512-bit mixing array */ for (; i < 20; ++i) T_16_19(i, A, B, C, D, E); /* Round 2 */ for (; i < 40; ++i) T_20_39(i, A, B, C, D, E); /* Round 3 */ for (; i < 60; ++i) T_40_59(i, A, B, C, D, E); /* Round 4 */ for (; i < 80; ++i) T_60_79(i, A, B, C, D, E); digest[0] += A; digest[1] += B; digest[2] += C; digest[3] += D; digest[4] += E; } EXPORT_SYMBOL(sha1_transform); /** * sha1_init_raw - initialize the vectors for a SHA1 digest * @buf: vector to initialize */ void sha1_init_raw(__u32 *buf) { buf[0] = 0x67452301; buf[1] = 0xefcdab89; buf[2] = 0x98badcfe; buf[3] = 0x10325476; buf[4] = 0xc3d2e1f0; } EXPORT_SYMBOL(sha1_init_raw); static void __maybe_unused sha1_blocks_generic(struct sha1_block_state *state, const u8 *data, size_t nblocks) { u32 workspace[SHA1_WORKSPACE_WORDS]; do { sha1_transform(state->h, data, workspace); data += SHA1_BLOCK_SIZE; } while (--nblocks); memzero_explicit(workspace, sizeof(workspace)); } #ifdef CONFIG_CRYPTO_LIB_SHA1_ARCH #include "sha1.h" /* $(SRCARCH)/sha1.h */ #else #define sha1_blocks sha1_blocks_generic #endif void sha1_init(struct sha1_ctx *ctx) { ctx->state = sha1_iv; ctx->bytecount = 0; } EXPORT_SYMBOL_GPL(sha1_init); void sha1_update(struct sha1_ctx *ctx, const u8 *data, size_t len) { size_t partial = ctx->bytecount % SHA1_BLOCK_SIZE; ctx->bytecount += len; if (partial + len >= SHA1_BLOCK_SIZE) { size_t nblocks; if (partial) { size_t l = SHA1_BLOCK_SIZE - partial; memcpy(&ctx->buf[partial], data, l); data += l; len -= l; sha1_blocks(&ctx->state, ctx->buf, 1); } nblocks = len / SHA1_BLOCK_SIZE; len %= SHA1_BLOCK_SIZE; if (nblocks) { sha1_blocks(&ctx->state, data, nblocks); data += nblocks * SHA1_BLOCK_SIZE; } partial = 0; } if (len) memcpy(&ctx->buf[partial], data, len); } EXPORT_SYMBOL_GPL(sha1_update); static void __sha1_final(struct sha1_ctx *ctx, u8 out[SHA1_DIGEST_SIZE]) { u64 bitcount = ctx->bytecount << 3; size_t partial = ctx->bytecount % SHA1_BLOCK_SIZE; ctx->buf[partial++] = 0x80; if (partial > SHA1_BLOCK_SIZE - 8) { memset(&ctx->buf[partial], 0, SHA1_BLOCK_SIZE - partial); sha1_blocks(&ctx->state, ctx->buf, 1); partial = 0; } memset(&ctx->buf[partial], 0, SHA1_BLOCK_SIZE - 8 - partial); *(__be64 *)&ctx->buf[SHA1_BLOCK_SIZE - 8] = cpu_to_be64(bitcount); sha1_blocks(&ctx->state, ctx->buf, 1); for (size_t i = 0; i < SHA1_DIGEST_SIZE; i += 4) put_unaligned_be32(ctx->state.h[i / 4], out + i); } void sha1_final(struct sha1_ctx *ctx, u8 out[SHA1_DIGEST_SIZE]) { __sha1_final(ctx, out); memzero_explicit(ctx, sizeof(*ctx)); } EXPORT_SYMBOL_GPL(sha1_final); void sha1(const u8 *data, size_t len, u8 out[SHA1_DIGEST_SIZE]) { struct sha1_ctx ctx; sha1_init(&ctx); sha1_update(&ctx, data, len); sha1_final(&ctx, out); } EXPORT_SYMBOL_GPL(sha1); static void __hmac_sha1_preparekey(struct sha1_block_state *istate, struct sha1_block_state *ostate, const u8 *raw_key, size_t raw_key_len) { union { u8 b[SHA1_BLOCK_SIZE]; unsigned long w[SHA1_BLOCK_SIZE / sizeof(unsigned long)]; } derived_key = { 0 }; if (unlikely(raw_key_len > SHA1_BLOCK_SIZE)) sha1(raw_key, raw_key_len, derived_key.b); else memcpy(derived_key.b, raw_key, raw_key_len); for (size_t i = 0; i < ARRAY_SIZE(derived_key.w); i++) derived_key.w[i] ^= REPEAT_BYTE(HMAC_IPAD_VALUE); *istate = sha1_iv; sha1_blocks(istate, derived_key.b, 1); for (size_t i = 0; i < ARRAY_SIZE(derived_key.w); i++) derived_key.w[i] ^= REPEAT_BYTE(HMAC_OPAD_VALUE ^ HMAC_IPAD_VALUE); *ostate = sha1_iv; sha1_blocks(ostate, derived_key.b, 1); memzero_explicit(&derived_key, sizeof(derived_key)); } void hmac_sha1_preparekey(struct 
hmac_sha1_key *key, const u8 *raw_key, size_t raw_key_len) { __hmac_sha1_preparekey(&key->istate, &key->ostate, raw_key, raw_key_len); } EXPORT_SYMBOL_GPL(hmac_sha1_preparekey); void hmac_sha1_init(struct hmac_sha1_ctx *ctx, const struct hmac_sha1_key *key) { ctx->sha_ctx.state = key->istate; ctx->sha_ctx.bytecount = SHA1_BLOCK_SIZE; ctx->ostate = key->ostate; } EXPORT_SYMBOL_GPL(hmac_sha1_init); void hmac_sha1_init_usingrawkey(struct hmac_sha1_ctx *ctx, const u8 *raw_key, size_t raw_key_len) { __hmac_sha1_preparekey(&ctx->sha_ctx.state, &ctx->ostate, raw_key, raw_key_len); ctx->sha_ctx.bytecount = SHA1_BLOCK_SIZE; } EXPORT_SYMBOL_GPL(hmac_sha1_init_usingrawkey); void hmac_sha1_final(struct hmac_sha1_ctx *ctx, u8 out[SHA1_DIGEST_SIZE]) { /* Generate the padded input for the outer hash in ctx->sha_ctx.buf. */ __sha1_final(&ctx->sha_ctx, ctx->sha_ctx.buf); memset(&ctx->sha_ctx.buf[SHA1_DIGEST_SIZE], 0, SHA1_BLOCK_SIZE - SHA1_DIGEST_SIZE); ctx->sha_ctx.buf[SHA1_DIGEST_SIZE] = 0x80; *(__be32 *)&ctx->sha_ctx.buf[SHA1_BLOCK_SIZE - 4] = cpu_to_be32(8 * (SHA1_BLOCK_SIZE + SHA1_DIGEST_SIZE)); /* Compute the outer hash, which gives the HMAC value. */ sha1_blocks(&ctx->ostate, ctx->sha_ctx.buf, 1); for (size_t i = 0; i < SHA1_DIGEST_SIZE; i += 4) put_unaligned_be32(ctx->ostate.h[i / 4], out + i); memzero_explicit(ctx, sizeof(*ctx)); } EXPORT_SYMBOL_GPL(hmac_sha1_final); void hmac_sha1(const struct hmac_sha1_key *key, const u8 *data, size_t data_len, u8 out[SHA1_DIGEST_SIZE]) { struct hmac_sha1_ctx ctx; hmac_sha1_init(&ctx, key); hmac_sha1_update(&ctx, data, data_len); hmac_sha1_final(&ctx, out); } EXPORT_SYMBOL_GPL(hmac_sha1); void hmac_sha1_usingrawkey(const u8 *raw_key, size_t raw_key_len, const u8 *data, size_t data_len, u8 out[SHA1_DIGEST_SIZE]) { struct hmac_sha1_ctx ctx; hmac_sha1_init_usingrawkey(&ctx, raw_key, raw_key_len); hmac_sha1_update(&ctx, data, data_len); hmac_sha1_final(&ctx, out); } EXPORT_SYMBOL_GPL(hmac_sha1_usingrawkey); #if defined(sha1_mod_init_arch) || defined(CONFIG_CRYPTO_FIPS) static int __init sha1_mod_init(void) { #ifdef sha1_mod_init_arch sha1_mod_init_arch(); #endif if (fips_enabled) { /* * FIPS cryptographic algorithm self-test. As per the FIPS * Implementation Guidance, testing HMAC-SHA1 satisfies the test * requirement for SHA-1 too. */ u8 mac[SHA1_DIGEST_SIZE]; hmac_sha1_usingrawkey(fips_test_key, sizeof(fips_test_key), fips_test_data, sizeof(fips_test_data), mac); if (memcmp(fips_test_hmac_sha1_value, mac, sizeof(mac)) != 0) panic("sha1: FIPS self-test failed\n"); } return 0; } subsys_initcall(sha1_mod_init); static void __exit sha1_mod_exit(void) { } module_exit(sha1_mod_exit); #endif MODULE_DESCRIPTION("SHA-1 and HMAC-SHA1 library functions"); MODULE_LICENSE("GPL");
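/*
 * Editor's sketch (not part of sha1.c above): how a caller might use the
 * library interface defined in this file. The function name sha1_usage_demo()
 * and the sample buffers are made up for illustration; the sha1_*() and
 * hmac_sha1_usingrawkey() calls follow the definitions above, assuming their
 * declarations are provided by <crypto/sha1.h> as included by this file.
 */
#include <crypto/sha1.h>
#include <linux/bug.h>
#include <linux/string.h>

static void __maybe_unused sha1_usage_demo(void)
{
	static const u8 msg[] = "abc";
	static const u8 key[] = { 0x0b, 0x0b, 0x0b, 0x0b };
	u8 oneshot[SHA1_DIGEST_SIZE], incremental[SHA1_DIGEST_SIZE];
	u8 mac[SHA1_DIGEST_SIZE];
	struct sha1_ctx ctx;

	/* One-shot digest of the whole message (excluding the NUL byte). */
	sha1(msg, sizeof(msg) - 1, oneshot);

	/* The same digest computed incrementally across two updates. */
	sha1_init(&ctx);
	sha1_update(&ctx, msg, 1);
	sha1_update(&ctx, msg + 1, sizeof(msg) - 2);
	sha1_final(&ctx, incremental);
	WARN_ON(memcmp(oneshot, incremental, SHA1_DIGEST_SIZE) != 0);

	/* One-off HMAC-SHA1 with a raw key, without preparing a key struct. */
	hmac_sha1_usingrawkey(key, sizeof(key), msg, sizeof(msg) - 1, mac);
}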
/* * drm_irq.c IRQ and vblank support * * \author Rickard E. (Rik) Faith <faith@valinux.com> * \author Gareth Hughes <gareth@valinux.com> * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. */ #include <linux/export.h> #include <linux/kthread.h> #include <linux/moduleparam.h> #include <drm/drm_crtc.h> #include <drm/drm_drv.h> #include <drm/drm_framebuffer.h> #include <drm/drm_managed.h> #include <drm/drm_modeset_helper_vtables.h> #include <drm/drm_print.h> #include <drm/drm_vblank.h> #include "drm_internal.h" #include "drm_trace.h" /** * DOC: vblank handling * * From the computer's perspective, every time the monitor displays * a new frame the scanout engine has "scanned out" the display image * from top to bottom, one row of pixels at a time. The current row * of pixels is referred to as the current scanline. * * In addition to the display's visible area, there's usually a couple of * extra scanlines which aren't actually displayed on the screen. * These extra scanlines don't contain image data and are occasionally used * for features like audio and infoframes. The region made up of these * scanlines is referred to as the vertical blanking region, or vblank for * short. * * For historical reference, the vertical blanking period was designed to * give the electron gun (on CRTs) enough time to move back to the top of * the screen to start scanning out the next frame. Similar for horizontal * blanking periods. They were designed to give the electron gun enough * time to move back to the other side of the screen to start scanning the * next scanline.
* * :: * * * physical → ⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽ * top of | | * display | | * | New frame | * | | * |↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓| * |~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~| ← Scanline, * |↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓| updates the * | | frame as it * | | travels down * | | ("scan out") * | Old frame | * | | * | | * | | * | | physical * | | bottom of * vertical |⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽| ← display * blanking ┆xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx┆ * region → ┆xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx┆ * ┆xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx┆ * start of → ⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽ * new frame * * "Physical top of display" is the reference point for the high-precision/ * corrected timestamp. * * On a lot of display hardware, programming needs to take effect during the * vertical blanking period so that settings like gamma, the image buffer * buffer to be scanned out, etc. can safely be changed without showing * any visual artifacts on the screen. In some unforgiving hardware, some of * this programming has to both start and end in the same vblank. To help * with the timing of the hardware programming, an interrupt is usually * available to notify the driver when it can start the updating of registers. * The interrupt is in this context named the vblank interrupt. * * The vblank interrupt may be fired at different points depending on the * hardware. Some hardware implementations will fire the interrupt when the * new frame start, other implementations will fire the interrupt at different * points in time. * * Vertical blanking plays a major role in graphics rendering. To achieve * tear-free display, users must synchronize page flips and/or rendering to * vertical blanking. The DRM API offers ioctls to perform page flips * synchronized to vertical blanking and wait for vertical blanking. * * The DRM core handles most of the vertical blanking management logic, which * involves filtering out spurious interrupts, keeping race-free blanking * counters, coping with counter wrap-around and resets and keeping use counts. * It relies on the driver to generate vertical blanking interrupts and * optionally provide a hardware vertical blanking counter. * * Drivers must initialize the vertical blanking handling core with a call to * drm_vblank_init(). Minimally, a driver needs to implement * &drm_crtc_funcs.enable_vblank and &drm_crtc_funcs.disable_vblank plus call * drm_crtc_handle_vblank() in its vblank interrupt handler for working vblank * support. * * Vertical blanking interrupts can be enabled by the DRM core or by drivers * themselves (for instance to handle page flipping operations). The DRM core * maintains a vertical blanking use count to ensure that the interrupts are not * disabled while a user still needs them. To increment the use count, drivers * call drm_crtc_vblank_get() and release the vblank reference again with * drm_crtc_vblank_put(). In between these two calls vblank interrupts are * guaranteed to be enabled. * * On many hardware disabling the vblank interrupt cannot be done in a race-free * manner, see &drm_vblank_crtc_config.disable_immediate and * &drm_driver.max_vblank_count. In that case the vblank core only disables the * vblanks after a timer has expired, which can be configured through the * ``vblankoffdelay`` module parameter. 
* * Drivers for hardware without support for vertical-blanking interrupts can * use DRM vblank timers to send vblank events at the rate of the current * display mode's refresh. While not synchronized to the hardware's * vertical-blanking regions, the timer helps DRM clients and compositors to * adapt their update cycle to the display output. Drivers should set up * vblanking as usual, but call drm_crtc_vblank_start_timer() and * drm_crtc_vblank_cancel_timer() as part of their atomic mode setting. * See also DRM vblank helpers for more information. * * Drivers without support for vertical-blanking interrupts nor timers must * not call drm_vblank_init(). For these drivers, atomic helpers will * automatically generate fake vblank events as part of the display update. * This functionality also can be controlled by the driver by enabling and * disabling struct drm_crtc_state.no_vblank. */ /* Retry timestamp calculation up to 3 times to satisfy * drm_timestamp_precision before giving up. */ #define DRM_TIMESTAMP_MAXRETRIES 3 /* Threshold in nanoseconds for detection of redundant * vblank irq in drm_handle_vblank(). 1 msec should be ok. */ #define DRM_REDUNDANT_VBLIRQ_THRESH_NS 1000000 static bool drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe, ktime_t *tvblank, bool in_vblank_irq); static unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */ static int drm_vblank_offdelay = 5000; /* Default to 5000 msecs. */ module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600); module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600); MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs] (0: never disable, <0: disable immediately)"); MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]"); static struct drm_vblank_crtc * drm_vblank_crtc(struct drm_device *dev, unsigned int pipe) { return &dev->vblank[pipe]; } struct drm_vblank_crtc * drm_crtc_vblank_crtc(struct drm_crtc *crtc) { return drm_vblank_crtc(crtc->dev, drm_crtc_index(crtc)); } EXPORT_SYMBOL(drm_crtc_vblank_crtc); static void store_vblank(struct drm_device *dev, unsigned int pipe, u32 vblank_count_inc, ktime_t t_vblank, u32 last) { struct drm_vblank_crtc *vblank = drm_vblank_crtc(dev, pipe); assert_spin_locked(&dev->vblank_time_lock); vblank->last = last; write_seqlock(&vblank->seqlock); vblank->time = t_vblank; atomic64_add(vblank_count_inc, &vblank->count); write_sequnlock(&vblank->seqlock); } static u32 drm_max_vblank_count(struct drm_device *dev, unsigned int pipe) { struct drm_vblank_crtc *vblank = drm_vblank_crtc(dev, pipe); return vblank->max_vblank_count ?: dev->max_vblank_count; } /* * "No hw counter" fallback implementation of .get_vblank_counter() hook, * if there is no usable hardware frame counter available. */ static u32 drm_vblank_no_hw_counter(struct drm_device *dev, unsigned int pipe) { drm_WARN_ON_ONCE(dev, drm_max_vblank_count(dev, pipe) != 0); return 0; } static u32 __get_vblank_counter(struct drm_device *dev, unsigned int pipe) { if (drm_core_check_feature(dev, DRIVER_MODESET)) { struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe); if (drm_WARN_ON(dev, !crtc)) return 0; if (crtc->funcs->get_vblank_counter) return crtc->funcs->get_vblank_counter(crtc); } return drm_vblank_no_hw_counter(dev, pipe); } /* * Reset the stored timestamp for the current vblank count to correspond * to the last vblank occurred. * * Only to be called from drm_crtc_vblank_on(). 
* * Note: caller must hold &drm_device.vbl_lock since this reads & writes * device vblank fields. */ static void drm_reset_vblank_timestamp(struct drm_device *dev, unsigned int pipe) { u32 cur_vblank; bool rc; ktime_t t_vblank; int count = DRM_TIMESTAMP_MAXRETRIES; spin_lock(&dev->vblank_time_lock); /* * sample the current counter to avoid random jumps * when drm_vblank_enable() applies the diff */ do { cur_vblank = __get_vblank_counter(dev, pipe); rc = drm_get_last_vbltimestamp(dev, pipe, &t_vblank, false); } while (cur_vblank != __get_vblank_counter(dev, pipe) && --count > 0); /* * Only reinitialize corresponding vblank timestamp if high-precision query * available and didn't fail. Otherwise reinitialize delayed at next vblank * interrupt and assign 0 for now, to mark the vblanktimestamp as invalid. */ if (!rc) t_vblank = 0; /* * +1 to make sure user will never see the same * vblank counter value before and after a modeset */ store_vblank(dev, pipe, 1, t_vblank, cur_vblank); spin_unlock(&dev->vblank_time_lock); } /* * Call back into the driver to update the appropriate vblank counter * (specified by @pipe). Deal with wraparound, if it occurred, and * update the last read value so we can deal with wraparound on the next * call if necessary. * * Only necessary when going from off->on, to account for frames we * didn't get an interrupt for. * * Note: caller must hold &drm_device.vbl_lock since this reads & writes * device vblank fields. */ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe, bool in_vblank_irq) { struct drm_vblank_crtc *vblank = drm_vblank_crtc(dev, pipe); u32 cur_vblank, diff; bool rc; ktime_t t_vblank; int count = DRM_TIMESTAMP_MAXRETRIES; int framedur_ns = vblank->framedur_ns; u32 max_vblank_count = drm_max_vblank_count(dev, pipe); /* * Interrupts were disabled prior to this call, so deal with counter * wrap if needed. * NOTE! It's possible we lost a full dev->max_vblank_count + 1 events * here if the register is small or we had vblank interrupts off for * a long time. * * We repeat the hardware vblank counter & timestamp query until * we get consistent results. This to prevent races between gpu * updating its hardware counter while we are retrieving the * corresponding vblank timestamp. */ do { cur_vblank = __get_vblank_counter(dev, pipe); rc = drm_get_last_vbltimestamp(dev, pipe, &t_vblank, in_vblank_irq); } while (cur_vblank != __get_vblank_counter(dev, pipe) && --count > 0); if (max_vblank_count) { /* trust the hw counter when it's around */ diff = (cur_vblank - vblank->last) & max_vblank_count; } else if (rc && framedur_ns) { u64 diff_ns = ktime_to_ns(ktime_sub(t_vblank, vblank->time)); /* * Figure out how many vblanks we've missed based * on the difference in the timestamps and the * frame/field duration. */ drm_dbg_vbl(dev, "crtc %u: Calculating number of vblanks." " diff_ns = %lld, framedur_ns = %d)\n", pipe, (long long)diff_ns, framedur_ns); diff = DIV_ROUND_CLOSEST_ULL(diff_ns, framedur_ns); if (diff == 0 && in_vblank_irq) drm_dbg_vbl(dev, "crtc %u: Redundant vblirq ignored\n", pipe); } else { /* some kind of default for drivers w/o accurate vbl timestamping */ diff = in_vblank_irq ? 1 : 0; } /* * Within a drm_vblank_pre_modeset - drm_vblank_post_modeset * interval? If so then vblank irqs keep running and it will likely * happen that the hardware vblank counter is not trustworthy as it * might reset at some point in that interval and vblank timestamps * are not trustworthy either in that interval. Iow. 
this can result * in a bogus diff >> 1 which must be avoided as it would cause * random large forward jumps of the software vblank counter. */ if (diff > 1 && (vblank->inmodeset & 0x2)) { drm_dbg_vbl(dev, "clamping vblank bump to 1 on crtc %u: diffr=%u" " due to pre-modeset.\n", pipe, diff); diff = 1; } drm_dbg_vbl(dev, "updating vblank count on crtc %u:" " current=%llu, diff=%u, hw=%u hw_last=%u\n", pipe, (unsigned long long)atomic64_read(&vblank->count), diff, cur_vblank, vblank->last); if (diff == 0) { drm_WARN_ON_ONCE(dev, cur_vblank != vblank->last); return; } /* * Only reinitialize corresponding vblank timestamp if high-precision query * available and didn't fail, or we were called from the vblank interrupt. * Otherwise reinitialize delayed at next vblank interrupt and assign 0 * for now, to mark the vblanktimestamp as invalid. */ if (!rc && !in_vblank_irq) t_vblank = 0; store_vblank(dev, pipe, diff, t_vblank, cur_vblank); } u64 drm_vblank_count(struct drm_device *dev, unsigned int pipe) { struct drm_vblank_crtc *vblank = drm_vblank_crtc(dev, pipe); u64 count; if (drm_WARN_ON(dev, pipe >= dev->num_crtcs)) return 0; count = atomic64_read(&vblank->count); /* * This read barrier corresponds to the implicit write barrier of the * write seqlock in store_vblank(). Note that this is the only place * where we need an explicit barrier, since all other access goes * through drm_vblank_count_and_time(), which already has the required * read barrier curtesy of the read seqlock. */ smp_rmb(); return count; } /** * drm_crtc_accurate_vblank_count - retrieve the master vblank counter * @crtc: which counter to retrieve * * This function is similar to drm_crtc_vblank_count() but this function * interpolates to handle a race with vblank interrupts using the high precision * timestamping support. * * This is mostly useful for hardware that can obtain the scanout position, but * doesn't have a hardware frame counter. */ u64 drm_crtc_accurate_vblank_count(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; unsigned int pipe = drm_crtc_index(crtc); u64 vblank; unsigned long flags; drm_WARN_ONCE(dev, drm_debug_enabled(DRM_UT_VBL) && !crtc->funcs->get_vblank_timestamp, "This function requires support for accurate vblank timestamps."); spin_lock_irqsave(&dev->vblank_time_lock, flags); drm_update_vblank_count(dev, pipe, false); vblank = drm_vblank_count(dev, pipe); spin_unlock_irqrestore(&dev->vblank_time_lock, flags); return vblank; } EXPORT_SYMBOL(drm_crtc_accurate_vblank_count); static void __disable_vblank(struct drm_device *dev, unsigned int pipe) { if (drm_core_check_feature(dev, DRIVER_MODESET)) { struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe); if (drm_WARN_ON(dev, !crtc)) return; if (crtc->funcs->disable_vblank) crtc->funcs->disable_vblank(crtc); } } /* * Disable vblank irq's on crtc, make sure that last vblank count * of hardware and corresponding consistent software vblank counter * are preserved, even if there are any spurious vblank irq's after * disable. */ void drm_vblank_disable_and_save(struct drm_device *dev, unsigned int pipe) { struct drm_vblank_crtc *vblank = drm_vblank_crtc(dev, pipe); unsigned long irqflags; assert_spin_locked(&dev->vbl_lock); /* Prevent vblank irq processing while disabling vblank irqs, * so no updates of timestamps or count can happen after we've * disabled. Needed to prevent races in case of delayed irq's. 
*/ spin_lock_irqsave(&dev->vblank_time_lock, irqflags); /* * Update vblank count and disable vblank interrupts only if the * interrupts were enabled. This avoids calling the ->disable_vblank() * operation in atomic context with the hardware potentially runtime * suspended. */ if (!vblank->enabled) goto out; /* * Update the count and timestamp to maintain the * appearance that the counter has been ticking all along until * this time. This makes the count account for the entire time * between drm_crtc_vblank_on() and drm_crtc_vblank_off(). */ drm_update_vblank_count(dev, pipe, false); __disable_vblank(dev, pipe); vblank->enabled = false; out: spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags); } static void vblank_disable_fn(struct timer_list *t) { struct drm_vblank_crtc *vblank = timer_container_of(vblank, t, disable_timer); struct drm_device *dev = vblank->dev; unsigned int pipe = vblank->pipe; unsigned long irqflags; spin_lock_irqsave(&dev->vbl_lock, irqflags); if (atomic_read(&vblank->refcount) == 0 && vblank->enabled) { drm_dbg_core(dev, "disabling vblank on crtc %u\n", pipe); drm_vblank_disable_and_save(dev, pipe); } spin_unlock_irqrestore(&dev->vbl_lock, irqflags); } static void drm_vblank_init_release(struct drm_device *dev, void *ptr) { struct drm_vblank_crtc *vblank = ptr; drm_WARN_ON(dev, READ_ONCE(vblank->enabled) && drm_core_check_feature(dev, DRIVER_MODESET)); if (vblank->vblank_timer.crtc) hrtimer_cancel(&vblank->vblank_timer.timer); drm_vblank_destroy_worker(vblank); timer_delete_sync(&vblank->disable_timer); } /** * drm_vblank_init - initialize vblank support * @dev: DRM device * @num_crtcs: number of CRTCs supported by @dev * * This function initializes vblank support for @num_crtcs display pipelines. * Cleanup is handled automatically through a cleanup function added with * drmm_add_action_or_reset(). * * Returns: * Zero on success or a negative error code on failure. */ int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs) { int ret; unsigned int i; spin_lock_init(&dev->vbl_lock); spin_lock_init(&dev->vblank_time_lock); dev->vblank = drmm_kcalloc(dev, num_crtcs, sizeof(*dev->vblank), GFP_KERNEL); if (!dev->vblank) return -ENOMEM; dev->num_crtcs = num_crtcs; for (i = 0; i < num_crtcs; i++) { struct drm_vblank_crtc *vblank = &dev->vblank[i]; vblank->dev = dev; vblank->pipe = i; init_waitqueue_head(&vblank->queue); timer_setup(&vblank->disable_timer, vblank_disable_fn, 0); seqlock_init(&vblank->seqlock); ret = drmm_add_action_or_reset(dev, drm_vblank_init_release, vblank); if (ret) return ret; ret = drm_vblank_worker_init(vblank); if (ret) return ret; } return 0; } EXPORT_SYMBOL(drm_vblank_init); /** * drm_dev_has_vblank - test if vblanking has been initialized for * a device * @dev: the device * * Drivers may call this function to test if vblank support is * initialized for a device. For most hardware this means that vblanking * can also be enabled. * * Atomic helpers use this function to initialize * &drm_crtc_state.no_vblank. See also drm_atomic_helper_check_modeset(). * * Returns: * True if vblanking has been initialized for the given device, false * otherwise. */ bool drm_dev_has_vblank(const struct drm_device *dev) { return dev->num_crtcs != 0; } EXPORT_SYMBOL(drm_dev_has_vblank); /** * drm_crtc_vblank_waitqueue - get vblank waitqueue for the CRTC * @crtc: which CRTC's vblank waitqueue to retrieve * * This function returns a pointer to the vblank waitqueue for the CRTC. 
* Drivers can use this to implement vblank waits using wait_event() and related * functions. */ wait_queue_head_t *drm_crtc_vblank_waitqueue(struct drm_crtc *crtc) { return &crtc->dev->vblank[drm_crtc_index(crtc)].queue; } EXPORT_SYMBOL(drm_crtc_vblank_waitqueue); /** * drm_calc_timestamping_constants - calculate vblank timestamp constants * @crtc: drm_crtc whose timestamp constants should be updated. * @mode: display mode containing the scanout timings * * Calculate and store various constants which are later needed by vblank and * swap-completion timestamping, e.g, by * drm_crtc_vblank_helper_get_vblank_timestamp(). They are derived from * CRTC's true scanout timing, so they take things like panel scaling or * other adjustments into account. */ void drm_calc_timestamping_constants(struct drm_crtc *crtc, const struct drm_display_mode *mode) { struct drm_device *dev = crtc->dev; unsigned int pipe = drm_crtc_index(crtc); struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc); int linedur_ns = 0, framedur_ns = 0; int dotclock = mode->crtc_clock; if (!drm_dev_has_vblank(dev)) return; if (drm_WARN_ON(dev, pipe >= dev->num_crtcs)) return; /* Valid dotclock? */ if (dotclock > 0) { int frame_size = mode->crtc_htotal * mode->crtc_vtotal; /* * Convert scanline length in pixels and video * dot clock to line duration and frame duration * in nanoseconds: */ linedur_ns = div_u64((u64) mode->crtc_htotal * 1000000, dotclock); framedur_ns = div_u64((u64) frame_size * 1000000, dotclock); /* * Fields of interlaced scanout modes are only half a frame duration. */ if (mode->flags & DRM_MODE_FLAG_INTERLACE) framedur_ns /= 2; } else { drm_err(dev, "crtc %u: Can't calculate constants, dotclock = 0!\n", crtc->base.id); } vblank->linedur_ns = linedur_ns; vblank->framedur_ns = framedur_ns; drm_mode_copy(&vblank->hwmode, mode); drm_dbg_core(dev, "crtc %u: hwmode: htotal %d, vtotal %d, vdisplay %d\n", crtc->base.id, mode->crtc_htotal, mode->crtc_vtotal, mode->crtc_vdisplay); drm_dbg_core(dev, "crtc %u: clock %d kHz framedur %d linedur %d\n", crtc->base.id, dotclock, framedur_ns, linedur_ns); } EXPORT_SYMBOL(drm_calc_timestamping_constants); /** * drm_crtc_vblank_helper_get_vblank_timestamp_internal - precise vblank * timestamp helper * @crtc: CRTC whose vblank timestamp to retrieve * @max_error: Desired maximum allowable error in timestamps (nanosecs) * On return contains true maximum error of timestamp * @vblank_time: Pointer to time which should receive the timestamp * @in_vblank_irq: * True when called from drm_crtc_handle_vblank(). Some drivers * need to apply some workarounds for gpu-specific vblank irq quirks * if flag is set. * @get_scanout_position: * Callback function to retrieve the scanout position. See * @struct drm_crtc_helper_funcs.get_scanout_position. * * Implements calculation of exact vblank timestamps from given drm_display_mode * timings and current video scanout position of a CRTC. * * The current implementation only handles standard video modes. For double scan * and interlaced modes the driver is supposed to adjust the hardware mode * (taken from &drm_crtc_state.adjusted mode for atomic modeset drivers) to * match the scanout position reported. * * Note that atomic drivers must call drm_calc_timestamping_constants() before * enabling a CRTC. The atomic helpers already take care of that in * drm_atomic_helper_calc_timestamping_constants(). * * Returns: * Returns true on success, and false on failure, i.e. when no accurate * timestamp could be acquired. 
*/ bool drm_crtc_vblank_helper_get_vblank_timestamp_internal( struct drm_crtc *crtc, int *max_error, ktime_t *vblank_time, bool in_vblank_irq, drm_vblank_get_scanout_position_func get_scanout_position) { struct drm_device *dev = crtc->dev; unsigned int pipe = crtc->index; struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; struct timespec64 ts_etime, ts_vblank_time; ktime_t stime, etime; bool vbl_status; const struct drm_display_mode *mode; int vpos, hpos, i; int delta_ns, duration_ns; if (pipe >= dev->num_crtcs) { drm_err(dev, "Invalid crtc %u\n", pipe); return false; } /* Scanout position query not supported? Should not happen. */ if (!get_scanout_position) { drm_err(dev, "Called from CRTC w/o get_scanout_position()!?\n"); return false; } if (drm_drv_uses_atomic_modeset(dev)) mode = &vblank->hwmode; else mode = &crtc->hwmode; /* If mode timing undefined, just return as no-op: * Happens during initial modesetting of a crtc. */ if (mode->crtc_clock == 0) { drm_dbg_core(dev, "crtc %u: Noop due to uninitialized mode.\n", pipe); drm_WARN_ON_ONCE(dev, drm_drv_uses_atomic_modeset(dev)); return false; } /* Get current scanout position with system timestamp. * Repeat query up to DRM_TIMESTAMP_MAXRETRIES times * if single query takes longer than max_error nanoseconds. * * This guarantees a tight bound on maximum error if * code gets preempted or delayed for some reason. */ for (i = 0; i < DRM_TIMESTAMP_MAXRETRIES; i++) { /* * Get vertical and horizontal scanout position vpos, hpos, * and bounding timestamps stime, etime, pre/post query. */ vbl_status = get_scanout_position(crtc, in_vblank_irq, &vpos, &hpos, &stime, &etime, mode); /* Return as no-op if scanout query unsupported or failed. */ if (!vbl_status) { drm_dbg_core(dev, "crtc %u : scanoutpos query failed.\n", pipe); return false; } /* Compute uncertainty in timestamp of scanout position query. */ duration_ns = ktime_to_ns(etime) - ktime_to_ns(stime); /* Accept result with < max_error nsecs timing uncertainty. */ if (duration_ns <= *max_error) break; } /* Noisy system timing? */ if (i == DRM_TIMESTAMP_MAXRETRIES) { drm_dbg_core(dev, "crtc %u: Noisy timestamp %d us > %d us [%d reps].\n", pipe, duration_ns / 1000, *max_error / 1000, i); } /* Return upper bound of timestamp precision error. */ *max_error = duration_ns; /* Convert scanout position into elapsed time at raw_time query * since start of scanout at first display scanline. delta_ns * can be negative if start of scanout hasn't happened yet. */ delta_ns = div_s64(1000000LL * (vpos * mode->crtc_htotal + hpos), mode->crtc_clock); /* Subtract time delta from raw timestamp to get final * vblank_time timestamp for end of vblank. */ *vblank_time = ktime_sub_ns(etime, delta_ns); if (!drm_debug_enabled(DRM_UT_VBL)) return true; ts_etime = ktime_to_timespec64(etime); ts_vblank_time = ktime_to_timespec64(*vblank_time); drm_dbg_vbl(dev, "crtc %u : v p(%d,%d)@ %ptSp -> %ptSp [e %d us, %d rep]\n", pipe, hpos, vpos, &ts_etime, &ts_vblank_time, duration_ns / 1000, i); return true; } EXPORT_SYMBOL(drm_crtc_vblank_helper_get_vblank_timestamp_internal); /** * drm_crtc_vblank_helper_get_vblank_timestamp - precise vblank timestamp * helper * @crtc: CRTC whose vblank timestamp to retrieve * @max_error: Desired maximum allowable error in timestamps (nanosecs) * On return contains true maximum error of timestamp * @vblank_time: Pointer to time which should receive the timestamp * @in_vblank_irq: * True when called from drm_crtc_handle_vblank(). 
Some drivers * need to apply some workarounds for gpu-specific vblank irq quirks * if flag is set. * * Implements calculation of exact vblank timestamps from given drm_display_mode * timings and current video scanout position of a CRTC. This can be directly * used as the &drm_crtc_funcs.get_vblank_timestamp implementation of a kms * driver if &drm_crtc_helper_funcs.get_scanout_position is implemented. * * The current implementation only handles standard video modes. For double scan * and interlaced modes the driver is supposed to adjust the hardware mode * (taken from &drm_crtc_state.adjusted mode for atomic modeset drivers) to * match the scanout position reported. * * Note that atomic drivers must call drm_calc_timestamping_constants() before * enabling a CRTC. The atomic helpers already take care of that in * drm_atomic_helper_calc_timestamping_constants(). * * Returns: * Returns true on success, and false on failure, i.e. when no accurate * timestamp could be acquired. */ bool drm_crtc_vblank_helper_get_vblank_timestamp(struct drm_crtc *crtc, int *max_error, ktime_t *vblank_time, bool in_vblank_irq) { return drm_crtc_vblank_helper_get_vblank_timestamp_internal( crtc, max_error, vblank_time, in_vblank_irq, crtc->helper_private->get_scanout_position); } EXPORT_SYMBOL(drm_crtc_vblank_helper_get_vblank_timestamp); /** * drm_crtc_get_last_vbltimestamp - retrieve raw timestamp for the most * recent vblank interval * @crtc: CRTC whose vblank timestamp to retrieve * @tvblank: Pointer to target time which should receive the timestamp * @in_vblank_irq: * True when called from drm_crtc_handle_vblank(). Some drivers * need to apply some workarounds for gpu-specific vblank irq quirks * if flag is set. * * Fetches the system timestamp corresponding to the time of the most recent * vblank interval on specified CRTC. May call into kms-driver to * compute the timestamp with a high-precision GPU specific method. * * Returns zero if timestamp originates from uncorrected do_gettimeofday() * call, i.e., it isn't very precisely locked to the true vblank. * * Returns: * True if timestamp is considered to be very precise, false otherwise. */ static bool drm_crtc_get_last_vbltimestamp(struct drm_crtc *crtc, ktime_t *tvblank, bool in_vblank_irq) { bool ret = false; /* Define requested maximum error on timestamps (nanoseconds). */ int max_error = (int) drm_timestamp_precision * 1000; /* Query driver if possible and precision timestamping enabled. */ if (crtc && crtc->funcs->get_vblank_timestamp && max_error > 0) { ret = crtc->funcs->get_vblank_timestamp(crtc, &max_error, tvblank, in_vblank_irq); } /* GPU high precision timestamp query unsupported or failed. * Return current monotonic/gettimeofday timestamp as best estimate. */ if (!ret) *tvblank = ktime_get(); return ret; } static bool drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe, ktime_t *tvblank, bool in_vblank_irq) { struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe); return drm_crtc_get_last_vbltimestamp(crtc, tvblank, in_vblank_irq); } /** * drm_crtc_vblank_count - retrieve "cooked" vblank counter value * @crtc: which counter to retrieve * * Fetches the "cooked" vblank count value that represents the number of * vblank events since the system was booted, including lost events due to * modesetting activity. Note that this timer isn't correct against a racing * vblank interrupt (since it only reports the software vblank counter), see * drm_crtc_accurate_vblank_count() for such use-cases. 
* * Note that for a given vblank counter value drm_crtc_handle_vblank() * and drm_crtc_vblank_count() or drm_crtc_vblank_count_and_time() * provide a barrier: Any writes done before calling * drm_crtc_handle_vblank() will be visible to callers of the later * functions, if the vblank count is the same or a later one. * * See also &drm_vblank_crtc.count. * * Returns: * The software vblank counter. */ u64 drm_crtc_vblank_count(struct drm_crtc *crtc) { return drm_vblank_count(crtc->dev, drm_crtc_index(crtc)); } EXPORT_SYMBOL(drm_crtc_vblank_count); /** * drm_vblank_count_and_time - retrieve "cooked" vblank counter value and the * system timestamp corresponding to that vblank counter value. * @dev: DRM device * @pipe: index of CRTC whose counter to retrieve * @vblanktime: Pointer to ktime_t to receive the vblank timestamp. * * Fetches the "cooked" vblank count value that represents the number of * vblank events since the system was booted, including lost events due to * modesetting activity. Returns corresponding system timestamp of the time * of the vblank interval that corresponds to the current vblank counter value. * * This is the legacy version of drm_crtc_vblank_count_and_time(). */ static u64 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe, ktime_t *vblanktime) { struct drm_vblank_crtc *vblank = drm_vblank_crtc(dev, pipe); u64 vblank_count; unsigned int seq; if (drm_WARN_ON(dev, pipe >= dev->num_crtcs)) { *vblanktime = 0; return 0; } do { seq = read_seqbegin(&vblank->seqlock); vblank_count = atomic64_read(&vblank->count); *vblanktime = vblank->time; } while (read_seqretry(&vblank->seqlock, seq)); return vblank_count; } /** * drm_crtc_vblank_count_and_time - retrieve "cooked" vblank counter value * and the system timestamp corresponding to that vblank counter value * @crtc: which counter to retrieve * @vblanktime: Pointer to time to receive the vblank timestamp. * * Fetches the "cooked" vblank count value that represents the number of * vblank events since the system was booted, including lost events due to * modesetting activity. Returns corresponding system timestamp of the time * of the vblank interval that corresponds to the current vblank counter value. * * Note that for a given vblank counter value drm_crtc_handle_vblank() * and drm_crtc_vblank_count() or drm_crtc_vblank_count_and_time() * provide a barrier: Any writes done before calling * drm_crtc_handle_vblank() will be visible to callers of the later * functions, if the vblank count is the same or a later one. * * See also &drm_vblank_crtc.count. */ u64 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc, ktime_t *vblanktime) { return drm_vblank_count_and_time(crtc->dev, drm_crtc_index(crtc), vblanktime); } EXPORT_SYMBOL(drm_crtc_vblank_count_and_time); /** * drm_crtc_next_vblank_start - calculate the time of the next vblank * @crtc: the crtc for which to calculate next vblank time * @vblanktime: pointer to time to receive the next vblank timestamp. 
 *
 * Calculate the expected time of the start of the next vblank period,
 * based on time of previous vblank and frame duration.
 */
int drm_crtc_next_vblank_start(struct drm_crtc *crtc, ktime_t *vblanktime)
{
	struct drm_vblank_crtc *vblank;
	struct drm_display_mode *mode;
	u64 vblank_start;

	if (!drm_dev_has_vblank(crtc->dev))
		return -EINVAL;

	vblank = drm_crtc_vblank_crtc(crtc);
	mode = &vblank->hwmode;

	if (!vblank->framedur_ns || !vblank->linedur_ns)
		return -EINVAL;

	if (!drm_crtc_get_last_vbltimestamp(crtc, vblanktime, false))
		return -EINVAL;

	vblank_start = DIV_ROUND_DOWN_ULL(
		(u64)vblank->framedur_ns * mode->crtc_vblank_start,
		mode->crtc_vtotal);
	*vblanktime = ktime_add(*vblanktime, ns_to_ktime(vblank_start));

	return 0;
}
EXPORT_SYMBOL(drm_crtc_next_vblank_start);

static void send_vblank_event(struct drm_device *dev,
		struct drm_pending_vblank_event *e,
		u64 seq, ktime_t now)
{
	struct timespec64 tv;

	switch (e->event.base.type) {
	case DRM_EVENT_VBLANK:
	case DRM_EVENT_FLIP_COMPLETE:
		tv = ktime_to_timespec64(now);
		e->event.vbl.sequence = seq;
		/*
		 * e->event is a user space structure, with hardcoded unsigned
		 * 32-bit seconds/microseconds. This is safe as we always use
		 * monotonic timestamps since linux-4.15
		 */
		e->event.vbl.tv_sec = tv.tv_sec;
		e->event.vbl.tv_usec = tv.tv_nsec / 1000;
		break;
	case DRM_EVENT_CRTC_SEQUENCE:
		if (seq)
			e->event.seq.sequence = seq;
		e->event.seq.time_ns = ktime_to_ns(now);
		break;
	}
	trace_drm_vblank_event_delivered(e->base.file_priv, e->pipe, seq);
	/*
	 * Use the same timestamp for any associated fence signal to avoid
	 * mismatch in timestamps for vsync & fence events triggered by the
	 * same HW event. Frameworks like SurfaceFlinger in Android expect the
	 * retire-fence timestamp to match exactly with HW vsync as it uses it
	 * for its software vsync modeling.
	 */
	drm_send_event_timestamp_locked(dev, &e->base, now);
}

/**
 * drm_crtc_arm_vblank_event - arm vblank event after pageflip
 * @crtc: the source CRTC of the vblank event
 * @e: the event to send
 *
 * A lot of drivers need to generate vblank events for the very next vblank
 * interrupt. For example when the page flip interrupt happens when the page
 * flip gets armed, but not when it actually executes within the next vblank
 * period. This helper function implements exactly the required vblank arming
 * behaviour.
 *
 * NOTE: Drivers using this to send out the &drm_crtc_state.event as part of an
 * atomic commit must ensure that the next vblank happens at exactly the same
 * time as the atomic commit is committed to the hardware. This function itself
 * does **not** protect against the next vblank interrupt racing with either this
 * function call or the atomic commit operation. A possible sequence could be:
 *
 * 1. Driver commits new hardware state into vblank-synchronized registers.
 * 2. A vblank happens, committing the hardware state. Also the corresponding
 *    vblank interrupt is fired off and fully processed by the interrupt
 *    handler.
 * 3. The atomic commit operation proceeds to call drm_crtc_arm_vblank_event().
 * 4. The event is only sent out for the next vblank, which is wrong.
 *
 * An equivalent race can happen when the driver calls
 * drm_crtc_arm_vblank_event() before writing out the new hardware state.
 *
 * The only way to make this work safely is to prevent the vblank from firing
 * (and the hardware from committing anything else) until the entire atomic
 * commit sequence has run to completion. If the hardware does not have such a
 * feature (e.g. using a "go" bit), then it is unsafe to use this function.
 * Instead drivers need to manually send out the event from their interrupt
 * handler by calling drm_crtc_send_vblank_event() and make sure that there's no
 * possible race with the hardware committing the atomic update.
 *
 * Caller must hold a vblank reference for the event @e acquired by a
 * drm_crtc_vblank_get(), which will be dropped when the next vblank arrives.
 */
void drm_crtc_arm_vblank_event(struct drm_crtc *crtc,
			       struct drm_pending_vblank_event *e)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = drm_crtc_index(crtc);

	assert_spin_locked(&dev->event_lock);

	e->pipe = pipe;
	e->sequence = drm_crtc_accurate_vblank_count(crtc) + 1;
	list_add_tail(&e->base.link, &dev->vblank_event_list);
}
EXPORT_SYMBOL(drm_crtc_arm_vblank_event);

/**
 * drm_crtc_send_vblank_event - helper to send vblank event after pageflip
 * @crtc: the source CRTC of the vblank event
 * @e: the event to send
 *
 * Updates sequence # and timestamp on event for the most recently processed
 * vblank, and sends it to userspace. Caller must hold event lock.
 *
 * See drm_crtc_arm_vblank_event() for a helper which can be used in certain
 * situations, especially to send out events for atomic commit operations.
 */
void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
				struct drm_pending_vblank_event *e)
{
	struct drm_device *dev = crtc->dev;
	u64 seq;
	unsigned int pipe = drm_crtc_index(crtc);
	ktime_t now;

	if (drm_dev_has_vblank(dev)) {
		seq = drm_vblank_count_and_time(dev, pipe, &now);
	} else {
		seq = 0;
		now = ktime_get();
	}
	e->pipe = pipe;
	send_vblank_event(dev, e, seq, now);
}
EXPORT_SYMBOL(drm_crtc_send_vblank_event);

static int __enable_vblank(struct drm_device *dev, unsigned int pipe)
{
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe);

		if (drm_WARN_ON(dev, !crtc))
			return 0;

		if (crtc->funcs->enable_vblank)
			return crtc->funcs->enable_vblank(crtc);
	}

	return -EINVAL;
}

static int drm_vblank_enable(struct drm_device *dev, unsigned int pipe)
{
	struct drm_vblank_crtc *vblank = drm_vblank_crtc(dev, pipe);
	int ret = 0;

	assert_spin_locked(&dev->vbl_lock);

	spin_lock(&dev->vblank_time_lock);

	if (!vblank->enabled) {
		/*
		 * Enable vblank irqs under vblank_time_lock protection.
		 * All vblank count & timestamp updates are held off
		 * until we are done reinitializing master counter and
		 * timestamps. Filtercode in drm_handle_vblank() will
		 * prevent double-accounting of same vblank interval.
		 */
		ret = __enable_vblank(dev, pipe);
		drm_dbg_core(dev, "enabling vblank on crtc %u, ret: %d\n",
			     pipe, ret);

		if (ret) {
			atomic_dec(&vblank->refcount);
		} else {
			drm_update_vblank_count(dev, pipe, 0);
			/* drm_update_vblank_count() includes a wmb so we just
			 * need to ensure that the compiler emits the write
			 * to mark the vblank as enabled after the call
			 * to drm_update_vblank_count().
*/ WRITE_ONCE(vblank->enabled, true); } } spin_unlock(&dev->vblank_time_lock); return ret; } int drm_vblank_get(struct drm_device *dev, unsigned int pipe) { struct drm_vblank_crtc *vblank = drm_vblank_crtc(dev, pipe); unsigned long irqflags; int ret = 0; if (!drm_dev_has_vblank(dev)) return -EINVAL; if (drm_WARN_ON(dev, pipe >= dev->num_crtcs)) return -EINVAL; spin_lock_irqsave(&dev->vbl_lock, irqflags); /* Going from 0->1 means we have to enable interrupts again */ if (atomic_add_return(1, &vblank->refcount) == 1) { ret = drm_vblank_enable(dev, pipe); } else { if (!vblank->enabled) { atomic_dec(&vblank->refcount); ret = -EINVAL; } } spin_unlock_irqrestore(&dev->vbl_lock, irqflags); return ret; } /** * drm_crtc_vblank_get - get a reference count on vblank events * @crtc: which CRTC to own * * Acquire a reference count on vblank events to avoid having them disabled * while in use. * * Returns: * Zero on success or a negative error code on failure. */ int drm_crtc_vblank_get(struct drm_crtc *crtc) { return drm_vblank_get(crtc->dev, drm_crtc_index(crtc)); } EXPORT_SYMBOL(drm_crtc_vblank_get); void drm_vblank_put(struct drm_device *dev, unsigned int pipe) { struct drm_vblank_crtc *vblank = drm_vblank_crtc(dev, pipe); int vblank_offdelay = vblank->config.offdelay_ms; if (drm_WARN_ON(dev, pipe >= dev->num_crtcs)) return; if (drm_WARN_ON(dev, atomic_read(&vblank->refcount) == 0)) return; /* Last user schedules interrupt disable */ if (atomic_dec_and_test(&vblank->refcount)) { if (!vblank_offdelay) return; else if (vblank_offdelay < 0) vblank_disable_fn(&vblank->disable_timer); else if (!vblank->config.disable_immediate) mod_timer(&vblank->disable_timer, jiffies + ((vblank_offdelay * HZ) / 1000)); } } /** * drm_crtc_vblank_put - give up ownership of vblank events * @crtc: which counter to give up * * Release ownership of a given vblank counter, turning off interrupts * if possible. Disable interrupts after &drm_vblank_crtc_config.offdelay_ms * milliseconds. */ void drm_crtc_vblank_put(struct drm_crtc *crtc) { drm_vblank_put(crtc->dev, drm_crtc_index(crtc)); } EXPORT_SYMBOL(drm_crtc_vblank_put); /** * drm_wait_one_vblank - wait for one vblank * @dev: DRM device * @pipe: CRTC index * * This waits for one vblank to pass on @pipe, using the irq driver interfaces. * It is a failure to call this when the vblank irq for @pipe is disabled, e.g. * due to lack of driver support or because the crtc is off. * * This is the legacy version of drm_crtc_wait_one_vblank(). */ void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe) { struct drm_vblank_crtc *vblank = drm_vblank_crtc(dev, pipe); int ret; u64 last; if (drm_WARN_ON(dev, pipe >= dev->num_crtcs)) return; ret = drm_vblank_get(dev, pipe); if (drm_WARN(dev, ret, "vblank not available on crtc %i, ret=%i\n", pipe, ret)) return; last = drm_vblank_count(dev, pipe); ret = wait_event_timeout(vblank->queue, last != drm_vblank_count(dev, pipe), msecs_to_jiffies(1000)); drm_WARN(dev, ret == 0, "vblank wait timed out on crtc %i\n", pipe); drm_vblank_put(dev, pipe); } EXPORT_SYMBOL(drm_wait_one_vblank); /** * drm_crtc_wait_one_vblank - wait for one vblank * @crtc: DRM crtc * * This waits for one vblank to pass on @crtc, using the irq driver interfaces. * It is a failure to call this when the vblank irq for @crtc is disabled, e.g. * due to lack of driver support or because the crtc is off. 
*/ void drm_crtc_wait_one_vblank(struct drm_crtc *crtc) { drm_wait_one_vblank(crtc->dev, drm_crtc_index(crtc)); } EXPORT_SYMBOL(drm_crtc_wait_one_vblank); /** * drm_crtc_vblank_off - disable vblank events on a CRTC * @crtc: CRTC in question * * Drivers can use this function to shut down the vblank interrupt handling when * disabling a crtc. This function ensures that the latest vblank frame count is * stored so that drm_vblank_on can restore it again. * * Drivers must use this function when the hardware vblank counter can get * reset, e.g. when suspending or disabling the @crtc in general. */ void drm_crtc_vblank_off(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; unsigned int pipe = drm_crtc_index(crtc); struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc); struct drm_pending_vblank_event *e, *t; ktime_t now; u64 seq; if (drm_WARN_ON(dev, pipe >= dev->num_crtcs)) return; /* * Grab event_lock early to prevent vblank work from being scheduled * while we're in the middle of shutting down vblank interrupts */ spin_lock_irq(&dev->event_lock); spin_lock(&dev->vbl_lock); drm_dbg_vbl(dev, "crtc %d, vblank enabled %d, inmodeset %d\n", pipe, vblank->enabled, vblank->inmodeset); /* Avoid redundant vblank disables without previous * drm_crtc_vblank_on(). */ if (drm_core_check_feature(dev, DRIVER_ATOMIC) || !vblank->inmodeset) drm_vblank_disable_and_save(dev, pipe); wake_up(&vblank->queue); /* * Prevent subsequent drm_vblank_get() from re-enabling * the vblank interrupt by bumping the refcount. */ if (!vblank->inmodeset) { atomic_inc(&vblank->refcount); vblank->inmodeset = 1; } spin_unlock(&dev->vbl_lock); /* Send any queued vblank events, lest the natives grow disquiet */ seq = drm_vblank_count_and_time(dev, pipe, &now); list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) { if (e->pipe != pipe) continue; drm_dbg_core(dev, "Sending premature vblank event on disable: " "wanted %llu, current %llu\n", e->sequence, seq); list_del(&e->base.link); drm_vblank_put(dev, pipe); send_vblank_event(dev, e, seq, now); } /* Cancel any leftover pending vblank work */ drm_vblank_cancel_pending_works(vblank); spin_unlock_irq(&dev->event_lock); /* Will be reset by the modeset helpers when re-enabling the crtc by * calling drm_calc_timestamping_constants(). */ vblank->hwmode.crtc_clock = 0; /* Wait for any vblank work that's still executing to finish */ drm_vblank_flush_worker(vblank); } EXPORT_SYMBOL(drm_crtc_vblank_off); /** * drm_crtc_vblank_reset - reset vblank state to off on a CRTC * @crtc: CRTC in question * * Drivers can use this function to reset the vblank state to off at load time. * Drivers should use this together with the drm_crtc_vblank_off() and * drm_crtc_vblank_on() functions. The difference compared to * drm_crtc_vblank_off() is that this function doesn't save the vblank counter * and hence doesn't need to call any driver hooks. * * This is useful for recovering driver state e.g. on driver load, or on resume. */ void drm_crtc_vblank_reset(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc); spin_lock_irq(&dev->vbl_lock); /* * Prevent subsequent drm_vblank_get() from enabling the vblank * interrupt by bumping the refcount. 
 */
	if (!vblank->inmodeset) {
		atomic_inc(&vblank->refcount);
		vblank->inmodeset = 1;
	}
	spin_unlock_irq(&dev->vbl_lock);

	drm_WARN_ON(dev, !list_empty(&dev->vblank_event_list));
	drm_WARN_ON(dev, !list_empty(&vblank->pending_work));
}
EXPORT_SYMBOL(drm_crtc_vblank_reset);

/**
 * drm_crtc_set_max_vblank_count - configure the hw max vblank counter value
 * @crtc: CRTC in question
 * @max_vblank_count: max hardware vblank counter value
 *
 * Update the maximum hardware vblank counter value for @crtc
 * at runtime. Useful for hardware where the operation of the
 * hardware vblank counter depends on the currently active
 * display configuration.
 *
 * For example, if the hardware vblank counter does not work
 * when a specific connector is active the maximum can be set
 * to zero. And when that specific connector isn't active the
 * maximum can again be set to the appropriate non-zero value.
 *
 * If used, must be called before drm_crtc_vblank_on().
 */
void drm_crtc_set_max_vblank_count(struct drm_crtc *crtc,
				   u32 max_vblank_count)
{
	struct drm_device *dev = crtc->dev;
	struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);

	drm_WARN_ON(dev, dev->max_vblank_count);
	drm_WARN_ON(dev, !READ_ONCE(vblank->inmodeset));

	vblank->max_vblank_count = max_vblank_count;
}
EXPORT_SYMBOL(drm_crtc_set_max_vblank_count);

/**
 * drm_crtc_vblank_on_config - enable vblank events on a CRTC with custom
 *	configuration options
 * @crtc: CRTC in question
 * @config: Vblank configuration value
 *
 * See drm_crtc_vblank_on(). In addition, this function allows you to provide a
 * custom vblank configuration for a given CRTC.
 *
 * Note that @config is copied, the pointer does not need to stay valid beyond
 * this function call. For details of the parameters see
 * struct drm_vblank_crtc_config.
 */
void drm_crtc_vblank_on_config(struct drm_crtc *crtc,
			       const struct drm_vblank_crtc_config *config)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = drm_crtc_index(crtc);
	struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);

	if (drm_WARN_ON(dev, pipe >= dev->num_crtcs))
		return;

	spin_lock_irq(&dev->vbl_lock);
	drm_dbg_vbl(dev, "crtc %d, vblank enabled %d, inmodeset %d\n",
		    pipe, vblank->enabled, vblank->inmodeset);

	vblank->config = *config;

	/* Drop our private "prevent drm_vblank_get" refcount */
	if (vblank->inmodeset) {
		atomic_dec(&vblank->refcount);
		vblank->inmodeset = 0;
	}

	drm_reset_vblank_timestamp(dev, pipe);

	/*
	 * re-enable interrupts if there are users left, or the
	 * user wishes vblank interrupts to be enabled all the time.
	 */
	if (atomic_read(&vblank->refcount) != 0 || !vblank->config.offdelay_ms)
		drm_WARN_ON(dev, drm_vblank_enable(dev, pipe));

	spin_unlock_irq(&dev->vbl_lock);
}
EXPORT_SYMBOL(drm_crtc_vblank_on_config);

/**
 * drm_crtc_vblank_on - enable vblank events on a CRTC
 * @crtc: CRTC in question
 *
 * This function restores the vblank interrupt state captured with
 * drm_crtc_vblank_off() again and is generally called when enabling @crtc. Note
 * that calls to drm_crtc_vblank_on() and drm_crtc_vblank_off() can be
 * unbalanced and so can also be unconditionally called in driver load code to
 * reflect the current hardware state of the crtc.
 *
 * Note that unlike in drm_crtc_vblank_on_config(), default values are used.
 */
void drm_crtc_vblank_on(struct drm_crtc *crtc)
{
	const struct drm_vblank_crtc_config config = {
		.offdelay_ms = drm_vblank_offdelay,
		.disable_immediate = crtc->dev->vblank_disable_immediate
	};

	drm_crtc_vblank_on_config(crtc, &config);
}
EXPORT_SYMBOL(drm_crtc_vblank_on);

static void drm_vblank_restore(struct drm_device *dev, unsigned int pipe)
{
	ktime_t t_vblank;
	struct drm_vblank_crtc *vblank;
	int framedur_ns;
	u64 diff_ns;
	u32 cur_vblank, diff = 1;
	int count = DRM_TIMESTAMP_MAXRETRIES;
	u32 max_vblank_count = drm_max_vblank_count(dev, pipe);

	if (drm_WARN_ON(dev, pipe >= dev->num_crtcs))
		return;

	assert_spin_locked(&dev->vbl_lock);
	assert_spin_locked(&dev->vblank_time_lock);

	vblank = drm_vblank_crtc(dev, pipe);
	drm_WARN_ONCE(dev,
		      drm_debug_enabled(DRM_UT_VBL) && !vblank->framedur_ns,
		      "Cannot compute missed vblanks without frame duration\n");
	framedur_ns = vblank->framedur_ns;

	do {
		cur_vblank = __get_vblank_counter(dev, pipe);
		drm_get_last_vbltimestamp(dev, pipe, &t_vblank, false);
	} while (cur_vblank != __get_vblank_counter(dev, pipe) && --count > 0);

	diff_ns = ktime_to_ns(ktime_sub(t_vblank, vblank->time));
	if (framedur_ns)
		diff = DIV_ROUND_CLOSEST_ULL(diff_ns, framedur_ns);

	drm_dbg_vbl(dev,
		    "missed %d vblanks in %lld ns, frame duration=%d ns, hw_diff=%d\n",
		    diff, diff_ns, framedur_ns, cur_vblank - vblank->last);
	vblank->last = (cur_vblank - diff) & max_vblank_count;
}

/**
 * drm_crtc_vblank_restore - estimate missed vblanks and update vblank count.
 * @crtc: CRTC in question
 *
 * Power management features can cause frame counter resets between vblank
 * disable and enable. Drivers can use this function in their
 * &drm_crtc_funcs.enable_vblank implementation to estimate missed vblanks since
 * the last &drm_crtc_funcs.disable_vblank using timestamps and update the
 * vblank counter.
 *
 * Note that drivers must have race-free high-precision timestamping support,
 * i.e. &drm_crtc_funcs.get_vblank_timestamp must be hooked up and
 * &drm_vblank_crtc_config.disable_immediate must be set to indicate the
 * time-stamping functions are race-free against vblank hardware counter
 * increments.
 */
void drm_crtc_vblank_restore(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = drm_crtc_index(crtc);
	struct drm_vblank_crtc *vblank = drm_vblank_crtc(dev, pipe);

	drm_WARN_ON_ONCE(dev, !crtc->funcs->get_vblank_timestamp);
	drm_WARN_ON_ONCE(dev, vblank->inmodeset);
	drm_WARN_ON_ONCE(dev, !vblank->config.disable_immediate);

	drm_vblank_restore(dev, pipe);
}
EXPORT_SYMBOL(drm_crtc_vblank_restore);

static int drm_queue_vblank_event(struct drm_device *dev, unsigned int pipe,
				  u64 req_seq,
				  union drm_wait_vblank *vblwait,
				  struct drm_file *file_priv)
{
	struct drm_vblank_crtc *vblank = drm_vblank_crtc(dev, pipe);
	struct drm_pending_vblank_event *e;
	ktime_t now;
	u64 seq;
	int ret;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (e == NULL) {
		ret = -ENOMEM;
		goto err_put;
	}

	e->pipe = pipe;
	e->event.base.type = DRM_EVENT_VBLANK;
	e->event.base.length = sizeof(e->event.vbl);
	e->event.vbl.user_data = vblwait->request.signal;
	e->event.vbl.crtc_id = 0;
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe);

		if (crtc)
			e->event.vbl.crtc_id = crtc->base.id;
	}

	spin_lock_irq(&dev->event_lock);

	/*
	 * drm_crtc_vblank_off() might have been called after we called
	 * drm_vblank_get(). drm_crtc_vblank_off() holds event_lock around the
	 * vblank disable, so no need for further locking. The reference from
	 * drm_vblank_get() protects against vblank disable from another source.
*/ if (!READ_ONCE(vblank->enabled)) { ret = -EINVAL; goto err_unlock; } ret = drm_event_reserve_init_locked(dev, file_priv, &e->base, &e->event.base); if (ret) goto err_unlock; seq = drm_vblank_count_and_time(dev, pipe, &now); drm_dbg_core(dev, "event on vblank count %llu, current %llu, crtc %u\n", req_seq, seq, pipe); trace_drm_vblank_event_queued(file_priv, pipe, req_seq); e->sequence = req_seq; if (drm_vblank_passed(seq, req_seq)) { drm_vblank_put(dev, pipe); send_vblank_event(dev, e, seq, now); vblwait->reply.sequence = seq; } else { /* drm_handle_vblank_events will call drm_vblank_put */ list_add_tail(&e->base.link, &dev->vblank_event_list); vblwait->reply.sequence = req_seq; } spin_unlock_irq(&dev->event_lock); return 0; err_unlock: spin_unlock_irq(&dev->event_lock); kfree(e); err_put: drm_vblank_put(dev, pipe); return ret; } static bool drm_wait_vblank_is_query(union drm_wait_vblank *vblwait) { if (vblwait->request.sequence) return false; return _DRM_VBLANK_RELATIVE == (vblwait->request.type & (_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_EVENT | _DRM_VBLANK_NEXTONMISS)); } /* * Widen a 32-bit param to 64-bits. * * \param narrow 32-bit value (missing upper 32 bits) * \param near 64-bit value that should be 'close' to near * * This function returns a 64-bit value using the lower 32-bits from * 'narrow' and constructing the upper 32-bits so that the result is * as close as possible to 'near'. */ static u64 widen_32_to_64(u32 narrow, u64 near) { return near + (s32) (narrow - near); } static void drm_wait_vblank_reply(struct drm_device *dev, unsigned int pipe, struct drm_wait_vblank_reply *reply) { ktime_t now; struct timespec64 ts; /* * drm_wait_vblank_reply is a UAPI structure that uses 'long' * to store the seconds. This is safe as we always use monotonic * timestamps since linux-4.15. */ reply->sequence = drm_vblank_count_and_time(dev, pipe, &now); ts = ktime_to_timespec64(now); reply->tval_sec = (u32)ts.tv_sec; reply->tval_usec = ts.tv_nsec / 1000; } static bool drm_wait_vblank_supported(struct drm_device *dev) { return drm_dev_has_vblank(dev); } int drm_wait_vblank_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_crtc *crtc; struct drm_vblank_crtc *vblank; union drm_wait_vblank *vblwait = data; int ret; u64 req_seq, seq; unsigned int pipe_index; unsigned int flags, pipe, high_pipe; if (!drm_wait_vblank_supported(dev)) return -EOPNOTSUPP; if (vblwait->request.type & _DRM_VBLANK_SIGNAL) return -EINVAL; if (vblwait->request.type & ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK | _DRM_VBLANK_HIGH_CRTC_MASK)) { drm_dbg_core(dev, "Unsupported type value 0x%x, supported mask 0x%x\n", vblwait->request.type, (_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK | _DRM_VBLANK_HIGH_CRTC_MASK)); return -EINVAL; } flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK; high_pipe = (vblwait->request.type & _DRM_VBLANK_HIGH_CRTC_MASK); if (high_pipe) pipe_index = high_pipe >> _DRM_VBLANK_HIGH_CRTC_SHIFT; else pipe_index = flags & _DRM_VBLANK_SECONDARY ? 1 : 0; /* Convert lease-relative crtc index into global crtc index */ if (drm_core_check_feature(dev, DRIVER_MODESET)) { pipe = 0; drm_for_each_crtc(crtc, dev) { if (drm_lease_held(file_priv, crtc->base.id)) { if (pipe_index == 0) break; pipe_index--; } pipe++; } } else { pipe = pipe_index; } if (pipe >= dev->num_crtcs) return -EINVAL; vblank = &dev->vblank[pipe]; /* If the counter is currently enabled and accurate, short-circuit * queries to return the cached timestamp of the last vblank. 
*/ if (vblank->config.disable_immediate && drm_wait_vblank_is_query(vblwait) && READ_ONCE(vblank->enabled)) { drm_wait_vblank_reply(dev, pipe, &vblwait->reply); return 0; } ret = drm_vblank_get(dev, pipe); if (ret) { drm_dbg_core(dev, "crtc %d failed to acquire vblank counter, %d\n", pipe, ret); return ret; } seq = drm_vblank_count(dev, pipe); switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) { case _DRM_VBLANK_RELATIVE: req_seq = seq + vblwait->request.sequence; vblwait->request.sequence = req_seq; vblwait->request.type &= ~_DRM_VBLANK_RELATIVE; break; case _DRM_VBLANK_ABSOLUTE: req_seq = widen_32_to_64(vblwait->request.sequence, seq); break; default: ret = -EINVAL; goto done; } if ((flags & _DRM_VBLANK_NEXTONMISS) && drm_vblank_passed(seq, req_seq)) { req_seq = seq + 1; vblwait->request.type &= ~_DRM_VBLANK_NEXTONMISS; vblwait->request.sequence = req_seq; } if (flags & _DRM_VBLANK_EVENT) { /* must hold on to the vblank ref until the event fires * drm_vblank_put will be called asynchronously */ return drm_queue_vblank_event(dev, pipe, req_seq, vblwait, file_priv); } if (req_seq != seq) { int wait; drm_dbg_core(dev, "waiting on vblank count %llu, crtc %u\n", req_seq, pipe); wait = wait_event_interruptible_timeout(vblank->queue, drm_vblank_passed(drm_vblank_count(dev, pipe), req_seq) || !READ_ONCE(vblank->enabled), msecs_to_jiffies(3000)); switch (wait) { case 0: /* timeout */ ret = -EBUSY; break; case -ERESTARTSYS: /* interrupted by signal */ ret = -EINTR; break; default: ret = 0; break; } } if (ret != -EINTR) { drm_wait_vblank_reply(dev, pipe, &vblwait->reply); drm_dbg_core(dev, "crtc %d returning %u to client\n", pipe, vblwait->reply.sequence); } else { drm_dbg_core(dev, "crtc %d vblank wait interrupted by signal\n", pipe); } done: drm_vblank_put(dev, pipe); return ret; } static void drm_handle_vblank_events(struct drm_device *dev, unsigned int pipe) { struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe); bool high_prec = false; struct drm_pending_vblank_event *e, *t; ktime_t now; u64 seq; assert_spin_locked(&dev->event_lock); seq = drm_vblank_count_and_time(dev, pipe, &now); list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) { if (e->pipe != pipe) continue; if (!drm_vblank_passed(seq, e->sequence)) continue; drm_dbg_core(dev, "vblank event on %llu, current %llu\n", e->sequence, seq); list_del(&e->base.link); drm_vblank_put(dev, pipe); send_vblank_event(dev, e, seq, now); } if (crtc && crtc->funcs->get_vblank_timestamp) high_prec = true; trace_drm_vblank_event(pipe, seq, now, high_prec); } /** * drm_handle_vblank - handle a vblank event * @dev: DRM device * @pipe: index of CRTC where this event occurred * * Drivers should call this routine in their vblank interrupt handlers to * update the vblank counter and send any signals that may be pending. * * This is the legacy version of drm_crtc_handle_vblank(). */ bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe) { struct drm_vblank_crtc *vblank = drm_vblank_crtc(dev, pipe); unsigned long irqflags; bool disable_irq; if (drm_WARN_ON_ONCE(dev, !drm_dev_has_vblank(dev))) return false; if (drm_WARN_ON(dev, pipe >= dev->num_crtcs)) return false; spin_lock_irqsave(&dev->event_lock, irqflags); /* Need timestamp lock to prevent concurrent execution with * vblank enable/disable, as this would cause inconsistent * or corrupted timestamps and vblank counts. */ spin_lock(&dev->vblank_time_lock); /* Vblank irq handling disabled. Nothing to do. 
*/ if (!vblank->enabled) { spin_unlock(&dev->vblank_time_lock); spin_unlock_irqrestore(&dev->event_lock, irqflags); return false; } drm_update_vblank_count(dev, pipe, true); spin_unlock(&dev->vblank_time_lock); wake_up(&vblank->queue); /* With instant-off, we defer disabling the interrupt until after * we finish processing the following vblank after all events have * been signaled. The disable has to be last (after * drm_handle_vblank_events) so that the timestamp is always accurate. */ disable_irq = (vblank->config.disable_immediate && vblank->config.offdelay_ms > 0 && !atomic_read(&vblank->refcount)); drm_handle_vblank_events(dev, pipe); drm_handle_vblank_works(vblank); spin_unlock_irqrestore(&dev->event_lock, irqflags); if (disable_irq) vblank_disable_fn(&vblank->disable_timer); return true; } EXPORT_SYMBOL(drm_handle_vblank); /** * drm_crtc_handle_vblank - handle a vblank event * @crtc: where this event occurred * * Drivers should call this routine in their vblank interrupt handlers to * update the vblank counter and send any signals that may be pending. * * This is the native KMS version of drm_handle_vblank(). * * Note that for a given vblank counter value drm_crtc_handle_vblank() * and drm_crtc_vblank_count() or drm_crtc_vblank_count_and_time() * provide a barrier: Any writes done before calling * drm_crtc_handle_vblank() will be visible to callers of the later * functions, if the vblank count is the same or a later one. * * See also &drm_vblank_crtc.count. * * Returns: * True if the event was successfully handled, false on failure. */ bool drm_crtc_handle_vblank(struct drm_crtc *crtc) { return drm_handle_vblank(crtc->dev, drm_crtc_index(crtc)); } EXPORT_SYMBOL(drm_crtc_handle_vblank); /* * Get crtc VBLANK count. * * \param dev DRM device * \param data user argument, pointing to a drm_crtc_get_sequence structure. * \param file_priv drm file private for the user's open file descriptor */ int drm_crtc_get_sequence_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_crtc *crtc; struct drm_vblank_crtc *vblank; int pipe; struct drm_crtc_get_sequence *get_seq = data; ktime_t now; bool vblank_enabled; int ret; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EOPNOTSUPP; if (!drm_dev_has_vblank(dev)) return -EOPNOTSUPP; crtc = drm_crtc_find(dev, file_priv, get_seq->crtc_id); if (!crtc) return -ENOENT; pipe = drm_crtc_index(crtc); vblank = drm_crtc_vblank_crtc(crtc); vblank_enabled = READ_ONCE(vblank->config.disable_immediate) && READ_ONCE(vblank->enabled); if (!vblank_enabled) { ret = drm_crtc_vblank_get(crtc); if (ret) { drm_dbg_core(dev, "crtc %d failed to acquire vblank counter, %d\n", pipe, ret); return ret; } } drm_modeset_lock(&crtc->mutex, NULL); if (crtc->state) get_seq->active = crtc->state->enable; else get_seq->active = crtc->enabled; drm_modeset_unlock(&crtc->mutex); get_seq->sequence = drm_vblank_count_and_time(dev, pipe, &now); get_seq->sequence_ns = ktime_to_ns(now); if (!vblank_enabled) drm_crtc_vblank_put(crtc); return 0; } /* * Queue a event for VBLANK sequence * * \param dev DRM device * \param data user argument, pointing to a drm_crtc_queue_sequence structure. 
* \param file_priv drm file private for the user's open file descriptor */ int drm_crtc_queue_sequence_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_crtc *crtc; struct drm_vblank_crtc *vblank; int pipe; struct drm_crtc_queue_sequence *queue_seq = data; ktime_t now; struct drm_pending_vblank_event *e; u32 flags; u64 seq; u64 req_seq; int ret; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EOPNOTSUPP; if (!drm_dev_has_vblank(dev)) return -EOPNOTSUPP; crtc = drm_crtc_find(dev, file_priv, queue_seq->crtc_id); if (!crtc) return -ENOENT; flags = queue_seq->flags; /* Check valid flag bits */ if (flags & ~(DRM_CRTC_SEQUENCE_RELATIVE| DRM_CRTC_SEQUENCE_NEXT_ON_MISS)) return -EINVAL; pipe = drm_crtc_index(crtc); vblank = drm_crtc_vblank_crtc(crtc); e = kzalloc(sizeof(*e), GFP_KERNEL); if (e == NULL) return -ENOMEM; ret = drm_crtc_vblank_get(crtc); if (ret) { drm_dbg_core(dev, "crtc %d failed to acquire vblank counter, %d\n", pipe, ret); goto err_free; } seq = drm_vblank_count_and_time(dev, pipe, &now); req_seq = queue_seq->sequence; if (flags & DRM_CRTC_SEQUENCE_RELATIVE) req_seq += seq; if ((flags & DRM_CRTC_SEQUENCE_NEXT_ON_MISS) && drm_vblank_passed(seq, req_seq)) req_seq = seq + 1; e->pipe = pipe; e->event.base.type = DRM_EVENT_CRTC_SEQUENCE; e->event.base.length = sizeof(e->event.seq); e->event.seq.user_data = queue_seq->user_data; spin_lock_irq(&dev->event_lock); /* * drm_crtc_vblank_off() might have been called after we called * drm_crtc_vblank_get(). drm_crtc_vblank_off() holds event_lock around the * vblank disable, so no need for further locking. The reference from * drm_crtc_vblank_get() protects against vblank disable from another source. */ if (!READ_ONCE(vblank->enabled)) { ret = -EINVAL; goto err_unlock; } ret = drm_event_reserve_init_locked(dev, file_priv, &e->base, &e->event.base); if (ret) goto err_unlock; e->sequence = req_seq; if (drm_vblank_passed(seq, req_seq)) { drm_crtc_vblank_put(crtc); send_vblank_event(dev, e, seq, now); queue_seq->sequence = seq; } else { /* drm_handle_vblank_events will call drm_vblank_put */ list_add_tail(&e->base.link, &dev->vblank_event_list); queue_seq->sequence = req_seq; } spin_unlock_irq(&dev->event_lock); return 0; err_unlock: spin_unlock_irq(&dev->event_lock); drm_crtc_vblank_put(crtc); err_free: kfree(e); return ret; } /* * VBLANK timer */ static enum hrtimer_restart drm_vblank_timer_function(struct hrtimer *timer) { struct drm_vblank_crtc_timer *vtimer = container_of(timer, struct drm_vblank_crtc_timer, timer); struct drm_crtc *crtc = vtimer->crtc; const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; struct drm_device *dev = crtc->dev; unsigned long flags; ktime_t interval; u64 ret_overrun; bool succ; spin_lock_irqsave(&vtimer->interval_lock, flags); interval = vtimer->interval; spin_unlock_irqrestore(&vtimer->interval_lock, flags); if (!interval) return HRTIMER_NORESTART; ret_overrun = hrtimer_forward_now(&vtimer->timer, interval); if (ret_overrun != 1) drm_dbg_vbl(dev, "vblank timer overrun\n"); if (crtc_funcs->handle_vblank_timeout) succ = crtc_funcs->handle_vblank_timeout(crtc); else succ = drm_crtc_handle_vblank(crtc); if (!succ) return HRTIMER_NORESTART; return HRTIMER_RESTART; } /** * drm_crtc_vblank_start_timer - Starts the vblank timer on the given CRTC * @crtc: the CRTC * * Drivers should call this function from their CRTC's enable_vblank * function to start a vblank timer. The timer will fire after the duration * of a full frame. 
 * drm_crtc_vblank_cancel_timer() disables a running timer.
 *
 * Returns:
 * 0 on success, or a negative errno code otherwise.
 */
int drm_crtc_vblank_start_timer(struct drm_crtc *crtc)
{
	struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
	struct drm_vblank_crtc_timer *vtimer = &vblank->vblank_timer;
	unsigned long flags;

	if (!vtimer->crtc) {
		/*
		 * Set up the data structures on the first invocation.
		 */
		vtimer->crtc = crtc;
		spin_lock_init(&vtimer->interval_lock);
		hrtimer_setup(&vtimer->timer, drm_vblank_timer_function,
			      CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	} else {
		/*
		 * Timer should not be active. If it is, wait for the
		 * previous cancel operations to finish.
		 */
		while (hrtimer_active(&vtimer->timer))
			hrtimer_try_to_cancel(&vtimer->timer);
	}

	drm_calc_timestamping_constants(crtc, &crtc->mode);

	spin_lock_irqsave(&vtimer->interval_lock, flags);
	vtimer->interval = ns_to_ktime(vblank->framedur_ns);
	spin_unlock_irqrestore(&vtimer->interval_lock, flags);

	hrtimer_start(&vtimer->timer, vtimer->interval, HRTIMER_MODE_REL);

	return 0;
}
EXPORT_SYMBOL(drm_crtc_vblank_start_timer);

/**
 * drm_crtc_vblank_cancel_timer - Cancels the given CRTC's vblank timer
 * @crtc: the CRTC
 *
 * Drivers should call this function from their CRTC's disable_vblank
 * function to stop a vblank timer.
 */
void drm_crtc_vblank_cancel_timer(struct drm_crtc *crtc)
{
	struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
	struct drm_vblank_crtc_timer *vtimer = &vblank->vblank_timer;
	unsigned long flags;

	/*
	 * Calling hrtimer_cancel() can result in a deadlock with DRM's
	 * vblank_time_lock and hrtimers' softirq_expiry_lock. So
	 * clear interval and indicate cancellation. The timer function
	 * will cancel itself on the next invocation.
	 */
	spin_lock_irqsave(&vtimer->interval_lock, flags);
	vtimer->interval = 0;
	spin_unlock_irqrestore(&vtimer->interval_lock, flags);

	hrtimer_try_to_cancel(&vtimer->timer);
}
EXPORT_SYMBOL(drm_crtc_vblank_cancel_timer);

/**
 * drm_crtc_vblank_get_vblank_timeout - Returns the vblank timeout
 * @crtc: The CRTC
 * @vblank_time: Returns the next vblank timestamp
 *
 * The helper drm_crtc_vblank_get_vblank_timeout() returns the next vblank
 * timestamp of the CRTC's vblank timer according to the timer's expiry
 * time.
 */
void drm_crtc_vblank_get_vblank_timeout(struct drm_crtc *crtc, ktime_t *vblank_time)
{
	struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
	struct drm_vblank_crtc_timer *vtimer = &vblank->vblank_timer;
	u64 cur_count;
	ktime_t cur_time;

	if (!READ_ONCE(vblank->enabled)) {
		*vblank_time = ktime_get();
		return;
	}

	/*
	 * A concurrent vblank timeout could update the expires field before
	 * we compare it with the vblank time. Hence we'd compare the old
	 * expiry time to the new vblank time, deducing the timer had already
	 * expired. Reread until we get consistent values from both fields.
	 */
	do {
		cur_count = drm_crtc_vblank_count_and_time(crtc, &cur_time);
		*vblank_time = READ_ONCE(vtimer->timer.node.expires);
	} while (cur_count != drm_crtc_vblank_count_and_time(crtc, &cur_time));

	if (drm_WARN_ON(crtc->dev, !ktime_compare(*vblank_time, cur_time)))
		return; /* Already expired */

	/*
	 * To prevent races we roll the hrtimer forward before we do any
	 * interrupt processing - this is how real hw works (the interrupt
	 * is only generated after all the vblank registers are updated)
	 * and what the vblank core expects. Therefore we need to always
	 * correct the timestamp by one frame.
	 */
	*vblank_time = ktime_sub(*vblank_time, vtimer->interval);
}
EXPORT_SYMBOL(drm_crtc_vblank_get_vblank_timeout);
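
/*
 * Illustrative sketch: how a driver without a hardware vblank interrupt
 * might wire the timer helpers above into its &drm_crtc_funcs. The foo_*
 * names are hypothetical placeholders, not taken from any real driver; only
 * the drm_crtc_vblank_* helpers are real. The sketch assumes the CRTC mode
 * is programmed before vblank interrupts get enabled, so that
 * drm_calc_timestamping_constants() yields a valid frame duration.
 */
static int foo_crtc_enable_vblank(struct drm_crtc *crtc)
{
	/* Arms the per-CRTC hrtimer with the current frame duration. */
	return drm_crtc_vblank_start_timer(crtc);
}

static void foo_crtc_disable_vblank(struct drm_crtc *crtc)
{
	/* Lets the timer cancel itself on its next expiry. */
	drm_crtc_vblank_cancel_timer(crtc);
}

static bool foo_crtc_get_vblank_timestamp(struct drm_crtc *crtc, int *max_error,
					  ktime_t *vblank_time, bool in_vblank_irq)
{
	/*
	 * Derive the timestamp from the timer expiry instead of a hardware
	 * scanout position query.
	 */
	drm_crtc_vblank_get_vblank_timeout(crtc, vblank_time);
	return true;
}

static const struct drm_crtc_funcs foo_crtc_funcs = {
	/* ... the usual .destroy/.set_config/.page_flip/etc. hooks ... */
	.enable_vblank		= foo_crtc_enable_vblank,
	.disable_vblank		= foo_crtc_disable_vblank,
	.get_vblank_timestamp	= foo_crtc_get_vblank_timestamp,
};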
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* COMMON Applications Kept Enhanced (CAKE) discipline
 *
 * Copyright (C) 2014-2018 Jonathan Morton <chromatix99@gmail.com>
 * Copyright (C) 2015-2018 Toke Høiland-Jørgensen <toke@toke.dk>
 * Copyright (C) 2014-2018 Dave Täht <dave.taht@gmail.com>
 * Copyright (C) 2015-2018 Sebastian Moeller <moeller0@gmx.de>
 * (C) 2015-2018 Kevin Darbyshire-Bryant <kevin@darbyshire-bryant.me.uk>
 * Copyright (C) 2017-2018 Ryan Mounce <ryan@mounce.com.au>
 *
 * The CAKE Principles:
 *	(or, how to have your cake and eat it too)
 *
 * This is a combination of several shaping, AQM and FQ techniques into one
 * easy-to-use package:
 *
 * - An overall bandwidth shaper, to move the bottleneck away from dumb CPE
 *   equipment and bloated MACs. This operates in deficit mode (as in sch_fq),
 *   eliminating the need for any sort of burst parameter (eg. token bucket
 *   depth). Burst support is limited to that necessary to overcome scheduling
 *   latency.
 *
 * - A Diffserv-aware priority queue, giving more priority to certain classes,
 *   up to a specified fraction of bandwidth. Above that bandwidth threshold,
 *   the priority is reduced to avoid starving other tins.
 *
 * - Each priority tin has a separate Flow Queue system, to isolate traffic
 *   flows from each other. This prevents a burst on one flow from increasing
 *   the delay to another. Flows are distributed to queues using a
 *   set-associative hash function.
 *
 * - Each queue is actively managed by Cobalt, which is a combination of the
 *   Codel and Blue AQM algorithms. This serves flows fairly, and signals
 *   congestion early via ECN (if available) and/or packet drops, to keep
 *   latency low. The codel parameters are auto-tuned based on the bandwidth
 *   setting, as is necessary at low bandwidths.
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/reciprocal_div.h>
#include <net/netlink.h>
#include <linux/if_vlan.h>
#include <net/gso.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/tcp.h>
#include <net/flow_dissector.h>

#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <net/netfilter/nf_conntrack_core.h>
#endif

#define CAKE_SET_WAYS (8)
#define CAKE_MAX_TINS (8)
#define CAKE_QUEUES (1024)
#define CAKE_FLOW_MASK 63
#define CAKE_FLOW_NAT_FLAG 64

/* struct cobalt_params - contains codel and blue parameters
 * @interval:	codel initial drop rate
 * @target:	maximum persistent sojourn time & blue update rate
 * @mtu_time:	serialisation delay of maximum-size packet
 * @p_inc:	increment of blue drop probability (0.32 fxp)
 * @p_dec:	decrement of blue drop probability (0.32 fxp)
 */
struct cobalt_params {
	u64	interval;
	u64	target;
	u64	mtu_time;
	u32	p_inc;
	u32	p_dec;
};

/* struct cobalt_vars - contains codel and blue variables
 * @count:		codel dropping frequency
 * @rec_inv_sqrt:	reciprocal value of sqrt(count) >> 1
 * @drop_next:		time to drop next packet, or when we dropped last
 * @blue_timer:		Blue time to next drop
 * @p_drop:		BLUE drop probability (0.32 fxp)
 * @dropping:		set if in dropping state
 * @ecn_marked:		set if marked
 */
struct cobalt_vars {
	u32	count;
	u32	rec_inv_sqrt;
	ktime_t	drop_next;
	ktime_t	blue_timer;
	u32	p_drop;
	bool	dropping;
	bool	ecn_marked;
};

enum {
	CAKE_SET_NONE = 0,
	CAKE_SET_SPARSE,
	CAKE_SET_SPARSE_WAIT, /* counted in SPARSE, actually in BULK */
	CAKE_SET_BULK,
	CAKE_SET_DECAYING
};

struct cake_flow {
	/* this stuff is all needed per-flow at dequeue time */
	struct sk_buff	  *head;
	struct sk_buff	  *tail;
	struct list_head  flowchain;
	s32		  deficit;
	u32		  dropped;
	struct cobalt_vars cvars;
	u16		  srchost; /* index into cake_host table */
	u16		  dsthost;
	u8		  set;
}; /* please try to keep this structure <= 64 bytes */

struct cake_host {
	u32 srchost_tag;
	u32 dsthost_tag;
	u16 srchost_bulk_flow_count;
	u16 dsthost_bulk_flow_count;
};

struct cake_heap_entry {
	u16 t:3, b:10;
};

struct cake_tin_data {
	struct cake_flow flows[CAKE_QUEUES];
	u32	backlogs[CAKE_QUEUES];
	u32	tags[CAKE_QUEUES]; /* for set association */
	u16	overflow_idx[CAKE_QUEUES];
	struct cake_host hosts[CAKE_QUEUES]; /* for triple isolation */
	u16	flow_quantum;

	struct cobalt_params cparams;
	u32	drop_overlimit;
	u16	bulk_flow_count;
	u16	sparse_flow_count;
	u16	decaying_flow_count;
	u16	unresponsive_flow_count;

	u32	max_skblen;

	struct list_head new_flows;
	struct list_head old_flows;
	struct list_head decaying_flows;

	/* time_next = time_this + ((len * rate_ns) >> rate_shft) */
	ktime_t	time_next_packet;
	u64	tin_rate_ns;
	u64	tin_rate_bps;
	u16	tin_rate_shft;

	u16	tin_quantum;
	s32	tin_deficit;
	u32	tin_backlog;
	u32	tin_dropped;
	u32	tin_ecn_mark;

	u32	packets;
	u64	bytes;

	u32	ack_drops;

	/* moving averages */
	u64	avge_delay;
	u64	peak_delay;
	u64	base_delay;

	/* hash function stats */
	u32	way_directs;
	u32	way_hits;
	u32	way_misses;
	u32	way_collisions;
}; /* number of tins is small, so size of this struct doesn't matter much */
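The tags[] and overflow_idx[] arrays in cake_tin_data back the set-associative flow-to-queue mapping mentioned in the header comment: a flow's hash selects a small set of CAKE_SET_WAYS queues, and a queue already tagged for that flow (or an idle one) is preferred before a collision is accepted. A simplified standalone sketch of the idea, using hypothetical names and kernel-style types (the real hashing logic in sch_cake.c additionally handles host hashing for triple isolation, optional NAT lookup, and eviction tracked through the way_* counters):

#define SKETCH_QUEUES	1024	/* mirrors CAKE_QUEUES */
#define SKETCH_WAYS	8	/* mirrors CAKE_SET_WAYS */

struct sketch_tin {
	u32 tags[SKETCH_QUEUES];	/* flow tag currently owning each queue */
	u32 backlogs[SKETCH_QUEUES];	/* bytes queued in each queue */
};

static u32 sketch_flow_to_queue(struct sketch_tin *t, u32 flow_hash)
{
	u32 reduced = flow_hash % SKETCH_QUEUES;
	u32 set = reduced - (reduced % SKETCH_WAYS);	/* first queue of the set */
	u32 i;

	/* Probe the ways of this set: reuse a matching tag, or claim an idle queue. */
	for (i = 0; i < SKETCH_WAYS; i++) {
		u32 q = set + i;

		if (t->tags[q] == flow_hash || !t->backlogs[q]) {
			t->tags[q] = flow_hash;
			return q;
		}
	}

	/* Every way is busy with another flow: accept a hash collision. */
	t->tags[set] = flow_hash;
	return set;
}

Keeping the probe width at eight queues keeps lookups cheap while making accidental flow sharing (counted in way_collisions) rare at typical flow counts.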
struct cake_sched_data {
	struct tcf_proto __rcu *filter_list; /* optional external classifier */
	struct tcf_block *block;
	struct cake_tin_data *tins;

	struct cake_heap_entry overflow_heap[CAKE_QUEUES * CAKE_MAX_TINS];
	u16		overflow_timeout;

	u16		tin_cnt;
	u8		tin_mode;
	u8		flow_mode;
	u8		ack_filter;
	u8		atm_mode;

	u32		fwmark_mask;
	u16		fwmark_shft;

	/* time_next = time_this + ((len * rate_ns) >> rate_shft) */
	u16		rate_shft;
	ktime_t		time_next_packet;
	ktime_t		failsafe_next_packet;
	u64		rate_ns;
	u64		rate_bps;
	u16		rate_flags;
	s16		rate_overhead;
	u16		rate_mpu;
	u64		interval;
	u64		target;

	/* resource tracking */
	u32		buffer_used;
	u32		buffer_max_used;
	u32		buffer_limit;
	u32		buffer_config_limit;

	/* indices for dequeue */
	u16		cur_tin;
	u16		cur_flow;

	struct qdisc_watchdog watchdog;
	const u8	*tin_index;
	const u8	*tin_order;

	/* bandwidth capacity estimate */
	ktime_t		last_packet_time;
	ktime_t		avg_window_begin;
	u64		avg_packet_interval;
	u64		avg_window_bytes;
	u64		avg_peak_bandwidth;
	ktime_t		last_reconfig_time;

	/* packet length stats */
	u32		avg_netoff;
	u16		max_netlen;
	u16		max_adjlen;
	u16		min_netlen;
	u16		min_adjlen;
};

enum {
	CAKE_FLAG_OVERHEAD	   = BIT(0),
	CAKE_FLAG_AUTORATE_INGRESS = BIT(1),
	CAKE_FLAG_INGRESS	   = BIT(2),
	CAKE_FLAG_WASH		   = BIT(3),
	CAKE_FLAG_SPLIT_GSO	   = BIT(4)
};

/* COBALT operates the Codel and BLUE algorithms in parallel, in order to
 * obtain the best features of each. Codel is excellent on flows which
 * respond to congestion signals in a TCP-like way. BLUE is more effective on
 * unresponsive flows.
 */

struct cobalt_skb_cb {
	ktime_t enqueue_time;
	u32     adjusted_len;
};

static u64 us_to_ns(u64 us)
{
	return us * NSEC_PER_USEC;
}

static struct cobalt_skb_cb *get_cobalt_cb(const struct sk_buff *skb)
{
	qdisc_cb_private_validate(skb, sizeof(struct cobalt_skb_cb));
	return (struct cobalt_skb_cb *)qdisc_skb_cb(skb)->data;
}

static ktime_t cobalt_get_enqueue_time(const struct sk_buff *skb)
{
	return get_cobalt_cb(skb)->enqueue_time;
}

static void cobalt_set_enqueue_time(struct sk_buff *skb, ktime_t now)
{