Total coverage: 378192 (19%) of 2030019
// SPDX-License-Identifier: GPL-2.0-only
/* Xtables module to match packets using a BPF filter.
 * Copyright 2013 Google Inc.
 * Written by Willem de Bruijn <willemb@google.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/syscalls.h>
#include <linux/skbuff.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/netfilter/xt_bpf.h>
#include <linux/netfilter/x_tables.h>

MODULE_AUTHOR("Willem de Bruijn <willemb@google.com>");
MODULE_DESCRIPTION("Xtables: BPF filter match");
MODULE_LICENSE("GPL");
MODULE_ALIAS("ipt_bpf");
MODULE_ALIAS("ip6t_bpf");

static int __bpf_mt_check_bytecode(struct sock_filter *insns, __u16 len,
				   struct bpf_prog **ret)
{
	struct sock_fprog_kern program;

	if (len > XT_BPF_MAX_NUM_INSTR)
		return -EINVAL;

	program.len = len;
	program.filter = insns;

	if (bpf_prog_create(ret, &program)) {
		pr_info_ratelimited("check failed: parse error\n");
		return -EINVAL;
	}

	return 0;
}

static int __bpf_mt_check_fd(int fd, struct bpf_prog **ret)
{
	struct bpf_prog *prog;

	prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	*ret = prog;
	return 0;
}

static int __bpf_mt_check_path(const char *path, struct bpf_prog **ret)
{
	if (strnlen(path, XT_BPF_PATH_MAX) == XT_BPF_PATH_MAX)
		return -EINVAL;

	*ret = bpf_prog_get_type_path(path, BPF_PROG_TYPE_SOCKET_FILTER);
	return PTR_ERR_OR_ZERO(*ret);
}

static int bpf_mt_check(const struct xt_mtchk_param *par)
{
	struct xt_bpf_info *info = par->matchinfo;

	return __bpf_mt_check_bytecode(info->bpf_program,
				       info->bpf_program_num_elem,
				       &info->filter);
}

static int bpf_mt_check_v1(const struct xt_mtchk_param *par)
{
	struct xt_bpf_info_v1 *info = par->matchinfo;

	if (info->mode == XT_BPF_MODE_BYTECODE)
		return __bpf_mt_check_bytecode(info->bpf_program,
					       info->bpf_program_num_elem,
					       &info->filter);
	else if (info->mode == XT_BPF_MODE_FD_ELF)
		return __bpf_mt_check_fd(info->fd, &info->filter);
	else if (info->mode == XT_BPF_MODE_PATH_PINNED)
		return __bpf_mt_check_path(info->path, &info->filter);
	else
		return -EINVAL;
}

static bool bpf_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct xt_bpf_info *info = par->matchinfo;

	return bpf_prog_run(info->filter, skb);
}

static bool bpf_mt_v1(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct xt_bpf_info_v1 *info = par->matchinfo;

	return !!bpf_prog_run_save_cb(info->filter, (struct sk_buff *) skb);
}

static void bpf_mt_destroy(const struct xt_mtdtor_param *par)
{
	const struct xt_bpf_info *info = par->matchinfo;

	bpf_prog_destroy(info->filter);
}

static void bpf_mt_destroy_v1(const struct xt_mtdtor_param *par)
{
	const struct xt_bpf_info_v1 *info = par->matchinfo;

	bpf_prog_destroy(info->filter);
}

static struct xt_match bpf_mt_reg[] __read_mostly = {
	{
		.name		= "bpf",
		.revision	= 0,
		.family		= NFPROTO_UNSPEC,
		.checkentry	= bpf_mt_check,
		.match		= bpf_mt,
		.destroy	= bpf_mt_destroy,
		.matchsize	= sizeof(struct xt_bpf_info),
		.usersize	= offsetof(struct xt_bpf_info, filter),
		.me		= THIS_MODULE,
	},
	{
		.name		= "bpf",
		.revision	= 1,
		.family		= NFPROTO_UNSPEC,
		.checkentry	= bpf_mt_check_v1,
		.match		= bpf_mt_v1,
		.destroy	= bpf_mt_destroy_v1,
		.matchsize	= sizeof(struct xt_bpf_info_v1),
		.usersize	= offsetof(struct xt_bpf_info_v1, filter),
		.me		= THIS_MODULE,
	},
};

static int __init bpf_mt_init(void)
{
	return xt_register_matches(bpf_mt_reg, ARRAY_SIZE(bpf_mt_reg));
}

static void __exit bpf_mt_exit(void)
{
	xt_unregister_matches(bpf_mt_reg, ARRAY_SIZE(bpf_mt_reg));
}

module_init(bpf_mt_init);
module_exit(bpf_mt_exit);
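The revision-0 path above takes raw classic-BPF bytecode from userspace and validates it with bpf_prog_create(), while revision 1 can also accept an eBPF socket filter by fd or pinned path. As a rough illustration of what that bytecode looks like, here is a minimal sketch of a userspace helper filling a struct xt_bpf_info with a four-instruction cBPF program; fill_bpf_match() is a hypothetical name, and in practice iptables' libxt_bpf builds this for you from --bytecode.

/*
 * Illustrative sketch only: a classic-BPF program of the kind the
 * revision-0 check path (__bpf_mt_check_bytecode) would accept.
 * fill_bpf_match() is a hypothetical helper, not kernel or iptables code.
 */
#include <linux/filter.h>
#include <linux/netfilter/xt_bpf.h>
#include <string.h>

static void fill_bpf_match(struct xt_bpf_info *info)
{
	/* Match packets of at most 99 bytes: load length, compare, return. */
	struct sock_filter insns[] = {
		BPF_STMT(BPF_LD | BPF_W | BPF_LEN, 0),
		/* if len > 99 fall through to "no match", else skip to "match" */
		BPF_JUMP(BPF_JMP | BPF_JGT | BPF_K, 99, 0, 1),
		BPF_STMT(BPF_RET | BPF_K, 0),          /* no match */
		BPF_STMT(BPF_RET | BPF_K, 0xffffffff), /* match */
	};

	memset(info, 0, sizeof(*info));
	info->bpf_program_num_elem = sizeof(insns) / sizeof(insns[0]);
	memcpy(info->bpf_program, insns, sizeof(insns));
}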
// SPDX-License-Identifier: GPL-2.0-only
/*
 * File: datagram.c
 *
 * Datagram (ISI) Phonet sockets
 *
 * Copyright (C) 2008 Nokia Corporation.
 *
 * Authors: Sakari Ailus <sakari.ailus@nokia.com>
 *          Rémi Denis-Courmont
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <asm/ioctls.h>
#include <net/sock.h>

#include <linux/phonet.h>
#include <linux/export.h>
#include <net/phonet/phonet.h>

static int pn_backlog_rcv(struct sock *sk, struct sk_buff *skb);

/* associated socket ceases to exist */
static void pn_sock_close(struct sock *sk, long timeout)
{
	sk_common_release(sk);
}

static int pn_ioctl(struct sock *sk, int cmd, int *karg)
{
	struct sk_buff *skb;

	switch (cmd) {
	case SIOCINQ:
		spin_lock_bh(&sk->sk_receive_queue.lock);
		skb = skb_peek(&sk->sk_receive_queue);
		*karg = skb ? skb->len : 0;
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		return 0;

	case SIOCPNADDRESOURCE:
	case SIOCPNDELRESOURCE: {
		u32 res = *karg;

		if (res >= 256)
			return -EINVAL;
		if (cmd == SIOCPNADDRESOURCE)
			return pn_sock_bind_res(sk, res);
		else
			return pn_sock_unbind_res(sk, res);
		}
	}

	return -ENOIOCTLCMD;
}

/* Destroy socket. All references are gone. */
static void pn_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
}

static int pn_init(struct sock *sk)
{
	sk->sk_destruct = pn_destruct;
	return 0;
}

static int pn_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
	DECLARE_SOCKADDR(struct sockaddr_pn *, target, msg->msg_name);
	struct sk_buff *skb;
	int err;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_NOSIGNAL|
				MSG_CMSG_COMPAT))
		return -EOPNOTSUPP;

	if (target == NULL)
		return -EDESTADDRREQ;

	if (msg->msg_namelen < sizeof(struct sockaddr_pn))
		return -EINVAL;

	if (target->spn_family != AF_PHONET)
		return -EAFNOSUPPORT;

	skb = sock_alloc_send_skb(sk, MAX_PHONET_HEADER + len,
					msg->msg_flags & MSG_DONTWAIT, &err);
	if (skb == NULL)
		return err;
	skb_reserve(skb, MAX_PHONET_HEADER);

	err = memcpy_from_msg((void *)skb_put(skb, len), msg, len);
	if (err < 0) {
		kfree_skb(skb);
		return err;
	}

	/*
	 * Fill in the Phonet header and
	 * finally pass the packet forwards.
	 */
	err = pn_skb_send(sk, skb, target);

	/* If ok, return len. */
	return (err >= 0) ? len : err;
}

static int pn_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
		      int flags, int *addr_len)
{
	struct sk_buff *skb = NULL;
	struct sockaddr_pn sa;
	int rval = -EOPNOTSUPP;
	int copylen;

	if (flags & ~(MSG_PEEK|MSG_TRUNC|MSG_DONTWAIT|MSG_NOSIGNAL|
			MSG_CMSG_COMPAT))
		goto out_nofree;

	skb = skb_recv_datagram(sk, flags, &rval);
	if (skb == NULL)
		goto out_nofree;

	pn_skb_get_src_sockaddr(skb, &sa);

	copylen = skb->len;
	if (len < copylen) {
		msg->msg_flags |= MSG_TRUNC;
		copylen = len;
	}

	rval = skb_copy_datagram_msg(skb, 0, msg, copylen);
	if (rval) {
		rval = -EFAULT;
		goto out;
	}

	rval = (flags & MSG_TRUNC) ? skb->len : copylen;

	if (msg->msg_name != NULL) {
		__sockaddr_check_size(sizeof(sa));
		memcpy(msg->msg_name, &sa, sizeof(sa));
		*addr_len = sizeof(sa);
	}

out:
	skb_free_datagram(sk, skb);

out_nofree:
	return rval;
}

/* Queue an skb for a sock. */
static int pn_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	int err = sock_queue_rcv_skb(sk, skb);

	if (err < 0)
		kfree_skb(skb);
	return err ? NET_RX_DROP : NET_RX_SUCCESS;
}

/* Module registration */
static struct proto pn_proto = {
	.close		= pn_sock_close,
	.ioctl		= pn_ioctl,
	.init		= pn_init,
	.sendmsg	= pn_sendmsg,
	.recvmsg	= pn_recvmsg,
	.backlog_rcv	= pn_backlog_rcv,
	.hash		= pn_sock_hash,
	.unhash		= pn_sock_unhash,
	.get_port	= pn_sock_get_port,
	.obj_size	= sizeof(struct pn_sock),
	.owner		= THIS_MODULE,
	.name		= "PHONET",
};

static const struct phonet_protocol pn_dgram_proto = {
	.ops		= &phonet_dgram_ops,
	.prot		= &pn_proto,
	.sock_type	= SOCK_DGRAM,
};

int __init isi_register(void)
{
	return phonet_proto_register(PN_PROTO_PHONET, &pn_dgram_proto);
}

void __exit isi_unregister(void)
{
	phonet_proto_unregister(PN_PROTO_PHONET, &pn_dgram_proto);
}
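For context, pn_sendmsg() above is reached from an ordinary AF_PHONET datagram socket. The following is a hedged userspace sketch of that path; the resource value and the minimal error handling are placeholders, since real Phonet addressing depends on the modem configuration.

/*
 * Illustrative sketch only: a userspace send that ends up in pn_sendmsg().
 * The device/object/resource values are placeholders.
 */
#include <sys/socket.h>
#include <linux/phonet.h>
#include <string.h>
#include <unistd.h>

static int phonet_dgram_send(const char *payload, size_t len)
{
	struct sockaddr_pn spn;
	int fd = socket(AF_PHONET, SOCK_DGRAM, PN_PROTO_PHONET);

	if (fd < 0)
		return -1;

	memset(&spn, 0, sizeof(spn));
	spn.spn_family = AF_PHONET;
	spn.spn_resource = 0x10;	/* placeholder resource id */

	/* sendto() supplies the sockaddr_pn that pn_sendmsg() validates */
	if (sendto(fd, payload, len, 0,
		   (struct sockaddr *)&spn, sizeof(spn)) < 0) {
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}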
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __FS_NOTIFY_FSNOTIFY_H_
#define __FS_NOTIFY_FSNOTIFY_H_

#include <linux/list.h>
#include <linux/fsnotify.h>
#include <linux/srcu.h>
#include <linux/types.h>

#include "../mount.h"

/*
 * fsnotify_connp_t is what we embed in objects which connector can be attached
 * to.
 */
typedef struct fsnotify_mark_connector __rcu *fsnotify_connp_t;

static inline struct inode *fsnotify_conn_inode(
				struct fsnotify_mark_connector *conn)
{
	return conn->obj;
}

static inline struct mount *fsnotify_conn_mount(
				struct fsnotify_mark_connector *conn)
{
	return real_mount(conn->obj);
}

static inline struct super_block *fsnotify_conn_sb(
				struct fsnotify_mark_connector *conn)
{
	return conn->obj;
}

static inline struct mnt_namespace *fsnotify_conn_mntns(
				struct fsnotify_mark_connector *conn)
{
	return conn->obj;
}

static inline struct super_block *fsnotify_object_sb(void *obj,
			enum fsnotify_obj_type obj_type)
{
	switch (obj_type) {
	case FSNOTIFY_OBJ_TYPE_INODE:
		return ((struct inode *)obj)->i_sb;
	case FSNOTIFY_OBJ_TYPE_VFSMOUNT:
		return ((struct vfsmount *)obj)->mnt_sb;
	case FSNOTIFY_OBJ_TYPE_SB:
		return (struct super_block *)obj;
	default:
		return NULL;
	}
}

static inline struct super_block *fsnotify_connector_sb(
				struct fsnotify_mark_connector *conn)
{
	return fsnotify_object_sb(conn->obj, conn->type);
}

static inline fsnotify_connp_t *fsnotify_sb_marks(struct super_block *sb)
{
	struct fsnotify_sb_info *sbinfo = fsnotify_sb_info(sb);

	return sbinfo ? &sbinfo->sb_marks : NULL;
}

/* destroy all events sitting in this groups notification queue */
extern void fsnotify_flush_notify(struct fsnotify_group *group);

/* protects reads of inode and vfsmount marks list */
extern struct srcu_struct fsnotify_mark_srcu;

/* compare two groups for sorting of marks lists */
extern int fsnotify_compare_groups(struct fsnotify_group *a,
				   struct fsnotify_group *b);

/* Destroy all marks attached to an object via connector */
extern void fsnotify_destroy_marks(fsnotify_connp_t *connp);
/* run the list of all marks associated with inode and destroy them */
static inline void fsnotify_clear_marks_by_inode(struct inode *inode)
{
	fsnotify_destroy_marks(&inode->i_fsnotify_marks);
}
/* run the list of all marks associated with vfsmount and destroy them */
static inline void fsnotify_clear_marks_by_mount(struct vfsmount *mnt)
{
	fsnotify_destroy_marks(&real_mount(mnt)->mnt_fsnotify_marks);
}
/* run the list of all marks associated with sb and destroy them */
static inline void fsnotify_clear_marks_by_sb(struct super_block *sb)
{
	fsnotify_destroy_marks(fsnotify_sb_marks(sb));
}
static inline void fsnotify_clear_marks_by_mntns(struct mnt_namespace *mntns)
{
	fsnotify_destroy_marks(&mntns->n_fsnotify_marks);
}
/*
 * update the dentry->d_flags of all of inode's children to indicate if inode cares
 * about events that happen to its children.
 */
extern void fsnotify_set_children_dentry_flags(struct inode *inode);

extern struct kmem_cache *fsnotify_mark_connector_cachep;

#endif	/* __FS_NOTIFY_FSNOTIFY_H_ */
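The clear_marks helpers above tear down marks that front ends such as inotify and fanotify attach to these connector lists. As a loose illustration of where such a mark comes from, here is a minimal userspace inotify sketch (error handling omitted, "/tmp" is only an example path); on the kernel side the resulting mark sits on the inode's i_fsnotify_marks connector.

/*
 * Illustrative sketch only: a userspace inotify watch is one way an
 * fsnotify mark ends up on an inode's connector list. Error handling is
 * trimmed and the watched path is arbitrary.
 */
#include <sys/inotify.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	int fd = inotify_init1(IN_CLOEXEC);
	int wd = inotify_add_watch(fd, "/tmp", IN_CREATE | IN_DELETE);
	ssize_t n = read(fd, buf, sizeof(buf));	/* blocks until an event */

	printf("watch %d delivered %zd bytes of events\n", wd, n);
	close(fd);
	return 0;
}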
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
* * Definitions for inet_sock * * Authors: Many, reorganised here by * Arnaldo Carvalho de Melo <acme@mandriva.com> */ #ifndef _INET_SOCK_H #define _INET_SOCK_H #include <linux/bitops.h> #include <linux/string.h> #include <linux/types.h> #include <linux/jhash.h> #include <linux/netdevice.h> #include <net/flow.h> #include <net/inet_dscp.h> #include <net/sock.h> #include <net/request_sock.h> #include <net/netns/hash.h> #include <net/tcp_states.h> #include <net/l3mdev.h> /** struct ip_options - IP Options * * @faddr - Saved first hop address * @nexthop - Saved nexthop address in LSRR and SSRR * @is_strictroute - Strict source route * @srr_is_hit - Packet destination addr was our one * @is_changed - IP checksum more not valid * @rr_needaddr - Need to record addr of outgoing dev * @ts_needtime - Need to record timestamp * @ts_needaddr - Need to record addr of outgoing dev */ struct ip_options { __be32 faddr; __be32 nexthop; unsigned char optlen; unsigned char srr; unsigned char rr; unsigned char ts; unsigned char is_strictroute:1, srr_is_hit:1, is_changed:1, rr_needaddr:1, ts_needtime:1, ts_needaddr:1; unsigned char router_alert; unsigned char cipso; unsigned char __pad2; unsigned char __data[]; }; struct ip_options_rcu { struct rcu_head rcu; struct ip_options opt; }; struct ip_options_data { struct ip_options_rcu opt; char data[40]; }; struct inet_request_sock { struct request_sock req; #define ir_loc_addr req.__req_common.skc_rcv_saddr #define ir_rmt_addr req.__req_common.skc_daddr #define ir_num req.__req_common.skc_num #define ir_rmt_port req.__req_common.skc_dport #define ir_v6_rmt_addr req.__req_common.skc_v6_daddr #define ir_v6_loc_addr req.__req_common.skc_v6_rcv_saddr #define ir_iif req.__req_common.skc_bound_dev_if #define ir_cookie req.__req_common.skc_cookie #define ireq_net req.__req_common.skc_net #define ireq_state req.__req_common.skc_state #define ireq_family req.__req_common.skc_family u16 snd_wscale : 4, rcv_wscale : 4, tstamp_ok : 1, sack_ok : 1, wscale_ok : 1, ecn_ok : 1, acked : 1, no_srccheck: 1, smc_ok : 1; u32 ir_mark; union { struct ip_options_rcu __rcu *ireq_opt; #if IS_ENABLED(CONFIG_IPV6) struct { struct ipv6_txoptions *ipv6_opt; struct sk_buff *pktopts; }; #endif }; }; static inline struct inet_request_sock *inet_rsk(const struct request_sock *sk) { return (struct inet_request_sock *)sk; } static inline u32 inet_request_mark(const struct sock *sk, struct sk_buff *skb) { u32 mark = READ_ONCE(sk->sk_mark); if (!mark && READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fwmark_accept)) return skb->mark; return mark; } static inline int inet_request_bound_dev_if(const struct sock *sk, struct sk_buff *skb) { int bound_dev_if = READ_ONCE(sk->sk_bound_dev_if); #ifdef CONFIG_NET_L3_MASTER_DEV struct net *net = sock_net(sk); if (!bound_dev_if && READ_ONCE(net->ipv4.sysctl_tcp_l3mdev_accept)) return l3mdev_master_ifindex_by_index(net, skb->skb_iif); #endif return bound_dev_if; } static inline int inet_sk_bound_l3mdev(const struct sock *sk) { #ifdef CONFIG_NET_L3_MASTER_DEV struct net *net = sock_net(sk); if (!READ_ONCE(net->ipv4.sysctl_tcp_l3mdev_accept)) return l3mdev_master_ifindex_by_index(net, sk->sk_bound_dev_if); #endif return 0; } static inline bool inet_bound_dev_eq(bool l3mdev_accept, int bound_dev_if, int dif, int sdif) { if (!bound_dev_if) return !sdif || l3mdev_accept; return bound_dev_if == dif || bound_dev_if == sdif; } static inline bool inet_sk_bound_dev_eq(const struct net *net, int bound_dev_if, int dif, int sdif) { #if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV) return 
inet_bound_dev_eq(!!READ_ONCE(net->ipv4.sysctl_tcp_l3mdev_accept), bound_dev_if, dif, sdif); #else return inet_bound_dev_eq(true, bound_dev_if, dif, sdif); #endif } struct inet_cork { unsigned int flags; __be32 addr; struct ip_options *opt; unsigned int fragsize; int length; /* Total length of all frames */ struct dst_entry *dst; u8 tx_flags; __u8 ttl; __s16 tos; u32 priority; __u16 gso_size; u32 ts_opt_id; u64 transmit_time; u32 mark; }; struct inet_cork_full { struct inet_cork base; struct flowi fl; }; struct ip_mc_socklist; struct ipv6_pinfo; struct rtable; /** struct inet_sock - representation of INET sockets * * @sk - ancestor class * @pinet6 - pointer to IPv6 control block * @inet_daddr - Foreign IPv4 addr * @inet_rcv_saddr - Bound local IPv4 addr * @inet_dport - Destination port * @inet_num - Local port * @inet_flags - various atomic flags * @inet_saddr - Sending source * @uc_ttl - Unicast TTL * @inet_sport - Source port * @inet_id - ID counter for DF pkts * @tos - TOS * @mc_ttl - Multicasting TTL * @uc_index - Unicast outgoing device index * @mc_index - Multicast device index * @mc_list - Group array * @cork - info to build ip hdr on each ip frag while socket is corked */ struct inet_sock { /* sk and pinet6 has to be the first two members of inet_sock */ struct sock sk; #if IS_ENABLED(CONFIG_IPV6) struct ipv6_pinfo *pinet6; #endif /* Socket demultiplex comparisons on incoming packets. */ #define inet_daddr sk.__sk_common.skc_daddr #define inet_rcv_saddr sk.__sk_common.skc_rcv_saddr #define inet_dport sk.__sk_common.skc_dport #define inet_num sk.__sk_common.skc_num unsigned long inet_flags; __be32 inet_saddr; __s16 uc_ttl; __be16 inet_sport; struct ip_options_rcu __rcu *inet_opt; atomic_t inet_id; __u8 tos; __u8 min_ttl; __u8 mc_ttl; __u8 pmtudisc; __u8 rcv_tos; __u8 convert_csum; int uc_index; int mc_index; __be32 mc_addr; u32 local_port_range; /* high << 16 | low */ struct ip_mc_socklist __rcu *mc_list; struct inet_cork_full cork; }; #define IPCORK_OPT 1 /* ip-options has been held in ipcork.opt */ #define IPCORK_TS_OPT_ID 2 /* ts_opt_id field is valid, overriding sk_tskey */ enum { INET_FLAGS_PKTINFO = 0, INET_FLAGS_TTL = 1, INET_FLAGS_TOS = 2, INET_FLAGS_RECVOPTS = 3, INET_FLAGS_RETOPTS = 4, INET_FLAGS_PASSSEC = 5, INET_FLAGS_ORIGDSTADDR = 6, INET_FLAGS_CHECKSUM = 7, INET_FLAGS_RECVFRAGSIZE = 8, INET_FLAGS_RECVERR = 9, INET_FLAGS_RECVERR_RFC4884 = 10, INET_FLAGS_FREEBIND = 11, INET_FLAGS_HDRINCL = 12, INET_FLAGS_MC_LOOP = 13, INET_FLAGS_MC_ALL = 14, INET_FLAGS_TRANSPARENT = 15, INET_FLAGS_IS_ICSK = 16, INET_FLAGS_NODEFRAG = 17, INET_FLAGS_BIND_ADDRESS_NO_PORT = 18, INET_FLAGS_DEFER_CONNECT = 19, INET_FLAGS_MC6_LOOP = 20, INET_FLAGS_RECVERR6_RFC4884 = 21, INET_FLAGS_MC6_ALL = 22, INET_FLAGS_AUTOFLOWLABEL_SET = 23, INET_FLAGS_AUTOFLOWLABEL = 24, INET_FLAGS_DONTFRAG = 25, INET_FLAGS_RECVERR6 = 26, INET_FLAGS_REPFLOW = 27, INET_FLAGS_RTALERT_ISOLATE = 28, INET_FLAGS_SNDFLOW = 29, INET_FLAGS_RTALERT = 30, }; /* cmsg flags for inet */ #define IP_CMSG_PKTINFO BIT(INET_FLAGS_PKTINFO) #define IP_CMSG_TTL BIT(INET_FLAGS_TTL) #define IP_CMSG_TOS BIT(INET_FLAGS_TOS) #define IP_CMSG_RECVOPTS BIT(INET_FLAGS_RECVOPTS) #define IP_CMSG_RETOPTS BIT(INET_FLAGS_RETOPTS) #define IP_CMSG_PASSSEC BIT(INET_FLAGS_PASSSEC) #define IP_CMSG_ORIGDSTADDR BIT(INET_FLAGS_ORIGDSTADDR) #define IP_CMSG_CHECKSUM BIT(INET_FLAGS_CHECKSUM) #define IP_CMSG_RECVFRAGSIZE BIT(INET_FLAGS_RECVFRAGSIZE) #define IP_CMSG_ALL (IP_CMSG_PKTINFO | IP_CMSG_TTL | \ IP_CMSG_TOS | IP_CMSG_RECVOPTS | \ IP_CMSG_RETOPTS | 
IP_CMSG_PASSSEC | \ IP_CMSG_ORIGDSTADDR | IP_CMSG_CHECKSUM | \ IP_CMSG_RECVFRAGSIZE) static inline unsigned long inet_cmsg_flags(const struct inet_sock *inet) { return READ_ONCE(inet->inet_flags) & IP_CMSG_ALL; } static inline dscp_t inet_sk_dscp(const struct inet_sock *inet) { return inet_dsfield_to_dscp(READ_ONCE(inet->tos)); } #define inet_test_bit(nr, sk) \ test_bit(INET_FLAGS_##nr, &inet_sk(sk)->inet_flags) #define inet_set_bit(nr, sk) \ set_bit(INET_FLAGS_##nr, &inet_sk(sk)->inet_flags) #define inet_clear_bit(nr, sk) \ clear_bit(INET_FLAGS_##nr, &inet_sk(sk)->inet_flags) #define inet_assign_bit(nr, sk, val) \ assign_bit(INET_FLAGS_##nr, &inet_sk(sk)->inet_flags, val) /** * sk_to_full_sk - Access to a full socket * @sk: pointer to a socket * * SYNACK messages might be attached to request sockets. * Some places want to reach the listener in this case. */ static inline struct sock *sk_to_full_sk(struct sock *sk) { #ifdef CONFIG_INET if (sk && READ_ONCE(sk->sk_state) == TCP_NEW_SYN_RECV) sk = inet_reqsk(sk)->rsk_listener; if (sk && READ_ONCE(sk->sk_state) == TCP_TIME_WAIT) sk = NULL; #endif return sk; } /* sk_to_full_sk() variant with a const argument */ static inline const struct sock *sk_const_to_full_sk(const struct sock *sk) { #ifdef CONFIG_INET if (sk && READ_ONCE(sk->sk_state) == TCP_NEW_SYN_RECV) sk = ((const struct request_sock *)sk)->rsk_listener; if (sk && READ_ONCE(sk->sk_state) == TCP_TIME_WAIT) sk = NULL; #endif return sk; } static inline struct sock *skb_to_full_sk(const struct sk_buff *skb) { return sk_to_full_sk(skb->sk); } #define inet_sk(ptr) container_of_const(ptr, struct inet_sock, sk) static inline void __inet_sk_copy_descendant(struct sock *sk_to, const struct sock *sk_from, const int ancestor_size) { memcpy(inet_sk(sk_to) + 1, inet_sk(sk_from) + 1, sk_from->sk_prot->obj_size - ancestor_size); } int inet_sk_rebuild_header(struct sock *sk); /** * inet_sk_state_load - read sk->sk_state for lockless contexts * @sk: socket pointer * * Paired with inet_sk_state_store(). Used in places we don't hold socket lock: * tcp_diag_get_info(), tcp_get_info(), tcp_poll(), get_tcp4_sock() ... */ static inline int inet_sk_state_load(const struct sock *sk) { /* state change might impact lockless readers. */ return smp_load_acquire(&sk->sk_state); } /** * inet_sk_state_store - update sk->sk_state * @sk: socket pointer * @newstate: new state * * Paired with inet_sk_state_load(). Should be used in contexts where * state change might impact lockless readers. 
*/ void inet_sk_state_store(struct sock *sk, int newstate); void inet_sk_set_state(struct sock *sk, int state); static inline unsigned int __inet_ehashfn(const __be32 laddr, const __u16 lport, const __be32 faddr, const __be16 fport, u32 initval) { return jhash_3words((__force __u32) laddr, (__force __u32) faddr, ((__u32) lport) << 16 | (__force __u32)fport, initval); } struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener, bool attach_listener); static inline __u8 inet_sk_flowi_flags(const struct sock *sk) { __u8 flags = 0; if (inet_test_bit(TRANSPARENT, sk) || inet_test_bit(HDRINCL, sk)) flags |= FLOWI_FLAG_ANYSRC; return flags; } static inline void inet_inc_convert_csum(struct sock *sk) { inet_sk(sk)->convert_csum++; } static inline void inet_dec_convert_csum(struct sock *sk) { if (inet_sk(sk)->convert_csum > 0) inet_sk(sk)->convert_csum--; } static inline bool inet_get_convert_csum(struct sock *sk) { return !!inet_sk(sk)->convert_csum; } static inline bool inet_can_nonlocal_bind(struct net *net, struct inet_sock *inet) { return READ_ONCE(net->ipv4.sysctl_ip_nonlocal_bind) || test_bit(INET_FLAGS_FREEBIND, &inet->inet_flags) || test_bit(INET_FLAGS_TRANSPARENT, &inet->inet_flags); } static inline bool inet_addr_valid_or_nonlocal(struct net *net, struct inet_sock *inet, __be32 addr, int addr_type) { return inet_can_nonlocal_bind(net, inet) || addr == htonl(INADDR_ANY) || addr_type == RTN_LOCAL || addr_type == RTN_MULTICAST || addr_type == RTN_BROADCAST; } #endif /* _INET_SOCK_H */
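inet_bound_dev_eq() is the core of the l3mdev/VRF acceptance rule used by inet_sk_bound_dev_eq(). To make its truth table easier to see, here is the same function copied into a standalone program with made-up ifindex values; this only mirrors the header's logic, it adds nothing to it.

/*
 * Illustrative sketch only: inet_bound_dev_eq() from the header above,
 * restated as a standalone program. The ifindex values are invented.
 */
#include <stdbool.h>
#include <stdio.h>

static bool inet_bound_dev_eq(bool l3mdev_accept, int bound_dev_if,
			      int dif, int sdif)
{
	if (!bound_dev_if)
		return !sdif || l3mdev_accept;
	return bound_dev_if == dif || bound_dev_if == sdif;
}

int main(void)
{
	/* unbound socket, packet arriving via a VRF slave (sdif = 5):
	 * matches only when tcp_l3mdev_accept is enabled */
	printf("%d\n", inet_bound_dev_eq(false, 0, 3, 5));	/* 0 */
	printf("%d\n", inet_bound_dev_eq(true,  0, 3, 5));	/* 1 */
	/* socket bound to the slave device itself always matches */
	printf("%d\n", inet_bound_dev_eq(false, 5, 3, 5));	/* 1 */
	return 0;
}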
// SPDX-License-Identifier:
GPL-2.0-or-later /* * Cryptographic API. * * DES & Triple DES EDE Cipher Algorithms. * * Copyright (c) 2005 Dag Arne Osvik <da@osvik.no> */ #include <linux/bitops.h> #include <linux/compiler.h> #include <linux/crypto.h> #include <linux/errno.h> #include <linux/fips.h> #include <linux/init.h> #include <linux/module.h> #include <linux/string.h> #include <linux/types.h> #include <linux/unaligned.h> #include <crypto/des.h> #include <crypto/internal/des.h> #define ROL(x, r) ((x) = rol32((x), (r))) #define ROR(x, r) ((x) = ror32((x), (r))) /* Lookup tables for key expansion */ static const u8 pc1[256] = { 0x00, 0x00, 0x40, 0x04, 0x10, 0x10, 0x50, 0x14, 0x04, 0x40, 0x44, 0x44, 0x14, 0x50, 0x54, 0x54, 0x02, 0x02, 0x42, 0x06, 0x12, 0x12, 0x52, 0x16, 0x06, 0x42, 0x46, 0x46, 0x16, 0x52, 0x56, 0x56, 0x80, 0x08, 0xc0, 0x0c, 0x90, 0x18, 0xd0, 0x1c, 0x84, 0x48, 0xc4, 0x4c, 0x94, 0x58, 0xd4, 0x5c, 0x82, 0x0a, 0xc2, 0x0e, 0x92, 0x1a, 0xd2, 0x1e, 0x86, 0x4a, 0xc6, 0x4e, 0x96, 0x5a, 0xd6, 0x5e, 0x20, 0x20, 0x60, 0x24, 0x30, 0x30, 0x70, 0x34, 0x24, 0x60, 0x64, 0x64, 0x34, 0x70, 0x74, 0x74, 0x22, 0x22, 0x62, 0x26, 0x32, 0x32, 0x72, 0x36, 0x26, 0x62, 0x66, 0x66, 0x36, 0x72, 0x76, 0x76, 0xa0, 0x28, 0xe0, 0x2c, 0xb0, 0x38, 0xf0, 0x3c, 0xa4, 0x68, 0xe4, 0x6c, 0xb4, 0x78, 0xf4, 0x7c, 0xa2, 0x2a, 0xe2, 0x2e, 0xb2, 0x3a, 0xf2, 0x3e, 0xa6, 0x6a, 0xe6, 0x6e, 0xb6, 0x7a, 0xf6, 0x7e, 0x08, 0x80, 0x48, 0x84, 0x18, 0x90, 0x58, 0x94, 0x0c, 0xc0, 0x4c, 0xc4, 0x1c, 0xd0, 0x5c, 0xd4, 0x0a, 0x82, 0x4a, 0x86, 0x1a, 0x92, 0x5a, 0x96, 0x0e, 0xc2, 0x4e, 0xc6, 0x1e, 0xd2, 0x5e, 0xd6, 0x88, 0x88, 0xc8, 0x8c, 0x98, 0x98, 0xd8, 0x9c, 0x8c, 0xc8, 0xcc, 0xcc, 0x9c, 0xd8, 0xdc, 0xdc, 0x8a, 0x8a, 0xca, 0x8e, 0x9a, 0x9a, 0xda, 0x9e, 0x8e, 0xca, 0xce, 0xce, 0x9e, 0xda, 0xde, 0xde, 0x28, 0xa0, 0x68, 0xa4, 0x38, 0xb0, 0x78, 0xb4, 0x2c, 0xe0, 0x6c, 0xe4, 0x3c, 0xf0, 0x7c, 0xf4, 0x2a, 0xa2, 0x6a, 0xa6, 0x3a, 0xb2, 0x7a, 0xb6, 0x2e, 0xe2, 0x6e, 0xe6, 0x3e, 0xf2, 0x7e, 0xf6, 0xa8, 0xa8, 0xe8, 0xac, 0xb8, 0xb8, 0xf8, 0xbc, 0xac, 0xe8, 0xec, 0xec, 0xbc, 0xf8, 0xfc, 0xfc, 0xaa, 0xaa, 0xea, 0xae, 0xba, 0xba, 0xfa, 0xbe, 0xae, 0xea, 0xee, 0xee, 0xbe, 0xfa, 0xfe, 0xfe }; static const u8 rs[256] = { 0x00, 0x00, 0x80, 0x80, 0x02, 0x02, 0x82, 0x82, 0x04, 0x04, 0x84, 0x84, 0x06, 0x06, 0x86, 0x86, 0x08, 0x08, 0x88, 0x88, 0x0a, 0x0a, 0x8a, 0x8a, 0x0c, 0x0c, 0x8c, 0x8c, 0x0e, 0x0e, 0x8e, 0x8e, 0x10, 0x10, 0x90, 0x90, 0x12, 0x12, 0x92, 0x92, 0x14, 0x14, 0x94, 0x94, 0x16, 0x16, 0x96, 0x96, 0x18, 0x18, 0x98, 0x98, 0x1a, 0x1a, 0x9a, 0x9a, 0x1c, 0x1c, 0x9c, 0x9c, 0x1e, 0x1e, 0x9e, 0x9e, 0x20, 0x20, 0xa0, 0xa0, 0x22, 0x22, 0xa2, 0xa2, 0x24, 0x24, 0xa4, 0xa4, 0x26, 0x26, 0xa6, 0xa6, 0x28, 0x28, 0xa8, 0xa8, 0x2a, 0x2a, 0xaa, 0xaa, 0x2c, 0x2c, 0xac, 0xac, 0x2e, 0x2e, 0xae, 0xae, 0x30, 0x30, 0xb0, 0xb0, 0x32, 0x32, 0xb2, 0xb2, 0x34, 0x34, 0xb4, 0xb4, 0x36, 0x36, 0xb6, 0xb6, 0x38, 0x38, 0xb8, 0xb8, 0x3a, 0x3a, 0xba, 0xba, 0x3c, 0x3c, 0xbc, 0xbc, 0x3e, 0x3e, 0xbe, 0xbe, 0x40, 0x40, 0xc0, 0xc0, 0x42, 0x42, 0xc2, 0xc2, 0x44, 0x44, 0xc4, 0xc4, 0x46, 0x46, 0xc6, 0xc6, 0x48, 0x48, 0xc8, 0xc8, 0x4a, 0x4a, 0xca, 0xca, 0x4c, 0x4c, 0xcc, 0xcc, 0x4e, 0x4e, 0xce, 0xce, 0x50, 0x50, 0xd0, 0xd0, 0x52, 0x52, 0xd2, 0xd2, 0x54, 0x54, 0xd4, 0xd4, 0x56, 0x56, 0xd6, 0xd6, 0x58, 0x58, 0xd8, 0xd8, 0x5a, 0x5a, 0xda, 0xda, 0x5c, 0x5c, 0xdc, 0xdc, 0x5e, 0x5e, 0xde, 0xde, 0x60, 0x60, 0xe0, 0xe0, 0x62, 0x62, 0xe2, 0xe2, 0x64, 0x64, 0xe4, 0xe4, 0x66, 0x66, 0xe6, 0xe6, 0x68, 0x68, 0xe8, 0xe8, 0x6a, 0x6a, 0xea, 0xea, 0x6c, 0x6c, 0xec, 0xec, 0x6e, 0x6e, 0xee, 0xee, 0x70, 0x70, 0xf0, 0xf0, 0x72, 0x72, 
0xf2, 0xf2, 0x74, 0x74, 0xf4, 0xf4, 0x76, 0x76, 0xf6, 0xf6, 0x78, 0x78, 0xf8, 0xf8, 0x7a, 0x7a, 0xfa, 0xfa, 0x7c, 0x7c, 0xfc, 0xfc, 0x7e, 0x7e, 0xfe, 0xfe }; static const u32 pc2[1024] = { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00040000, 0x00000000, 0x04000000, 0x00100000, 0x00400000, 0x00000008, 0x00000800, 0x40000000, 0x00440000, 0x00000008, 0x04000800, 0x40100000, 0x00000400, 0x00000020, 0x08000000, 0x00000100, 0x00040400, 0x00000020, 0x0c000000, 0x00100100, 0x00400400, 0x00000028, 0x08000800, 0x40000100, 0x00440400, 0x00000028, 0x0c000800, 0x40100100, 0x80000000, 0x00000010, 0x00000000, 0x00800000, 0x80040000, 0x00000010, 0x04000000, 0x00900000, 0x80400000, 0x00000018, 0x00000800, 0x40800000, 0x80440000, 0x00000018, 0x04000800, 0x40900000, 0x80000400, 0x00000030, 0x08000000, 0x00800100, 0x80040400, 0x00000030, 0x0c000000, 0x00900100, 0x80400400, 0x00000038, 0x08000800, 0x40800100, 0x80440400, 0x00000038, 0x0c000800, 0x40900100, 0x10000000, 0x00000000, 0x00200000, 0x00001000, 0x10040000, 0x00000000, 0x04200000, 0x00101000, 0x10400000, 0x00000008, 0x00200800, 0x40001000, 0x10440000, 0x00000008, 0x04200800, 0x40101000, 0x10000400, 0x00000020, 0x08200000, 0x00001100, 0x10040400, 0x00000020, 0x0c200000, 0x00101100, 0x10400400, 0x00000028, 0x08200800, 0x40001100, 0x10440400, 0x00000028, 0x0c200800, 0x40101100, 0x90000000, 0x00000010, 0x00200000, 0x00801000, 0x90040000, 0x00000010, 0x04200000, 0x00901000, 0x90400000, 0x00000018, 0x00200800, 0x40801000, 0x90440000, 0x00000018, 0x04200800, 0x40901000, 0x90000400, 0x00000030, 0x08200000, 0x00801100, 0x90040400, 0x00000030, 0x0c200000, 0x00901100, 0x90400400, 0x00000038, 0x08200800, 0x40801100, 0x90440400, 0x00000038, 0x0c200800, 0x40901100, 0x00000200, 0x00080000, 0x00000000, 0x00000004, 0x00040200, 0x00080000, 0x04000000, 0x00100004, 0x00400200, 0x00080008, 0x00000800, 0x40000004, 0x00440200, 0x00080008, 0x04000800, 0x40100004, 0x00000600, 0x00080020, 0x08000000, 0x00000104, 0x00040600, 0x00080020, 0x0c000000, 0x00100104, 0x00400600, 0x00080028, 0x08000800, 0x40000104, 0x00440600, 0x00080028, 0x0c000800, 0x40100104, 0x80000200, 0x00080010, 0x00000000, 0x00800004, 0x80040200, 0x00080010, 0x04000000, 0x00900004, 0x80400200, 0x00080018, 0x00000800, 0x40800004, 0x80440200, 0x00080018, 0x04000800, 0x40900004, 0x80000600, 0x00080030, 0x08000000, 0x00800104, 0x80040600, 0x00080030, 0x0c000000, 0x00900104, 0x80400600, 0x00080038, 0x08000800, 0x40800104, 0x80440600, 0x00080038, 0x0c000800, 0x40900104, 0x10000200, 0x00080000, 0x00200000, 0x00001004, 0x10040200, 0x00080000, 0x04200000, 0x00101004, 0x10400200, 0x00080008, 0x00200800, 0x40001004, 0x10440200, 0x00080008, 0x04200800, 0x40101004, 0x10000600, 0x00080020, 0x08200000, 0x00001104, 0x10040600, 0x00080020, 0x0c200000, 0x00101104, 0x10400600, 0x00080028, 0x08200800, 0x40001104, 0x10440600, 0x00080028, 0x0c200800, 0x40101104, 0x90000200, 0x00080010, 0x00200000, 0x00801004, 0x90040200, 0x00080010, 0x04200000, 0x00901004, 0x90400200, 0x00080018, 0x00200800, 0x40801004, 0x90440200, 0x00080018, 0x04200800, 0x40901004, 0x90000600, 0x00080030, 0x08200000, 0x00801104, 0x90040600, 0x00080030, 0x0c200000, 0x00901104, 0x90400600, 0x00080038, 0x08200800, 0x40801104, 0x90440600, 0x00080038, 0x0c200800, 0x40901104, 0x00000002, 0x00002000, 0x20000000, 0x00000001, 0x00040002, 0x00002000, 0x24000000, 0x00100001, 0x00400002, 0x00002008, 0x20000800, 0x40000001, 0x00440002, 0x00002008, 0x24000800, 0x40100001, 0x00000402, 0x00002020, 0x28000000, 0x00000101, 0x00040402, 0x00002020, 0x2c000000, 0x00100101, 
0x00400402, 0x00002028, 0x28000800, 0x40000101, 0x00440402, 0x00002028, 0x2c000800, 0x40100101, 0x80000002, 0x00002010, 0x20000000, 0x00800001, 0x80040002, 0x00002010, 0x24000000, 0x00900001, 0x80400002, 0x00002018, 0x20000800, 0x40800001, 0x80440002, 0x00002018, 0x24000800, 0x40900001, 0x80000402, 0x00002030, 0x28000000, 0x00800101, 0x80040402, 0x00002030, 0x2c000000, 0x00900101, 0x80400402, 0x00002038, 0x28000800, 0x40800101, 0x80440402, 0x00002038, 0x2c000800, 0x40900101, 0x10000002, 0x00002000, 0x20200000, 0x00001001, 0x10040002, 0x00002000, 0x24200000, 0x00101001, 0x10400002, 0x00002008, 0x20200800, 0x40001001, 0x10440002, 0x00002008, 0x24200800, 0x40101001, 0x10000402, 0x00002020, 0x28200000, 0x00001101, 0x10040402, 0x00002020, 0x2c200000, 0x00101101, 0x10400402, 0x00002028, 0x28200800, 0x40001101, 0x10440402, 0x00002028, 0x2c200800, 0x40101101, 0x90000002, 0x00002010, 0x20200000, 0x00801001, 0x90040002, 0x00002010, 0x24200000, 0x00901001, 0x90400002, 0x00002018, 0x20200800, 0x40801001, 0x90440002, 0x00002018, 0x24200800, 0x40901001, 0x90000402, 0x00002030, 0x28200000, 0x00801101, 0x90040402, 0x00002030, 0x2c200000, 0x00901101, 0x90400402, 0x00002038, 0x28200800, 0x40801101, 0x90440402, 0x00002038, 0x2c200800, 0x40901101, 0x00000202, 0x00082000, 0x20000000, 0x00000005, 0x00040202, 0x00082000, 0x24000000, 0x00100005, 0x00400202, 0x00082008, 0x20000800, 0x40000005, 0x00440202, 0x00082008, 0x24000800, 0x40100005, 0x00000602, 0x00082020, 0x28000000, 0x00000105, 0x00040602, 0x00082020, 0x2c000000, 0x00100105, 0x00400602, 0x00082028, 0x28000800, 0x40000105, 0x00440602, 0x00082028, 0x2c000800, 0x40100105, 0x80000202, 0x00082010, 0x20000000, 0x00800005, 0x80040202, 0x00082010, 0x24000000, 0x00900005, 0x80400202, 0x00082018, 0x20000800, 0x40800005, 0x80440202, 0x00082018, 0x24000800, 0x40900005, 0x80000602, 0x00082030, 0x28000000, 0x00800105, 0x80040602, 0x00082030, 0x2c000000, 0x00900105, 0x80400602, 0x00082038, 0x28000800, 0x40800105, 0x80440602, 0x00082038, 0x2c000800, 0x40900105, 0x10000202, 0x00082000, 0x20200000, 0x00001005, 0x10040202, 0x00082000, 0x24200000, 0x00101005, 0x10400202, 0x00082008, 0x20200800, 0x40001005, 0x10440202, 0x00082008, 0x24200800, 0x40101005, 0x10000602, 0x00082020, 0x28200000, 0x00001105, 0x10040602, 0x00082020, 0x2c200000, 0x00101105, 0x10400602, 0x00082028, 0x28200800, 0x40001105, 0x10440602, 0x00082028, 0x2c200800, 0x40101105, 0x90000202, 0x00082010, 0x20200000, 0x00801005, 0x90040202, 0x00082010, 0x24200000, 0x00901005, 0x90400202, 0x00082018, 0x20200800, 0x40801005, 0x90440202, 0x00082018, 0x24200800, 0x40901005, 0x90000602, 0x00082030, 0x28200000, 0x00801105, 0x90040602, 0x00082030, 0x2c200000, 0x00901105, 0x90400602, 0x00082038, 0x28200800, 0x40801105, 0x90440602, 0x00082038, 0x2c200800, 0x40901105, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000008, 0x00080000, 0x10000000, 0x02000000, 0x00000000, 0x00000080, 0x00001000, 0x02000000, 0x00000008, 0x00080080, 0x10001000, 0x00004000, 0x00000000, 0x00000040, 0x00040000, 0x00004000, 0x00000008, 0x00080040, 0x10040000, 0x02004000, 0x00000000, 0x000000c0, 0x00041000, 0x02004000, 0x00000008, 0x000800c0, 0x10041000, 0x00020000, 0x00008000, 0x08000000, 0x00200000, 0x00020000, 0x00008008, 0x08080000, 0x10200000, 0x02020000, 0x00008000, 0x08000080, 0x00201000, 0x02020000, 0x00008008, 0x08080080, 0x10201000, 0x00024000, 0x00008000, 0x08000040, 0x00240000, 0x00024000, 0x00008008, 0x08080040, 0x10240000, 0x02024000, 0x00008000, 0x080000c0, 0x00241000, 0x02024000, 0x00008008, 0x080800c0, 0x10241000, 
0x00000000, 0x01000000, 0x00002000, 0x00000020, 0x00000000, 0x01000008, 0x00082000, 0x10000020, 0x02000000, 0x01000000, 0x00002080, 0x00001020, 0x02000000, 0x01000008, 0x00082080, 0x10001020, 0x00004000, 0x01000000, 0x00002040, 0x00040020, 0x00004000, 0x01000008, 0x00082040, 0x10040020, 0x02004000, 0x01000000, 0x000020c0, 0x00041020, 0x02004000, 0x01000008, 0x000820c0, 0x10041020, 0x00020000, 0x01008000, 0x08002000, 0x00200020, 0x00020000, 0x01008008, 0x08082000, 0x10200020, 0x02020000, 0x01008000, 0x08002080, 0x00201020, 0x02020000, 0x01008008, 0x08082080, 0x10201020, 0x00024000, 0x01008000, 0x08002040, 0x00240020, 0x00024000, 0x01008008, 0x08082040, 0x10240020, 0x02024000, 0x01008000, 0x080020c0, 0x00241020, 0x02024000, 0x01008008, 0x080820c0, 0x10241020, 0x00000400, 0x04000000, 0x00100000, 0x00000004, 0x00000400, 0x04000008, 0x00180000, 0x10000004, 0x02000400, 0x04000000, 0x00100080, 0x00001004, 0x02000400, 0x04000008, 0x00180080, 0x10001004, 0x00004400, 0x04000000, 0x00100040, 0x00040004, 0x00004400, 0x04000008, 0x00180040, 0x10040004, 0x02004400, 0x04000000, 0x001000c0, 0x00041004, 0x02004400, 0x04000008, 0x001800c0, 0x10041004, 0x00020400, 0x04008000, 0x08100000, 0x00200004, 0x00020400, 0x04008008, 0x08180000, 0x10200004, 0x02020400, 0x04008000, 0x08100080, 0x00201004, 0x02020400, 0x04008008, 0x08180080, 0x10201004, 0x00024400, 0x04008000, 0x08100040, 0x00240004, 0x00024400, 0x04008008, 0x08180040, 0x10240004, 0x02024400, 0x04008000, 0x081000c0, 0x00241004, 0x02024400, 0x04008008, 0x081800c0, 0x10241004, 0x00000400, 0x05000000, 0x00102000, 0x00000024, 0x00000400, 0x05000008, 0x00182000, 0x10000024, 0x02000400, 0x05000000, 0x00102080, 0x00001024, 0x02000400, 0x05000008, 0x00182080, 0x10001024, 0x00004400, 0x05000000, 0x00102040, 0x00040024, 0x00004400, 0x05000008, 0x00182040, 0x10040024, 0x02004400, 0x05000000, 0x001020c0, 0x00041024, 0x02004400, 0x05000008, 0x001820c0, 0x10041024, 0x00020400, 0x05008000, 0x08102000, 0x00200024, 0x00020400, 0x05008008, 0x08182000, 0x10200024, 0x02020400, 0x05008000, 0x08102080, 0x00201024, 0x02020400, 0x05008008, 0x08182080, 0x10201024, 0x00024400, 0x05008000, 0x08102040, 0x00240024, 0x00024400, 0x05008008, 0x08182040, 0x10240024, 0x02024400, 0x05008000, 0x081020c0, 0x00241024, 0x02024400, 0x05008008, 0x081820c0, 0x10241024, 0x00000800, 0x00010000, 0x20000000, 0x00000010, 0x00000800, 0x00010008, 0x20080000, 0x10000010, 0x02000800, 0x00010000, 0x20000080, 0x00001010, 0x02000800, 0x00010008, 0x20080080, 0x10001010, 0x00004800, 0x00010000, 0x20000040, 0x00040010, 0x00004800, 0x00010008, 0x20080040, 0x10040010, 0x02004800, 0x00010000, 0x200000c0, 0x00041010, 0x02004800, 0x00010008, 0x200800c0, 0x10041010, 0x00020800, 0x00018000, 0x28000000, 0x00200010, 0x00020800, 0x00018008, 0x28080000, 0x10200010, 0x02020800, 0x00018000, 0x28000080, 0x00201010, 0x02020800, 0x00018008, 0x28080080, 0x10201010, 0x00024800, 0x00018000, 0x28000040, 0x00240010, 0x00024800, 0x00018008, 0x28080040, 0x10240010, 0x02024800, 0x00018000, 0x280000c0, 0x00241010, 0x02024800, 0x00018008, 0x280800c0, 0x10241010, 0x00000800, 0x01010000, 0x20002000, 0x00000030, 0x00000800, 0x01010008, 0x20082000, 0x10000030, 0x02000800, 0x01010000, 0x20002080, 0x00001030, 0x02000800, 0x01010008, 0x20082080, 0x10001030, 0x00004800, 0x01010000, 0x20002040, 0x00040030, 0x00004800, 0x01010008, 0x20082040, 0x10040030, 0x02004800, 0x01010000, 0x200020c0, 0x00041030, 0x02004800, 0x01010008, 0x200820c0, 0x10041030, 0x00020800, 0x01018000, 0x28002000, 0x00200030, 0x00020800, 0x01018008, 0x28082000, 0x10200030, 
0x02020800, 0x01018000, 0x28002080, 0x00201030, 0x02020800, 0x01018008, 0x28082080, 0x10201030, 0x00024800, 0x01018000, 0x28002040, 0x00240030, 0x00024800, 0x01018008, 0x28082040, 0x10240030, 0x02024800, 0x01018000, 0x280020c0, 0x00241030, 0x02024800, 0x01018008, 0x280820c0, 0x10241030, 0x00000c00, 0x04010000, 0x20100000, 0x00000014, 0x00000c00, 0x04010008, 0x20180000, 0x10000014, 0x02000c00, 0x04010000, 0x20100080, 0x00001014, 0x02000c00, 0x04010008, 0x20180080, 0x10001014, 0x00004c00, 0x04010000, 0x20100040, 0x00040014, 0x00004c00, 0x04010008, 0x20180040, 0x10040014, 0x02004c00, 0x04010000, 0x201000c0, 0x00041014, 0x02004c00, 0x04010008, 0x201800c0, 0x10041014, 0x00020c00, 0x04018000, 0x28100000, 0x00200014, 0x00020c00, 0x04018008, 0x28180000, 0x10200014, 0x02020c00, 0x04018000, 0x28100080, 0x00201014, 0x02020c00, 0x04018008, 0x28180080, 0x10201014, 0x00024c00, 0x04018000, 0x28100040, 0x00240014, 0x00024c00, 0x04018008, 0x28180040, 0x10240014, 0x02024c00, 0x04018000, 0x281000c0, 0x00241014, 0x02024c00, 0x04018008, 0x281800c0, 0x10241014, 0x00000c00, 0x05010000, 0x20102000, 0x00000034, 0x00000c00, 0x05010008, 0x20182000, 0x10000034, 0x02000c00, 0x05010000, 0x20102080, 0x00001034, 0x02000c00, 0x05010008, 0x20182080, 0x10001034, 0x00004c00, 0x05010000, 0x20102040, 0x00040034, 0x00004c00, 0x05010008, 0x20182040, 0x10040034, 0x02004c00, 0x05010000, 0x201020c0, 0x00041034, 0x02004c00, 0x05010008, 0x201820c0, 0x10041034, 0x00020c00, 0x05018000, 0x28102000, 0x00200034, 0x00020c00, 0x05018008, 0x28182000, 0x10200034, 0x02020c00, 0x05018000, 0x28102080, 0x00201034, 0x02020c00, 0x05018008, 0x28182080, 0x10201034, 0x00024c00, 0x05018000, 0x28102040, 0x00240034, 0x00024c00, 0x05018008, 0x28182040, 0x10240034, 0x02024c00, 0x05018000, 0x281020c0, 0x00241034, 0x02024c00, 0x05018008, 0x281820c0, 0x10241034 }; /* S-box lookup tables */ static const u32 S1[64] = { 0x01010400, 0x00000000, 0x00010000, 0x01010404, 0x01010004, 0x00010404, 0x00000004, 0x00010000, 0x00000400, 0x01010400, 0x01010404, 0x00000400, 0x01000404, 0x01010004, 0x01000000, 0x00000004, 0x00000404, 0x01000400, 0x01000400, 0x00010400, 0x00010400, 0x01010000, 0x01010000, 0x01000404, 0x00010004, 0x01000004, 0x01000004, 0x00010004, 0x00000000, 0x00000404, 0x00010404, 0x01000000, 0x00010000, 0x01010404, 0x00000004, 0x01010000, 0x01010400, 0x01000000, 0x01000000, 0x00000400, 0x01010004, 0x00010000, 0x00010400, 0x01000004, 0x00000400, 0x00000004, 0x01000404, 0x00010404, 0x01010404, 0x00010004, 0x01010000, 0x01000404, 0x01000004, 0x00000404, 0x00010404, 0x01010400, 0x00000404, 0x01000400, 0x01000400, 0x00000000, 0x00010004, 0x00010400, 0x00000000, 0x01010004 }; static const u32 S2[64] = { 0x80108020, 0x80008000, 0x00008000, 0x00108020, 0x00100000, 0x00000020, 0x80100020, 0x80008020, 0x80000020, 0x80108020, 0x80108000, 0x80000000, 0x80008000, 0x00100000, 0x00000020, 0x80100020, 0x00108000, 0x00100020, 0x80008020, 0x00000000, 0x80000000, 0x00008000, 0x00108020, 0x80100000, 0x00100020, 0x80000020, 0x00000000, 0x00108000, 0x00008020, 0x80108000, 0x80100000, 0x00008020, 0x00000000, 0x00108020, 0x80100020, 0x00100000, 0x80008020, 0x80100000, 0x80108000, 0x00008000, 0x80100000, 0x80008000, 0x00000020, 0x80108020, 0x00108020, 0x00000020, 0x00008000, 0x80000000, 0x00008020, 0x80108000, 0x00100000, 0x80000020, 0x00100020, 0x80008020, 0x80000020, 0x00100020, 0x00108000, 0x00000000, 0x80008000, 0x00008020, 0x80000000, 0x80100020, 0x80108020, 0x00108000 }; static const u32 S3[64] = { 0x00000208, 0x08020200, 0x00000000, 0x08020008, 0x08000200, 0x00000000, 
0x00020208, 0x08000200, 0x00020008, 0x08000008, 0x08000008, 0x00020000, 0x08020208, 0x00020008, 0x08020000, 0x00000208, 0x08000000, 0x00000008, 0x08020200, 0x00000200, 0x00020200, 0x08020000, 0x08020008, 0x00020208, 0x08000208, 0x00020200, 0x00020000, 0x08000208, 0x00000008, 0x08020208, 0x00000200, 0x08000000, 0x08020200, 0x08000000, 0x00020008, 0x00000208, 0x00020000, 0x08020200, 0x08000200, 0x00000000, 0x00000200, 0x00020008, 0x08020208, 0x08000200, 0x08000008, 0x00000200, 0x00000000, 0x08020008, 0x08000208, 0x00020000, 0x08000000, 0x08020208, 0x00000008, 0x00020208, 0x00020200, 0x08000008, 0x08020000, 0x08000208, 0x00000208, 0x08020000, 0x00020208, 0x00000008, 0x08020008, 0x00020200 }; static const u32 S4[64] = { 0x00802001, 0x00002081, 0x00002081, 0x00000080, 0x00802080, 0x00800081, 0x00800001, 0x00002001, 0x00000000, 0x00802000, 0x00802000, 0x00802081, 0x00000081, 0x00000000, 0x00800080, 0x00800001, 0x00000001, 0x00002000, 0x00800000, 0x00802001, 0x00000080, 0x00800000, 0x00002001, 0x00002080, 0x00800081, 0x00000001, 0x00002080, 0x00800080, 0x00002000, 0x00802080, 0x00802081, 0x00000081, 0x00800080, 0x00800001, 0x00802000, 0x00802081, 0x00000081, 0x00000000, 0x00000000, 0x00802000, 0x00002080, 0x00800080, 0x00800081, 0x00000001, 0x00802001, 0x00002081, 0x00002081, 0x00000080, 0x00802081, 0x00000081, 0x00000001, 0x00002000, 0x00800001, 0x00002001, 0x00802080, 0x00800081, 0x00002001, 0x00002080, 0x00800000, 0x00802001, 0x00000080, 0x00800000, 0x00002000, 0x00802080 }; static const u32 S5[64] = { 0x00000100, 0x02080100, 0x02080000, 0x42000100, 0x00080000, 0x00000100, 0x40000000, 0x02080000, 0x40080100, 0x00080000, 0x02000100, 0x40080100, 0x42000100, 0x42080000, 0x00080100, 0x40000000, 0x02000000, 0x40080000, 0x40080000, 0x00000000, 0x40000100, 0x42080100, 0x42080100, 0x02000100, 0x42080000, 0x40000100, 0x00000000, 0x42000000, 0x02080100, 0x02000000, 0x42000000, 0x00080100, 0x00080000, 0x42000100, 0x00000100, 0x02000000, 0x40000000, 0x02080000, 0x42000100, 0x40080100, 0x02000100, 0x40000000, 0x42080000, 0x02080100, 0x40080100, 0x00000100, 0x02000000, 0x42080000, 0x42080100, 0x00080100, 0x42000000, 0x42080100, 0x02080000, 0x00000000, 0x40080000, 0x42000000, 0x00080100, 0x02000100, 0x40000100, 0x00080000, 0x00000000, 0x40080000, 0x02080100, 0x40000100 }; static const u32 S6[64] = { 0x20000010, 0x20400000, 0x00004000, 0x20404010, 0x20400000, 0x00000010, 0x20404010, 0x00400000, 0x20004000, 0x00404010, 0x00400000, 0x20000010, 0x00400010, 0x20004000, 0x20000000, 0x00004010, 0x00000000, 0x00400010, 0x20004010, 0x00004000, 0x00404000, 0x20004010, 0x00000010, 0x20400010, 0x20400010, 0x00000000, 0x00404010, 0x20404000, 0x00004010, 0x00404000, 0x20404000, 0x20000000, 0x20004000, 0x00000010, 0x20400010, 0x00404000, 0x20404010, 0x00400000, 0x00004010, 0x20000010, 0x00400000, 0x20004000, 0x20000000, 0x00004010, 0x20000010, 0x20404010, 0x00404000, 0x20400000, 0x00404010, 0x20404000, 0x00000000, 0x20400010, 0x00000010, 0x00004000, 0x20400000, 0x00404010, 0x00004000, 0x00400010, 0x20004010, 0x00000000, 0x20404000, 0x20000000, 0x00400010, 0x20004010 }; static const u32 S7[64] = { 0x00200000, 0x04200002, 0x04000802, 0x00000000, 0x00000800, 0x04000802, 0x00200802, 0x04200800, 0x04200802, 0x00200000, 0x00000000, 0x04000002, 0x00000002, 0x04000000, 0x04200002, 0x00000802, 0x04000800, 0x00200802, 0x00200002, 0x04000800, 0x04000002, 0x04200000, 0x04200800, 0x00200002, 0x04200000, 0x00000800, 0x00000802, 0x04200802, 0x00200800, 0x00000002, 0x04000000, 0x00200800, 0x04000000, 0x00200800, 0x00200000, 0x04000802, 
0x04000802, 0x04200002, 0x04200002, 0x00000002, 0x00200002, 0x04000000, 0x04000800, 0x00200000, 0x04200800, 0x00000802, 0x00200802, 0x04200800, 0x00000802, 0x04000002, 0x04200802, 0x04200000, 0x00200800, 0x00000000, 0x00000002, 0x04200802, 0x00000000, 0x00200802, 0x04200000, 0x00000800, 0x04000002, 0x04000800, 0x00000800, 0x00200002 }; static const u32 S8[64] = { 0x10001040, 0x00001000, 0x00040000, 0x10041040, 0x10000000, 0x10001040, 0x00000040, 0x10000000, 0x00040040, 0x10040000, 0x10041040, 0x00041000, 0x10041000, 0x00041040, 0x00001000, 0x00000040, 0x10040000, 0x10000040, 0x10001000, 0x00001040, 0x00041000, 0x00040040, 0x10040040, 0x10041000, 0x00001040, 0x00000000, 0x00000000, 0x10040040, 0x10000040, 0x10001000, 0x00041040, 0x00040000, 0x00041040, 0x00040000, 0x10041000, 0x00001000, 0x00000040, 0x10040040, 0x00001000, 0x00041040, 0x10001000, 0x00000040, 0x10000040, 0x10040000, 0x10040040, 0x10000000, 0x00040000, 0x10001040, 0x00000000, 0x10041040, 0x00040040, 0x10000040, 0x10040000, 0x10001000, 0x10001040, 0x00000000, 0x10041040, 0x00041000, 0x00041000, 0x00001040, 0x00001040, 0x00040040, 0x10000000, 0x10041000 }; /* Encryption components: IP, FP, and round function */ #define IP(L, R, T) \ ROL(R, 4); \ T = L; \ L ^= R; \ L &= 0xf0f0f0f0; \ R ^= L; \ L ^= T; \ ROL(R, 12); \ T = L; \ L ^= R; \ L &= 0xffff0000; \ R ^= L; \ L ^= T; \ ROR(R, 14); \ T = L; \ L ^= R; \ L &= 0xcccccccc; \ R ^= L; \ L ^= T; \ ROL(R, 6); \ T = L; \ L ^= R; \ L &= 0xff00ff00; \ R ^= L; \ L ^= T; \ ROR(R, 7); \ T = L; \ L ^= R; \ L &= 0xaaaaaaaa; \ R ^= L; \ L ^= T; \ ROL(L, 1); #define FP(L, R, T) \ ROR(L, 1); \ T = L; \ L ^= R; \ L &= 0xaaaaaaaa; \ R ^= L; \ L ^= T; \ ROL(R, 7); \ T = L; \ L ^= R; \ L &= 0xff00ff00; \ R ^= L; \ L ^= T; \ ROR(R, 6); \ T = L; \ L ^= R; \ L &= 0xcccccccc; \ R ^= L; \ L ^= T; \ ROL(R, 14); \ T = L; \ L ^= R; \ L &= 0xffff0000; \ R ^= L; \ L ^= T; \ ROR(R, 12); \ T = L; \ L ^= R; \ L &= 0xf0f0f0f0; \ R ^= L; \ L ^= T; \ ROR(R, 4); #define ROUND(L, R, A, B, K, d) \ B = K[0]; A = K[1]; K += d; \ B ^= R; A ^= R; \ B &= 0x3f3f3f3f; ROR(A, 4); \ L ^= S8[0xff & B]; A &= 0x3f3f3f3f; \ L ^= S6[0xff & (B >> 8)]; B >>= 16; \ L ^= S7[0xff & A]; \ L ^= S5[0xff & (A >> 8)]; A >>= 16; \ L ^= S4[0xff & B]; \ L ^= S2[0xff & (B >> 8)]; \ L ^= S3[0xff & A]; \ L ^= S1[0xff & (A >> 8)]; /* * PC2 lookup tables are organized as 2 consecutive sets of 4 interleaved * tables of 128 elements. One set is for C_i and the other for D_i, while * the 4 interleaved tables correspond to four 7-bit subsets of C_i or D_i. * * After PC1 each of the variables a,b,c,d contains a 7 bit subset of C_i * or D_i in bits 7-1 (bit 0 being the least significant). */ #define T1(x) pt[2 * (x) + 0] #define T2(x) pt[2 * (x) + 1] #define T3(x) pt[2 * (x) + 2] #define T4(x) pt[2 * (x) + 3] #define DES_PC2(a, b, c, d) (T4(d) | T3(c) | T2(b) | T1(a)) /* * Encryption key expansion * * RFC2451: Weak key checks SHOULD be performed. * * FIPS 74: * * Keys having duals are keys which produce all zeros, all ones, or * alternating zero-one patterns in the C and D registers after Permuted * Choice 1 has operated on the key. 
* */ static unsigned long des_ekey(u32 *pe, const u8 *k) { /* K&R: long is at least 32 bits */ unsigned long a, b, c, d, w; const u32 *pt = pc2; d = k[4]; d &= 0x0e; d <<= 4; d |= k[0] & 0x1e; d = pc1[d]; c = k[5]; c &= 0x0e; c <<= 4; c |= k[1] & 0x1e; c = pc1[c]; b = k[6]; b &= 0x0e; b <<= 4; b |= k[2] & 0x1e; b = pc1[b]; a = k[7]; a &= 0x0e; a <<= 4; a |= k[3] & 0x1e; a = pc1[a]; pe[15 * 2 + 0] = DES_PC2(a, b, c, d); d = rs[d]; pe[14 * 2 + 0] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; pe[13 * 2 + 0] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; pe[12 * 2 + 0] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; pe[11 * 2 + 0] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; pe[10 * 2 + 0] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; pe[ 9 * 2 + 0] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; pe[ 8 * 2 + 0] = DES_PC2(d, a, b, c); c = rs[c]; pe[ 7 * 2 + 0] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; pe[ 6 * 2 + 0] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; pe[ 5 * 2 + 0] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; pe[ 4 * 2 + 0] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; pe[ 3 * 2 + 0] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; pe[ 2 * 2 + 0] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; pe[ 1 * 2 + 0] = DES_PC2(c, d, a, b); b = rs[b]; pe[ 0 * 2 + 0] = DES_PC2(b, c, d, a); /* Check if first half is weak */ w = (a ^ c) | (b ^ d) | (rs[a] ^ c) | (b ^ rs[d]); /* Skip to next table set */ pt += 512; d = k[0]; d &= 0xe0; d >>= 4; d |= k[4] & 0xf0; d = pc1[d + 1]; c = k[1]; c &= 0xe0; c >>= 4; c |= k[5] & 0xf0; c = pc1[c + 1]; b = k[2]; b &= 0xe0; b >>= 4; b |= k[6] & 0xf0; b = pc1[b + 1]; a = k[3]; a &= 0xe0; a >>= 4; a |= k[7] & 0xf0; a = pc1[a + 1]; /* Check if second half is weak */ w |= (a ^ c) | (b ^ d) | (rs[a] ^ c) | (b ^ rs[d]); pe[15 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; pe[14 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; pe[13 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; pe[12 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; pe[11 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; pe[10 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; pe[ 9 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; pe[ 8 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; pe[ 7 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; pe[ 6 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; pe[ 5 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; pe[ 4 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; pe[ 3 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; pe[ 2 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; pe[ 1 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; pe[ 0 * 2 + 1] = DES_PC2(b, c, d, a); /* Fixup: 2413 5768 -> 1357 2468 */ for (d = 0; d < 16; ++d) { a = pe[2 * d]; b = pe[2 * d + 1]; c = a ^ b; c &= 0xffff0000; a ^= c; b ^= c; ROL(b, 18); pe[2 * d] = a; pe[2 * d + 1] = b; } /* Zero if weak key */ return w; } int des_expand_key(struct des_ctx *ctx, const u8 *key, unsigned int keylen) { if (keylen != DES_KEY_SIZE) return -EINVAL; return des_ekey(ctx->expkey, key) ? 
0 : -ENOKEY; } EXPORT_SYMBOL_GPL(des_expand_key); /* * Decryption key expansion * * No weak key checking is performed, as this is only used by triple DES * */ static void dkey(u32 *pe, const u8 *k) { /* K&R: long is at least 32 bits */ unsigned long a, b, c, d; const u32 *pt = pc2; d = k[4]; d &= 0x0e; d <<= 4; d |= k[0] & 0x1e; d = pc1[d]; c = k[5]; c &= 0x0e; c <<= 4; c |= k[1] & 0x1e; c = pc1[c]; b = k[6]; b &= 0x0e; b <<= 4; b |= k[2] & 0x1e; b = pc1[b]; a = k[7]; a &= 0x0e; a <<= 4; a |= k[3] & 0x1e; a = pc1[a]; pe[ 0 * 2] = DES_PC2(a, b, c, d); d = rs[d]; pe[ 1 * 2] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; pe[ 2 * 2] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; pe[ 3 * 2] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; pe[ 4 * 2] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; pe[ 5 * 2] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; pe[ 6 * 2] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; pe[ 7 * 2] = DES_PC2(d, a, b, c); c = rs[c]; pe[ 8 * 2] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; pe[ 9 * 2] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; pe[10 * 2] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; pe[11 * 2] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; pe[12 * 2] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; pe[13 * 2] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; pe[14 * 2] = DES_PC2(c, d, a, b); b = rs[b]; pe[15 * 2] = DES_PC2(b, c, d, a); /* Skip to next table set */ pt += 512; d = k[0]; d &= 0xe0; d >>= 4; d |= k[4] & 0xf0; d = pc1[d + 1]; c = k[1]; c &= 0xe0; c >>= 4; c |= k[5] & 0xf0; c = pc1[c + 1]; b = k[2]; b &= 0xe0; b >>= 4; b |= k[6] & 0xf0; b = pc1[b + 1]; a = k[3]; a &= 0xe0; a >>= 4; a |= k[7] & 0xf0; a = pc1[a + 1]; pe[ 0 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; pe[ 1 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; pe[ 2 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; pe[ 3 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; pe[ 4 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; pe[ 5 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; pe[ 6 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; pe[ 7 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; pe[ 8 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; pe[ 9 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; pe[10 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; pe[11 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; pe[12 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; pe[13 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; pe[14 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; pe[15 * 2 + 1] = DES_PC2(b, c, d, a); /* Fixup: 2413 5768 -> 1357 2468 */ for (d = 0; d < 16; ++d) { a = pe[2 * d]; b = pe[2 * d + 1]; c = a ^ b; c &= 0xffff0000; a ^= c; b ^= c; ROL(b, 18); pe[2 * d] = a; pe[2 * d + 1] = b; } } void des_encrypt(const struct des_ctx *ctx, u8 *dst, const u8 *src) { const u32 *K = ctx->expkey; u32 L, R, A, B; int i; L = get_unaligned_le32(src); R = get_unaligned_le32(src + 4); IP(L, R, A); for (i = 0; i < 8; i++) { ROUND(L, R, A, B, K, 2); ROUND(R, L, A, B, K, 2); } FP(R, L, A); put_unaligned_le32(R, dst); put_unaligned_le32(L, dst + 4); } EXPORT_SYMBOL_GPL(des_encrypt); void des_decrypt(const struct des_ctx *ctx, u8 *dst, const u8 *src) { const u32 *K = ctx->expkey + DES_EXPKEY_WORDS - 2; u32 L, R, A, B; int i; L = get_unaligned_le32(src); R = get_unaligned_le32(src + 4); IP(L, R, A); for (i = 0; i < 8; i++) { ROUND(L, R, A, B, K, -2); ROUND(R, L, A, B, K, -2); } FP(R, L, A); put_unaligned_le32(R, dst); put_unaligned_le32(L, dst + 4); } EXPORT_SYMBOL_GPL(des_decrypt); int 
des3_ede_expand_key(struct des3_ede_ctx *ctx, const u8 *key, unsigned int keylen) { u32 *pe = ctx->expkey; int err; if (keylen != DES3_EDE_KEY_SIZE) return -EINVAL; err = des3_ede_verify_key(key, keylen, true); if (err && err != -ENOKEY) return err; des_ekey(pe, key); pe += DES_EXPKEY_WORDS; key += DES_KEY_SIZE; dkey(pe, key); pe += DES_EXPKEY_WORDS; key += DES_KEY_SIZE; des_ekey(pe, key); return err; } EXPORT_SYMBOL_GPL(des3_ede_expand_key); void des3_ede_encrypt(const struct des3_ede_ctx *dctx, u8 *dst, const u8 *src) { const u32 *K = dctx->expkey; u32 L, R, A, B; int i; L = get_unaligned_le32(src); R = get_unaligned_le32(src + 4); IP(L, R, A); for (i = 0; i < 8; i++) { ROUND(L, R, A, B, K, 2); ROUND(R, L, A, B, K, 2); } for (i = 0; i < 8; i++) { ROUND(R, L, A, B, K, 2); ROUND(L, R, A, B, K, 2); } for (i = 0; i < 8; i++) { ROUND(L, R, A, B, K, 2); ROUND(R, L, A, B, K, 2); } FP(R, L, A); put_unaligned_le32(R, dst); put_unaligned_le32(L, dst + 4); } EXPORT_SYMBOL_GPL(des3_ede_encrypt); void des3_ede_decrypt(const struct des3_ede_ctx *dctx, u8 *dst, const u8 *src) { const u32 *K = dctx->expkey + DES3_EDE_EXPKEY_WORDS - 2; u32 L, R, A, B; int i; L = get_unaligned_le32(src); R = get_unaligned_le32(src + 4); IP(L, R, A); for (i = 0; i < 8; i++) { ROUND(L, R, A, B, K, -2); ROUND(R, L, A, B, K, -2); } for (i = 0; i < 8; i++) { ROUND(R, L, A, B, K, -2); ROUND(L, R, A, B, K, -2); } for (i = 0; i < 8; i++) { ROUND(L, R, A, B, K, -2); ROUND(R, L, A, B, K, -2); } FP(R, L, A); put_unaligned_le32(R, dst); put_unaligned_le32(L, dst + 4); } EXPORT_SYMBOL_GPL(des3_ede_decrypt); MODULE_DESCRIPTION("DES & Triple DES EDE Cipher Algorithms"); MODULE_LICENSE("GPL");
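A hedged usage sketch, not taken from the file above: how a caller might exercise the DES library API this file exports. It assumes <crypto/des.h> declares des_expand_key(), des_encrypt(), des_decrypt(), DES_KEY_SIZE and DES_BLOCK_SIZE; the key and plaintext bytes are made up for illustration.

#include <linux/types.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <crypto/des.h>

static int demo_des_roundtrip(void)
{
	struct des_ctx ctx;
	static const u8 key[DES_KEY_SIZE] = {
		0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef
	};
	static const u8 in[DES_BLOCK_SIZE] = "ABCDEFG";	/* exactly one 8-byte block */
	u8 out[DES_BLOCK_SIZE], back[DES_BLOCK_SIZE];
	int err;

	/* rejects wrong-length keys (-EINVAL) and weak keys (-ENOKEY) */
	err = des_expand_key(&ctx, key, sizeof(key));
	if (err)
		return err;

	des_encrypt(&ctx, out, in);		/* encrypt one 64-bit block */
	des_decrypt(&ctx, back, out);		/* should round-trip to the original */
	return memcmp(in, back, DES_BLOCK_SIZE) ? -EINVAL : 0;
}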
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_TRAPS_H
#define _ASM_X86_TRAPS_H

#include <linux/context_tracking_state.h>
#include <linux/kprobes.h>

#include <asm/debugreg.h>
#include <asm/idtentry.h>
#include <asm/siginfo.h>			/* TRAP_TRACE, ... */
#include <asm/trap_pf.h>

#ifdef CONFIG_X86_64
asmlinkage __visible notrace struct pt_regs *sync_regs(struct pt_regs *eregs);
asmlinkage __visible notrace struct pt_regs *fixup_bad_iret(struct pt_regs *bad_regs);
asmlinkage __visible noinstr struct pt_regs *vc_switch_off_ist(struct pt_regs *eregs);
#endif

extern int ibt_selftest(void);
extern int ibt_selftest_noendbr(void);

#ifdef CONFIG_X86_F00F_BUG
/* For handling the F00F bug */
void handle_invalid_op(struct pt_regs *regs);
#endif

static inline int get_si_code(unsigned long condition)
{
	if (condition & DR_STEP)
		return TRAP_TRACE;
	else if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3))
		return TRAP_HWBKPT;
	else
		return TRAP_BRKPT;
}

void math_emulate(struct math_emu_info *);

bool fault_in_kernel_space(unsigned long address);

#ifdef CONFIG_VMAP_STACK
void __noreturn handle_stack_overflow(struct pt_regs *regs,
				      unsigned long fault_address,
				      struct stack_info *info);
#endif

static inline void cond_local_irq_enable(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_enable();
}

static inline void cond_local_irq_disable(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_disable();
}

#endif /* _ASM_X86_TRAPS_H */
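A hedged sketch, not part of the header above: how get_si_code() and the cond_local_irq_enable()/cond_local_irq_disable() pair are typically used from a trap-handler-style function. The function name and the dr6 argument are illustrative only.

#include <asm/ptrace.h>
#include <asm/traps.h>

static void demo_debug_trap(struct pt_regs *regs, unsigned long dr6)
{
	/* DR_STEP -> TRAP_TRACE, DR_TRAP0..3 -> TRAP_HWBKPT, else TRAP_BRKPT */
	int si_code = get_si_code(dr6);

	/* re-enable interrupts only if the interrupted context had them on */
	cond_local_irq_enable(regs);
	/* ... do the real handling, e.g. deliver a signal using si_code ... */
	cond_local_irq_disable(regs);

	(void)si_code;
}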
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KERNEL_PRINTK__
#define __KERNEL_PRINTK__

#include <linux/stdarg.h>
#include <linux/init.h>
#include <linux/kern_levels.h>
#include <linux/linkage.h>
#include <linux/ratelimit_types.h>
#include <linux/once_lite.h>

struct console;

extern const char linux_banner[];
extern const char linux_proc_banner[];

extern int oops_in_progress;
/* If set, an oops, panic(), BUG() or die() is in progress */ #define PRINTK_MAX_SINGLE_HEADER_LEN 2 static inline int printk_get_level(const char *buffer) { if (buffer[0] == KERN_SOH_ASCII && buffer[1]) { switch (buffer[1]) { case '0' ... '7': case 'c': /* KERN_CONT */ return buffer[1]; } } return 0; } static inline const char *printk_skip_level(const char *buffer) { if (printk_get_level(buffer)) return buffer + 2; return buffer; } static inline const char *printk_skip_headers(const char *buffer) { while (printk_get_level(buffer)) buffer = printk_skip_level(buffer); return buffer; } /* printk's without a loglevel use this.. */ #define MESSAGE_LOGLEVEL_DEFAULT CONFIG_MESSAGE_LOGLEVEL_DEFAULT /* We show everything that is MORE important than this.. */ #define CONSOLE_LOGLEVEL_SILENT 0 /* Mum's the word */ #define CONSOLE_LOGLEVEL_MIN 1 /* Minimum loglevel we let people use */ #define CONSOLE_LOGLEVEL_DEBUG 10 /* issue debug messages */ #define CONSOLE_LOGLEVEL_MOTORMOUTH 15 /* You can't shut this one up */ /* * Default used to be hard-coded at 7, quiet used to be hardcoded at 4, * we're now allowing both to be set from kernel config. */ #define CONSOLE_LOGLEVEL_DEFAULT CONFIG_CONSOLE_LOGLEVEL_DEFAULT #define CONSOLE_LOGLEVEL_QUIET CONFIG_CONSOLE_LOGLEVEL_QUIET int match_devname_and_update_preferred_console(const char *match, const char *name, const short idx); extern int console_printk[]; #define console_loglevel (console_printk[0]) #define default_message_loglevel (console_printk[1]) #define minimum_console_loglevel (console_printk[2]) #define default_console_loglevel (console_printk[3]) extern void console_verbose(void); /* strlen("ratelimit") + 1 */ #define DEVKMSG_STR_MAX_SIZE 10 extern char devkmsg_log_str[DEVKMSG_STR_MAX_SIZE]; struct ctl_table; extern int suppress_printk; struct va_format { const char *fmt; va_list *va; }; /* * FW_BUG * Add this to a message where you are sure the firmware is buggy or behaves * really stupid or out of spec. Be aware that the responsible BIOS developer * should be able to fix this issue or at least get a concrete idea of the * problem by reading your message without the need of looking at the kernel * code. * * Use it for definite and high priority BIOS bugs. * * FW_WARN * Use it for not that clear (e.g. could the kernel messed up things already?) * and medium priority BIOS bugs. * * FW_INFO * Use this one if you want to tell the user or vendor about something * suspicious, but generally harmless related to the firmware. * * Use it for information or very low priority BIOS bugs. */ #define FW_BUG "[Firmware Bug]: " #define FW_WARN "[Firmware Warn]: " #define FW_INFO "[Firmware Info]: " /* * HW_ERR * Add this to a message for hardware errors, so that user can report * it to hardware vendor instead of LKML or software vendor. */ #define HW_ERR "[Hardware Error]: " /* * DEPRECATED * Add this to a message whenever you want to warn user space about the use * of a deprecated aspect of an API so they can stop using it */ #define DEPRECATED "[Deprecated]: " /* * Dummy printk for disabled debugging statements to use whilst maintaining * gcc's format checking. */ #define no_printk(fmt, ...) \ ({ \ if (0) \ _printk(fmt, ##__VA_ARGS__); \ 0; \ }) #ifdef CONFIG_EARLY_PRINTK extern asmlinkage __printf(1, 2) void early_printk(const char *fmt, ...); #else static inline __printf(1, 2) __cold void early_printk(const char *s, ...) 
{ } #endif struct dev_printk_info; #ifdef CONFIG_PRINTK asmlinkage __printf(4, 0) int vprintk_emit(int facility, int level, const struct dev_printk_info *dev_info, const char *fmt, va_list args); asmlinkage __printf(1, 0) int vprintk(const char *fmt, va_list args); asmlinkage __printf(1, 2) __cold int _printk(const char *fmt, ...); /* * Special printk facility for scheduler/timekeeping use only, _DO_NOT_USE_ ! */ __printf(1, 2) __cold int _printk_deferred(const char *fmt, ...); extern void __printk_deferred_enter(void); extern void __printk_deferred_exit(void); extern void printk_force_console_enter(void); extern void printk_force_console_exit(void); /* * The printk_deferred_enter/exit macros are available only as a hack for * some code paths that need to defer all printk console printing. Interrupts * must be disabled for the deferred duration. */ #define printk_deferred_enter() __printk_deferred_enter() #define printk_deferred_exit() __printk_deferred_exit() /* * Please don't use printk_ratelimit(), because it shares ratelimiting state * with all other unrelated printk_ratelimit() callsites. Instead use * printk_ratelimited() or plain old __ratelimit(). */ extern int __printk_ratelimit(const char *func); #define printk_ratelimit() __printk_ratelimit(__func__) extern bool printk_timed_ratelimit(unsigned long *caller_jiffies, unsigned int interval_msec); extern int printk_delay_msec; extern int dmesg_restrict; extern void wake_up_klogd(void); char *log_buf_addr_get(void); u32 log_buf_len_get(void); void log_buf_vmcoreinfo_setup(void); void __init setup_log_buf(int early); __printf(1, 2) void dump_stack_set_arch_desc(const char *fmt, ...); void dump_stack_print_info(const char *log_lvl); void show_regs_print_info(const char *log_lvl); extern asmlinkage void dump_stack_lvl(const char *log_lvl) __cold; extern asmlinkage void dump_stack(void) __cold; void printk_trigger_flush(void); void console_try_replay_all(void); void printk_legacy_allow_panic_sync(void); extern bool nbcon_device_try_acquire(struct console *con); extern void nbcon_device_release(struct console *con); void nbcon_atomic_flush_unsafe(void); bool pr_flush(int timeout_ms, bool reset_on_progress); #else static inline __printf(1, 0) int vprintk(const char *s, va_list args) { return 0; } static inline __printf(1, 2) __cold int _printk(const char *s, ...) { return 0; } static inline __printf(1, 2) __cold int _printk_deferred(const char *s, ...) { return 0; } static inline void printk_deferred_enter(void) { } static inline void printk_deferred_exit(void) { } static inline void printk_force_console_enter(void) { } static inline void printk_force_console_exit(void) { } static inline int printk_ratelimit(void) { return 0; } static inline bool printk_timed_ratelimit(unsigned long *caller_jiffies, unsigned int interval_msec) { return false; } static inline void wake_up_klogd(void) { } static inline char *log_buf_addr_get(void) { return NULL; } static inline u32 log_buf_len_get(void) { return 0; } static inline void log_buf_vmcoreinfo_setup(void) { } static inline void setup_log_buf(int early) { } static inline __printf(1, 2) void dump_stack_set_arch_desc(const char *fmt, ...) 
{ } static inline void dump_stack_print_info(const char *log_lvl) { } static inline void show_regs_print_info(const char *log_lvl) { } static inline void dump_stack_lvl(const char *log_lvl) { } static inline void dump_stack(void) { } static inline void printk_trigger_flush(void) { } static inline void console_try_replay_all(void) { } static inline void printk_legacy_allow_panic_sync(void) { } static inline bool nbcon_device_try_acquire(struct console *con) { return false; } static inline void nbcon_device_release(struct console *con) { } static inline void nbcon_atomic_flush_unsafe(void) { } static inline bool pr_flush(int timeout_ms, bool reset_on_progress) { return true; } #endif bool this_cpu_in_panic(void); #ifdef CONFIG_SMP extern int __printk_cpu_sync_try_get(void); extern void __printk_cpu_sync_wait(void); extern void __printk_cpu_sync_put(void); #else #define __printk_cpu_sync_try_get() true #define __printk_cpu_sync_wait() #define __printk_cpu_sync_put() #endif /* CONFIG_SMP */ /** * printk_cpu_sync_get_irqsave() - Disable interrupts and acquire the printk * cpu-reentrant spinning lock. * @flags: Stack-allocated storage for saving local interrupt state, * to be passed to printk_cpu_sync_put_irqrestore(). * * If the lock is owned by another CPU, spin until it becomes available. * Interrupts are restored while spinning. * * CAUTION: This function must be used carefully. It does not behave like a * typical lock. Here are important things to watch out for... * * * This function is reentrant on the same CPU. Therefore the calling * code must not assume exclusive access to data if code accessing the * data can run reentrant or within NMI context on the same CPU. * * * If there exists usage of this function from NMI context, it becomes * unsafe to perform any type of locking or spinning to wait for other * CPUs after calling this function from any context. This includes * using spinlocks or any other busy-waiting synchronization methods. */ #define printk_cpu_sync_get_irqsave(flags) \ for (;;) { \ local_irq_save(flags); \ if (__printk_cpu_sync_try_get()) \ break; \ local_irq_restore(flags); \ __printk_cpu_sync_wait(); \ } /** * printk_cpu_sync_put_irqrestore() - Release the printk cpu-reentrant spinning * lock and restore interrupts. * @flags: Caller's saved interrupt state, from printk_cpu_sync_get_irqsave(). */ #define printk_cpu_sync_put_irqrestore(flags) \ do { \ __printk_cpu_sync_put(); \ local_irq_restore(flags); \ } while (0) extern int kptr_restrict; /** * pr_fmt - used by the pr_*() macros to generate the printk format string * @fmt: format string passed from a pr_*() macro * * This macro can be used to generate a unified format string for pr_*() * macros. A common use is to prefix all pr_*() messages in a file with a common * string. For example, defining this at the top of a source file: * * #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt * * would prefix all pr_info, pr_emerg... messages in the file with the module * name. */ #ifndef pr_fmt #define pr_fmt(fmt) fmt #endif struct module; #ifdef CONFIG_PRINTK_INDEX struct pi_entry { const char *fmt; const char *func; const char *file; unsigned int line; /* * While printk and pr_* have the level stored in the string at compile * time, some subsystems dynamically add it at runtime through the * format string. For these dynamic cases, we allow the subsystem to * tell us the level at compile time. * * NULL indicates that the level, if any, is stored in fmt. 
*/ const char *level; /* * The format string used by various subsystem specific printk() * wrappers to prefix the message. * * Note that the static prefix defined by the pr_fmt() macro is stored * directly in the message format (@fmt), not here. */ const char *subsys_fmt_prefix; } __packed; #define __printk_index_emit(_fmt, _level, _subsys_fmt_prefix) \ do { \ if (__builtin_constant_p(_fmt) && __builtin_constant_p(_level)) { \ /* * We check __builtin_constant_p multiple times here * for the same input because GCC will produce an error * if we try to assign a static variable to fmt if it * is not a constant, even with the outer if statement. */ \ static const struct pi_entry _entry \ __used = { \ .fmt = __builtin_constant_p(_fmt) ? (_fmt) : NULL, \ .func = __func__, \ .file = __FILE__, \ .line = __LINE__, \ .level = __builtin_constant_p(_level) ? (_level) : NULL, \ .subsys_fmt_prefix = _subsys_fmt_prefix,\ }; \ static const struct pi_entry *_entry_ptr \ __used __section(".printk_index") = &_entry; \ } \ } while (0) #else /* !CONFIG_PRINTK_INDEX */ #define __printk_index_emit(...) do {} while (0) #endif /* CONFIG_PRINTK_INDEX */ /* * Some subsystems have their own custom printk that applies a va_format to a * generic format, for example, to include a device number or other metadata * alongside the format supplied by the caller. * * In order to store these in the way they would be emitted by the printk * infrastructure, the subsystem provides us with the start, fixed string, and * any subsequent text in the format string. * * We take a variable argument list as pr_fmt/dev_fmt/etc are sometimes passed * as multiple arguments (eg: `"%s: ", "blah"`), and we must only take the * first one. * * subsys_fmt_prefix must be known at compile time, or compilation will fail * (since this is a mistake). If fmt or level is not known at compile time, no * index entry will be made (since this can legitimately happen). */ #define printk_index_subsys_emit(subsys_fmt_prefix, level, fmt, ...) \ __printk_index_emit(fmt, level, subsys_fmt_prefix) #define printk_index_wrap(_p_func, _fmt, ...) \ ({ \ __printk_index_emit(_fmt, NULL, NULL); \ _p_func(_fmt, ##__VA_ARGS__); \ }) /** * printk - print a kernel message * @fmt: format string * * This is printk(). It can be called from any context. We want it to work. * * If printk indexing is enabled, _printk() is called from printk_index_wrap. * Otherwise, printk is simply #defined to _printk. * * We try to grab the console_lock. If we succeed, it's easy - we log the * output and call the console drivers. If we fail to get the semaphore, we * place the output into the log buffer and return. The current holder of * the console_sem will notice the new output in console_unlock(); and will * send it to the consoles before releasing the lock. * * One effect of this deferred printing is that code which calls printk() and * then changes console_loglevel may break. This is because console_loglevel * is inspected when the actual printing occurs. * * See also: * printf(3) * * See the vsnprintf() documentation for format string extensions over C99. */ #define printk(fmt, ...) printk_index_wrap(_printk, fmt, ##__VA_ARGS__) #define printk_deferred(fmt, ...) \ printk_index_wrap(_printk_deferred, fmt, ##__VA_ARGS__) /** * pr_emerg - Print an emergency-level message * @fmt: format string * @...: arguments for the format string * * This macro expands to a printk with KERN_EMERG loglevel. It uses pr_fmt() to * generate the format string. */ #define pr_emerg(fmt, ...) 
\ printk(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__) /** * pr_alert - Print an alert-level message * @fmt: format string * @...: arguments for the format string * * This macro expands to a printk with KERN_ALERT loglevel. It uses pr_fmt() to * generate the format string. */ #define pr_alert(fmt, ...) \ printk(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__) /** * pr_crit - Print a critical-level message * @fmt: format string * @...: arguments for the format string * * This macro expands to a printk with KERN_CRIT loglevel. It uses pr_fmt() to * generate the format string. */ #define pr_crit(fmt, ...) \ printk(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__) /** * pr_err - Print an error-level message * @fmt: format string * @...: arguments for the format string * * This macro expands to a printk with KERN_ERR loglevel. It uses pr_fmt() to * generate the format string. */ #define pr_err(fmt, ...) \ printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__) /** * pr_warn - Print a warning-level message * @fmt: format string * @...: arguments for the format string * * This macro expands to a printk with KERN_WARNING loglevel. It uses pr_fmt() * to generate the format string. */ #define pr_warn(fmt, ...) \ printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__) /** * pr_notice - Print a notice-level message * @fmt: format string * @...: arguments for the format string * * This macro expands to a printk with KERN_NOTICE loglevel. It uses pr_fmt() to * generate the format string. */ #define pr_notice(fmt, ...) \ printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__) /** * pr_info - Print an info-level message * @fmt: format string * @...: arguments for the format string * * This macro expands to a printk with KERN_INFO loglevel. It uses pr_fmt() to * generate the format string. */ #define pr_info(fmt, ...) \ printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) /** * pr_cont - Continues a previous log message in the same line. * @fmt: format string * @...: arguments for the format string * * This macro expands to a printk with KERN_CONT loglevel. It should only be * used when continuing a log message with no newline ('\n') enclosed. Otherwise * it defaults back to KERN_DEFAULT loglevel. */ #define pr_cont(fmt, ...) \ printk(KERN_CONT fmt, ##__VA_ARGS__) /** * pr_devel - Print a debug-level message conditionally * @fmt: format string * @...: arguments for the format string * * This macro expands to a printk with KERN_DEBUG loglevel if DEBUG is * defined. Otherwise it does nothing. * * It uses pr_fmt() to generate the format string. */ #ifdef DEBUG #define pr_devel(fmt, ...) \ printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) #else #define pr_devel(fmt, ...) \ no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) #endif /* If you are writing a driver, please use dev_dbg instead */ #if defined(CONFIG_DYNAMIC_DEBUG) || \ (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE)) #include <linux/dynamic_debug.h> /** * pr_debug - Print a debug-level message conditionally * @fmt: format string * @...: arguments for the format string * * This macro expands to dynamic_pr_debug() if CONFIG_DYNAMIC_DEBUG is * set. Otherwise, if DEBUG is defined, it's equivalent to a printk with * KERN_DEBUG loglevel. If DEBUG is not defined it does nothing. * * It uses pr_fmt() to generate the format string (dynamic_pr_debug() uses * pr_fmt() internally). */ #define pr_debug(fmt, ...) \ dynamic_pr_debug(fmt, ##__VA_ARGS__) #elif defined(DEBUG) #define pr_debug(fmt, ...) \ printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) #else #define pr_debug(fmt, ...) 
\ no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) #endif /* * Print a one-time message (analogous to WARN_ONCE() et al): */ #ifdef CONFIG_PRINTK #define printk_once(fmt, ...) \ DO_ONCE_LITE(printk, fmt, ##__VA_ARGS__) #define printk_deferred_once(fmt, ...) \ DO_ONCE_LITE(printk_deferred, fmt, ##__VA_ARGS__) #else #define printk_once(fmt, ...) \ no_printk(fmt, ##__VA_ARGS__) #define printk_deferred_once(fmt, ...) \ no_printk(fmt, ##__VA_ARGS__) #endif #define pr_emerg_once(fmt, ...) \ printk_once(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__) #define pr_alert_once(fmt, ...) \ printk_once(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__) #define pr_crit_once(fmt, ...) \ printk_once(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__) #define pr_err_once(fmt, ...) \ printk_once(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__) #define pr_warn_once(fmt, ...) \ printk_once(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__) #define pr_notice_once(fmt, ...) \ printk_once(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__) #define pr_info_once(fmt, ...) \ printk_once(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) /* no pr_cont_once, don't do that... */ #if defined(DEBUG) #define pr_devel_once(fmt, ...) \ printk_once(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) #else #define pr_devel_once(fmt, ...) \ no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) #endif /* If you are writing a driver, please use dev_dbg instead */ #if defined(DEBUG) #define pr_debug_once(fmt, ...) \ printk_once(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) #else #define pr_debug_once(fmt, ...) \ no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) #endif /* * ratelimited messages with local ratelimit_state, * no local ratelimit_state used in the !PRINTK case */ #ifdef CONFIG_PRINTK #define printk_ratelimited(fmt, ...) \ ({ \ static DEFINE_RATELIMIT_STATE(_rs, \ DEFAULT_RATELIMIT_INTERVAL, \ DEFAULT_RATELIMIT_BURST); \ \ if (__ratelimit(&_rs)) \ printk(fmt, ##__VA_ARGS__); \ }) #else #define printk_ratelimited(fmt, ...) \ no_printk(fmt, ##__VA_ARGS__) #endif #define pr_emerg_ratelimited(fmt, ...) \ printk_ratelimited(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__) #define pr_alert_ratelimited(fmt, ...) \ printk_ratelimited(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__) #define pr_crit_ratelimited(fmt, ...) \ printk_ratelimited(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__) #define pr_err_ratelimited(fmt, ...) \ printk_ratelimited(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__) #define pr_warn_ratelimited(fmt, ...) \ printk_ratelimited(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__) #define pr_notice_ratelimited(fmt, ...) \ printk_ratelimited(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__) #define pr_info_ratelimited(fmt, ...) \ printk_ratelimited(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) /* no pr_cont_ratelimited, don't do that... */ #if defined(DEBUG) #define pr_devel_ratelimited(fmt, ...) \ printk_ratelimited(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) #else #define pr_devel_ratelimited(fmt, ...) \ no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) #endif /* If you are writing a driver, please use dev_dbg instead */ #if defined(CONFIG_DYNAMIC_DEBUG) || \ (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE)) /* descriptor check is first to prevent flooding with "callbacks suppressed" */ #define pr_debug_ratelimited(fmt, ...) 
\ do { \ static DEFINE_RATELIMIT_STATE(_rs, \ DEFAULT_RATELIMIT_INTERVAL, \ DEFAULT_RATELIMIT_BURST); \ DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, pr_fmt(fmt)); \ if (DYNAMIC_DEBUG_BRANCH(descriptor) && \ __ratelimit(&_rs)) \ __dynamic_pr_debug(&descriptor, pr_fmt(fmt), ##__VA_ARGS__); \ } while (0) #elif defined(DEBUG) #define pr_debug_ratelimited(fmt, ...) \ printk_ratelimited(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) #else #define pr_debug_ratelimited(fmt, ...) \ no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) #endif extern const struct file_operations kmsg_fops; enum { DUMP_PREFIX_NONE, DUMP_PREFIX_ADDRESS, DUMP_PREFIX_OFFSET }; extern int hex_dump_to_buffer(const void *buf, size_t len, int rowsize, int groupsize, char *linebuf, size_t linebuflen, bool ascii); #ifdef CONFIG_PRINTK extern void print_hex_dump(const char *level, const char *prefix_str, int prefix_type, int rowsize, int groupsize, const void *buf, size_t len, bool ascii); #else static inline void print_hex_dump(const char *level, const char *prefix_str, int prefix_type, int rowsize, int groupsize, const void *buf, size_t len, bool ascii) { } static inline void print_hex_dump_bytes(const char *prefix_str, int prefix_type, const void *buf, size_t len) { } #endif #if defined(CONFIG_DYNAMIC_DEBUG) || \ (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE)) #define print_hex_dump_debug(prefix_str, prefix_type, rowsize, \ groupsize, buf, len, ascii) \ dynamic_hex_dump(prefix_str, prefix_type, rowsize, \ groupsize, buf, len, ascii) #elif defined(DEBUG) #define print_hex_dump_debug(prefix_str, prefix_type, rowsize, \ groupsize, buf, len, ascii) \ print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, rowsize, \ groupsize, buf, len, ascii) #else static inline void print_hex_dump_debug(const char *prefix_str, int prefix_type, int rowsize, int groupsize, const void *buf, size_t len, bool ascii) { } #endif /** * print_hex_dump_bytes - shorthand form of print_hex_dump() with default params * @prefix_str: string to prefix each line with; * caller supplies trailing spaces for alignment if desired * @prefix_type: controls whether prefix of an offset, address, or none * is printed (%DUMP_PREFIX_OFFSET, %DUMP_PREFIX_ADDRESS, %DUMP_PREFIX_NONE) * @buf: data blob to dump * @len: number of bytes in the @buf * * Calls print_hex_dump(), with log level of KERN_DEBUG, * rowsize of 16, groupsize of 1, and ASCII output included. */ #define print_hex_dump_bytes(prefix_str, prefix_type, buf, len) \ print_hex_dump_debug(prefix_str, prefix_type, 16, 1, buf, len, true) #endif
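A hedged usage sketch of the logging macros declared above, in the style of a small driver. The "mydrv" prefix, the function and its arguments are illustrative; pr_fmt() is defined before the printk headers are pulled in so the #ifndef fallback in <linux/printk.h> does not take effect first.

#define pr_fmt(fmt) "mydrv: " fmt

#include <linux/printk.h>
#include <linux/types.h>

static void demo_logging(int err, const u8 *buf, size_t len)
{
	pr_info("initialised\n");			/* logged as "mydrv: initialised" */
	pr_warn_once("legacy mode in use\n");		/* emitted a single time */
	pr_err_ratelimited("I/O error %d\n", err);	/* rate-limited per call site */
	pr_debug("state dump follows\n");		/* needs DEBUG or dynamic debug */
	print_hex_dump_bytes("buf: ", DUMP_PREFIX_OFFSET, buf, len);
}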
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * tsacct.c - System accounting over taskstats interface
 *
 * Copyright (C) Jay Lan, <jlan@sgi.com>
 */

#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/sched/cputime.h>
#include <linux/tsacct_kern.h>
#include <linux/acct.h>
#include <linux/jiffies.h>
#include <linux/mm.h>

/*
 * fill in basic accounting fields
 */
void bacct_add_tsk(struct user_namespace *user_ns,
		   struct pid_namespace *pid_ns,
		   struct taskstats *stats, struct task_struct *tsk)
{
	const struct cred *tcred;
	u64 utime, stime, utimescaled, stimescaled;
	u64 now_ns, delta;
	time64_t btime;

	BUILD_BUG_ON(TS_COMM_LEN < TASK_COMM_LEN);

	/* calculate task elapsed time in nsec */
	now_ns = ktime_get_ns();
	/* store whole group time first */
	delta = now_ns - tsk->group_leader->start_time;
	/* Convert to micro seconds */
	do_div(delta, NSEC_PER_USEC);
	stats->ac_tgetime = delta;
	delta = now_ns - tsk->start_time;
	do_div(delta, NSEC_PER_USEC);
	stats->ac_etime = delta;
	/* Convert to seconds for btime (note y2106 limit) */
	btime = ktime_get_real_seconds() - div_u64(delta, USEC_PER_SEC);
	stats->ac_btime = clamp_t(time64_t, btime, 0, U32_MAX);
	stats->ac_btime64 = btime;

	if (tsk->flags & PF_EXITING)
		stats->ac_exitcode = tsk->exit_code;
	if (thread_group_leader(tsk) && (tsk->flags & PF_FORKNOEXEC))
		stats->ac_flag |= AFORK;
	if (tsk->flags & PF_SUPERPRIV)
		stats->ac_flag |= ASU;
	if (tsk->flags & PF_DUMPCORE)
		stats->ac_flag |= ACORE;
	if (tsk->flags & PF_SIGNALED)
		stats->ac_flag |= AXSIG;
	stats->ac_nice = task_nice(tsk);
	stats->ac_sched = tsk->policy;
	stats->ac_pid = task_pid_nr_ns(tsk, pid_ns);
	stats->ac_tgid = task_tgid_nr_ns(tsk, pid_ns);
	rcu_read_lock();
	tcred = __task_cred(tsk);
	stats->ac_uid = from_kuid_munged(user_ns, tcred->uid);
	stats->ac_gid = from_kgid_munged(user_ns, tcred->gid);
	stats->ac_ppid = pid_alive(tsk) ?
task_tgid_nr_ns(rcu_dereference(tsk->real_parent), pid_ns) : 0; rcu_read_unlock(); task_cputime(tsk, &utime, &stime); stats->ac_utime = div_u64(utime, NSEC_PER_USEC); stats->ac_stime = div_u64(stime, NSEC_PER_USEC); task_cputime_scaled(tsk, &utimescaled, &stimescaled); stats->ac_utimescaled = div_u64(utimescaled, NSEC_PER_USEC); stats->ac_stimescaled = div_u64(stimescaled, NSEC_PER_USEC); stats->ac_minflt = tsk->min_flt; stats->ac_majflt = tsk->maj_flt; strscpy_pad(stats->ac_comm, tsk->comm); } #ifdef CONFIG_TASK_XACCT #define KB 1024 #define MB (1024*KB) #define KB_MASK (~(KB-1)) /* * fill in extended accounting fields */ void xacct_add_tsk(struct taskstats *stats, struct task_struct *p) { struct mm_struct *mm; /* convert pages-nsec/1024 to Mbyte-usec, see __acct_update_integrals */ stats->coremem = p->acct_rss_mem1 * PAGE_SIZE; do_div(stats->coremem, 1000 * KB); stats->virtmem = p->acct_vm_mem1 * PAGE_SIZE; do_div(stats->virtmem, 1000 * KB); mm = get_task_mm(p); if (mm) { /* adjust to KB unit */ stats->hiwater_rss = get_mm_hiwater_rss(mm) * PAGE_SIZE / KB; stats->hiwater_vm = get_mm_hiwater_vm(mm) * PAGE_SIZE / KB; mmput(mm); } stats->read_char = p->ioac.rchar & KB_MASK; stats->write_char = p->ioac.wchar & KB_MASK; stats->read_syscalls = p->ioac.syscr & KB_MASK; stats->write_syscalls = p->ioac.syscw & KB_MASK; #ifdef CONFIG_TASK_IO_ACCOUNTING stats->read_bytes = p->ioac.read_bytes & KB_MASK; stats->write_bytes = p->ioac.write_bytes & KB_MASK; stats->cancelled_write_bytes = p->ioac.cancelled_write_bytes & KB_MASK; #else stats->read_bytes = 0; stats->write_bytes = 0; stats->cancelled_write_bytes = 0; #endif } #undef KB #undef MB static void __acct_update_integrals(struct task_struct *tsk, u64 utime, u64 stime) { u64 time, delta; if (!likely(tsk->mm)) return; time = stime + utime; delta = time - tsk->acct_timexpd; if (delta < TICK_NSEC) return; tsk->acct_timexpd = time; /* * Divide by 1024 to avoid overflow, and to avoid division. * The final unit reported to userspace is Mbyte-usecs, * the rest of the math is done in xacct_add_tsk. */ tsk->acct_rss_mem1 += delta * get_mm_rss(tsk->mm) >> 10; tsk->acct_vm_mem1 += delta * READ_ONCE(tsk->mm->total_vm) >> 10; } /** * acct_update_integrals - update mm integral fields in task_struct * @tsk: task_struct for accounting */ void acct_update_integrals(struct task_struct *tsk) { u64 utime, stime; unsigned long flags; local_irq_save(flags); task_cputime(tsk, &utime, &stime); __acct_update_integrals(tsk, utime, stime); local_irq_restore(flags); } /** * acct_account_cputime - update mm integral after cputime update * @tsk: task_struct for accounting */ void acct_account_cputime(struct task_struct *tsk) { __acct_update_integrals(tsk, tsk->utime, tsk->stime); } /** * acct_clear_integrals - clear the mm integral fields in task_struct * @tsk: task_struct whose accounting fields are cleared */ void acct_clear_integrals(struct task_struct *tsk) { tsk->acct_timexpd = 0; tsk->acct_rss_mem1 = 0; tsk->acct_vm_mem1 = 0; } #endif
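A hedged worked example of the units used above, not part of tsacct.c: __acct_update_integrals() accumulates rss(pages) * delta(ns) / 1024 into acct_rss_mem1, and xacct_add_tsk() then scales by PAGE_SIZE / (1000 * KB), so the value reported to userspace ends up in Mbyte-usecs. The input numbers and the 4 KiB page size are assumptions made only for the arithmetic.

#include <linux/types.h>
#include <asm/div64.h>

static u64 demo_coremem_mb_usec(void)
{
	u64 rss_pages = 2560;					/* 10 MB resident with 4 KiB pages */
	u64 delta_ns = 1000000;					/* task accumulated 1 ms of CPU time */
	u64 acct_rss_mem1 = (delta_ns * rss_pages) >> 10;	/* as in __acct_update_integrals() */
	u64 coremem = acct_rss_mem1 * 4096;			/* PAGE_SIZE assumed to be 4 KiB */

	do_div(coremem, 1000 * 1024);				/* as in xacct_add_tsk() */
	return coremem;						/* 10000 Mbyte-usecs */
}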
// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/interval_tree.c - interval tree for mapping->i_mmap
 *
 * Copyright (C) 2012, Michel Lespinasse <walken@google.com>
 */

#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/rmap.h>
#include <linux/interval_tree_generic.h>

static inline unsigned long vma_start_pgoff(struct vm_area_struct *v)
{
	return v->vm_pgoff;
}

static inline unsigned long vma_last_pgoff(struct vm_area_struct *v)
{
	return v->vm_pgoff + vma_pages(v) - 1;
}

INTERVAL_TREE_DEFINE(struct vm_area_struct, shared.rb,
		     unsigned long, shared.rb_subtree_last,
		     vma_start_pgoff, vma_last_pgoff, /* empty */, vma_interval_tree)

/* Insert node immediately after prev in the interval tree */
void vma_interval_tree_insert_after(struct vm_area_struct *node,
				    struct vm_area_struct *prev,
				    struct rb_root_cached *root)
{
	struct rb_node **link;
	struct vm_area_struct *parent;
	unsigned long last = vma_last_pgoff(node);

	VM_BUG_ON_VMA(vma_start_pgoff(node) != vma_start_pgoff(prev), node);

	if (!prev->shared.rb.rb_right) {
		parent = prev;
		link = &prev->shared.rb.rb_right;
	} else {
		parent = rb_entry(prev->shared.rb.rb_right,
				  struct vm_area_struct, shared.rb);
		if (parent->shared.rb_subtree_last < last)
			parent->shared.rb_subtree_last = last;
		while (parent->shared.rb.rb_left) {
			parent = rb_entry(parent->shared.rb.rb_left,
				struct vm_area_struct, shared.rb);
			if (parent->shared.rb_subtree_last < last)
				parent->shared.rb_subtree_last = last;
		}
		link = &parent->shared.rb.rb_left;
	}

	node->shared.rb_subtree_last = last;
	rb_link_node(&node->shared.rb, &parent->shared.rb, link);
	rb_insert_augmented(&node->shared.rb, &root->rb_root,
			    &vma_interval_tree_augment);
}

static inline unsigned long avc_start_pgoff(struct anon_vma_chain *avc)
{
	return vma_start_pgoff(avc->vma);
}

static inline unsigned long avc_last_pgoff(struct anon_vma_chain *avc)
{
	return vma_last_pgoff(avc->vma);
}

INTERVAL_TREE_DEFINE(struct anon_vma_chain, rb, unsigned long, rb_subtree_last,
		     avc_start_pgoff, avc_last_pgoff,
		     static inline, __anon_vma_interval_tree)

void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
				   struct rb_root_cached *root)
{
#ifdef CONFIG_DEBUG_VM_RB
	node->cached_vma_start = avc_start_pgoff(node);
	node->cached_vma_last = avc_last_pgoff(node);
#endif
	__anon_vma_interval_tree_insert(node, root);
}

void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
				   struct rb_root_cached *root)
{
	__anon_vma_interval_tree_remove(node, root);
}

struct anon_vma_chain *
anon_vma_interval_tree_iter_first(struct rb_root_cached *root,
				  unsigned long first, unsigned long last)
{
	return __anon_vma_interval_tree_iter_first(root, first, last);
}

struct anon_vma_chain *
anon_vma_interval_tree_iter_next(struct anon_vma_chain *node,
				 unsigned long first, unsigned long last)
{
	return __anon_vma_interval_tree_iter_next(node, first, last);
}

#ifdef CONFIG_DEBUG_VM_RB
void anon_vma_interval_tree_verify(struct anon_vma_chain *node)
{
	WARN_ON_ONCE(node->cached_vma_start != avc_start_pgoff(node));
	WARN_ON_ONCE(node->cached_vma_last != avc_last_pgoff(node));
}
#endif
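A hedged usage sketch, not part of mm/interval_tree.c: walking the i_mmap tree defined above to visit every VMA that maps a given page-offset range, via the vma_interval_tree_foreach() wrapper from <linux/mm.h> around the generated iter_first/iter_next helpers. The function name is illustrative.

#include <linux/mm.h>
#include <linux/fs.h>

static unsigned long demo_count_mappings(struct address_space *mapping,
					 pgoff_t first, pgoff_t last)
{
	struct vm_area_struct *vma;
	unsigned long n = 0;

	/* callers normally hold i_mmap_lock_read(mapping) around this walk */
	vma_interval_tree_foreach(vma, &mapping->i_mmap, first, last)
		n++;

	return n;
}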
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2021 Google LLC
 *
 * sysfs support for blk-crypto. This file contains the code which exports the
 * crypto capabilities of devices via /sys/block/$disk/queue/crypto/.
 */

#include <linux/blk-crypto-profile.h>

#include "blk-crypto-internal.h"

struct blk_crypto_kobj {
	struct kobject kobj;
	struct blk_crypto_profile *profile;
};

struct blk_crypto_attr {
	struct attribute attr;
	ssize_t (*show)(struct blk_crypto_profile *profile,
			struct blk_crypto_attr *attr, char *page);
};

static struct blk_crypto_profile *kobj_to_crypto_profile(struct kobject *kobj)
{
	return container_of(kobj, struct blk_crypto_kobj, kobj)->profile;
}

static struct blk_crypto_attr *attr_to_crypto_attr(struct attribute *attr)
{
	return container_of(attr, struct blk_crypto_attr, attr);
}

static ssize_t hw_wrapped_keys_show(struct blk_crypto_profile *profile,
				    struct blk_crypto_attr *attr, char *page)
{
	/* Always show supported, since the file doesn't exist otherwise. */
	return sysfs_emit(page, "supported\n");
}

static ssize_t max_dun_bits_show(struct blk_crypto_profile *profile,
				 struct blk_crypto_attr *attr, char *page)
{
	return sysfs_emit(page, "%u\n", 8 * profile->max_dun_bytes_supported);
}

static ssize_t num_keyslots_show(struct blk_crypto_profile *profile,
				 struct blk_crypto_attr *attr, char *page)
{
	return sysfs_emit(page, "%u\n", profile->num_slots);
}

static ssize_t raw_keys_show(struct blk_crypto_profile *profile,
			     struct blk_crypto_attr *attr, char *page)
{
	/* Always show supported, since the file doesn't exist otherwise. */
	return sysfs_emit(page, "supported\n");
}

#define BLK_CRYPTO_RO_ATTR(_name) \
	static struct blk_crypto_attr _name##_attr = __ATTR_RO(_name)

BLK_CRYPTO_RO_ATTR(hw_wrapped_keys);
BLK_CRYPTO_RO_ATTR(max_dun_bits);
BLK_CRYPTO_RO_ATTR(num_keyslots);
BLK_CRYPTO_RO_ATTR(raw_keys);

static umode_t blk_crypto_is_visible(struct kobject *kobj,
				     struct attribute *attr, int n)
{
	struct blk_crypto_profile *profile = kobj_to_crypto_profile(kobj);
	struct blk_crypto_attr *a = attr_to_crypto_attr(attr);

	if (a == &hw_wrapped_keys_attr &&
	    !(profile->key_types_supported & BLK_CRYPTO_KEY_TYPE_HW_WRAPPED))
		return 0;
	if (a == &raw_keys_attr &&
	    !(profile->key_types_supported & BLK_CRYPTO_KEY_TYPE_RAW))
		return 0;
	return 0444;
}

static struct attribute *blk_crypto_attrs[] = {
	&hw_wrapped_keys_attr.attr,
	&max_dun_bits_attr.attr,
	&num_keyslots_attr.attr,
	&raw_keys_attr.attr,
	NULL,
};

static const struct attribute_group blk_crypto_attr_group = {
	.attrs		= blk_crypto_attrs,
	.is_visible	= blk_crypto_is_visible,
};

/*
 * The encryption mode attributes. To avoid hard-coding the list of encryption
 * modes, these are initialized at boot time by blk_crypto_sysfs_init().
*/ static struct blk_crypto_attr __blk_crypto_mode_attrs[BLK_ENCRYPTION_MODE_MAX]; static struct attribute *blk_crypto_mode_attrs[BLK_ENCRYPTION_MODE_MAX + 1]; static umode_t blk_crypto_mode_is_visible(struct kobject *kobj, struct attribute *attr, int n) { struct blk_crypto_profile *profile = kobj_to_crypto_profile(kobj); struct blk_crypto_attr *a = attr_to_crypto_attr(attr); int mode_num = a - __blk_crypto_mode_attrs; if (profile->modes_supported[mode_num]) return 0444; return 0; } static ssize_t blk_crypto_mode_show(struct blk_crypto_profile *profile, struct blk_crypto_attr *attr, char *page) { int mode_num = attr - __blk_crypto_mode_attrs; return sysfs_emit(page, "0x%x\n", profile->modes_supported[mode_num]); } static const struct attribute_group blk_crypto_modes_attr_group = { .name = "modes", .attrs = blk_crypto_mode_attrs, .is_visible = blk_crypto_mode_is_visible, }; static const struct attribute_group *blk_crypto_attr_groups[] = { &blk_crypto_attr_group, &blk_crypto_modes_attr_group, NULL, }; static ssize_t blk_crypto_attr_show(struct kobject *kobj, struct attribute *attr, char *page) { struct blk_crypto_profile *profile = kobj_to_crypto_profile(kobj); struct blk_crypto_attr *a = attr_to_crypto_attr(attr); return a->show(profile, a, page); } static const struct sysfs_ops blk_crypto_attr_ops = { .show = blk_crypto_attr_show, }; static void blk_crypto_release(struct kobject *kobj) { kfree(container_of(kobj, struct blk_crypto_kobj, kobj)); } static const struct kobj_type blk_crypto_ktype = { .default_groups = blk_crypto_attr_groups, .sysfs_ops = &blk_crypto_attr_ops, .release = blk_crypto_release, }; /* * If the request_queue has a blk_crypto_profile, create the "crypto" * subdirectory in sysfs (/sys/block/$disk/queue/crypto/). */ int blk_crypto_sysfs_register(struct gendisk *disk) { struct request_queue *q = disk->queue; struct blk_crypto_kobj *obj; int err; if (!q->crypto_profile) return 0; obj = kzalloc(sizeof(*obj), GFP_KERNEL); if (!obj) return -ENOMEM; obj->profile = q->crypto_profile; err = kobject_init_and_add(&obj->kobj, &blk_crypto_ktype, &disk->queue_kobj, "crypto"); if (err) { kobject_put(&obj->kobj); return err; } q->crypto_kobject = &obj->kobj; return 0; } void blk_crypto_sysfs_unregister(struct gendisk *disk) { kobject_put(disk->queue->crypto_kobject); } static int __init blk_crypto_sysfs_init(void) { int i; BUILD_BUG_ON(BLK_ENCRYPTION_MODE_INVALID != 0); for (i = 1; i < BLK_ENCRYPTION_MODE_MAX; i++) { struct blk_crypto_attr *attr = &__blk_crypto_mode_attrs[i]; attr->attr.name = blk_crypto_modes[i].name; attr->attr.mode = 0444; attr->show = blk_crypto_mode_show; blk_crypto_mode_attrs[i - 1] = &attr->attr; } return 0; } subsys_initcall(blk_crypto_sysfs_init);
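A hedged illustration from the other side of the interface, written as plain userspace C rather than kernel code: reading one of the attributes this file registers under /sys/block/$disk/queue/crypto/. The device name "sda" is an assumption for the example.

#include <stdio.h>

int main(void)
{
	char buf[64];
	/* an absent crypto/ directory means the queue has no crypto profile */
	FILE *f = fopen("/sys/block/sda/queue/crypto/max_dun_bits", "r");

	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		printf("max DUN bits: %s", buf);	/* e.g. "64" */
	fclose(f);
	return 0;
}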
/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM writeback

#if !defined(_TRACE_WRITEBACK_H) ||
defined(TRACE_HEADER_MULTI_READ) #define _TRACE_WRITEBACK_H #include <linux/tracepoint.h> #include <linux/backing-dev.h> #include <linux/writeback.h> #define show_inode_state(state) \ __print_flags(state, "|", \ {I_DIRTY_SYNC, "I_DIRTY_SYNC"}, \ {I_DIRTY_DATASYNC, "I_DIRTY_DATASYNC"}, \ {I_DIRTY_PAGES, "I_DIRTY_PAGES"}, \ {I_NEW, "I_NEW"}, \ {I_WILL_FREE, "I_WILL_FREE"}, \ {I_FREEING, "I_FREEING"}, \ {I_CLEAR, "I_CLEAR"}, \ {I_SYNC, "I_SYNC"}, \ {I_DIRTY_TIME, "I_DIRTY_TIME"}, \ {I_REFERENCED, "I_REFERENCED"}, \ {I_LINKABLE, "I_LINKABLE"}, \ {I_WB_SWITCH, "I_WB_SWITCH"}, \ {I_OVL_INUSE, "I_OVL_INUSE"}, \ {I_CREATING, "I_CREATING"}, \ {I_DONTCACHE, "I_DONTCACHE"}, \ {I_SYNC_QUEUED, "I_SYNC_QUEUED"}, \ {I_PINNING_NETFS_WB, "I_PINNING_NETFS_WB"}, \ {I_LRU_ISOLATING, "I_LRU_ISOLATING"} \ ) /* enums need to be exported to user space */ #undef EM #undef EMe #define EM(a,b) TRACE_DEFINE_ENUM(a); #define EMe(a,b) TRACE_DEFINE_ENUM(a); #define WB_WORK_REASON \ EM( WB_REASON_BACKGROUND, "background") \ EM( WB_REASON_VMSCAN, "vmscan") \ EM( WB_REASON_SYNC, "sync") \ EM( WB_REASON_PERIODIC, "periodic") \ EM( WB_REASON_LAPTOP_TIMER, "laptop_timer") \ EM( WB_REASON_FS_FREE_SPACE, "fs_free_space") \ EM( WB_REASON_FORKER_THREAD, "forker_thread") \ EMe(WB_REASON_FOREIGN_FLUSH, "foreign_flush") WB_WORK_REASON /* * Now redefine the EM() and EMe() macros to map the enums to the strings * that will be printed in the output. */ #undef EM #undef EMe #define EM(a,b) { a, b }, #define EMe(a,b) { a, b } struct wb_writeback_work; DECLARE_EVENT_CLASS(writeback_folio_template, TP_PROTO(struct folio *folio, struct address_space *mapping), TP_ARGS(folio, mapping), TP_STRUCT__entry ( __array(char, name, 32) __field(ino_t, ino) __field(pgoff_t, index) ), TP_fast_assign( strscpy_pad(__entry->name, bdi_dev_name(mapping ? inode_to_bdi(mapping->host) : NULL), 32); __entry->ino = (mapping && mapping->host) ? 
mapping->host->i_ino : 0; __entry->index = folio->index; ), TP_printk("bdi %s: ino=%lu index=%lu", __entry->name, (unsigned long)__entry->ino, __entry->index ) ); DEFINE_EVENT(writeback_folio_template, writeback_dirty_folio, TP_PROTO(struct folio *folio, struct address_space *mapping), TP_ARGS(folio, mapping) ); DEFINE_EVENT(writeback_folio_template, folio_wait_writeback, TP_PROTO(struct folio *folio, struct address_space *mapping), TP_ARGS(folio, mapping) ); DECLARE_EVENT_CLASS(writeback_dirty_inode_template, TP_PROTO(struct inode *inode, int flags), TP_ARGS(inode, flags), TP_STRUCT__entry ( __array(char, name, 32) __field(ino_t, ino) __field(unsigned long, state) __field(unsigned long, flags) ), TP_fast_assign( struct backing_dev_info *bdi = inode_to_bdi(inode); /* may be called for files on pseudo FSes w/ unregistered bdi */ strscpy_pad(__entry->name, bdi_dev_name(bdi), 32); __entry->ino = inode->i_ino; __entry->state = inode->i_state; __entry->flags = flags; ), TP_printk("bdi %s: ino=%lu state=%s flags=%s", __entry->name, (unsigned long)__entry->ino, show_inode_state(__entry->state), show_inode_state(__entry->flags) ) ); DEFINE_EVENT(writeback_dirty_inode_template, writeback_mark_inode_dirty, TP_PROTO(struct inode *inode, int flags), TP_ARGS(inode, flags) ); DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode_start, TP_PROTO(struct inode *inode, int flags), TP_ARGS(inode, flags) ); DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode, TP_PROTO(struct inode *inode, int flags), TP_ARGS(inode, flags) ); #ifdef CREATE_TRACE_POINTS #ifdef CONFIG_CGROUP_WRITEBACK static inline ino_t __trace_wb_assign_cgroup(struct bdi_writeback *wb) { return cgroup_ino(wb->memcg_css->cgroup); } static inline ino_t __trace_wbc_assign_cgroup(struct writeback_control *wbc) { if (wbc->wb) return __trace_wb_assign_cgroup(wbc->wb); else return 1; } #else /* CONFIG_CGROUP_WRITEBACK */ static inline ino_t __trace_wb_assign_cgroup(struct bdi_writeback *wb) { return 1; } static inline ino_t __trace_wbc_assign_cgroup(struct writeback_control *wbc) { return 1; } #endif /* CONFIG_CGROUP_WRITEBACK */ #endif /* CREATE_TRACE_POINTS */ #ifdef CONFIG_CGROUP_WRITEBACK TRACE_EVENT(inode_foreign_history, TP_PROTO(struct inode *inode, struct writeback_control *wbc, unsigned int history), TP_ARGS(inode, wbc, history), TP_STRUCT__entry( __array(char, name, 32) __field(ino_t, ino) __field(ino_t, cgroup_ino) __field(unsigned int, history) ), TP_fast_assign( strscpy_pad(__entry->name, bdi_dev_name(inode_to_bdi(inode)), 32); __entry->ino = inode->i_ino; __entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc); __entry->history = history; ), TP_printk("bdi %s: ino=%lu cgroup_ino=%lu history=0x%x", __entry->name, (unsigned long)__entry->ino, (unsigned long)__entry->cgroup_ino, __entry->history ) ); TRACE_EVENT(inode_switch_wbs, TP_PROTO(struct inode *inode, struct bdi_writeback *old_wb, struct bdi_writeback *new_wb), TP_ARGS(inode, old_wb, new_wb), TP_STRUCT__entry( __array(char, name, 32) __field(ino_t, ino) __field(ino_t, old_cgroup_ino) __field(ino_t, new_cgroup_ino) ), TP_fast_assign( strscpy_pad(__entry->name, bdi_dev_name(old_wb->bdi), 32); __entry->ino = inode->i_ino; __entry->old_cgroup_ino = __trace_wb_assign_cgroup(old_wb); __entry->new_cgroup_ino = __trace_wb_assign_cgroup(new_wb); ), TP_printk("bdi %s: ino=%lu old_cgroup_ino=%lu new_cgroup_ino=%lu", __entry->name, (unsigned long)__entry->ino, (unsigned long)__entry->old_cgroup_ino, (unsigned long)__entry->new_cgroup_ino ) ); 
TRACE_EVENT(track_foreign_dirty, TP_PROTO(struct folio *folio, struct bdi_writeback *wb), TP_ARGS(folio, wb), TP_STRUCT__entry( __array(char, name, 32) __field(u64, bdi_id) __field(ino_t, ino) __field(unsigned int, memcg_id) __field(ino_t, cgroup_ino) __field(ino_t, page_cgroup_ino) ), TP_fast_assign( struct address_space *mapping = folio_mapping(folio); struct inode *inode = mapping ? mapping->host : NULL; strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32); __entry->bdi_id = wb->bdi->id; __entry->ino = inode ? inode->i_ino : 0; __entry->memcg_id = wb->memcg_css->id; __entry->cgroup_ino = __trace_wb_assign_cgroup(wb); __entry->page_cgroup_ino = cgroup_ino(folio_memcg(folio)->css.cgroup); ), TP_printk("bdi %s[%llu]: ino=%lu memcg_id=%u cgroup_ino=%lu page_cgroup_ino=%lu", __entry->name, __entry->bdi_id, (unsigned long)__entry->ino, __entry->memcg_id, (unsigned long)__entry->cgroup_ino, (unsigned long)__entry->page_cgroup_ino ) ); TRACE_EVENT(flush_foreign, TP_PROTO(struct bdi_writeback *wb, unsigned int frn_bdi_id, unsigned int frn_memcg_id), TP_ARGS(wb, frn_bdi_id, frn_memcg_id), TP_STRUCT__entry( __array(char, name, 32) __field(ino_t, cgroup_ino) __field(unsigned int, frn_bdi_id) __field(unsigned int, frn_memcg_id) ), TP_fast_assign( strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32); __entry->cgroup_ino = __trace_wb_assign_cgroup(wb); __entry->frn_bdi_id = frn_bdi_id; __entry->frn_memcg_id = frn_memcg_id; ), TP_printk("bdi %s: cgroup_ino=%lu frn_bdi_id=%u frn_memcg_id=%u", __entry->name, (unsigned long)__entry->cgroup_ino, __entry->frn_bdi_id, __entry->frn_memcg_id ) ); #endif DECLARE_EVENT_CLASS(writeback_write_inode_template, TP_PROTO(struct inode *inode, struct writeback_control *wbc), TP_ARGS(inode, wbc), TP_STRUCT__entry ( __array(char, name, 32) __field(ino_t, ino) __field(int, sync_mode) __field(ino_t, cgroup_ino) ), TP_fast_assign( strscpy_pad(__entry->name, bdi_dev_name(inode_to_bdi(inode)), 32); __entry->ino = inode->i_ino; __entry->sync_mode = wbc->sync_mode; __entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc); ), TP_printk("bdi %s: ino=%lu sync_mode=%d cgroup_ino=%lu", __entry->name, (unsigned long)__entry->ino, __entry->sync_mode, (unsigned long)__entry->cgroup_ino ) ); DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode_start, TP_PROTO(struct inode *inode, struct writeback_control *wbc), TP_ARGS(inode, wbc) ); DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode, TP_PROTO(struct inode *inode, struct writeback_control *wbc), TP_ARGS(inode, wbc) ); DECLARE_EVENT_CLASS(writeback_work_class, TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work), TP_ARGS(wb, work), TP_STRUCT__entry( __array(char, name, 32) __field(long, nr_pages) __field(dev_t, sb_dev) __field(int, sync_mode) __field(int, for_kupdate) __field(int, range_cyclic) __field(int, for_background) __field(int, reason) __field(ino_t, cgroup_ino) ), TP_fast_assign( strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32); __entry->nr_pages = work->nr_pages; __entry->sb_dev = work->sb ? 
work->sb->s_dev : 0; __entry->sync_mode = work->sync_mode; __entry->for_kupdate = work->for_kupdate; __entry->range_cyclic = work->range_cyclic; __entry->for_background = work->for_background; __entry->reason = work->reason; __entry->cgroup_ino = __trace_wb_assign_cgroup(wb); ), TP_printk("bdi %s: sb_dev %d:%d nr_pages=%ld sync_mode=%d " "kupdate=%d range_cyclic=%d background=%d reason=%s cgroup_ino=%lu", __entry->name, MAJOR(__entry->sb_dev), MINOR(__entry->sb_dev), __entry->nr_pages, __entry->sync_mode, __entry->for_kupdate, __entry->range_cyclic, __entry->for_background, __print_symbolic(__entry->reason, WB_WORK_REASON), (unsigned long)__entry->cgroup_ino ) ); #define DEFINE_WRITEBACK_WORK_EVENT(name) \ DEFINE_EVENT(writeback_work_class, name, \ TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work), \ TP_ARGS(wb, work)) DEFINE_WRITEBACK_WORK_EVENT(writeback_queue); DEFINE_WRITEBACK_WORK_EVENT(writeback_exec); DEFINE_WRITEBACK_WORK_EVENT(writeback_start); DEFINE_WRITEBACK_WORK_EVENT(writeback_written); DEFINE_WRITEBACK_WORK_EVENT(writeback_wait); TRACE_EVENT(writeback_pages_written, TP_PROTO(long pages_written), TP_ARGS(pages_written), TP_STRUCT__entry( __field(long, pages) ), TP_fast_assign( __entry->pages = pages_written; ), TP_printk("%ld", __entry->pages) ); DECLARE_EVENT_CLASS(writeback_class, TP_PROTO(struct bdi_writeback *wb), TP_ARGS(wb), TP_STRUCT__entry( __array(char, name, 32) __field(ino_t, cgroup_ino) ), TP_fast_assign( strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32); __entry->cgroup_ino = __trace_wb_assign_cgroup(wb); ), TP_printk("bdi %s: cgroup_ino=%lu", __entry->name, (unsigned long)__entry->cgroup_ino ) ); #define DEFINE_WRITEBACK_EVENT(name) \ DEFINE_EVENT(writeback_class, name, \ TP_PROTO(struct bdi_writeback *wb), \ TP_ARGS(wb)) DEFINE_WRITEBACK_EVENT(writeback_wake_background); TRACE_EVENT(writeback_bdi_register, TP_PROTO(struct backing_dev_info *bdi), TP_ARGS(bdi), TP_STRUCT__entry( __array(char, name, 32) ), TP_fast_assign( strscpy_pad(__entry->name, bdi_dev_name(bdi), 32); ), TP_printk("bdi %s", __entry->name ) ); DECLARE_EVENT_CLASS(wbc_class, TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), TP_ARGS(wbc, bdi), TP_STRUCT__entry( __array(char, name, 32) __field(long, nr_to_write) __field(long, pages_skipped) __field(int, sync_mode) __field(int, for_kupdate) __field(int, for_background) __field(int, for_reclaim) __field(int, range_cyclic) __field(long, range_start) __field(long, range_end) __field(ino_t, cgroup_ino) ), TP_fast_assign( strscpy_pad(__entry->name, bdi_dev_name(bdi), 32); __entry->nr_to_write = wbc->nr_to_write; __entry->pages_skipped = wbc->pages_skipped; __entry->sync_mode = wbc->sync_mode; __entry->for_kupdate = wbc->for_kupdate; __entry->for_background = wbc->for_background; __entry->for_reclaim = wbc->for_reclaim; __entry->range_cyclic = wbc->range_cyclic; __entry->range_start = (long)wbc->range_start; __entry->range_end = (long)wbc->range_end; __entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc); ), TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d " "bgrd=%d reclm=%d cyclic=%d " "start=0x%lx end=0x%lx cgroup_ino=%lu", __entry->name, __entry->nr_to_write, __entry->pages_skipped, __entry->sync_mode, __entry->for_kupdate, __entry->for_background, __entry->for_reclaim, __entry->range_cyclic, __entry->range_start, __entry->range_end, (unsigned long)__entry->cgroup_ino ) ) #define DEFINE_WBC_EVENT(name) \ DEFINE_EVENT(wbc_class, name, \ TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), 
\ TP_ARGS(wbc, bdi)) DEFINE_WBC_EVENT(wbc_writepage); TRACE_EVENT(writeback_queue_io, TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work, unsigned long dirtied_before, int moved), TP_ARGS(wb, work, dirtied_before, moved), TP_STRUCT__entry( __array(char, name, 32) __field(unsigned long, older) __field(long, age) __field(int, moved) __field(int, reason) __field(ino_t, cgroup_ino) ), TP_fast_assign( strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32); __entry->older = dirtied_before; __entry->age = (jiffies - dirtied_before) * 1000 / HZ; __entry->moved = moved; __entry->reason = work->reason; __entry->cgroup_ino = __trace_wb_assign_cgroup(wb); ), TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s cgroup_ino=%lu", __entry->name, __entry->older, /* dirtied_before in jiffies */ __entry->age, /* dirtied_before in relative milliseconds */ __entry->moved, __print_symbolic(__entry->reason, WB_WORK_REASON), (unsigned long)__entry->cgroup_ino ) ); TRACE_EVENT(global_dirty_state, TP_PROTO(unsigned long background_thresh, unsigned long dirty_thresh ), TP_ARGS(background_thresh, dirty_thresh ), TP_STRUCT__entry( __field(unsigned long, nr_dirty) __field(unsigned long, nr_writeback) __field(unsigned long, background_thresh) __field(unsigned long, dirty_thresh) __field(unsigned long, dirty_limit) __field(unsigned long, nr_dirtied) __field(unsigned long, nr_written) ), TP_fast_assign( __entry->nr_dirty = global_node_page_state(NR_FILE_DIRTY); __entry->nr_writeback = global_node_page_state(NR_WRITEBACK); __entry->nr_dirtied = global_node_page_state(NR_DIRTIED); __entry->nr_written = global_node_page_state(NR_WRITTEN); __entry->background_thresh = background_thresh; __entry->dirty_thresh = dirty_thresh; __entry->dirty_limit = global_wb_domain.dirty_limit; ), TP_printk("dirty=%lu writeback=%lu " "bg_thresh=%lu thresh=%lu limit=%lu " "dirtied=%lu written=%lu", __entry->nr_dirty, __entry->nr_writeback, __entry->background_thresh, __entry->dirty_thresh, __entry->dirty_limit, __entry->nr_dirtied, __entry->nr_written ) ); #define KBps(x) ((x) << (PAGE_SHIFT - 10)) TRACE_EVENT(bdi_dirty_ratelimit, TP_PROTO(struct bdi_writeback *wb, unsigned long dirty_rate, unsigned long task_ratelimit), TP_ARGS(wb, dirty_rate, task_ratelimit), TP_STRUCT__entry( __array(char, bdi, 32) __field(unsigned long, write_bw) __field(unsigned long, avg_write_bw) __field(unsigned long, dirty_rate) __field(unsigned long, dirty_ratelimit) __field(unsigned long, task_ratelimit) __field(unsigned long, balanced_dirty_ratelimit) __field(ino_t, cgroup_ino) ), TP_fast_assign( strscpy_pad(__entry->bdi, bdi_dev_name(wb->bdi), 32); __entry->write_bw = KBps(wb->write_bandwidth); __entry->avg_write_bw = KBps(wb->avg_write_bandwidth); __entry->dirty_rate = KBps(dirty_rate); __entry->dirty_ratelimit = KBps(wb->dirty_ratelimit); __entry->task_ratelimit = KBps(task_ratelimit); __entry->balanced_dirty_ratelimit = KBps(wb->balanced_dirty_ratelimit); __entry->cgroup_ino = __trace_wb_assign_cgroup(wb); ), TP_printk("bdi %s: " "write_bw=%lu awrite_bw=%lu dirty_rate=%lu " "dirty_ratelimit=%lu task_ratelimit=%lu " "balanced_dirty_ratelimit=%lu cgroup_ino=%lu", __entry->bdi, __entry->write_bw, /* write bandwidth */ __entry->avg_write_bw, /* avg write bandwidth */ __entry->dirty_rate, /* bdi dirty rate */ __entry->dirty_ratelimit, /* base ratelimit */ __entry->task_ratelimit, /* ratelimit with position control */ __entry->balanced_dirty_ratelimit, /* the balanced ratelimit */ (unsigned long)__entry->cgroup_ino ) ); 
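/*
 * Worked example (added note, not in the original source): KBps() converts a
 * pages-per-second rate into KB/s by shifting by (PAGE_SHIFT - 10). With the
 * common 4 KiB page size PAGE_SHIFT is 12, so KBps(x) == x << 2; a
 * wb->write_bandwidth of 25600 pages/s is therefore reported as 102400 KB/s
 * in the bdi_dirty_ratelimit trace output above.
 */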
TRACE_EVENT(balance_dirty_pages, TP_PROTO(struct bdi_writeback *wb, struct dirty_throttle_control *dtc, unsigned long dirty_ratelimit, unsigned long task_ratelimit, unsigned long dirtied, unsigned long period, long pause, unsigned long start_time), TP_ARGS(wb, dtc, dirty_ratelimit, task_ratelimit, dirtied, period, pause, start_time), TP_STRUCT__entry( __array( char, bdi, 32) __field(unsigned long, limit) __field(unsigned long, setpoint) __field(unsigned long, dirty) __field(unsigned long, wb_setpoint) __field(unsigned long, wb_dirty) __field(unsigned long, dirty_ratelimit) __field(unsigned long, task_ratelimit) __field(unsigned int, dirtied) __field(unsigned int, dirtied_pause) __field(unsigned long, paused) __field( long, pause) __field(unsigned long, period) __field( long, think) __field(ino_t, cgroup_ino) ), TP_fast_assign( unsigned long freerun = (dtc->thresh + dtc->bg_thresh) / 2; strscpy_pad(__entry->bdi, bdi_dev_name(wb->bdi), 32); __entry->limit = dtc->limit; __entry->setpoint = (dtc->limit + freerun) / 2; __entry->dirty = dtc->dirty; __entry->wb_setpoint = __entry->setpoint * dtc->wb_thresh / (dtc->thresh + 1); __entry->wb_dirty = dtc->wb_dirty; __entry->dirty_ratelimit = KBps(dirty_ratelimit); __entry->task_ratelimit = KBps(task_ratelimit); __entry->dirtied = dirtied; __entry->dirtied_pause = current->nr_dirtied_pause; __entry->think = current->dirty_paused_when == 0 ? 0 : (long)(jiffies - current->dirty_paused_when) * 1000/HZ; __entry->period = period * 1000 / HZ; __entry->pause = pause * 1000 / HZ; __entry->paused = (jiffies - start_time) * 1000 / HZ; __entry->cgroup_ino = __trace_wb_assign_cgroup(wb); ), TP_printk("bdi %s: " "limit=%lu setpoint=%lu dirty=%lu " "wb_setpoint=%lu wb_dirty=%lu " "dirty_ratelimit=%lu task_ratelimit=%lu " "dirtied=%u dirtied_pause=%u " "paused=%lu pause=%ld period=%lu think=%ld cgroup_ino=%lu", __entry->bdi, __entry->limit, __entry->setpoint, __entry->dirty, __entry->wb_setpoint, __entry->wb_dirty, __entry->dirty_ratelimit, __entry->task_ratelimit, __entry->dirtied, __entry->dirtied_pause, __entry->paused, /* ms */ __entry->pause, /* ms */ __entry->period, /* ms */ __entry->think, /* ms */ (unsigned long)__entry->cgroup_ino ) ); TRACE_EVENT(writeback_sb_inodes_requeue, TP_PROTO(struct inode *inode), TP_ARGS(inode), TP_STRUCT__entry( __array(char, name, 32) __field(ino_t, ino) __field(unsigned long, state) __field(unsigned long, dirtied_when) __field(ino_t, cgroup_ino) ), TP_fast_assign( strscpy_pad(__entry->name, bdi_dev_name(inode_to_bdi(inode)), 32); __entry->ino = inode->i_ino; __entry->state = inode->i_state; __entry->dirtied_when = inode->dirtied_when; __entry->cgroup_ino = __trace_wb_assign_cgroup(inode_to_wb(inode)); ), TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu cgroup_ino=%lu", __entry->name, (unsigned long)__entry->ino, show_inode_state(__entry->state), __entry->dirtied_when, (jiffies - __entry->dirtied_when) / HZ, (unsigned long)__entry->cgroup_ino ) ); DECLARE_EVENT_CLASS(writeback_single_inode_template, TP_PROTO(struct inode *inode, struct writeback_control *wbc, unsigned long nr_to_write ), TP_ARGS(inode, wbc, nr_to_write), TP_STRUCT__entry( __array(char, name, 32) __field(ino_t, ino) __field(unsigned long, state) __field(unsigned long, dirtied_when) __field(unsigned long, writeback_index) __field(long, nr_to_write) __field(unsigned long, wrote) __field(ino_t, cgroup_ino) ), TP_fast_assign( strscpy_pad(__entry->name, bdi_dev_name(inode_to_bdi(inode)), 32); __entry->ino = inode->i_ino; __entry->state = inode->i_state; 
__entry->dirtied_when = inode->dirtied_when; __entry->writeback_index = inode->i_mapping->writeback_index; __entry->nr_to_write = nr_to_write; __entry->wrote = nr_to_write - wbc->nr_to_write; __entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc); ), TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu " "index=%lu to_write=%ld wrote=%lu cgroup_ino=%lu", __entry->name, (unsigned long)__entry->ino, show_inode_state(__entry->state), __entry->dirtied_when, (jiffies - __entry->dirtied_when) / HZ, __entry->writeback_index, __entry->nr_to_write, __entry->wrote, (unsigned long)__entry->cgroup_ino ) ); DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode_start, TP_PROTO(struct inode *inode, struct writeback_control *wbc, unsigned long nr_to_write), TP_ARGS(inode, wbc, nr_to_write) ); DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode, TP_PROTO(struct inode *inode, struct writeback_control *wbc, unsigned long nr_to_write), TP_ARGS(inode, wbc, nr_to_write) ); DECLARE_EVENT_CLASS(writeback_inode_template, TP_PROTO(struct inode *inode), TP_ARGS(inode), TP_STRUCT__entry( __field( dev_t, dev ) __field( ino_t, ino ) __field(unsigned long, state ) __field( __u16, mode ) __field(unsigned long, dirtied_when ) ), TP_fast_assign( __entry->dev = inode->i_sb->s_dev; __entry->ino = inode->i_ino; __entry->state = inode->i_state; __entry->mode = inode->i_mode; __entry->dirtied_when = inode->dirtied_when; ), TP_printk("dev %d,%d ino %lu dirtied %lu state %s mode 0%o", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long)__entry->ino, __entry->dirtied_when, show_inode_state(__entry->state), __entry->mode) ); DEFINE_EVENT(writeback_inode_template, writeback_lazytime, TP_PROTO(struct inode *inode), TP_ARGS(inode) ); DEFINE_EVENT(writeback_inode_template, writeback_lazytime_iput, TP_PROTO(struct inode *inode), TP_ARGS(inode) ); DEFINE_EVENT(writeback_inode_template, writeback_dirty_inode_enqueue, TP_PROTO(struct inode *inode), TP_ARGS(inode) ); /* * Inode writeback list tracking. */ DEFINE_EVENT(writeback_inode_template, sb_mark_inode_writeback, TP_PROTO(struct inode *inode), TP_ARGS(inode) ); DEFINE_EVENT(writeback_inode_template, sb_clear_inode_writeback, TP_PROTO(struct inode *inode), TP_ARGS(inode) ); #endif /* _TRACE_WRITEBACK_H */ /* This part must be outside protection */ #include <trace/define_trace.h>
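/*
 * Usage sketch (added for illustration, not part of the kernel sources): the
 * events declared in the header above can be consumed from user space through
 * tracefs. The program below enables writeback:writeback_dirty_inode and
 * echoes trace_pipe; it assumes tracefs is mounted at /sys/kernel/tracing
 * (older systems may use /sys/kernel/debug/tracing) and that it runs as root.
 */
#include <stdio.h>

int main(void)
{
	const char *enable =
		"/sys/kernel/tracing/events/writeback/writeback_dirty_inode/enable";
	FILE *f = fopen(enable, "w");
	char line[4096];

	if (!f) {
		perror(enable);
		return 1;
	}
	fputs("1\n", f);		/* turn the single event on */
	fclose(f);

	f = fopen("/sys/kernel/tracing/trace_pipe", "r");
	if (!f) {
		perror("trace_pipe");
		return 1;
	}
	/* trace_pipe blocks until events arrive; each line matches TP_printk() */
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}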
// SPDX-License-Identifier: GPL-2.0 /* * Tty port functions */ #include <linux/types.h> #include <linux/errno.h> #include <linux/tty.h> #include <linux/tty_driver.h> #include <linux/tty_flip.h> #include <linux/serial.h> #include <linux/timer.h> #include <linux/string.h> #include 
<linux/slab.h> #include <linux/sched/signal.h> #include <linux/wait.h> #include <linux/bitops.h> #include <linux/delay.h> #include <linux/module.h> #include <linux/serdev.h> #include "tty.h" static size_t tty_port_default_receive_buf(struct tty_port *port, const u8 *p, const u8 *f, size_t count) { struct tty_struct *tty; struct tty_ldisc *ld; tty = READ_ONCE(port->itty); if (!tty) return 0; ld = tty_ldisc_ref(tty); if (!ld) return 0; count = tty_ldisc_receive_buf(ld, p, f, count); tty_ldisc_deref(ld); return count; } static void tty_port_default_lookahead_buf(struct tty_port *port, const u8 *p, const u8 *f, size_t count) { struct tty_struct *tty; struct tty_ldisc *ld; tty = READ_ONCE(port->itty); if (!tty) return; ld = tty_ldisc_ref(tty); if (!ld) return; if (ld->ops->lookahead_buf) ld->ops->lookahead_buf(ld->tty, p, f, count); tty_ldisc_deref(ld); } static void tty_port_default_wakeup(struct tty_port *port) { struct tty_struct *tty = tty_port_tty_get(port); if (tty) { tty_wakeup(tty); tty_kref_put(tty); } } const struct tty_port_client_operations tty_port_default_client_ops = { .receive_buf = tty_port_default_receive_buf, .lookahead_buf = tty_port_default_lookahead_buf, .write_wakeup = tty_port_default_wakeup, }; EXPORT_SYMBOL_GPL(tty_port_default_client_ops); /** * tty_port_init - initialize tty_port * @port: tty_port to initialize * * Initializes the state of struct tty_port. When a port was initialized using * this function, one has to destroy the port by tty_port_destroy(). Either * indirectly by using &tty_port refcounting (tty_port_put()) or directly if * refcounting is not used. */ void tty_port_init(struct tty_port *port) { memset(port, 0, sizeof(*port)); tty_buffer_init(port); init_waitqueue_head(&port->open_wait); init_waitqueue_head(&port->delta_msr_wait); mutex_init(&port->mutex); mutex_init(&port->buf_mutex); spin_lock_init(&port->lock); port->close_delay = (50 * HZ) / 100; port->closing_wait = (3000 * HZ) / 100; port->client_ops = &tty_port_default_client_ops; kref_init(&port->kref); } EXPORT_SYMBOL(tty_port_init); /** * tty_port_link_device - link tty and tty_port * @port: tty_port of the device * @driver: tty_driver for this device * @index: index of the tty * * Provide the tty layer with a link from a tty (specified by @index) to a * tty_port (@port). Use this only if neither tty_port_register_device() nor * tty_port_install() is used in the driver. If used, this has to be called * before tty_register_driver(). */ void tty_port_link_device(struct tty_port *port, struct tty_driver *driver, unsigned index) { if (WARN_ON(index >= driver->num)) return; driver->ports[index] = port; } EXPORT_SYMBOL_GPL(tty_port_link_device); /** * tty_port_register_device - register tty device * @port: tty_port of the device * @driver: tty_driver for this device * @index: index of the tty * @device: parent if exists, otherwise NULL * * It is the same as tty_register_device() except the provided @port is linked * to a concrete tty specified by @index. Use this or tty_port_install() (or * both). Call tty_port_link_device() as a last resort. 
*/ struct device *tty_port_register_device(struct tty_port *port, struct tty_driver *driver, unsigned index, struct device *device) { return tty_port_register_device_attr(port, driver, index, device, NULL, NULL); } EXPORT_SYMBOL_GPL(tty_port_register_device); /** * tty_port_register_device_attr - register tty device * @port: tty_port of the device * @driver: tty_driver for this device * @index: index of the tty * @device: parent if exists, otherwise NULL * @drvdata: Driver data to be set to device. * @attr_grp: Attribute group to be set on device. * * It is the same as tty_register_device_attr() except the provided @port is * linked to a concrete tty specified by @index. Use this or tty_port_install() * (or both). Call tty_port_link_device() as a last resort. */ struct device *tty_port_register_device_attr(struct tty_port *port, struct tty_driver *driver, unsigned index, struct device *device, void *drvdata, const struct attribute_group **attr_grp) { tty_port_link_device(port, driver, index); return tty_register_device_attr(driver, index, device, drvdata, attr_grp); } EXPORT_SYMBOL_GPL(tty_port_register_device_attr); /** * tty_port_register_device_attr_serdev - register tty or serdev device * @port: tty_port of the device * @driver: tty_driver for this device * @index: index of the tty * @host: serial port hardware device * @parent: parent if exists, otherwise NULL * @drvdata: driver data for the device * @attr_grp: attribute group for the device * * Register a serdev or tty device depending on if the parent device has any * defined serdev clients or not. */ struct device *tty_port_register_device_attr_serdev(struct tty_port *port, struct tty_driver *driver, unsigned index, struct device *host, struct device *parent, void *drvdata, const struct attribute_group **attr_grp) { struct device *dev; tty_port_link_device(port, driver, index); dev = serdev_tty_port_register(port, host, parent, driver, index); if (PTR_ERR(dev) != -ENODEV) { /* Skip creating cdev if we registered a serdev device */ return dev; } return tty_register_device_attr(driver, index, parent, drvdata, attr_grp); } EXPORT_SYMBOL_GPL(tty_port_register_device_attr_serdev); /** * tty_port_unregister_device - deregister a tty or serdev device * @port: tty_port of the device * @driver: tty_driver for this device * @index: index of the tty * * If a tty or serdev device is registered with a call to * tty_port_register_device_serdev() then this function must be called when * the device is gone. 
*/ void tty_port_unregister_device(struct tty_port *port, struct tty_driver *driver, unsigned index) { int ret; ret = serdev_tty_port_unregister(port); if (ret == 0) return; tty_unregister_device(driver, index); } EXPORT_SYMBOL_GPL(tty_port_unregister_device); int tty_port_alloc_xmit_buf(struct tty_port *port) { /* We may sleep in get_zeroed_page() */ mutex_lock(&port->buf_mutex); if (port->xmit_buf == NULL) { port->xmit_buf = (u8 *)get_zeroed_page(GFP_KERNEL); if (port->xmit_buf) kfifo_init(&port->xmit_fifo, port->xmit_buf, PAGE_SIZE); } mutex_unlock(&port->buf_mutex); if (port->xmit_buf == NULL) return -ENOMEM; return 0; } EXPORT_SYMBOL(tty_port_alloc_xmit_buf); void tty_port_free_xmit_buf(struct tty_port *port) { mutex_lock(&port->buf_mutex); free_page((unsigned long)port->xmit_buf); port->xmit_buf = NULL; INIT_KFIFO(port->xmit_fifo); mutex_unlock(&port->buf_mutex); } EXPORT_SYMBOL(tty_port_free_xmit_buf); /** * tty_port_destroy - destroy inited port * @port: tty port to be destroyed * * When a port was initialized using tty_port_init(), one has to destroy the * port by this function. Either indirectly by using &tty_port refcounting * (tty_port_put()) or directly if refcounting is not used. */ void tty_port_destroy(struct tty_port *port) { tty_buffer_cancel_work(port); tty_buffer_free_all(port); } EXPORT_SYMBOL(tty_port_destroy); static void tty_port_destructor(struct kref *kref) { struct tty_port *port = container_of(kref, struct tty_port, kref); /* check if last port ref was dropped before tty release */ if (WARN_ON(port->itty)) return; free_page((unsigned long)port->xmit_buf); tty_port_destroy(port); if (port->ops && port->ops->destruct) port->ops->destruct(port); else kfree(port); } /** * tty_port_put - drop a reference to tty_port * @port: port to drop a reference of (can be NULL) * * The final put will destroy and free up the @port using * @port->ops->destruct() hook, or using kfree() if not provided. */ void tty_port_put(struct tty_port *port) { if (port) kref_put(&port->kref, tty_port_destructor); } EXPORT_SYMBOL(tty_port_put); /** * tty_port_tty_get - get a tty reference * @port: tty port * * Return a refcount protected tty instance or %NULL if the port is not * associated with a tty (eg due to close or hangup). */ struct tty_struct *tty_port_tty_get(struct tty_port *port) { unsigned long flags; struct tty_struct *tty; spin_lock_irqsave(&port->lock, flags); tty = tty_kref_get(port->tty); spin_unlock_irqrestore(&port->lock, flags); return tty; } EXPORT_SYMBOL(tty_port_tty_get); /** * tty_port_tty_set - set the tty of a port * @port: tty port * @tty: the tty * * Associate the port and tty pair. Manages any internal refcounts. Pass %NULL * to deassociate a port. */ void tty_port_tty_set(struct tty_port *port, struct tty_struct *tty) { unsigned long flags; spin_lock_irqsave(&port->lock, flags); tty_kref_put(port->tty); port->tty = tty_kref_get(tty); spin_unlock_irqrestore(&port->lock, flags); } EXPORT_SYMBOL(tty_port_tty_set); /** * tty_port_shutdown - internal helper to shutdown the device * @port: tty port to be shut down * @tty: the associated tty * * It is used by tty_port_hangup() and tty_port_close(). Its task is to * shutdown the device if it was initialized (note consoles remain * functioning). It lowers DTR/RTS (if @tty has HUPCL set) and invokes * @port->ops->shutdown(). 
*/ static void tty_port_shutdown(struct tty_port *port, struct tty_struct *tty) { mutex_lock(&port->mutex); if (port->console) goto out; if (tty_port_initialized(port)) { tty_port_set_initialized(port, false); /* * Drop DTR/RTS if HUPCL is set. This causes any attached * modem to hang up the line. */ if (tty && C_HUPCL(tty)) tty_port_lower_dtr_rts(port); if (port->ops->shutdown) port->ops->shutdown(port); } out: mutex_unlock(&port->mutex); } /** * tty_port_hangup - hangup helper * @port: tty port * * Perform port level tty hangup flag and count changes. Drop the tty * reference. * * Caller holds tty lock. */ void tty_port_hangup(struct tty_port *port) { struct tty_struct *tty; unsigned long flags; spin_lock_irqsave(&port->lock, flags); port->count = 0; tty = port->tty; if (tty) set_bit(TTY_IO_ERROR, &tty->flags); port->tty = NULL; spin_unlock_irqrestore(&port->lock, flags); tty_port_set_active(port, false); tty_port_shutdown(port, tty); tty_kref_put(tty); wake_up_interruptible(&port->open_wait); wake_up_interruptible(&port->delta_msr_wait); } EXPORT_SYMBOL(tty_port_hangup); /** * tty_port_tty_hangup - helper to hang up a tty * @port: tty port * @check_clocal: hang only ttys with %CLOCAL unset? */ void tty_port_tty_hangup(struct tty_port *port, bool check_clocal) { struct tty_struct *tty = tty_port_tty_get(port); if (tty && (!check_clocal || !C_CLOCAL(tty))) tty_hangup(tty); tty_kref_put(tty); } EXPORT_SYMBOL_GPL(tty_port_tty_hangup); /** * tty_port_tty_wakeup - helper to wake up a tty * @port: tty port */ void tty_port_tty_wakeup(struct tty_port *port) { port->client_ops->write_wakeup(port); } EXPORT_SYMBOL_GPL(tty_port_tty_wakeup); /** * tty_port_carrier_raised - carrier raised check * @port: tty port * * Wrapper for the carrier detect logic. For the moment this is used * to hide some internal details. This will eventually become entirely * internal to the tty port. */ bool tty_port_carrier_raised(struct tty_port *port) { if (port->ops->carrier_raised == NULL) return true; return port->ops->carrier_raised(port); } EXPORT_SYMBOL(tty_port_carrier_raised); /** * tty_port_raise_dtr_rts - Raise DTR/RTS * @port: tty port * * Wrapper for the DTR/RTS raise logic. For the moment this is used to hide * some internal details. This will eventually become entirely internal to the * tty port. */ void tty_port_raise_dtr_rts(struct tty_port *port) { if (port->ops->dtr_rts) port->ops->dtr_rts(port, true); } EXPORT_SYMBOL(tty_port_raise_dtr_rts); /** * tty_port_lower_dtr_rts - Lower DTR/RTS * @port: tty port * * Wrapper for the DTR/RTS raise logic. For the moment this is used to hide * some internal details. This will eventually become entirely internal to the * tty port. */ void tty_port_lower_dtr_rts(struct tty_port *port) { if (port->ops->dtr_rts) port->ops->dtr_rts(port, false); } EXPORT_SYMBOL(tty_port_lower_dtr_rts); /** * tty_port_block_til_ready - Waiting logic for tty open * @port: the tty port being opened * @tty: the tty device being bound * @filp: the file pointer of the opener or %NULL * * Implement the core POSIX/SuS tty behaviour when opening a tty device. * Handles: * * - hangup (both before and during) * - non blocking open * - rts/dtr/dcd * - signals * - port flags and counts * * The passed @port must implement the @port->ops->carrier_raised method if it * can do carrier detect and the @port->ops->dtr_rts method if it supports * software management of these lines. Note that the dtr/rts raise is done each * iteration as a hangup may have previously dropped them while we wait. 
* * Caller holds tty lock. * * Note: May drop and reacquire tty lock when blocking, so @tty and @port may * have changed state (eg., may have been hung up). */ int tty_port_block_til_ready(struct tty_port *port, struct tty_struct *tty, struct file *filp) { int do_clocal = 0, retval; unsigned long flags; DEFINE_WAIT(wait); /* if non-blocking mode is set we can pass directly to open unless * the port has just hung up or is in another error state. */ if (tty_io_error(tty)) { tty_port_set_active(port, true); return 0; } if (filp == NULL || (filp->f_flags & O_NONBLOCK)) { /* Indicate we are open */ if (C_BAUD(tty)) tty_port_raise_dtr_rts(port); tty_port_set_active(port, true); return 0; } if (C_CLOCAL(tty)) do_clocal = 1; /* Block waiting until we can proceed. We may need to wait for the * carrier, but we must also wait for any close that is in progress * before the next open may complete. */ retval = 0; /* The port lock protects the port counts */ spin_lock_irqsave(&port->lock, flags); port->count--; port->blocked_open++; spin_unlock_irqrestore(&port->lock, flags); while (1) { /* Indicate we are open */ if (C_BAUD(tty) && tty_port_initialized(port)) tty_port_raise_dtr_rts(port); prepare_to_wait(&port->open_wait, &wait, TASK_INTERRUPTIBLE); /* Check for a hangup or uninitialised port. * Return accordingly. */ if (tty_hung_up_p(filp) || !tty_port_initialized(port)) { if (port->flags & ASYNC_HUP_NOTIFY) retval = -EAGAIN; else retval = -ERESTARTSYS; break; } /* * Probe the carrier. For devices with no carrier detect * tty_port_carrier_raised will always return true. * Never ask drivers if CLOCAL is set, this causes troubles * on some hardware. */ if (do_clocal || tty_port_carrier_raised(port)) break; if (signal_pending(current)) { retval = -ERESTARTSYS; break; } tty_unlock(tty); schedule(); tty_lock(tty); } finish_wait(&port->open_wait, &wait); /* Update counts. A parallel hangup will have set count to zero and * we must not mess that up further. */ spin_lock_irqsave(&port->lock, flags); if (!tty_hung_up_p(filp)) port->count++; port->blocked_open--; spin_unlock_irqrestore(&port->lock, flags); if (retval == 0) tty_port_set_active(port, true); return retval; } EXPORT_SYMBOL(tty_port_block_til_ready); static void tty_port_drain_delay(struct tty_port *port, struct tty_struct *tty) { unsigned int bps = tty_get_baud_rate(tty); long timeout; if (bps > 1200) { timeout = (HZ * 10 * port->drain_delay) / bps; timeout = max_t(long, timeout, HZ / 10); } else { timeout = 2 * HZ; } schedule_timeout_interruptible(timeout); } /** * tty_port_close_start - helper for tty->ops->close, part 1/2 * @port: tty_port of the device * @tty: tty being closed * @filp: passed file pointer * * Decrements and checks open count. Flushes the port if this is the last * close. That means, dropping the data from the outpu buffer on the device and * waiting for sending logic to finish. The rest of close handling is performed * in tty_port_close_end(). * * Locking: Caller holds tty lock. 
* * Return: 1 if this is the last close, otherwise 0 */ int tty_port_close_start(struct tty_port *port, struct tty_struct *tty, struct file *filp) { unsigned long flags; if (tty_hung_up_p(filp)) return 0; spin_lock_irqsave(&port->lock, flags); if (tty->count == 1 && port->count != 1) { tty_warn(tty, "%s: tty->count = 1 port count = %d\n", __func__, port->count); port->count = 1; } if (--port->count < 0) { tty_warn(tty, "%s: bad port count (%d)\n", __func__, port->count); port->count = 0; } if (port->count) { spin_unlock_irqrestore(&port->lock, flags); return 0; } spin_unlock_irqrestore(&port->lock, flags); tty->closing = 1; if (tty_port_initialized(port)) { /* Don't block on a stalled port, just pull the chain */ if (tty->flow.tco_stopped) tty_driver_flush_buffer(tty); if (port->closing_wait != ASYNC_CLOSING_WAIT_NONE) tty_wait_until_sent(tty, port->closing_wait); if (port->drain_delay) tty_port_drain_delay(port, tty); } /* Flush the ldisc buffering */ tty_ldisc_flush(tty); /* Report to caller this is the last port reference */ return 1; } EXPORT_SYMBOL(tty_port_close_start); /** * tty_port_close_end - helper for tty->ops->close, part 2/2 * @port: tty_port of the device * @tty: tty being closed * * This is a continuation of the first part: tty_port_close_start(). This * should be called after turning off the device. It flushes the data from the * line discipline and delays the close by @port->close_delay. * * Locking: Caller holds tty lock. */ void tty_port_close_end(struct tty_port *port, struct tty_struct *tty) { unsigned long flags; tty_ldisc_flush(tty); tty->closing = 0; spin_lock_irqsave(&port->lock, flags); if (port->blocked_open) { spin_unlock_irqrestore(&port->lock, flags); if (port->close_delay) msleep_interruptible(jiffies_to_msecs(port->close_delay)); spin_lock_irqsave(&port->lock, flags); wake_up_interruptible(&port->open_wait); } spin_unlock_irqrestore(&port->lock, flags); tty_port_set_active(port, false); } EXPORT_SYMBOL(tty_port_close_end); /** * tty_port_close - generic tty->ops->close handler * @port: tty_port of the device * @tty: tty being closed * @filp: passed file pointer * * It is a generic helper to be used in driver's @tty->ops->close. It wraps a * sequence of tty_port_close_start(), tty_port_shutdown(), and * tty_port_close_end(). The latter two are called only if this is the last * close. See the respective functions for the details. * * Locking: Caller holds tty lock */ void tty_port_close(struct tty_port *port, struct tty_struct *tty, struct file *filp) { if (tty_port_close_start(port, tty, filp) == 0) return; tty_port_shutdown(port, tty); if (!port->console) set_bit(TTY_IO_ERROR, &tty->flags); tty_port_close_end(port, tty); tty_port_tty_set(port, NULL); } EXPORT_SYMBOL(tty_port_close); /** * tty_port_install - generic tty->ops->install handler * @port: tty_port of the device * @driver: tty_driver for this device * @tty: tty to be installed * * It is the same as tty_standard_install() except the provided @port is linked * to a concrete tty specified by @tty. Use this or tty_port_register_device() * (or both). Call tty_port_link_device() as a last resort. */ int tty_port_install(struct tty_port *port, struct tty_driver *driver, struct tty_struct *tty) { tty->port = port; return tty_standard_install(driver, tty); } EXPORT_SYMBOL_GPL(tty_port_install); /** * tty_port_open - generic tty->ops->open handler * @port: tty_port of the device * @tty: tty to be opened * @filp: passed file pointer * * It is a generic helper to be used in driver's @tty->ops->open. 
It activates * the devices using @port->ops->activate if not active already. And waits for * the device to be ready using tty_port_block_til_ready() (e.g. raises * DTR/CTS and waits for carrier). * * Note that @port->ops->shutdown is not called when @port->ops->activate * returns an error (on the contrary, @tty->ops->close is). * * Locking: Caller holds tty lock. * * Note: may drop and reacquire tty lock (in tty_port_block_til_ready()) so * @tty and @port may have changed state (eg., may be hung up now). */ int tty_port_open(struct tty_port *port, struct tty_struct *tty, struct file *filp) { spin_lock_irq(&port->lock); ++port->count; spin_unlock_irq(&port->lock); tty_port_tty_set(port, tty); /* * Do the device-specific open only if the hardware isn't * already initialized. Serialize open and shutdown using the * port mutex. */ mutex_lock(&port->mutex); if (!tty_port_initialized(port)) { clear_bit(TTY_IO_ERROR, &tty->flags); if (port->ops->activate) { int retval = port->ops->activate(port, tty); if (retval) { mutex_unlock(&port->mutex); return retval; } } tty_port_set_initialized(port, true); } mutex_unlock(&port->mutex); return tty_port_block_til_ready(port, tty, filp); } EXPORT_SYMBOL(tty_port_open);
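/*
 * Driver-side sketch (added for illustration, not part of tty_port.c): the
 * helpers documented above are normally wired into a driver's tty_operations
 * so that open/close/hangup reference counting and blocking-open behaviour
 * are handled by the tty_port core. The "foo" names below are hypothetical;
 * a real driver would also register the tty_driver, call tty_port_install()
 * from its ->install hook, and push received bytes through the flip buffers.
 */
#include <linux/tty.h>

struct foo_serial {
	struct tty_port port;		/* embedded port owns open/close state */
	/* ... hardware specific fields ... */
};

static int foo_activate(struct tty_port *port, struct tty_struct *tty)
{
	/* power up / enable interrupts on the first open; 0 on success */
	return 0;
}

static void foo_shutdown(struct tty_port *port)
{
	/* quiesce the hardware on the last close or on hangup */
}

static const struct tty_port_operations foo_port_ops = {
	.activate = foo_activate,
	.shutdown = foo_shutdown,
};

static void foo_port_setup(struct foo_serial *foo)
{
	/* must later be torn down with tty_port_destroy() or tty_port_put() */
	tty_port_init(&foo->port);
	foo->port.ops = &foo_port_ops;
}

static int foo_open(struct tty_struct *tty, struct file *filp)
{
	struct foo_serial *foo = container_of(tty->port, struct foo_serial, port);

	/* bumps the count, calls ->activate once, then blocks until ready */
	return tty_port_open(&foo->port, tty, filp);
}

static void foo_close(struct tty_struct *tty, struct file *filp)
{
	struct foo_serial *foo = container_of(tty->port, struct foo_serial, port);

	/* calls ->shutdown only on the last close */
	tty_port_close(&foo->port, tty, filp);
}

static void foo_hangup(struct tty_struct *tty)
{
	struct foo_serial *foo = container_of(tty->port, struct foo_serial, port);

	tty_port_hangup(&foo->port);
}

static const struct tty_operations foo_tty_ops = {
	.open = foo_open,
	.close = foo_close,
	.hangup = foo_hangup,
	/* .install would typically call tty_port_install(&foo->port, ...) */
};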
// SPDX-License-Identifier: GPL-2.0+ /* * usbdux.c * Copyright (C) 2003-2014 Bernd Porr, mail@berndporr.me.uk */ /* * Driver: usbdux * Description: University of Stirling USB DAQ & INCITE Technology Limited * Devices: [ITL] USB-DUX (usbdux) * Author: Bernd Porr <mail@berndporr.me.uk> * Updated: 10 Oct 2014 * Status: Stable * * Connection scheme for the counter at the digital port: * 0=/CLK0, 1=UP/DOWN0, 2=RESET0, 4=/CLK1, 5=UP/DOWN1, 6=RESET1. * The sampling rate of the counter is approximately 500Hz. * * Note that under USB2.0 the length of the channel list determines * the max sampling rate. If you sample only one channel you get 8kHz * sampling rate. If you sample two channels you get 4kHz and so on. */ /* * I must give credit here to Chris Baugher who * wrote the driver for AT-MIO-16d. I used some parts of this * driver. I also must give credits to David Brownell * who supported me with the USB development. * * Bernd Porr * * * Revision history: * 0.94: D/A output should work now with any channel list combinations * 0.95: .owner commented out for kernel vers below 2.4.19 * sanity checks in ai/ao_cmd * 0.96: trying to get it working with 2.6, moved all memory alloc to comedi's * attach final USB IDs * moved memory allocation completely to the corresponding comedi * functions firmware upload is by fxload and no longer by comedi (due to * enumeration) * 0.97: USB IDs received, adjusted table * 0.98: SMP, locking, memory alloc: moved all usb memory alloc * to the usb subsystem and moved all comedi related memory * alloc to comedi. * | kernel | registration | usbdux-usb | usbdux-comedi | comedi | * 0.99: USB 2.0: changed protocol to isochronous transfer * IRQ transfer is too buggy and too risky in 2.0 * for the high speed ISO transfer is now a working version * available * 0.99b: Increased the iso transfer buffer for high sp.to 10 buffers. Some VIA * chipsets miss out IRQs. Deeper buffering is needed. * 1.00: full USB 2.0 support for the A/D converter. Now: max 8kHz sampling * rate. * Firmware vers 1.00 is needed for this. * Two 16 bit up/down/reset counter with a sampling rate of 1kHz * And loads of cleaning up, in particular streamlining the * bulk transfers. * 1.1: moved EP4 transfers to EP1 to make space for a PWM output on EP4 * 1.2: added PWM support via EP4 * 2.0: PWM seems to be stable and is not interfering with the other functions * 2.1: changed PWM API * 2.2: added firmware kernel request to fix an udev problem * 2.3: corrected a bug in bulk timeouts which were far too short * 2.4: fixed a bug which causes the driver to hang when it ran out of data. * Thanks to Jan-Matthias Braun and Ian to spot the bug and fix it. 
* */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/input.h> #include <linux/fcntl.h> #include <linux/compiler.h> #include <linux/comedi/comedi_usb.h> /* constants for firmware upload and download */ #define USBDUX_FIRMWARE "usbdux_firmware.bin" #define USBDUX_FIRMWARE_MAX_LEN 0x2000 #define USBDUX_FIRMWARE_CMD 0xa0 #define VENDOR_DIR_IN 0xc0 #define VENDOR_DIR_OUT 0x40 #define USBDUX_CPU_CS 0xe600 /* usbdux bulk transfer commands */ #define USBDUX_CMD_MULT_AI 0 #define USBDUX_CMD_AO 1 #define USBDUX_CMD_DIO_CFG 2 #define USBDUX_CMD_DIO_BITS 3 #define USBDUX_CMD_SINGLE_AI 4 #define USBDUX_CMD_TIMER_RD 5 #define USBDUX_CMD_TIMER_WR 6 #define USBDUX_CMD_PWM_ON 7 #define USBDUX_CMD_PWM_OFF 8 /* timeout for the USB-transfer in ms */ #define BULK_TIMEOUT 1000 /* 300Hz max frequ under PWM */ #define MIN_PWM_PERIOD ((long)(1E9 / 300)) /* Default PWM frequency */ #define PWM_DEFAULT_PERIOD ((long)(1E9 / 100)) /* Size of one A/D value */ #define SIZEADIN ((sizeof(u16))) /* * Size of the input-buffer IN BYTES * Always multiple of 8 for 8 microframes which is needed in the highspeed mode */ #define SIZEINBUF (8 * SIZEADIN) /* 16 bytes. */ #define SIZEINSNBUF 16 /* size of one value for the D/A converter: channel and value */ #define SIZEDAOUT ((sizeof(u8) + sizeof(u16))) /* * Size of the output-buffer in bytes * Actually only the first 4 triplets are used but for the * high speed mode we need to pad it to 8 (microframes). */ #define SIZEOUTBUF (8 * SIZEDAOUT) /* * Size of the buffer for the dux commands: just now max size is determined * by the analogue out + command byte + panic bytes... */ #define SIZEOFDUXBUFFER (8 * SIZEDAOUT + 2) /* Number of in-URBs which receive the data: min=2 */ #define NUMOFINBUFFERSFULL 5 /* Number of out-URBs which send the data: min=2 */ #define NUMOFOUTBUFFERSFULL 5 /* Number of in-URBs which receive the data: min=5 */ /* must have more buffers due to buggy USB ctr */ #define NUMOFINBUFFERSHIGH 10 /* Number of out-URBs which send the data: min=5 */ /* must have more buffers due to buggy USB ctr */ #define NUMOFOUTBUFFERSHIGH 10 /* number of retries to get the right dux command */ #define RETRIES 10 static const struct comedi_lrange range_usbdux_ai_range = { 4, { BIP_RANGE(4.096), BIP_RANGE(4.096 / 2), UNI_RANGE(4.096), UNI_RANGE(4.096 / 2) } }; static const struct comedi_lrange range_usbdux_ao_range = { 2, { BIP_RANGE(4.096), UNI_RANGE(4.096) } }; struct usbdux_private { /* actual number of in-buffers */ int n_ai_urbs; /* actual number of out-buffers */ int n_ao_urbs; /* ISO-transfer handling: buffers */ struct urb **ai_urbs; struct urb **ao_urbs; /* pwm-transfer handling */ struct urb *pwm_urb; /* PWM period */ unsigned int pwm_period; /* PWM internal delay for the GPIF in the FX2 */ u8 pwm_delay; /* size of the PWM buffer which holds the bit pattern */ int pwm_buf_sz; /* input buffer for the ISO-transfer */ __le16 *in_buf; /* input buffer for single insn */ __le16 *insn_buf; unsigned int high_speed:1; unsigned int ai_cmd_running:1; unsigned int ao_cmd_running:1; unsigned int pwm_cmd_running:1; /* time between samples in units of the timer */ unsigned int ai_timer; unsigned int ao_timer; /* counter between aquisitions */ unsigned int ai_counter; unsigned int ao_counter; /* interval in frames/uframes */ unsigned int ai_interval; /* commands */ u8 *dux_commands; struct mutex mut; }; static void usbdux_unlink_urbs(struct urb **urbs, int num_urbs) { int i; for (i = 0; i < num_urbs; i++) usb_kill_urb(urbs[i]); } static void 
usbdux_ai_stop(struct comedi_device *dev, int do_unlink) { struct usbdux_private *devpriv = dev->private; if (do_unlink && devpriv->ai_urbs) usbdux_unlink_urbs(devpriv->ai_urbs, devpriv->n_ai_urbs); devpriv->ai_cmd_running = 0; } static int usbdux_ai_cancel(struct comedi_device *dev, struct comedi_subdevice *s) { struct usbdux_private *devpriv = dev->private; /* prevent other CPUs from submitting new commands just now */ mutex_lock(&devpriv->mut); /* unlink only if the urb really has been submitted */ usbdux_ai_stop(dev, devpriv->ai_cmd_running); mutex_unlock(&devpriv->mut); return 0; } static void usbduxsub_ai_handle_urb(struct comedi_device *dev, struct comedi_subdevice *s, struct urb *urb) { struct usbdux_private *devpriv = dev->private; struct comedi_async *async = s->async; struct comedi_cmd *cmd = &async->cmd; int ret; int i; devpriv->ai_counter--; if (devpriv->ai_counter == 0) { devpriv->ai_counter = devpriv->ai_timer; /* get the data from the USB bus and hand it over to comedi */ for (i = 0; i < cmd->chanlist_len; i++) { unsigned int range = CR_RANGE(cmd->chanlist[i]); u16 val = le16_to_cpu(devpriv->in_buf[i]); /* bipolar data is two's-complement */ if (comedi_range_is_bipolar(s, range)) val = comedi_offset_munge(s, val); /* transfer data */ if (!comedi_buf_write_samples(s, &val, 1)) return; } if (cmd->stop_src == TRIG_COUNT && async->scans_done >= cmd->stop_arg) async->events |= COMEDI_CB_EOA; } /* if command is still running, resubmit urb */ if (!(async->events & COMEDI_CB_CANCEL_MASK)) { urb->dev = comedi_to_usb_dev(dev); ret = usb_submit_urb(urb, GFP_ATOMIC); if (ret < 0) { dev_err(dev->class_dev, "urb resubmit failed in int-context! err=%d\n", ret); if (ret == -EL2NSYNC) dev_err(dev->class_dev, "buggy USB host controller or bug in IRQ handler!\n"); async->events |= COMEDI_CB_ERROR; } } } static void usbduxsub_ai_isoc_irq(struct urb *urb) { struct comedi_device *dev = urb->context; struct comedi_subdevice *s = dev->read_subdev; struct comedi_async *async = s->async; struct usbdux_private *devpriv = dev->private; /* exit if not running a command, do not resubmit urb */ if (!devpriv->ai_cmd_running) return; switch (urb->status) { case 0: /* copy the result in the transfer buffer */ memcpy(devpriv->in_buf, urb->transfer_buffer, SIZEINBUF); usbduxsub_ai_handle_urb(dev, s, urb); break; case -EILSEQ: /* * error in the ISOchronous data * we don't copy the data into the transfer buffer * and recycle the last data byte */ dev_dbg(dev->class_dev, "CRC error in ISO IN stream\n"); usbduxsub_ai_handle_urb(dev, s, urb); break; case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: case -ECONNABORTED: /* after an unlink command, unplug, ... etc */ async->events |= COMEDI_CB_ERROR; break; default: /* a real error */ dev_err(dev->class_dev, "Non-zero urb status received in ai intr context: %d\n", urb->status); async->events |= COMEDI_CB_ERROR; break; } /* * comedi_handle_events() cannot be used in this driver. The (*cancel) * operation would unlink the urb. 
*/ if (async->events & COMEDI_CB_CANCEL_MASK) usbdux_ai_stop(dev, 0); comedi_event(dev, s); } static void usbdux_ao_stop(struct comedi_device *dev, int do_unlink) { struct usbdux_private *devpriv = dev->private; if (do_unlink && devpriv->ao_urbs) usbdux_unlink_urbs(devpriv->ao_urbs, devpriv->n_ao_urbs); devpriv->ao_cmd_running = 0; } static int usbdux_ao_cancel(struct comedi_device *dev, struct comedi_subdevice *s) { struct usbdux_private *devpriv = dev->private; /* prevent other CPUs from submitting a command just now */ mutex_lock(&devpriv->mut); /* unlink only if it is really running */ usbdux_ao_stop(dev, devpriv->ao_cmd_running); mutex_unlock(&devpriv->mut); return 0; } static void usbduxsub_ao_handle_urb(struct comedi_device *dev, struct comedi_subdevice *s, struct urb *urb) { struct usbdux_private *devpriv = dev->private; struct comedi_async *async = s->async; struct comedi_cmd *cmd = &async->cmd; u8 *datap; int ret; int i; devpriv->ao_counter--; if (devpriv->ao_counter == 0) { devpriv->ao_counter = devpriv->ao_timer; if (cmd->stop_src == TRIG_COUNT && async->scans_done >= cmd->stop_arg) { async->events |= COMEDI_CB_EOA; return; } /* transmit data to the USB bus */ datap = urb->transfer_buffer; *datap++ = cmd->chanlist_len; for (i = 0; i < cmd->chanlist_len; i++) { unsigned int chan = CR_CHAN(cmd->chanlist[i]); unsigned short val; if (!comedi_buf_read_samples(s, &val, 1)) { dev_err(dev->class_dev, "buffer underflow\n"); async->events |= COMEDI_CB_OVERFLOW; return; } /* pointer to the DA */ *datap++ = val & 0xff; *datap++ = (val >> 8) & 0xff; *datap++ = chan << 6; s->readback[chan] = val; } } /* if command is still running, resubmit urb for BULK transfer */ if (!(async->events & COMEDI_CB_CANCEL_MASK)) { urb->transfer_buffer_length = SIZEOUTBUF; urb->dev = comedi_to_usb_dev(dev); urb->status = 0; if (devpriv->high_speed) urb->interval = 8; /* uframes */ else urb->interval = 1; /* frames */ urb->number_of_packets = 1; urb->iso_frame_desc[0].offset = 0; urb->iso_frame_desc[0].length = SIZEOUTBUF; urb->iso_frame_desc[0].status = 0; ret = usb_submit_urb(urb, GFP_ATOMIC); if (ret < 0) { dev_err(dev->class_dev, "ao urb resubm failed in int-cont. ret=%d", ret); if (ret == -EL2NSYNC) dev_err(dev->class_dev, "buggy USB host controller or bug in IRQ handling!\n"); async->events |= COMEDI_CB_ERROR; } } } static void usbduxsub_ao_isoc_irq(struct urb *urb) { struct comedi_device *dev = urb->context; struct comedi_subdevice *s = dev->write_subdev; struct comedi_async *async = s->async; struct usbdux_private *devpriv = dev->private; /* exit if not running a command, do not resubmit urb */ if (!devpriv->ao_cmd_running) return; switch (urb->status) { case 0: usbduxsub_ao_handle_urb(dev, s, urb); break; case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: case -ECONNABORTED: /* after an unlink command, unplug, ... etc */ async->events |= COMEDI_CB_ERROR; break; default: /* a real error */ dev_err(dev->class_dev, "Non-zero urb status received in ao intr context: %d\n", urb->status); async->events |= COMEDI_CB_ERROR; break; } /* * comedi_handle_events() cannot be used in this driver. The (*cancel) * operation would unlink the urb. 
*/ if (async->events & COMEDI_CB_CANCEL_MASK) usbdux_ao_stop(dev, 0); comedi_event(dev, s); } static int usbdux_submit_urbs(struct comedi_device *dev, struct urb **urbs, int num_urbs, int input_urb) { struct usb_device *usb = comedi_to_usb_dev(dev); struct usbdux_private *devpriv = dev->private; struct urb *urb; int ret; int i; /* Submit all URBs and start the transfer on the bus */ for (i = 0; i < num_urbs; i++) { urb = urbs[i]; /* in case of a resubmission after an unlink... */ if (input_urb) urb->interval = devpriv->ai_interval; urb->context = dev; urb->dev = usb; urb->status = 0; urb->transfer_flags = URB_ISO_ASAP; ret = usb_submit_urb(urb, GFP_ATOMIC); if (ret) return ret; } return 0; } static int usbdux_ai_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_cmd *cmd) { struct usbdux_private *devpriv = dev->private; int err = 0; /* Step 1 : check if triggers are trivially valid */ err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW | TRIG_INT); err |= comedi_check_trigger_src(&cmd->scan_begin_src, TRIG_TIMER); err |= comedi_check_trigger_src(&cmd->convert_src, TRIG_NOW); err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT); err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE); if (err) return 1; /* Step 2a : make sure trigger sources are unique */ err |= comedi_check_trigger_is_unique(cmd->start_src); err |= comedi_check_trigger_is_unique(cmd->stop_src); /* Step 2b : and mutually compatible */ if (err) return 2; /* Step 3: check if arguments are trivially valid */ err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0); if (cmd->scan_begin_src == TRIG_FOLLOW) /* internal trigger */ err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, 0); if (cmd->scan_begin_src == TRIG_TIMER) { /* full speed does 1kHz scans every USB frame */ unsigned int arg = 1000000; unsigned int min_arg = arg; if (devpriv->high_speed) { /* * In high speed mode microframes are possible. * However, during one microframe we can roughly * sample one channel. Thus, the more channels * are in the channel list the more time we need. 
*/ int i = 1; /* find a power of 2 for the number of channels */ while (i < cmd->chanlist_len) i = i * 2; arg /= 8; min_arg = arg * i; } err |= comedi_check_trigger_arg_min(&cmd->scan_begin_arg, min_arg); /* calc the real sampling rate with the rounding errors */ arg = (cmd->scan_begin_arg / arg) * arg; err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, arg); } err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg, cmd->chanlist_len); if (cmd->stop_src == TRIG_COUNT) err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1); else /* TRIG_NONE */ err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0); if (err) return 3; return 0; } /* * creates the ADC command for the MAX1271 * range is the range value from comedi */ static u8 create_adc_command(unsigned int chan, unsigned int range) { u8 p = (range <= 1); u8 r = ((range % 2) == 0); return (chan << 4) | ((p == 1) << 2) | ((r == 1) << 3); } static int send_dux_commands(struct comedi_device *dev, unsigned int cmd_type) { struct usb_device *usb = comedi_to_usb_dev(dev); struct usbdux_private *devpriv = dev->private; int nsent; devpriv->dux_commands[0] = cmd_type; return usb_bulk_msg(usb, usb_sndbulkpipe(usb, 1), devpriv->dux_commands, SIZEOFDUXBUFFER, &nsent, BULK_TIMEOUT); } static int receive_dux_commands(struct comedi_device *dev, unsigned int command) { struct usb_device *usb = comedi_to_usb_dev(dev); struct usbdux_private *devpriv = dev->private; int ret; int nrec; int i; for (i = 0; i < RETRIES; i++) { ret = usb_bulk_msg(usb, usb_rcvbulkpipe(usb, 8), devpriv->insn_buf, SIZEINSNBUF, &nrec, BULK_TIMEOUT); if (ret < 0) return ret; if (le16_to_cpu(devpriv->insn_buf[0]) == command) return ret; } /* command not received */ return -EFAULT; } static int usbdux_ai_inttrig(struct comedi_device *dev, struct comedi_subdevice *s, unsigned int trig_num) { struct usbdux_private *devpriv = dev->private; struct comedi_cmd *cmd = &s->async->cmd; int ret; if (trig_num != cmd->start_arg) return -EINVAL; mutex_lock(&devpriv->mut); if (!devpriv->ai_cmd_running) { devpriv->ai_cmd_running = 1; ret = usbdux_submit_urbs(dev, devpriv->ai_urbs, devpriv->n_ai_urbs, 1); if (ret < 0) { devpriv->ai_cmd_running = 0; goto ai_trig_exit; } s->async->inttrig = NULL; } else { ret = -EBUSY; } ai_trig_exit: mutex_unlock(&devpriv->mut); return ret; } static int usbdux_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s) { struct usbdux_private *devpriv = dev->private; struct comedi_cmd *cmd = &s->async->cmd; int len = cmd->chanlist_len; int ret = -EBUSY; int i; /* block other CPUs from starting an ai_cmd */ mutex_lock(&devpriv->mut); if (devpriv->ai_cmd_running) goto ai_cmd_exit; devpriv->dux_commands[1] = len; for (i = 0; i < len; ++i) { unsigned int chan = CR_CHAN(cmd->chanlist[i]); unsigned int range = CR_RANGE(cmd->chanlist[i]); devpriv->dux_commands[i + 2] = create_adc_command(chan, range); } ret = send_dux_commands(dev, USBDUX_CMD_MULT_AI); if (ret < 0) goto ai_cmd_exit; if (devpriv->high_speed) { /* * every channel gets a time window of 125us. Thus, if we * sample all 8 channels we need 1ms. 
If we sample only one * channel we need only 125us */ devpriv->ai_interval = 1; /* find a power of 2 for the interval */ while (devpriv->ai_interval < len) devpriv->ai_interval *= 2; devpriv->ai_timer = cmd->scan_begin_arg / (125000 * devpriv->ai_interval); } else { /* interval always 1ms */ devpriv->ai_interval = 1; devpriv->ai_timer = cmd->scan_begin_arg / 1000000; } if (devpriv->ai_timer < 1) { ret = -EINVAL; goto ai_cmd_exit; } devpriv->ai_counter = devpriv->ai_timer; if (cmd->start_src == TRIG_NOW) { /* enable this acquisition operation */ devpriv->ai_cmd_running = 1; ret = usbdux_submit_urbs(dev, devpriv->ai_urbs, devpriv->n_ai_urbs, 1); if (ret < 0) { devpriv->ai_cmd_running = 0; /* fixme: unlink here?? */ goto ai_cmd_exit; } s->async->inttrig = NULL; } else { /* TRIG_INT */ /* don't enable the acquision operation */ /* wait for an internal signal */ s->async->inttrig = usbdux_ai_inttrig; } ai_cmd_exit: mutex_unlock(&devpriv->mut); return ret; } /* Mode 0 is used to get a single conversion on demand */ static int usbdux_ai_insn_read(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { struct usbdux_private *devpriv = dev->private; unsigned int chan = CR_CHAN(insn->chanspec); unsigned int range = CR_RANGE(insn->chanspec); unsigned int val; int ret = -EBUSY; int i; mutex_lock(&devpriv->mut); if (devpriv->ai_cmd_running) goto ai_read_exit; /* set command for the first channel */ devpriv->dux_commands[1] = create_adc_command(chan, range); /* adc commands */ ret = send_dux_commands(dev, USBDUX_CMD_SINGLE_AI); if (ret < 0) goto ai_read_exit; for (i = 0; i < insn->n; i++) { ret = receive_dux_commands(dev, USBDUX_CMD_SINGLE_AI); if (ret < 0) goto ai_read_exit; val = le16_to_cpu(devpriv->insn_buf[1]); /* bipolar data is two's-complement */ if (comedi_range_is_bipolar(s, range)) val = comedi_offset_munge(s, val); data[i] = val; } ai_read_exit: mutex_unlock(&devpriv->mut); return ret ? ret : insn->n; } static int usbdux_ao_insn_read(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { struct usbdux_private *devpriv = dev->private; int ret; mutex_lock(&devpriv->mut); ret = comedi_readback_insn_read(dev, s, insn, data); mutex_unlock(&devpriv->mut); return ret; } static int usbdux_ao_insn_write(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { struct usbdux_private *devpriv = dev->private; unsigned int chan = CR_CHAN(insn->chanspec); __le16 *p = (__le16 *)&devpriv->dux_commands[2]; int ret = -EBUSY; int i; mutex_lock(&devpriv->mut); if (devpriv->ao_cmd_running) goto ao_write_exit; /* number of channels: 1 */ devpriv->dux_commands[1] = 1; /* channel number */ devpriv->dux_commands[4] = chan << 6; for (i = 0; i < insn->n; i++) { unsigned int val = data[i]; /* one 16 bit value */ *p = cpu_to_le16(val); ret = send_dux_commands(dev, USBDUX_CMD_AO); if (ret < 0) goto ao_write_exit; s->readback[chan] = val; } ao_write_exit: mutex_unlock(&devpriv->mut); return ret ? 
ret : insn->n; } static int usbdux_ao_inttrig(struct comedi_device *dev, struct comedi_subdevice *s, unsigned int trig_num) { struct usbdux_private *devpriv = dev->private; struct comedi_cmd *cmd = &s->async->cmd; int ret; if (trig_num != cmd->start_arg) return -EINVAL; mutex_lock(&devpriv->mut); if (!devpriv->ao_cmd_running) { devpriv->ao_cmd_running = 1; ret = usbdux_submit_urbs(dev, devpriv->ao_urbs, devpriv->n_ao_urbs, 0); if (ret < 0) { devpriv->ao_cmd_running = 0; goto ao_trig_exit; } s->async->inttrig = NULL; } else { ret = -EBUSY; } ao_trig_exit: mutex_unlock(&devpriv->mut); return ret; } static int usbdux_ao_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_cmd *cmd) { int err = 0; unsigned int flags; /* Step 1 : check if triggers are trivially valid */ err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW | TRIG_INT); if (0) { /* (devpriv->high_speed) */ /* the sampling rate is set by the coversion rate */ flags = TRIG_FOLLOW; } else { /* start a new scan (output at once) with a timer */ flags = TRIG_TIMER; } err |= comedi_check_trigger_src(&cmd->scan_begin_src, flags); if (0) { /* (devpriv->high_speed) */ /* * in usb-2.0 only one conversion it transmitted * but with 8kHz/n */ flags = TRIG_TIMER; } else { /* * all conversion events happen simultaneously with * a rate of 1kHz/n */ flags = TRIG_NOW; } err |= comedi_check_trigger_src(&cmd->convert_src, flags); err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT); err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE); if (err) return 1; /* Step 2a : make sure trigger sources are unique */ err |= comedi_check_trigger_is_unique(cmd->start_src); err |= comedi_check_trigger_is_unique(cmd->stop_src); /* Step 2b : and mutually compatible */ if (err) return 2; /* Step 3: check if arguments are trivially valid */ err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0); if (cmd->scan_begin_src == TRIG_FOLLOW) /* internal trigger */ err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, 0); if (cmd->scan_begin_src == TRIG_TIMER) { err |= comedi_check_trigger_arg_min(&cmd->scan_begin_arg, 1000000); } /* not used now, is for later use */ if (cmd->convert_src == TRIG_TIMER) err |= comedi_check_trigger_arg_min(&cmd->convert_arg, 125000); err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg, cmd->chanlist_len); if (cmd->stop_src == TRIG_COUNT) err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1); else /* TRIG_NONE */ err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0); if (err) return 3; return 0; } static int usbdux_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s) { struct usbdux_private *devpriv = dev->private; struct comedi_cmd *cmd = &s->async->cmd; int ret = -EBUSY; mutex_lock(&devpriv->mut); if (devpriv->ao_cmd_running) goto ao_cmd_exit; /* we count in steps of 1ms (125us) */ /* 125us mode not used yet */ if (0) { /* (devpriv->high_speed) */ /* 125us */ /* timing of the conversion itself: every 125 us */ devpriv->ao_timer = cmd->convert_arg / 125000; } else { /* 1ms */ /* timing of the scan: we get all channels at once */ devpriv->ao_timer = cmd->scan_begin_arg / 1000000; if (devpriv->ao_timer < 1) { ret = -EINVAL; goto ao_cmd_exit; } } devpriv->ao_counter = devpriv->ao_timer; if (cmd->start_src == TRIG_NOW) { /* enable this acquisition operation */ devpriv->ao_cmd_running = 1; ret = usbdux_submit_urbs(dev, devpriv->ao_urbs, devpriv->n_ao_urbs, 0); if (ret < 0) { devpriv->ao_cmd_running = 0; /* fixme: unlink here?? 
*/ goto ao_cmd_exit; } s->async->inttrig = NULL; } else { /* TRIG_INT */ /* submit the urbs later */ /* wait for an internal signal */ s->async->inttrig = usbdux_ao_inttrig; } ao_cmd_exit: mutex_unlock(&devpriv->mut); return ret; } static int usbdux_dio_insn_config(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int ret; ret = comedi_dio_insn_config(dev, s, insn, data, 0); if (ret) return ret; /* * We don't tell the firmware here as it would take 8 frames * to submit the information. We do it in the insn_bits. */ return insn->n; } static int usbdux_dio_insn_bits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { struct usbdux_private *devpriv = dev->private; int ret; mutex_lock(&devpriv->mut); comedi_dio_update_state(s, data); /* Always update the hardware. See the (*insn_config). */ devpriv->dux_commands[1] = s->io_bits; devpriv->dux_commands[2] = s->state; /* * This command also tells the firmware to return * the digital input lines. */ ret = send_dux_commands(dev, USBDUX_CMD_DIO_BITS); if (ret < 0) goto dio_exit; ret = receive_dux_commands(dev, USBDUX_CMD_DIO_BITS); if (ret < 0) goto dio_exit; data[1] = le16_to_cpu(devpriv->insn_buf[1]); dio_exit: mutex_unlock(&devpriv->mut); return ret ? ret : insn->n; } static int usbdux_counter_read(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { struct usbdux_private *devpriv = dev->private; unsigned int chan = CR_CHAN(insn->chanspec); int ret = 0; int i; mutex_lock(&devpriv->mut); for (i = 0; i < insn->n; i++) { ret = send_dux_commands(dev, USBDUX_CMD_TIMER_RD); if (ret < 0) goto counter_read_exit; ret = receive_dux_commands(dev, USBDUX_CMD_TIMER_RD); if (ret < 0) goto counter_read_exit; data[i] = le16_to_cpu(devpriv->insn_buf[chan + 1]); } counter_read_exit: mutex_unlock(&devpriv->mut); return ret ? ret : insn->n; } static int usbdux_counter_write(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { struct usbdux_private *devpriv = dev->private; unsigned int chan = CR_CHAN(insn->chanspec); __le16 *p = (__le16 *)&devpriv->dux_commands[2]; int ret = 0; int i; mutex_lock(&devpriv->mut); devpriv->dux_commands[1] = chan; for (i = 0; i < insn->n; i++) { *p = cpu_to_le16(data[i]); ret = send_dux_commands(dev, USBDUX_CMD_TIMER_WR); if (ret < 0) break; } mutex_unlock(&devpriv->mut); return ret ? 
ret : insn->n; } static int usbdux_counter_config(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { /* nothing to do so far */ return 2; } static void usbduxsub_unlink_pwm_urbs(struct comedi_device *dev) { struct usbdux_private *devpriv = dev->private; usb_kill_urb(devpriv->pwm_urb); } static void usbdux_pwm_stop(struct comedi_device *dev, int do_unlink) { struct usbdux_private *devpriv = dev->private; if (do_unlink) usbduxsub_unlink_pwm_urbs(dev); devpriv->pwm_cmd_running = 0; } static int usbdux_pwm_cancel(struct comedi_device *dev, struct comedi_subdevice *s) { struct usbdux_private *devpriv = dev->private; int ret; mutex_lock(&devpriv->mut); /* unlink only if it is really running */ usbdux_pwm_stop(dev, devpriv->pwm_cmd_running); ret = send_dux_commands(dev, USBDUX_CMD_PWM_OFF); mutex_unlock(&devpriv->mut); return ret; } static void usbduxsub_pwm_irq(struct urb *urb) { struct comedi_device *dev = urb->context; struct usbdux_private *devpriv = dev->private; int ret; switch (urb->status) { case 0: /* success */ break; case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: case -ECONNABORTED: /* * after an unlink command, unplug, ... etc * no unlink needed here. Already shutting down. */ if (devpriv->pwm_cmd_running) usbdux_pwm_stop(dev, 0); return; default: /* a real error */ if (devpriv->pwm_cmd_running) { dev_err(dev->class_dev, "Non-zero urb status received in pwm intr context: %d\n", urb->status); usbdux_pwm_stop(dev, 0); } return; } /* are we actually running? */ if (!devpriv->pwm_cmd_running) return; urb->transfer_buffer_length = devpriv->pwm_buf_sz; urb->dev = comedi_to_usb_dev(dev); urb->status = 0; if (devpriv->pwm_cmd_running) { ret = usb_submit_urb(urb, GFP_ATOMIC); if (ret < 0) { dev_err(dev->class_dev, "pwm urb resubm failed in int-cont. ret=%d", ret); if (ret == -EL2NSYNC) dev_err(dev->class_dev, "buggy USB host controller or bug in IRQ handling!\n"); /* don't do an unlink here */ usbdux_pwm_stop(dev, 0); } } } static int usbduxsub_submit_pwm_urbs(struct comedi_device *dev) { struct usb_device *usb = comedi_to_usb_dev(dev); struct usbdux_private *devpriv = dev->private; struct urb *urb = devpriv->pwm_urb; /* in case of a resubmission after an unlink... 
*/ usb_fill_bulk_urb(urb, usb, usb_sndbulkpipe(usb, 4), urb->transfer_buffer, devpriv->pwm_buf_sz, usbduxsub_pwm_irq, dev); return usb_submit_urb(urb, GFP_ATOMIC); } static int usbdux_pwm_period(struct comedi_device *dev, struct comedi_subdevice *s, unsigned int period) { struct usbdux_private *devpriv = dev->private; int fx2delay; if (period < MIN_PWM_PERIOD) return -EAGAIN; fx2delay = (period / (6 * 512 * 1000 / 33)) - 6; if (fx2delay > 255) return -EAGAIN; devpriv->pwm_delay = fx2delay; devpriv->pwm_period = period; return 0; } static int usbdux_pwm_start(struct comedi_device *dev, struct comedi_subdevice *s) { struct usbdux_private *devpriv = dev->private; int ret = 0; mutex_lock(&devpriv->mut); if (devpriv->pwm_cmd_running) goto pwm_start_exit; devpriv->dux_commands[1] = devpriv->pwm_delay; ret = send_dux_commands(dev, USBDUX_CMD_PWM_ON); if (ret < 0) goto pwm_start_exit; /* initialise the buffer */ memset(devpriv->pwm_urb->transfer_buffer, 0, devpriv->pwm_buf_sz); devpriv->pwm_cmd_running = 1; ret = usbduxsub_submit_pwm_urbs(dev); if (ret < 0) devpriv->pwm_cmd_running = 0; pwm_start_exit: mutex_unlock(&devpriv->mut); return ret; } static void usbdux_pwm_pattern(struct comedi_device *dev, struct comedi_subdevice *s, unsigned int chan, unsigned int value, unsigned int sign) { struct usbdux_private *devpriv = dev->private; char pwm_mask = (1 << chan); /* DIO bit for the PWM data */ char sgn_mask = (16 << chan); /* DIO bit for the sign */ char *buf = (char *)(devpriv->pwm_urb->transfer_buffer); int szbuf = devpriv->pwm_buf_sz; int i; for (i = 0; i < szbuf; i++) { char c = *buf; c &= ~pwm_mask; if (i < value) c |= pwm_mask; if (!sign) c &= ~sgn_mask; else c |= sgn_mask; *buf++ = c; } } static int usbdux_pwm_write(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { unsigned int chan = CR_CHAN(insn->chanspec); /* * It doesn't make sense to support more than one value here * because it would just overwrite the PWM buffer. */ if (insn->n != 1) return -EINVAL; /* * The sign is set via a special INSN only, this gives us 8 bits * for normal operation, sign is 0 by default. 
*/ usbdux_pwm_pattern(dev, s, chan, data[0], 0); return insn->n; } static int usbdux_pwm_config(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { struct usbdux_private *devpriv = dev->private; unsigned int chan = CR_CHAN(insn->chanspec); switch (data[0]) { case INSN_CONFIG_ARM: /* * if not zero the PWM is limited to a certain time which is * not supported here */ if (data[1] != 0) return -EINVAL; return usbdux_pwm_start(dev, s); case INSN_CONFIG_DISARM: return usbdux_pwm_cancel(dev, s); case INSN_CONFIG_GET_PWM_STATUS: data[1] = devpriv->pwm_cmd_running; return 0; case INSN_CONFIG_PWM_SET_PERIOD: return usbdux_pwm_period(dev, s, data[1]); case INSN_CONFIG_PWM_GET_PERIOD: data[1] = devpriv->pwm_period; return 0; case INSN_CONFIG_PWM_SET_H_BRIDGE: /* * data[1] = value * data[2] = sign (for a relay) */ usbdux_pwm_pattern(dev, s, chan, data[1], (data[2] != 0)); return 0; case INSN_CONFIG_PWM_GET_H_BRIDGE: /* values are not kept in this driver, nothing to return here */ return -EINVAL; } return -EINVAL; } static int usbdux_firmware_upload(struct comedi_device *dev, const u8 *data, size_t size, unsigned long context) { struct usb_device *usb = comedi_to_usb_dev(dev); u8 *buf; u8 *tmp; int ret; if (!data) return 0; if (size > USBDUX_FIRMWARE_MAX_LEN) { dev_err(dev->class_dev, "usbdux firmware binary it too large for FX2.\n"); return -ENOMEM; } /* we generate a local buffer for the firmware */ buf = kmemdup(data, size, GFP_KERNEL); if (!buf) return -ENOMEM; /* we need a malloc'ed buffer for usb_control_msg() */ tmp = kmalloc(1, GFP_KERNEL); if (!tmp) { kfree(buf); return -ENOMEM; } /* stop the current firmware on the device */ *tmp = 1; /* 7f92 to one */ ret = usb_control_msg(usb, usb_sndctrlpipe(usb, 0), USBDUX_FIRMWARE_CMD, VENDOR_DIR_OUT, USBDUX_CPU_CS, 0x0000, tmp, 1, BULK_TIMEOUT); if (ret < 0) { dev_err(dev->class_dev, "can not stop firmware\n"); goto done; } /* upload the new firmware to the device */ ret = usb_control_msg(usb, usb_sndctrlpipe(usb, 0), USBDUX_FIRMWARE_CMD, VENDOR_DIR_OUT, 0, 0x0000, buf, size, BULK_TIMEOUT); if (ret < 0) { dev_err(dev->class_dev, "firmware upload failed\n"); goto done; } /* start the new firmware on the device */ *tmp = 0; /* 7f92 to zero */ ret = usb_control_msg(usb, usb_sndctrlpipe(usb, 0), USBDUX_FIRMWARE_CMD, VENDOR_DIR_OUT, USBDUX_CPU_CS, 0x0000, tmp, 1, BULK_TIMEOUT); if (ret < 0) dev_err(dev->class_dev, "can not start firmware\n"); done: kfree(tmp); kfree(buf); return ret; } static int usbdux_alloc_usb_buffers(struct comedi_device *dev) { struct usb_device *usb = comedi_to_usb_dev(dev); struct usbdux_private *devpriv = dev->private; struct urb *urb; int i; devpriv->dux_commands = kzalloc(SIZEOFDUXBUFFER, GFP_KERNEL); devpriv->in_buf = kzalloc(SIZEINBUF, GFP_KERNEL); devpriv->insn_buf = kzalloc(SIZEINSNBUF, GFP_KERNEL); devpriv->ai_urbs = kcalloc(devpriv->n_ai_urbs, sizeof(void *), GFP_KERNEL); devpriv->ao_urbs = kcalloc(devpriv->n_ao_urbs, sizeof(void *), GFP_KERNEL); if (!devpriv->dux_commands || !devpriv->in_buf || !devpriv->insn_buf || !devpriv->ai_urbs || !devpriv->ao_urbs) return -ENOMEM; for (i = 0; i < devpriv->n_ai_urbs; i++) { /* one frame: 1ms */ urb = usb_alloc_urb(1, GFP_KERNEL); if (!urb) return -ENOMEM; devpriv->ai_urbs[i] = urb; urb->dev = usb; urb->context = dev; urb->pipe = usb_rcvisocpipe(usb, 6); urb->transfer_flags = URB_ISO_ASAP; urb->transfer_buffer = kzalloc(SIZEINBUF, GFP_KERNEL); if (!urb->transfer_buffer) return -ENOMEM; urb->complete = usbduxsub_ai_isoc_irq; 
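/* Descriptive note: each analog-input URB set up below carries a single isochronous packet whose frame descriptor spans the whole SIZEINBUF transfer buffer (one 1 ms frame of samples per completion), so the AI completion handler assigned above can keep resubmitting it while a command runs. */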
urb->number_of_packets = 1; urb->transfer_buffer_length = SIZEINBUF; urb->iso_frame_desc[0].offset = 0; urb->iso_frame_desc[0].length = SIZEINBUF; } for (i = 0; i < devpriv->n_ao_urbs; i++) { /* one frame: 1ms */ urb = usb_alloc_urb(1, GFP_KERNEL); if (!urb) return -ENOMEM; devpriv->ao_urbs[i] = urb; urb->dev = usb; urb->context = dev; urb->pipe = usb_sndisocpipe(usb, 2); urb->transfer_flags = URB_ISO_ASAP; urb->transfer_buffer = kzalloc(SIZEOUTBUF, GFP_KERNEL); if (!urb->transfer_buffer) return -ENOMEM; urb->complete = usbduxsub_ao_isoc_irq; urb->number_of_packets = 1; urb->transfer_buffer_length = SIZEOUTBUF; urb->iso_frame_desc[0].offset = 0; urb->iso_frame_desc[0].length = SIZEOUTBUF; if (devpriv->high_speed) urb->interval = 8; /* uframes */ else urb->interval = 1; /* frames */ } /* pwm */ if (devpriv->pwm_buf_sz) { urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) return -ENOMEM; devpriv->pwm_urb = urb; /* max bulk ep size in high speed */ urb->transfer_buffer = kzalloc(devpriv->pwm_buf_sz, GFP_KERNEL); if (!urb->transfer_buffer) return -ENOMEM; } return 0; } static void usbdux_free_usb_buffers(struct comedi_device *dev) { struct usbdux_private *devpriv = dev->private; struct urb *urb; int i; urb = devpriv->pwm_urb; if (urb) { kfree(urb->transfer_buffer); usb_free_urb(urb); } if (devpriv->ao_urbs) { for (i = 0; i < devpriv->n_ao_urbs; i++) { urb = devpriv->ao_urbs[i]; if (urb) { kfree(urb->transfer_buffer); usb_free_urb(urb); } } kfree(devpriv->ao_urbs); } if (devpriv->ai_urbs) { for (i = 0; i < devpriv->n_ai_urbs; i++) { urb = devpriv->ai_urbs[i]; if (urb) { kfree(urb->transfer_buffer); usb_free_urb(urb); } } kfree(devpriv->ai_urbs); } kfree(devpriv->insn_buf); kfree(devpriv->in_buf); kfree(devpriv->dux_commands); } static int usbdux_auto_attach(struct comedi_device *dev, unsigned long context_unused) { struct usb_interface *intf = comedi_to_usb_interface(dev); struct usb_device *usb = comedi_to_usb_dev(dev); struct usbdux_private *devpriv; struct comedi_subdevice *s; int ret; devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv)); if (!devpriv) return -ENOMEM; mutex_init(&devpriv->mut); usb_set_intfdata(intf, devpriv); devpriv->high_speed = (usb->speed == USB_SPEED_HIGH); if (devpriv->high_speed) { devpriv->n_ai_urbs = NUMOFINBUFFERSHIGH; devpriv->n_ao_urbs = NUMOFOUTBUFFERSHIGH; devpriv->pwm_buf_sz = 512; } else { devpriv->n_ai_urbs = NUMOFINBUFFERSFULL; devpriv->n_ao_urbs = NUMOFOUTBUFFERSFULL; } ret = usbdux_alloc_usb_buffers(dev); if (ret) return ret; /* setting to alternate setting 3: enabling iso ep and bulk ep. */ ret = usb_set_interface(usb, intf->altsetting->desc.bInterfaceNumber, 3); if (ret < 0) { dev_err(dev->class_dev, "could not set alternate setting 3 in high speed\n"); return ret; } ret = comedi_load_firmware(dev, &usb->dev, USBDUX_FIRMWARE, usbdux_firmware_upload, 0); if (ret < 0) return ret; ret = comedi_alloc_subdevices(dev, (devpriv->high_speed) ? 
5 : 4); if (ret) return ret; /* Analog Input subdevice */ s = &dev->subdevices[0]; dev->read_subdev = s; s->type = COMEDI_SUBD_AI; s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_CMD_READ; s->n_chan = 8; s->maxdata = 0x0fff; s->len_chanlist = 8; s->range_table = &range_usbdux_ai_range; s->insn_read = usbdux_ai_insn_read; s->do_cmdtest = usbdux_ai_cmdtest; s->do_cmd = usbdux_ai_cmd; s->cancel = usbdux_ai_cancel; /* Analog Output subdevice */ s = &dev->subdevices[1]; dev->write_subdev = s; s->type = COMEDI_SUBD_AO; s->subdev_flags = SDF_WRITABLE | SDF_GROUND | SDF_CMD_WRITE; s->n_chan = 4; s->maxdata = 0x0fff; s->len_chanlist = s->n_chan; s->range_table = &range_usbdux_ao_range; s->do_cmdtest = usbdux_ao_cmdtest; s->do_cmd = usbdux_ao_cmd; s->cancel = usbdux_ao_cancel; s->insn_read = usbdux_ao_insn_read; s->insn_write = usbdux_ao_insn_write; ret = comedi_alloc_subdev_readback(s); if (ret) return ret; /* Digital I/O subdevice */ s = &dev->subdevices[2]; s->type = COMEDI_SUBD_DIO; s->subdev_flags = SDF_READABLE | SDF_WRITABLE; s->n_chan = 8; s->maxdata = 1; s->range_table = &range_digital; s->insn_bits = usbdux_dio_insn_bits; s->insn_config = usbdux_dio_insn_config; /* Counter subdevice */ s = &dev->subdevices[3]; s->type = COMEDI_SUBD_COUNTER; s->subdev_flags = SDF_WRITABLE | SDF_READABLE; s->n_chan = 4; s->maxdata = 0xffff; s->insn_read = usbdux_counter_read; s->insn_write = usbdux_counter_write; s->insn_config = usbdux_counter_config; if (devpriv->high_speed) { /* PWM subdevice */ s = &dev->subdevices[4]; s->type = COMEDI_SUBD_PWM; s->subdev_flags = SDF_WRITABLE | SDF_PWM_HBRIDGE; s->n_chan = 8; s->maxdata = devpriv->pwm_buf_sz; s->insn_write = usbdux_pwm_write; s->insn_config = usbdux_pwm_config; usbdux_pwm_period(dev, s, PWM_DEFAULT_PERIOD); } return 0; } static void usbdux_detach(struct comedi_device *dev) { struct usb_interface *intf = comedi_to_usb_interface(dev); struct usbdux_private *devpriv = dev->private; usb_set_intfdata(intf, NULL); if (!devpriv) return; mutex_lock(&devpriv->mut); /* force unlink all urbs */ usbdux_pwm_stop(dev, 1); usbdux_ao_stop(dev, 1); usbdux_ai_stop(dev, 1); usbdux_free_usb_buffers(dev); mutex_unlock(&devpriv->mut); mutex_destroy(&devpriv->mut); } static struct comedi_driver usbdux_driver = { .driver_name = "usbdux", .module = THIS_MODULE, .auto_attach = usbdux_auto_attach, .detach = usbdux_detach, }; static int usbdux_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) { return comedi_usb_auto_config(intf, &usbdux_driver, 0); } static const struct usb_device_id usbdux_usb_table[] = { { USB_DEVICE(0x13d8, 0x0001) }, { USB_DEVICE(0x13d8, 0x0002) }, { } }; MODULE_DEVICE_TABLE(usb, usbdux_usb_table); static struct usb_driver usbdux_usb_driver = { .name = "usbdux", .probe = usbdux_usb_probe, .disconnect = comedi_usb_auto_unconfig, .id_table = usbdux_usb_table, }; module_comedi_usb_driver(usbdux_driver, usbdux_usb_driver); MODULE_AUTHOR("Bernd Porr, BerndPorr@f2s.com"); MODULE_DESCRIPTION("Stirling/ITL USB-DUX -- Bernd.Porr@f2s.com"); MODULE_LICENSE("GPL"); MODULE_FIRMWARE(USBDUX_FIRMWARE);
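/*
 * Editor's illustration, not part of usbdux.c: the standalone user-space
 * sketch below mirrors the divisor arithmetic used by usbdux_ai_cmd() above.
 * scan_begin_arg is in nanoseconds; a high-speed microframe is 125 us, a
 * full-speed frame is 1 ms, and the iso interval is rounded up to a power of
 * two so that each channel gets one microframe. The helper name ai_divisors()
 * is invented for this example only.
 */
#include <stdio.h>

/* Mirror of the high-speed/full-speed timer math in usbdux_ai_cmd(). */
static unsigned int ai_divisors(unsigned int scan_begin_ns,
				unsigned int chanlist_len,
				int high_speed,
				unsigned int *interval)
{
	*interval = 1;
	if (high_speed) {
		/* next power of two >= number of channels */
		while (*interval < chanlist_len)
			*interval *= 2;
		/* URB completions per scan: 125 us per microframe */
		return scan_begin_ns / (125000 * *interval);
	}
	/* full speed: one 1 ms frame per URB completion */
	return scan_begin_ns / 1000000;
}

int main(void)
{
	unsigned int interval;
	/* 8 channels, one scan per millisecond, on a high-speed bus */
	unsigned int timer = ai_divisors(1000000, 8, 1, &interval);

	/* prints "interval=8 timer=1": 8 microframes of 125 us make up 1 ms */
	printf("interval=%u timer=%u\n", interval, timer);
	return 0;
}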
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2006 Jiri Benc <jbenc@suse.cz> * Copyright 2007 Johannes Berg <johannes@sipsolutions.net> * Copyright (C) 2020-2023 Intel Corporation */ #include <linux/kernel.h> #include <linux/device.h> #include <linux/if.h> #include <linux/if_ether.h> #include <linux/interrupt.h> #include <linux/netdevice.h> #include <linux/rtnetlink.h> #include <linux/slab.h> #include <linux/notifier.h> #include <net/mac80211.h> #include <net/cfg80211.h> #include "ieee80211_i.h" #include "rate.h" #include "debugfs.h" #include "debugfs_netdev.h" #include "driver-ops.h" struct ieee80211_if_read_sdata_data { ssize_t (*format)(const struct ieee80211_sub_if_data *, char *, int); struct ieee80211_sub_if_data *sdata; }; static ssize_t ieee80211_if_read_sdata_handler(struct wiphy *wiphy, struct file *file, char *buf, size_t bufsize, void *data) { struct ieee80211_if_read_sdata_data *d = data; return d->format(d->sdata, buf, bufsize); } static ssize_t ieee80211_if_read_sdata( struct file *file, char __user *userbuf, size_t count, loff_t *ppos, ssize_t (*format)(const struct ieee80211_sub_if_data *sdata, char *, int)) { struct ieee80211_sub_if_data *sdata = file->private_data; struct ieee80211_if_read_sdata_data data = { .format = format, .sdata = sdata, }; char buf[200]; return wiphy_locked_debugfs_read(sdata->local->hw.wiphy, file, buf, sizeof(buf), userbuf, count, ppos, ieee80211_if_read_sdata_handler, &data); } struct ieee80211_if_write_sdata_data { ssize_t (*write)(struct ieee80211_sub_if_data *, const char *, int); struct ieee80211_sub_if_data *sdata; }; static ssize_t ieee80211_if_write_sdata_handler(struct wiphy *wiphy, struct file *file, char *buf, size_t count, void *data) { struct ieee80211_if_write_sdata_data *d = data; return d->write(d->sdata, buf, count); } static ssize_t ieee80211_if_write_sdata( struct file *file, const char __user *userbuf, size_t count, loff_t *ppos, ssize_t (*write)(struct ieee80211_sub_if_data *sdata, const char *, int)) { struct ieee80211_sub_if_data *sdata = file->private_data; struct ieee80211_if_write_sdata_data data = { .write = write, .sdata = sdata, }; char buf[64]; return wiphy_locked_debugfs_write(sdata->local->hw.wiphy, file, buf, sizeof(buf), userbuf, count, ieee80211_if_write_sdata_handler, &data); } struct ieee80211_if_read_link_data { ssize_t (*format)(const struct ieee80211_link_data *, char *, int); struct ieee80211_link_data *link; }; static ssize_t ieee80211_if_read_link_handler(struct wiphy *wiphy, struct file *file, char *buf, size_t bufsize, void *data) { struct
ieee80211_if_read_link_data *d = data; return d->format(d->link, buf, bufsize); } static ssize_t ieee80211_if_read_link( struct file *file, char __user *userbuf, size_t count, loff_t *ppos, ssize_t (*format)(const struct ieee80211_link_data *link, char *, int)) { struct ieee80211_link_data *link = file->private_data; struct ieee80211_if_read_link_data data = { .format = format, .link = link, }; char buf[200]; return wiphy_locked_debugfs_read(link->sdata->local->hw.wiphy, file, buf, sizeof(buf), userbuf, count, ppos, ieee80211_if_read_link_handler, &data); } struct ieee80211_if_write_link_data { ssize_t (*write)(struct ieee80211_link_data *, const char *, int); struct ieee80211_link_data *link; }; static ssize_t ieee80211_if_write_link_handler(struct wiphy *wiphy, struct file *file, char *buf, size_t count, void *data) { struct ieee80211_if_write_link_data *d = data; return d->write(d->link, buf, count); } static ssize_t ieee80211_if_write_link( struct file *file, const char __user *userbuf, size_t count, loff_t *ppos, ssize_t (*write)(struct ieee80211_link_data *link, const char *, int)) { struct ieee80211_link_data *link = file->private_data; struct ieee80211_if_write_link_data data = { .write = write, .link = link, }; char buf[64]; return wiphy_locked_debugfs_write(link->sdata->local->hw.wiphy, file, buf, sizeof(buf), userbuf, count, ieee80211_if_write_link_handler, &data); } #define IEEE80211_IF_FMT(name, type, field, format_string) \ static ssize_t ieee80211_if_fmt_##name( \ const type *data, char *buf, \ int buflen) \ { \ return scnprintf(buf, buflen, format_string, data->field); \ } #define IEEE80211_IF_FMT_DEC(name, type, field) \ IEEE80211_IF_FMT(name, type, field, "%d\n") #define IEEE80211_IF_FMT_HEX(name, type, field) \ IEEE80211_IF_FMT(name, type, field, "%#x\n") #define IEEE80211_IF_FMT_LHEX(name, type, field) \ IEEE80211_IF_FMT(name, type, field, "%#lx\n") #define IEEE80211_IF_FMT_HEXARRAY(name, type, field) \ static ssize_t ieee80211_if_fmt_##name( \ const type *data, \ char *buf, int buflen) \ { \ char *p = buf; \ int i; \ for (i = 0; i < sizeof(data->field); i++) { \ p += scnprintf(p, buflen + buf - p, "%.2x ", \ data->field[i]); \ } \ p += scnprintf(p, buflen + buf - p, "\n"); \ return p - buf; \ } #define IEEE80211_IF_FMT_ATOMIC(name, type, field) \ static ssize_t ieee80211_if_fmt_##name( \ const type *data, \ char *buf, int buflen) \ { \ return scnprintf(buf, buflen, "%d\n", atomic_read(&data->field));\ } #define IEEE80211_IF_FMT_MAC(name, type, field) \ static ssize_t ieee80211_if_fmt_##name( \ const type *data, char *buf, \ int buflen) \ { \ return scnprintf(buf, buflen, "%pM\n", data->field); \ } #define IEEE80211_IF_FMT_JIFFIES_TO_MS(name, type, field) \ static ssize_t ieee80211_if_fmt_##name( \ const type *data, \ char *buf, int buflen) \ { \ return scnprintf(buf, buflen, "%d\n", \ jiffies_to_msecs(data->field)); \ } #define _IEEE80211_IF_FILE_OPS(name, _read, _write) \ static const struct debugfs_short_fops name##_ops = { \ .read = (_read), \ .write = (_write), \ .llseek = generic_file_llseek, \ } #define _IEEE80211_IF_FILE_R_FN(name) \ static ssize_t ieee80211_if_read_##name(struct file *file, \ char __user *userbuf, \ size_t count, loff_t *ppos) \ { \ return ieee80211_if_read_sdata(file, \ userbuf, count, ppos, \ ieee80211_if_fmt_##name); \ } #define _IEEE80211_IF_FILE_W_FN(name) \ static ssize_t ieee80211_if_write_##name(struct file *file, \ const char __user *userbuf, \ size_t count, loff_t *ppos) \ { \ return ieee80211_if_write_sdata(file, userbuf, \ count,
ppos, \ ieee80211_if_parse_##name); \ } #define IEEE80211_IF_FILE_R(name) \ _IEEE80211_IF_FILE_R_FN(name) \ _IEEE80211_IF_FILE_OPS(name, ieee80211_if_read_##name, NULL) #define IEEE80211_IF_FILE_W(name) \ _IEEE80211_IF_FILE_W_FN(name) \ _IEEE80211_IF_FILE_OPS(name, NULL, ieee80211_if_write_##name) #define IEEE80211_IF_FILE_RW(name) \ _IEEE80211_IF_FILE_R_FN(name) \ _IEEE80211_IF_FILE_W_FN(name) \ _IEEE80211_IF_FILE_OPS(name, ieee80211_if_read_##name, \ ieee80211_if_write_##name) #define IEEE80211_IF_FILE(name, field, format) \ IEEE80211_IF_FMT_##format(name, struct ieee80211_sub_if_data, field) \ IEEE80211_IF_FILE_R(name) #define _IEEE80211_IF_LINK_R_FN(name) \ static ssize_t ieee80211_if_read_##name(struct file *file, \ char __user *userbuf, \ size_t count, loff_t *ppos) \ { \ return ieee80211_if_read_link(file, \ userbuf, count, ppos, \ ieee80211_if_fmt_##name); \ } #define _IEEE80211_IF_LINK_W_FN(name) \ static ssize_t ieee80211_if_write_##name(struct file *file, \ const char __user *userbuf, \ size_t count, loff_t *ppos) \ { \ return ieee80211_if_write_link(file, userbuf, \ count, ppos, \ ieee80211_if_parse_##name); \ } #define IEEE80211_IF_LINK_FILE_R(name) \ _IEEE80211_IF_LINK_R_FN(name) \ _IEEE80211_IF_FILE_OPS(link_##name, ieee80211_if_read_##name, NULL) #define IEEE80211_IF_LINK_FILE_W(name) \ _IEEE80211_IF_LINK_W_FN(name) \ _IEEE80211_IF_FILE_OPS(link_##name, NULL, ieee80211_if_write_##name) #define IEEE80211_IF_LINK_FILE_RW(name) \ _IEEE80211_IF_LINK_R_FN(name) \ _IEEE80211_IF_LINK_W_FN(name) \ _IEEE80211_IF_FILE_OPS(link_##name, ieee80211_if_read_##name, \ ieee80211_if_write_##name) #define IEEE80211_IF_LINK_FILE(name, field, format) \ IEEE80211_IF_FMT_##format(name, struct ieee80211_link_data, field) \ IEEE80211_IF_LINK_FILE_R(name) /* common attributes */ IEEE80211_IF_FILE(rc_rateidx_mask_2ghz, rc_rateidx_mask[NL80211_BAND_2GHZ], HEX); IEEE80211_IF_FILE(rc_rateidx_mask_5ghz, rc_rateidx_mask[NL80211_BAND_5GHZ], HEX); IEEE80211_IF_FILE(rc_rateidx_mcs_mask_2ghz, rc_rateidx_mcs_mask[NL80211_BAND_2GHZ], HEXARRAY); IEEE80211_IF_FILE(rc_rateidx_mcs_mask_5ghz, rc_rateidx_mcs_mask[NL80211_BAND_5GHZ], HEXARRAY); static ssize_t ieee80211_if_fmt_rc_rateidx_vht_mcs_mask_2ghz( const struct ieee80211_sub_if_data *sdata, char *buf, int buflen) { int i, len = 0; const u16 *mask = sdata->rc_rateidx_vht_mcs_mask[NL80211_BAND_2GHZ]; for (i = 0; i < NL80211_VHT_NSS_MAX; i++) len += scnprintf(buf + len, buflen - len, "%04x ", mask[i]); len += scnprintf(buf + len, buflen - len, "\n"); return len; } IEEE80211_IF_FILE_R(rc_rateidx_vht_mcs_mask_2ghz); static ssize_t ieee80211_if_fmt_rc_rateidx_vht_mcs_mask_5ghz( const struct ieee80211_sub_if_data *sdata, char *buf, int buflen) { int i, len = 0; const u16 *mask = sdata->rc_rateidx_vht_mcs_mask[NL80211_BAND_5GHZ]; for (i = 0; i < NL80211_VHT_NSS_MAX; i++) len += scnprintf(buf + len, buflen - len, "%04x ", mask[i]); len += scnprintf(buf + len, buflen - len, "\n"); return len; } IEEE80211_IF_FILE_R(rc_rateidx_vht_mcs_mask_5ghz); IEEE80211_IF_FILE(flags, flags, HEX); IEEE80211_IF_FILE(state, state, LHEX); IEEE80211_IF_LINK_FILE(txpower, conf->txpower, DEC); IEEE80211_IF_LINK_FILE(ap_power_level, ap_power_level, DEC); IEEE80211_IF_LINK_FILE(user_power_level, user_power_level, DEC); static ssize_t ieee80211_if_fmt_hw_queues(const struct ieee80211_sub_if_data *sdata, char *buf, int buflen) { int len; len = scnprintf(buf, buflen, "AC queues: VO:%d VI:%d BE:%d BK:%d\n", sdata->vif.hw_queue[IEEE80211_AC_VO], sdata->vif.hw_queue[IEEE80211_AC_VI], 
sdata->vif.hw_queue[IEEE80211_AC_BE], sdata->vif.hw_queue[IEEE80211_AC_BK]); if (sdata->vif.type == NL80211_IFTYPE_AP) len += scnprintf(buf + len, buflen - len, "cab queue: %d\n", sdata->vif.cab_queue); return len; } IEEE80211_IF_FILE_R(hw_queues); /* STA attributes */ IEEE80211_IF_FILE(bssid, deflink.u.mgd.bssid, MAC); IEEE80211_IF_FILE(aid, vif.cfg.aid, DEC); IEEE80211_IF_FILE(beacon_timeout, u.mgd.beacon_timeout, JIFFIES_TO_MS); static int ieee80211_set_smps(struct ieee80211_link_data *link, enum ieee80211_smps_mode smps_mode) { struct ieee80211_sub_if_data *sdata = link->sdata; struct ieee80211_local *local = sdata->local; /* The driver indicated that EML is enabled for the interface, thus do * not allow to override the SMPS state. */ if (sdata->vif.driver_flags & IEEE80211_VIF_EML_ACTIVE) return -EOPNOTSUPP; if (!(local->hw.wiphy->features & NL80211_FEATURE_STATIC_SMPS) && smps_mode == IEEE80211_SMPS_STATIC) return -EINVAL; /* auto should be dynamic if in PS mode */ if (!(local->hw.wiphy->features & NL80211_FEATURE_DYNAMIC_SMPS) && (smps_mode == IEEE80211_SMPS_DYNAMIC || smps_mode == IEEE80211_SMPS_AUTOMATIC)) return -EINVAL; if (sdata->vif.type != NL80211_IFTYPE_STATION) return -EOPNOTSUPP; return __ieee80211_request_smps_mgd(link->sdata, link, smps_mode); } static const char *smps_modes[IEEE80211_SMPS_NUM_MODES] = { [IEEE80211_SMPS_AUTOMATIC] = "auto", [IEEE80211_SMPS_OFF] = "off", [IEEE80211_SMPS_STATIC] = "static", [IEEE80211_SMPS_DYNAMIC] = "dynamic", }; static ssize_t ieee80211_if_fmt_smps(const struct ieee80211_link_data *link, char *buf, int buflen) { if (link->sdata->vif.type == NL80211_IFTYPE_STATION) return snprintf(buf, buflen, "request: %s\nused: %s\n", smps_modes[link->u.mgd.req_smps], smps_modes[link->smps_mode]); return -EINVAL; } static ssize_t ieee80211_if_parse_smps(struct ieee80211_link_data *link, const char *buf, int buflen) { enum ieee80211_smps_mode mode; for (mode = 0; mode < IEEE80211_SMPS_NUM_MODES; mode++) { if (strncmp(buf, smps_modes[mode], buflen) == 0) { int err = ieee80211_set_smps(link, mode); if (!err) return buflen; return err; } } return -EINVAL; } IEEE80211_IF_LINK_FILE_RW(smps); static ssize_t ieee80211_if_parse_tkip_mic_test( struct ieee80211_sub_if_data *sdata, const char *buf, int buflen) { struct ieee80211_local *local = sdata->local; u8 addr[ETH_ALEN]; struct sk_buff *skb; struct ieee80211_hdr *hdr; __le16 fc; if (!mac_pton(buf, addr)) return -EINVAL; if (!ieee80211_sdata_running(sdata)) return -ENOTCONN; skb = dev_alloc_skb(local->hw.extra_tx_headroom + 24 + 100); if (!skb) return -ENOMEM; skb_reserve(skb, local->hw.extra_tx_headroom); hdr = skb_put_zero(skb, 24); fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA); switch (sdata->vif.type) { case NL80211_IFTYPE_AP: fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS); /* DA BSSID SA */ memcpy(hdr->addr1, addr, ETH_ALEN); memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN); memcpy(hdr->addr3, sdata->vif.addr, ETH_ALEN); break; case NL80211_IFTYPE_STATION: fc |= cpu_to_le16(IEEE80211_FCTL_TODS); /* BSSID SA DA */ if (!sdata->u.mgd.associated) { dev_kfree_skb(skb); return -ENOTCONN; } memcpy(hdr->addr1, sdata->deflink.u.mgd.bssid, ETH_ALEN); memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN); memcpy(hdr->addr3, addr, ETH_ALEN); break; default: dev_kfree_skb(skb); return -EOPNOTSUPP; } hdr->frame_control = fc; /* * Add some length to the test frame to make it look bit more valid. * The exact contents does not matter since the recipient is required * to drop this because of the Michael MIC failure. 
*/ skb_put_zero(skb, 50); IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_TKIP_MIC_FAILURE; ieee80211_tx_skb(sdata, skb); return buflen; } IEEE80211_IF_FILE_W(tkip_mic_test); static ssize_t ieee80211_if_parse_beacon_loss( struct ieee80211_sub_if_data *sdata, const char *buf, int buflen) { if (!ieee80211_sdata_running(sdata) || !sdata->vif.cfg.assoc) return -ENOTCONN; ieee80211_beacon_loss(&sdata->vif); return buflen; } IEEE80211_IF_FILE_W(beacon_loss); static ssize_t ieee80211_if_fmt_uapsd_queues( const struct ieee80211_sub_if_data *sdata, char *buf, int buflen) { const struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; return snprintf(buf, buflen, "0x%x\n", ifmgd->uapsd_queues); } static ssize_t ieee80211_if_parse_uapsd_queues( struct ieee80211_sub_if_data *sdata, const char *buf, int buflen) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; u8 val; int ret; ret = kstrtou8(buf, 0, &val); if (ret) return ret; if (val & ~IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK) return -ERANGE; ifmgd->uapsd_queues = val; return buflen; } IEEE80211_IF_FILE_RW(uapsd_queues); static ssize_t ieee80211_if_fmt_uapsd_max_sp_len( const struct ieee80211_sub_if_data *sdata, char *buf, int buflen) { const struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; return snprintf(buf, buflen, "0x%x\n", ifmgd->uapsd_max_sp_len); } static ssize_t ieee80211_if_parse_uapsd_max_sp_len( struct ieee80211_sub_if_data *sdata, const char *buf, int buflen) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; unsigned long val; int ret; ret = kstrtoul(buf, 0, &val); if (ret) return -EINVAL; if (val & ~IEEE80211_WMM_IE_STA_QOSINFO_SP_MASK) return -ERANGE; ifmgd->uapsd_max_sp_len = val; return buflen; } IEEE80211_IF_FILE_RW(uapsd_max_sp_len); static ssize_t ieee80211_if_fmt_tdls_wider_bw( const struct ieee80211_sub_if_data *sdata, char *buf, int buflen) { const struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; bool tdls_wider_bw; tdls_wider_bw = ieee80211_hw_check(&sdata->local->hw, TDLS_WIDER_BW) && !ifmgd->tdls_wider_bw_prohibited; return snprintf(buf, buflen, "%d\n", tdls_wider_bw); } static ssize_t ieee80211_if_parse_tdls_wider_bw( struct ieee80211_sub_if_data *sdata, const char *buf, int buflen) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; u8 val; int ret; ret = kstrtou8(buf, 0, &val); if (ret) return ret; ifmgd->tdls_wider_bw_prohibited = !val; return buflen; } IEEE80211_IF_FILE_RW(tdls_wider_bw); /* AP attributes */ IEEE80211_IF_FILE(num_mcast_sta, u.ap.num_mcast_sta, ATOMIC); IEEE80211_IF_FILE(num_sta_ps, u.ap.ps.num_sta_ps, ATOMIC); IEEE80211_IF_FILE(dtim_count, u.ap.ps.dtim_count, DEC); IEEE80211_IF_FILE(num_mcast_sta_vlan, u.vlan.num_mcast_sta, ATOMIC); static ssize_t ieee80211_if_fmt_num_buffered_multicast( const struct ieee80211_sub_if_data *sdata, char *buf, int buflen) { return scnprintf(buf, buflen, "%u\n", skb_queue_len(&sdata->u.ap.ps.bc_buf)); } IEEE80211_IF_FILE_R(num_buffered_multicast); static ssize_t ieee80211_if_fmt_aqm( const struct ieee80211_sub_if_data *sdata, char *buf, int buflen) { struct ieee80211_local *local = sdata->local; struct txq_info *txqi; int len; if (!sdata->vif.txq) return 0; txqi = to_txq_info(sdata->vif.txq); spin_lock_bh(&local->fq.lock); rcu_read_lock(); len = scnprintf(buf, buflen, "ac backlog-bytes backlog-packets new-flows drops marks overlimit collisions tx-bytes tx-packets\n" "%u %u %u %u %u %u %u %u %u %u\n", txqi->txq.ac, txqi->tin.backlog_bytes, txqi->tin.backlog_packets, txqi->tin.flows, txqi->cstats.drop_count, txqi->cstats.ecn_mark, txqi->tin.overlimit, 
txqi->tin.collisions, txqi->tin.tx_bytes, txqi->tin.tx_packets); rcu_read_unlock(); spin_unlock_bh(&local->fq.lock); return len; } IEEE80211_IF_FILE_R(aqm); IEEE80211_IF_FILE(multicast_to_unicast, u.ap.multicast_to_unicast, HEX); /* IBSS attributes */ static ssize_t ieee80211_if_fmt_tsf( const struct ieee80211_sub_if_data *sdata, char *buf, int buflen) { struct ieee80211_local *local = sdata->local; u64 tsf; tsf = drv_get_tsf(local, (struct ieee80211_sub_if_data *)sdata); return scnprintf(buf, buflen, "0x%016llx\n", (unsigned long long) tsf); } static ssize_t ieee80211_if_parse_tsf( struct ieee80211_sub_if_data *sdata, const char *buf, int buflen) { struct ieee80211_local *local = sdata->local; unsigned long long tsf; int ret; int tsf_is_delta = 0; if (strncmp(buf, "reset", 5) == 0) { if (local->ops->reset_tsf) { drv_reset_tsf(local, sdata); wiphy_info(local->hw.wiphy, "debugfs reset TSF\n"); } } else { if (buflen > 10 && buf[1] == '=') { if (buf[0] == '+') tsf_is_delta = 1; else if (buf[0] == '-') tsf_is_delta = -1; else return -EINVAL; buf += 2; } ret = kstrtoull(buf, 10, &tsf); if (ret < 0) return ret; if (tsf_is_delta && local->ops->offset_tsf) { drv_offset_tsf(local, sdata, tsf_is_delta * tsf); wiphy_info(local->hw.wiphy, "debugfs offset TSF by %018lld\n", tsf_is_delta * tsf); } else if (local->ops->set_tsf) { if (tsf_is_delta) tsf = drv_get_tsf(local, sdata) + tsf_is_delta * tsf; drv_set_tsf(local, sdata, tsf); wiphy_info(local->hw.wiphy, "debugfs set TSF to %#018llx\n", tsf); } } ieee80211_recalc_dtim(local, sdata); return buflen; } IEEE80211_IF_FILE_RW(tsf); static ssize_t ieee80211_if_fmt_valid_links(const struct ieee80211_sub_if_data *sdata, char *buf, int buflen) { return snprintf(buf, buflen, "0x%x\n", sdata->vif.valid_links); } IEEE80211_IF_FILE_R(valid_links); static ssize_t ieee80211_if_fmt_active_links(const struct ieee80211_sub_if_data *sdata, char *buf, int buflen) { return snprintf(buf, buflen, "0x%x\n", sdata->vif.active_links); } static ssize_t ieee80211_if_parse_active_links(struct ieee80211_sub_if_data *sdata, const char *buf, int buflen) { u16 active_links; if (kstrtou16(buf, 0, &active_links) || !active_links) return -EINVAL; return ieee80211_set_active_links(&sdata->vif, active_links) ?: buflen; } IEEE80211_IF_FILE_RW(active_links); IEEE80211_IF_LINK_FILE(addr, conf->addr, MAC); #ifdef CONFIG_MAC80211_MESH IEEE80211_IF_FILE(estab_plinks, u.mesh.estab_plinks, ATOMIC); /* Mesh stats attributes */ IEEE80211_IF_FILE(fwded_mcast, u.mesh.mshstats.fwded_mcast, DEC); IEEE80211_IF_FILE(fwded_unicast, u.mesh.mshstats.fwded_unicast, DEC); IEEE80211_IF_FILE(fwded_frames, u.mesh.mshstats.fwded_frames, DEC); IEEE80211_IF_FILE(dropped_frames_ttl, u.mesh.mshstats.dropped_frames_ttl, DEC); IEEE80211_IF_FILE(dropped_frames_no_route, u.mesh.mshstats.dropped_frames_no_route, DEC); /* Mesh parameters */ IEEE80211_IF_FILE(dot11MeshMaxRetries, u.mesh.mshcfg.dot11MeshMaxRetries, DEC); IEEE80211_IF_FILE(dot11MeshRetryTimeout, u.mesh.mshcfg.dot11MeshRetryTimeout, DEC); IEEE80211_IF_FILE(dot11MeshConfirmTimeout, u.mesh.mshcfg.dot11MeshConfirmTimeout, DEC); IEEE80211_IF_FILE(dot11MeshHoldingTimeout, u.mesh.mshcfg.dot11MeshHoldingTimeout, DEC); IEEE80211_IF_FILE(dot11MeshTTL, u.mesh.mshcfg.dot11MeshTTL, DEC); IEEE80211_IF_FILE(element_ttl, u.mesh.mshcfg.element_ttl, DEC); IEEE80211_IF_FILE(auto_open_plinks, u.mesh.mshcfg.auto_open_plinks, DEC); IEEE80211_IF_FILE(dot11MeshMaxPeerLinks, u.mesh.mshcfg.dot11MeshMaxPeerLinks, DEC); IEEE80211_IF_FILE(dot11MeshHWMPactivePathTimeout, 
u.mesh.mshcfg.dot11MeshHWMPactivePathTimeout, DEC); IEEE80211_IF_FILE(dot11MeshHWMPpreqMinInterval, u.mesh.mshcfg.dot11MeshHWMPpreqMinInterval, DEC); IEEE80211_IF_FILE(dot11MeshHWMPperrMinInterval, u.mesh.mshcfg.dot11MeshHWMPperrMinInterval, DEC); IEEE80211_IF_FILE(dot11MeshHWMPnetDiameterTraversalTime, u.mesh.mshcfg.dot11MeshHWMPnetDiameterTraversalTime, DEC); IEEE80211_IF_FILE(dot11MeshHWMPmaxPREQretries, u.mesh.mshcfg.dot11MeshHWMPmaxPREQretries, DEC); IEEE80211_IF_FILE(path_refresh_time, u.mesh.mshcfg.path_refresh_time, DEC); IEEE80211_IF_FILE(min_discovery_timeout, u.mesh.mshcfg.min_discovery_timeout, DEC); IEEE80211_IF_FILE(dot11MeshHWMPRootMode, u.mesh.mshcfg.dot11MeshHWMPRootMode, DEC); IEEE80211_IF_FILE(dot11MeshGateAnnouncementProtocol, u.mesh.mshcfg.dot11MeshGateAnnouncementProtocol, DEC); IEEE80211_IF_FILE(dot11MeshHWMPRannInterval, u.mesh.mshcfg.dot11MeshHWMPRannInterval, DEC); IEEE80211_IF_FILE(dot11MeshForwarding, u.mesh.mshcfg.dot11MeshForwarding, DEC); IEEE80211_IF_FILE(rssi_threshold, u.mesh.mshcfg.rssi_threshold, DEC); IEEE80211_IF_FILE(ht_opmode, u.mesh.mshcfg.ht_opmode, DEC); IEEE80211_IF_FILE(dot11MeshHWMPactivePathToRootTimeout, u.mesh.mshcfg.dot11MeshHWMPactivePathToRootTimeout, DEC); IEEE80211_IF_FILE(dot11MeshHWMProotInterval, u.mesh.mshcfg.dot11MeshHWMProotInterval, DEC); IEEE80211_IF_FILE(dot11MeshHWMPconfirmationInterval, u.mesh.mshcfg.dot11MeshHWMPconfirmationInterval, DEC); IEEE80211_IF_FILE(power_mode, u.mesh.mshcfg.power_mode, DEC); IEEE80211_IF_FILE(dot11MeshAwakeWindowDuration, u.mesh.mshcfg.dot11MeshAwakeWindowDuration, DEC); IEEE80211_IF_FILE(dot11MeshConnectedToMeshGate, u.mesh.mshcfg.dot11MeshConnectedToMeshGate, DEC); IEEE80211_IF_FILE(dot11MeshNolearn, u.mesh.mshcfg.dot11MeshNolearn, DEC); IEEE80211_IF_FILE(dot11MeshConnectedToAuthServer, u.mesh.mshcfg.dot11MeshConnectedToAuthServer, DEC); #endif #define DEBUGFS_ADD_MODE(name, mode) \ debugfs_create_file(#name, mode, sdata->vif.debugfs_dir, \ sdata, &name##_ops) #define DEBUGFS_ADD_X(_bits, _name, _mode) \ debugfs_create_x##_bits(#_name, _mode, sdata->vif.debugfs_dir, \ &sdata->vif._name) #define DEBUGFS_ADD_X8(_name, _mode) \ DEBUGFS_ADD_X(8, _name, _mode) #define DEBUGFS_ADD_X16(_name, _mode) \ DEBUGFS_ADD_X(16, _name, _mode) #define DEBUGFS_ADD_X32(_name, _mode) \ DEBUGFS_ADD_X(32, _name, _mode) #define DEBUGFS_ADD(name) DEBUGFS_ADD_MODE(name, 0400) static void add_common_files(struct ieee80211_sub_if_data *sdata) { DEBUGFS_ADD(rc_rateidx_mask_2ghz); DEBUGFS_ADD(rc_rateidx_mask_5ghz); DEBUGFS_ADD(rc_rateidx_mcs_mask_2ghz); DEBUGFS_ADD(rc_rateidx_mcs_mask_5ghz); DEBUGFS_ADD(rc_rateidx_vht_mcs_mask_2ghz); DEBUGFS_ADD(rc_rateidx_vht_mcs_mask_5ghz); DEBUGFS_ADD(hw_queues); if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE && sdata->vif.type != NL80211_IFTYPE_NAN) DEBUGFS_ADD(aqm); } static void add_sta_files(struct ieee80211_sub_if_data *sdata) { DEBUGFS_ADD(bssid); DEBUGFS_ADD(aid); DEBUGFS_ADD(beacon_timeout); DEBUGFS_ADD_MODE(tkip_mic_test, 0200); DEBUGFS_ADD_MODE(beacon_loss, 0200); DEBUGFS_ADD_MODE(uapsd_queues, 0600); DEBUGFS_ADD_MODE(uapsd_max_sp_len, 0600); DEBUGFS_ADD_MODE(tdls_wider_bw, 0600); DEBUGFS_ADD_MODE(valid_links, 0400); DEBUGFS_ADD_MODE(active_links, 0600); DEBUGFS_ADD_X16(dormant_links, 0400); } static void add_ap_files(struct ieee80211_sub_if_data *sdata) { DEBUGFS_ADD(num_mcast_sta); DEBUGFS_ADD(num_sta_ps); DEBUGFS_ADD(dtim_count); DEBUGFS_ADD(num_buffered_multicast); DEBUGFS_ADD_MODE(tkip_mic_test, 0200); DEBUGFS_ADD_MODE(multicast_to_unicast, 0600); } static void 
add_vlan_files(struct ieee80211_sub_if_data *sdata) { /* add num_mcast_sta_vlan using name num_mcast_sta */ debugfs_create_file("num_mcast_sta", 0400, sdata->vif.debugfs_dir, sdata, &num_mcast_sta_vlan_ops); } static void add_ibss_files(struct ieee80211_sub_if_data *sdata) { DEBUGFS_ADD_MODE(tsf, 0600); } #ifdef CONFIG_MAC80211_MESH static void add_mesh_files(struct ieee80211_sub_if_data *sdata) { DEBUGFS_ADD_MODE(tsf, 0600); DEBUGFS_ADD_MODE(estab_plinks, 0400); } static void add_mesh_stats(struct ieee80211_sub_if_data *sdata) { struct dentry *dir = debugfs_create_dir("mesh_stats", sdata->vif.debugfs_dir); #define MESHSTATS_ADD(name)\ debugfs_create_file(#name, 0400, dir, sdata, &name##_ops) MESHSTATS_ADD(fwded_mcast); MESHSTATS_ADD(fwded_unicast); MESHSTATS_ADD(fwded_frames); MESHSTATS_ADD(dropped_frames_ttl); MESHSTATS_ADD(dropped_frames_no_route); #undef MESHSTATS_ADD } static void add_mesh_config(struct ieee80211_sub_if_data *sdata) { struct dentry *dir = debugfs_create_dir("mesh_config", sdata->vif.debugfs_dir); #define MESHPARAMS_ADD(name) \ debugfs_create_file(#name, 0600, dir, sdata, &name##_ops) MESHPARAMS_ADD(dot11MeshMaxRetries); MESHPARAMS_ADD(dot11MeshRetryTimeout); MESHPARAMS_ADD(dot11MeshConfirmTimeout); MESHPARAMS_ADD(dot11MeshHoldingTimeout); MESHPARAMS_ADD(dot11MeshTTL); MESHPARAMS_ADD(element_ttl); MESHPARAMS_ADD(auto_open_plinks); MESHPARAMS_ADD(dot11MeshMaxPeerLinks); MESHPARAMS_ADD(dot11MeshHWMPactivePathTimeout); MESHPARAMS_ADD(dot11MeshHWMPpreqMinInterval); MESHPARAMS_ADD(dot11MeshHWMPperrMinInterval); MESHPARAMS_ADD(dot11MeshHWMPnetDiameterTraversalTime); MESHPARAMS_ADD(dot11MeshHWMPmaxPREQretries); MESHPARAMS_ADD(path_refresh_time); MESHPARAMS_ADD(min_discovery_timeout); MESHPARAMS_ADD(dot11MeshHWMPRootMode); MESHPARAMS_ADD(dot11MeshHWMPRannInterval); MESHPARAMS_ADD(dot11MeshForwarding); MESHPARAMS_ADD(dot11MeshGateAnnouncementProtocol); MESHPARAMS_ADD(rssi_threshold); MESHPARAMS_ADD(ht_opmode); MESHPARAMS_ADD(dot11MeshHWMPactivePathToRootTimeout); MESHPARAMS_ADD(dot11MeshHWMProotInterval); MESHPARAMS_ADD(dot11MeshHWMPconfirmationInterval); MESHPARAMS_ADD(power_mode); MESHPARAMS_ADD(dot11MeshAwakeWindowDuration); MESHPARAMS_ADD(dot11MeshConnectedToMeshGate); MESHPARAMS_ADD(dot11MeshNolearn); MESHPARAMS_ADD(dot11MeshConnectedToAuthServer); #undef MESHPARAMS_ADD } #endif static void add_files(struct ieee80211_sub_if_data *sdata) { if (!sdata->vif.debugfs_dir) return; DEBUGFS_ADD(flags); DEBUGFS_ADD(state); if (sdata->vif.type != NL80211_IFTYPE_MONITOR) add_common_files(sdata); switch (sdata->vif.type) { case NL80211_IFTYPE_MESH_POINT: #ifdef CONFIG_MAC80211_MESH add_mesh_files(sdata); add_mesh_stats(sdata); add_mesh_config(sdata); #endif break; case NL80211_IFTYPE_STATION: add_sta_files(sdata); break; case NL80211_IFTYPE_ADHOC: add_ibss_files(sdata); break; case NL80211_IFTYPE_AP: add_ap_files(sdata); break; case NL80211_IFTYPE_AP_VLAN: add_vlan_files(sdata); break; default: break; } } #undef DEBUGFS_ADD_MODE #undef DEBUGFS_ADD #define DEBUGFS_ADD_MODE(dentry, name, mode) \ debugfs_create_file(#name, mode, dentry, \ link, &link_##name##_ops) #define DEBUGFS_ADD(dentry, name) DEBUGFS_ADD_MODE(dentry, name, 0400) static void add_link_files(struct ieee80211_link_data *link, struct dentry *dentry) { DEBUGFS_ADD(dentry, txpower); DEBUGFS_ADD(dentry, user_power_level); DEBUGFS_ADD(dentry, ap_power_level); switch (link->sdata->vif.type) { case NL80211_IFTYPE_STATION: DEBUGFS_ADD_MODE(dentry, smps, 0600); break; default: break; } } static void 
ieee80211_debugfs_add_netdev(struct ieee80211_sub_if_data *sdata, bool mld_vif) { char buf[10+IFNAMSIZ]; sprintf(buf, "netdev:%s", sdata->name); sdata->vif.debugfs_dir = debugfs_create_dir(buf, sdata->local->hw.wiphy->debugfsdir); /* deflink also has this */ sdata->deflink.debugfs_dir = sdata->vif.debugfs_dir; sdata->debugfs.subdir_stations = debugfs_create_dir("stations", sdata->vif.debugfs_dir); add_files(sdata); if (!mld_vif) add_link_files(&sdata->deflink, sdata->vif.debugfs_dir); } void ieee80211_debugfs_remove_netdev(struct ieee80211_sub_if_data *sdata) { if (!sdata->vif.debugfs_dir) return; debugfs_remove_recursive(sdata->vif.debugfs_dir); sdata->vif.debugfs_dir = NULL; sdata->debugfs.subdir_stations = NULL; } void ieee80211_debugfs_rename_netdev(struct ieee80211_sub_if_data *sdata) { debugfs_change_name(sdata->vif.debugfs_dir, "netdev:%s", sdata->name); } void ieee80211_debugfs_recreate_netdev(struct ieee80211_sub_if_data *sdata, bool mld_vif) { ieee80211_debugfs_remove_netdev(sdata); ieee80211_debugfs_add_netdev(sdata, mld_vif); if (sdata->flags & IEEE80211_SDATA_IN_DRIVER) { drv_vif_add_debugfs(sdata->local, sdata); if (!mld_vif) ieee80211_link_debugfs_drv_add(&sdata->deflink); } } void ieee80211_link_debugfs_add(struct ieee80211_link_data *link) { char link_dir_name[10]; if (WARN_ON(!link->sdata->vif.debugfs_dir || link->debugfs_dir)) return; /* For now, this should not be called for non-MLO capable drivers */ if (WARN_ON(!(link->sdata->local->hw.wiphy->flags & WIPHY_FLAG_SUPPORTS_MLO))) return; snprintf(link_dir_name, sizeof(link_dir_name), "link-%d", link->link_id); link->debugfs_dir = debugfs_create_dir(link_dir_name, link->sdata->vif.debugfs_dir); DEBUGFS_ADD(link->debugfs_dir, addr); add_link_files(link, link->debugfs_dir); } void ieee80211_link_debugfs_remove(struct ieee80211_link_data *link) { if (!link->sdata->vif.debugfs_dir || !link->debugfs_dir) { link->debugfs_dir = NULL; return; } if (link->debugfs_dir == link->sdata->vif.debugfs_dir) { WARN_ON(link != &link->sdata->deflink); link->debugfs_dir = NULL; return; } debugfs_remove_recursive(link->debugfs_dir); link->debugfs_dir = NULL; } void ieee80211_link_debugfs_drv_add(struct ieee80211_link_data *link) { if (link->sdata->vif.type == NL80211_IFTYPE_MONITOR || WARN_ON(!link->debugfs_dir)) return; drv_link_add_debugfs(link->sdata->local, link->sdata, link->conf, link->debugfs_dir); } void ieee80211_link_debugfs_drv_remove(struct ieee80211_link_data *link) { if (!link || !link->debugfs_dir) return; if (WARN_ON(link->debugfs_dir == link->sdata->vif.debugfs_dir)) return; /* Recreate the directory excluding the driver data */ debugfs_remove_recursive(link->debugfs_dir); link->debugfs_dir = NULL; ieee80211_link_debugfs_add(link); }
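For readers unfamiliar with the idiom the IEEE80211_IF_FILE()/DEBUGFS_ADD() macros above generate, here is a minimal, self-contained sketch of the same read-only debugfs attribute pattern. It is not mac80211 code: struct my_dev, my_counter_read() and my_counter_fops are illustrative names; only the debugfs/libfs helpers are real kernel APIs.

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>

struct my_dev {
	struct dentry *debugfs_dir;
	u32 counter;
};

/* Read handler: format the field into a small buffer and copy it out */
static ssize_t my_counter_read(struct file *file, char __user *userbuf,
			       size_t count, loff_t *ppos)
{
	struct my_dev *mdev = file->private_data;	/* set by simple_open() */
	char buf[16];
	int len = scnprintf(buf, sizeof(buf), "%u\n", mdev->counter);

	return simple_read_from_buffer(userbuf, count, ppos, buf, len);
}

static const struct file_operations my_counter_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.read	= my_counter_read,
	.llseek	= default_llseek,
};

static void my_dev_add_debugfs(struct my_dev *mdev)
{
	mdev->debugfs_dir = debugfs_create_dir("my_dev", NULL);
	/* 0400 mirrors the read-only mode used by DEBUGFS_ADD() above */
	debugfs_create_file("counter", 0400, mdev->debugfs_dir, mdev,
			    &my_counter_fops);
}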
// SPDX-License-Identifier: GPL-2.0-only /* Copyright (c) 2021, Linaro Ltd <loic.poulain@linaro.org> */
#include <linux/bitmap.h> #include <linux/err.h> #include <linux/errno.h> #include <linux/debugfs.h> #include <linux/fs.h> #include <linux/init.h> #include <linux/idr.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/poll.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/uaccess.h> #include <linux/termios.h> #include <linux/wwan.h> #include <net/rtnetlink.h> #include <uapi/linux/wwan.h>
/* Maximum number of minors in use */ #define WWAN_MAX_MINORS (1 << MINORBITS)
static DEFINE_MUTEX(wwan_register_lock); /* WWAN device create|remove lock */ static DEFINE_IDA(minors); /* minors for WWAN port chardevs */ static DEFINE_IDA(wwan_dev_ids); /* for unique WWAN device IDs */
static const struct class wwan_class = { .name = "wwan", }; static int wwan_major; static struct dentry *wwan_debugfs_dir;
#define to_wwan_dev(d) container_of(d, struct wwan_device, dev) #define to_wwan_port(d) container_of(d, struct wwan_port, dev)
/* WWAN port flags */ #define WWAN_PORT_TX_OFF 0
/** * struct wwan_device - The structure that defines a WWAN device * * @id: WWAN device unique ID. * @dev: Underlying device. * @port_id: Current available port ID to pick.
* @ops: wwan device ops * @ops_ctxt: context to pass to ops * @debugfs_dir: WWAN device debugfs dir */ struct wwan_device { unsigned int id; struct device dev; atomic_t port_id; const struct wwan_ops *ops; void *ops_ctxt; #ifdef CONFIG_WWAN_DEBUGFS struct dentry *debugfs_dir; #endif }; /** * struct wwan_port - The structure that defines a WWAN port * @type: Port type * @start_count: Port start counter * @flags: Store port state and capabilities * @ops: Pointer to WWAN port operations * @ops_lock: Protect port ops * @dev: Underlying device * @rxq: Buffer inbound queue * @waitqueue: The waitqueue for port fops (read/write/poll) * @data_lock: Port specific data access serialization * @headroom_len: SKB reserved headroom size * @frag_len: Length to fragment packet * @at_data: AT port specific data */ struct wwan_port { enum wwan_port_type type; unsigned int start_count; unsigned long flags; const struct wwan_port_ops *ops; struct mutex ops_lock; /* Serialize ops + protect against removal */ struct device dev; struct sk_buff_head rxq; wait_queue_head_t waitqueue; struct mutex data_lock; /* Port specific data access serialization */ size_t headroom_len; size_t frag_len; union { struct { struct ktermios termios; int mdmbits; } at_data; }; }; static ssize_t index_show(struct device *dev, struct device_attribute *attr, char *buf) { struct wwan_device *wwan = to_wwan_dev(dev); return sprintf(buf, "%d\n", wwan->id); } static DEVICE_ATTR_RO(index); static struct attribute *wwan_dev_attrs[] = { &dev_attr_index.attr, NULL, }; ATTRIBUTE_GROUPS(wwan_dev); static void wwan_dev_destroy(struct device *dev) { struct wwan_device *wwandev = to_wwan_dev(dev); ida_free(&wwan_dev_ids, wwandev->id); kfree(wwandev); } static const struct device_type wwan_dev_type = { .name = "wwan_dev", .release = wwan_dev_destroy, .groups = wwan_dev_groups, }; static int wwan_dev_parent_match(struct device *dev, const void *parent) { return (dev->type == &wwan_dev_type && (dev->parent == parent || dev == parent)); } static struct wwan_device *wwan_dev_get_by_parent(struct device *parent) { struct device *dev; dev = class_find_device(&wwan_class, NULL, parent, wwan_dev_parent_match); if (!dev) return ERR_PTR(-ENODEV); return to_wwan_dev(dev); } static int wwan_dev_name_match(struct device *dev, const void *name) { return dev->type == &wwan_dev_type && strcmp(dev_name(dev), name) == 0; } static struct wwan_device *wwan_dev_get_by_name(const char *name) { struct device *dev; dev = class_find_device(&wwan_class, NULL, name, wwan_dev_name_match); if (!dev) return ERR_PTR(-ENODEV); return to_wwan_dev(dev); } #ifdef CONFIG_WWAN_DEBUGFS struct dentry *wwan_get_debugfs_dir(struct device *parent) { struct wwan_device *wwandev; wwandev = wwan_dev_get_by_parent(parent); if (IS_ERR(wwandev)) return ERR_CAST(wwandev); return wwandev->debugfs_dir; } EXPORT_SYMBOL_GPL(wwan_get_debugfs_dir); static int wwan_dev_debugfs_match(struct device *dev, const void *dir) { struct wwan_device *wwandev; if (dev->type != &wwan_dev_type) return 0; wwandev = to_wwan_dev(dev); return wwandev->debugfs_dir == dir; } static struct wwan_device *wwan_dev_get_by_debugfs(struct dentry *dir) { struct device *dev; dev = class_find_device(&wwan_class, NULL, dir, wwan_dev_debugfs_match); if (!dev) return ERR_PTR(-ENODEV); return to_wwan_dev(dev); } void wwan_put_debugfs_dir(struct dentry *dir) { struct wwan_device *wwandev = wwan_dev_get_by_debugfs(dir); if (WARN_ON(IS_ERR(wwandev))) return; /* wwan_dev_get_by_debugfs() also got a reference */ put_device(&wwandev->dev); 
put_device(&wwandev->dev); } EXPORT_SYMBOL_GPL(wwan_put_debugfs_dir); #endif /* This function allocates and registers a new WWAN device OR if a WWAN device * already exist for the given parent, it gets a reference and return it. * This function is not exported (for now), it is called indirectly via * wwan_create_port(). */ static struct wwan_device *wwan_create_dev(struct device *parent) { struct wwan_device *wwandev; int err, id; /* The 'find-alloc-register' operation must be protected against * concurrent execution, a WWAN device is possibly shared between * multiple callers or concurrently unregistered from wwan_remove_dev(). */ mutex_lock(&wwan_register_lock); /* If wwandev already exists, return it */ wwandev = wwan_dev_get_by_parent(parent); if (!IS_ERR(wwandev)) goto done_unlock; id = ida_alloc(&wwan_dev_ids, GFP_KERNEL); if (id < 0) { wwandev = ERR_PTR(id); goto done_unlock; } wwandev = kzalloc(sizeof(*wwandev), GFP_KERNEL); if (!wwandev) { wwandev = ERR_PTR(-ENOMEM); ida_free(&wwan_dev_ids, id); goto done_unlock; } wwandev->dev.parent = parent; wwandev->dev.class = &wwan_class; wwandev->dev.type = &wwan_dev_type; wwandev->id = id; dev_set_name(&wwandev->dev, "wwan%d", wwandev->id); err = device_register(&wwandev->dev); if (err) { put_device(&wwandev->dev); wwandev = ERR_PTR(err); goto done_unlock; } #ifdef CONFIG_WWAN_DEBUGFS wwandev->debugfs_dir = debugfs_create_dir(kobject_name(&wwandev->dev.kobj), wwan_debugfs_dir); #endif done_unlock: mutex_unlock(&wwan_register_lock); return wwandev; } static int is_wwan_child(struct device *dev, void *data) { return dev->class == &wwan_class; } static void wwan_remove_dev(struct wwan_device *wwandev) { int ret; /* Prevent concurrent picking from wwan_create_dev */ mutex_lock(&wwan_register_lock); /* WWAN device is created and registered (get+add) along with its first * child port, and subsequent port registrations only grab a reference * (get). The WWAN device must then be unregistered (del+put) along with * its last port, and reference simply dropped (put) otherwise. In the * same fashion, we must not unregister it when the ops are still there. 
*/ if (wwandev->ops) ret = 1; else ret = device_for_each_child(&wwandev->dev, NULL, is_wwan_child); if (!ret) { #ifdef CONFIG_WWAN_DEBUGFS debugfs_remove_recursive(wwandev->debugfs_dir); #endif device_unregister(&wwandev->dev); } else { put_device(&wwandev->dev); } mutex_unlock(&wwan_register_lock); } /* ------- WWAN port management ------- */ static const struct { const char * const name; /* Port type name */ const char * const devsuf; /* Port device name suffix */ } wwan_port_types[WWAN_PORT_MAX + 1] = { [WWAN_PORT_AT] = { .name = "AT", .devsuf = "at", }, [WWAN_PORT_MBIM] = { .name = "MBIM", .devsuf = "mbim", }, [WWAN_PORT_QMI] = { .name = "QMI", .devsuf = "qmi", }, [WWAN_PORT_QCDM] = { .name = "QCDM", .devsuf = "qcdm", }, [WWAN_PORT_FIREHOSE] = { .name = "FIREHOSE", .devsuf = "firehose", }, [WWAN_PORT_XMMRPC] = { .name = "XMMRPC", .devsuf = "xmmrpc", }, [WWAN_PORT_FASTBOOT] = { .name = "FASTBOOT", .devsuf = "fastboot", }, [WWAN_PORT_ADB] = { .name = "ADB", .devsuf = "adb", }, [WWAN_PORT_MIPC] = { .name = "MIPC", .devsuf = "mipc", }, }; static ssize_t type_show(struct device *dev, struct device_attribute *attr, char *buf) { struct wwan_port *port = to_wwan_port(dev); return sprintf(buf, "%s\n", wwan_port_types[port->type].name); } static DEVICE_ATTR_RO(type); static struct attribute *wwan_port_attrs[] = { &dev_attr_type.attr, NULL, }; ATTRIBUTE_GROUPS(wwan_port); static void wwan_port_destroy(struct device *dev) { struct wwan_port *port = to_wwan_port(dev); ida_free(&minors, MINOR(port->dev.devt)); mutex_destroy(&port->data_lock); mutex_destroy(&port->ops_lock); kfree(port); } static const struct device_type wwan_port_dev_type = { .name = "wwan_port", .release = wwan_port_destroy, .groups = wwan_port_groups, }; static int wwan_port_minor_match(struct device *dev, const void *minor) { return (dev->type == &wwan_port_dev_type && MINOR(dev->devt) == *(unsigned int *)minor); } static struct wwan_port *wwan_port_get_by_minor(unsigned int minor) { struct device *dev; dev = class_find_device(&wwan_class, NULL, &minor, wwan_port_minor_match); if (!dev) return ERR_PTR(-ENODEV); return to_wwan_port(dev); } /* Allocate and set unique name based on passed format * * Name allocation approach is highly inspired by the __dev_alloc_name() * function. * * To avoid names collision, the caller must prevent the new port device * registration as well as concurrent invocation of this function. 
*/ static int __wwan_port_dev_assign_name(struct wwan_port *port, const char *fmt) { struct wwan_device *wwandev = to_wwan_dev(port->dev.parent); const unsigned int max_ports = PAGE_SIZE * 8; struct class_dev_iter iter; unsigned long *idmap; struct device *dev; char buf[0x20]; int id; idmap = bitmap_zalloc(max_ports, GFP_KERNEL); if (!idmap) return -ENOMEM; /* Collect ids of same name format ports */ class_dev_iter_init(&iter, &wwan_class, NULL, &wwan_port_dev_type); while ((dev = class_dev_iter_next(&iter))) { if (dev->parent != &wwandev->dev) continue; if (sscanf(dev_name(dev), fmt, &id) != 1) continue; if (id < 0 || id >= max_ports) continue; set_bit(id, idmap); } class_dev_iter_exit(&iter); /* Allocate unique id */ id = find_first_zero_bit(idmap, max_ports); bitmap_free(idmap); snprintf(buf, sizeof(buf), fmt, id); /* Name generation */ dev = device_find_child_by_name(&wwandev->dev, buf); if (dev) { put_device(dev); return -ENFILE; } return dev_set_name(&port->dev, "%s", buf); } struct wwan_port *wwan_create_port(struct device *parent, enum wwan_port_type type, const struct wwan_port_ops *ops, struct wwan_port_caps *caps, void *drvdata) { struct wwan_device *wwandev; struct wwan_port *port; char namefmt[0x20]; int minor, err; if (type > WWAN_PORT_MAX || !ops) return ERR_PTR(-EINVAL); /* A port is always a child of a WWAN device, retrieve (allocate or * pick) the WWAN device based on the provided parent device. */ wwandev = wwan_create_dev(parent); if (IS_ERR(wwandev)) return ERR_CAST(wwandev); /* A port is exposed as character device, get a minor */ minor = ida_alloc_range(&minors, 0, WWAN_MAX_MINORS - 1, GFP_KERNEL); if (minor < 0) { err = minor; goto error_wwandev_remove; } port = kzalloc(sizeof(*port), GFP_KERNEL); if (!port) { err = -ENOMEM; ida_free(&minors, minor); goto error_wwandev_remove; } port->type = type; port->ops = ops; port->frag_len = caps ? caps->frag_len : SIZE_MAX; port->headroom_len = caps ? caps->headroom_len : 0; mutex_init(&port->ops_lock); skb_queue_head_init(&port->rxq); init_waitqueue_head(&port->waitqueue); mutex_init(&port->data_lock); port->dev.parent = &wwandev->dev; port->dev.class = &wwan_class; port->dev.type = &wwan_port_dev_type; port->dev.devt = MKDEV(wwan_major, minor); dev_set_drvdata(&port->dev, drvdata); /* allocate unique name based on wwan device id, port type and number */ snprintf(namefmt, sizeof(namefmt), "wwan%u%s%%d", wwandev->id, wwan_port_types[port->type].devsuf); /* Serialize ports registration */ mutex_lock(&wwan_register_lock); __wwan_port_dev_assign_name(port, namefmt); err = device_register(&port->dev); mutex_unlock(&wwan_register_lock); if (err) goto error_put_device; dev_info(&wwandev->dev, "port %s attached\n", dev_name(&port->dev)); return port; error_put_device: put_device(&port->dev); error_wwandev_remove: wwan_remove_dev(wwandev); return ERR_PTR(err); } EXPORT_SYMBOL_GPL(wwan_create_port); void wwan_remove_port(struct wwan_port *port) { struct wwan_device *wwandev = to_wwan_dev(port->dev.parent); mutex_lock(&port->ops_lock); if (port->start_count) port->ops->stop(port); port->ops = NULL; /* Prevent any new port operations (e.g. 
from fops) */ mutex_unlock(&port->ops_lock); wake_up_interruptible(&port->waitqueue); skb_queue_purge(&port->rxq); dev_set_drvdata(&port->dev, NULL); dev_info(&wwandev->dev, "port %s disconnected\n", dev_name(&port->dev)); device_unregister(&port->dev); /* Release related wwan device */ wwan_remove_dev(wwandev); } EXPORT_SYMBOL_GPL(wwan_remove_port); void wwan_port_rx(struct wwan_port *port, struct sk_buff *skb) { skb_queue_tail(&port->rxq, skb); wake_up_interruptible(&port->waitqueue); } EXPORT_SYMBOL_GPL(wwan_port_rx); void wwan_port_txon(struct wwan_port *port) { clear_bit(WWAN_PORT_TX_OFF, &port->flags); wake_up_interruptible(&port->waitqueue); } EXPORT_SYMBOL_GPL(wwan_port_txon); void wwan_port_txoff(struct wwan_port *port) { set_bit(WWAN_PORT_TX_OFF, &port->flags); } EXPORT_SYMBOL_GPL(wwan_port_txoff); void *wwan_port_get_drvdata(struct wwan_port *port) { return dev_get_drvdata(&port->dev); } EXPORT_SYMBOL_GPL(wwan_port_get_drvdata); static int wwan_port_op_start(struct wwan_port *port) { int ret = 0; mutex_lock(&port->ops_lock); if (!port->ops) { /* Port got unplugged */ ret = -ENODEV; goto out_unlock; } /* If port is already started, don't start again */ if (!port->start_count) ret = port->ops->start(port); if (!ret) port->start_count++; out_unlock: mutex_unlock(&port->ops_lock); return ret; } static void wwan_port_op_stop(struct wwan_port *port) { mutex_lock(&port->ops_lock); port->start_count--; if (!port->start_count) { if (port->ops) port->ops->stop(port); skb_queue_purge(&port->rxq); } mutex_unlock(&port->ops_lock); } static int wwan_port_op_tx(struct wwan_port *port, struct sk_buff *skb, bool nonblock) { int ret; mutex_lock(&port->ops_lock); if (!port->ops) { /* Port got unplugged */ ret = -ENODEV; goto out_unlock; } if (nonblock || !port->ops->tx_blocking) ret = port->ops->tx(port, skb); else ret = port->ops->tx_blocking(port, skb); out_unlock: mutex_unlock(&port->ops_lock); return ret; } static bool is_read_blocked(struct wwan_port *port) { return skb_queue_empty(&port->rxq) && port->ops; } static bool is_write_blocked(struct wwan_port *port) { return test_bit(WWAN_PORT_TX_OFF, &port->flags) && port->ops; } static int wwan_wait_rx(struct wwan_port *port, bool nonblock) { if (!is_read_blocked(port)) return 0; if (nonblock) return -EAGAIN; if (wait_event_interruptible(port->waitqueue, !is_read_blocked(port))) return -ERESTARTSYS; return 0; } static int wwan_wait_tx(struct wwan_port *port, bool nonblock) { if (!is_write_blocked(port)) return 0; if (nonblock) return -EAGAIN; if (wait_event_interruptible(port->waitqueue, !is_write_blocked(port))) return -ERESTARTSYS; return 0; } static int wwan_port_fops_open(struct inode *inode, struct file *file) { struct wwan_port *port; int err = 0; port = wwan_port_get_by_minor(iminor(inode)); if (IS_ERR(port)) return PTR_ERR(port); file->private_data = port; stream_open(inode, file); err = wwan_port_op_start(port); if (err) put_device(&port->dev); return err; } static int wwan_port_fops_release(struct inode *inode, struct file *filp) { struct wwan_port *port = filp->private_data; wwan_port_op_stop(port); put_device(&port->dev); return 0; } static ssize_t wwan_port_fops_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos) { struct wwan_port *port = filp->private_data; struct sk_buff *skb; size_t copied; int ret; ret = wwan_wait_rx(port, !!(filp->f_flags & O_NONBLOCK)); if (ret) return ret; skb = skb_dequeue(&port->rxq); if (!skb) return -EIO; copied = min_t(size_t, count, skb->len); if (copy_to_user(buf, skb->data, copied)) 
{ kfree_skb(skb); return -EFAULT; } skb_pull(skb, copied); /* skb is not fully consumed, keep it in the queue */ if (skb->len) skb_queue_head(&port->rxq, skb); else consume_skb(skb); return copied; } static ssize_t wwan_port_fops_write(struct file *filp, const char __user *buf, size_t count, loff_t *offp) { struct sk_buff *skb, *head = NULL, *tail = NULL; struct wwan_port *port = filp->private_data; size_t frag_len, remain = count; int ret; ret = wwan_wait_tx(port, !!(filp->f_flags & O_NONBLOCK)); if (ret) return ret; do { frag_len = min(remain, port->frag_len); skb = alloc_skb(frag_len + port->headroom_len, GFP_KERNEL); if (!skb) { ret = -ENOMEM; goto freeskb; } skb_reserve(skb, port->headroom_len); if (!head) { head = skb; } else if (!tail) { skb_shinfo(head)->frag_list = skb; tail = skb; } else { tail->next = skb; tail = skb; } if (copy_from_user(skb_put(skb, frag_len), buf + count - remain, frag_len)) { ret = -EFAULT; goto freeskb; } if (skb != head) { head->data_len += skb->len; head->len += skb->len; head->truesize += skb->truesize; } } while (remain -= frag_len); ret = wwan_port_op_tx(port, head, !!(filp->f_flags & O_NONBLOCK)); if (!ret) return count; freeskb: kfree_skb(head); return ret; } static __poll_t wwan_port_fops_poll(struct file *filp, poll_table *wait) { struct wwan_port *port = filp->private_data; __poll_t mask = 0; poll_wait(filp, &port->waitqueue, wait); mutex_lock(&port->ops_lock); if (port->ops && port->ops->tx_poll) mask |= port->ops->tx_poll(port, filp, wait); else if (!is_write_blocked(port)) mask |= EPOLLOUT | EPOLLWRNORM; if (!is_read_blocked(port)) mask |= EPOLLIN | EPOLLRDNORM; if (!port->ops) mask |= EPOLLHUP | EPOLLERR; mutex_unlock(&port->ops_lock); return mask; } /* Implements minimalistic stub terminal IOCTLs support */ static long wwan_port_fops_at_ioctl(struct wwan_port *port, unsigned int cmd, unsigned long arg) { int ret = 0; mutex_lock(&port->data_lock); switch (cmd) { case TCFLSH: break; case TCGETS: if (copy_to_user((void __user *)arg, &port->at_data.termios, sizeof(struct termios))) ret = -EFAULT; break; case TCSETS: case TCSETSW: case TCSETSF: if (copy_from_user(&port->at_data.termios, (void __user *)arg, sizeof(struct termios))) ret = -EFAULT; break; #ifdef TCGETS2 case TCGETS2: if (copy_to_user((void __user *)arg, &port->at_data.termios, sizeof(struct termios2))) ret = -EFAULT; break; case TCSETS2: case TCSETSW2: case TCSETSF2: if (copy_from_user(&port->at_data.termios, (void __user *)arg, sizeof(struct termios2))) ret = -EFAULT; break; #endif case TIOCMGET: ret = put_user(port->at_data.mdmbits, (int __user *)arg); break; case TIOCMSET: case TIOCMBIC: case TIOCMBIS: { int mdmbits; if (copy_from_user(&mdmbits, (int __user *)arg, sizeof(int))) { ret = -EFAULT; break; } if (cmd == TIOCMBIC) port->at_data.mdmbits &= ~mdmbits; else if (cmd == TIOCMBIS) port->at_data.mdmbits |= mdmbits; else port->at_data.mdmbits = mdmbits; break; } default: ret = -ENOIOCTLCMD; } mutex_unlock(&port->data_lock); return ret; } static long wwan_port_fops_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct wwan_port *port = filp->private_data; int res; if (port->type == WWAN_PORT_AT) { /* AT port specific IOCTLs */ res = wwan_port_fops_at_ioctl(port, cmd, arg); if (res != -ENOIOCTLCMD) return res; } switch (cmd) { case TIOCINQ: { /* aka SIOCINQ aka FIONREAD */ unsigned long flags; struct sk_buff *skb; int amount = 0; spin_lock_irqsave(&port->rxq.lock, flags); skb_queue_walk(&port->rxq, skb) amount += skb->len; spin_unlock_irqrestore(&port->rxq.lock, 
flags); return put_user(amount, (int __user *)arg); } default: return -ENOIOCTLCMD; } } static const struct file_operations wwan_port_fops = { .owner = THIS_MODULE, .open = wwan_port_fops_open, .release = wwan_port_fops_release, .read = wwan_port_fops_read, .write = wwan_port_fops_write, .poll = wwan_port_fops_poll, .unlocked_ioctl = wwan_port_fops_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = compat_ptr_ioctl, #endif .llseek = noop_llseek, }; static int wwan_rtnl_validate(struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { if (!data) return -EINVAL; if (!tb[IFLA_PARENT_DEV_NAME]) return -EINVAL; if (!data[IFLA_WWAN_LINK_ID]) return -EINVAL; return 0; } static const struct device_type wwan_type = { .name = "wwan" }; static struct net_device *wwan_rtnl_alloc(struct nlattr *tb[], const char *ifname, unsigned char name_assign_type, unsigned int num_tx_queues, unsigned int num_rx_queues) { const char *devname = nla_data(tb[IFLA_PARENT_DEV_NAME]); struct wwan_device *wwandev = wwan_dev_get_by_name(devname); struct net_device *dev; unsigned int priv_size; if (IS_ERR(wwandev)) return ERR_CAST(wwandev); /* only supported if ops were registered (not just ports) */ if (!wwandev->ops) { dev = ERR_PTR(-EOPNOTSUPP); goto out; } priv_size = sizeof(struct wwan_netdev_priv) + wwandev->ops->priv_size; dev = alloc_netdev_mqs(priv_size, ifname, name_assign_type, wwandev->ops->setup, num_tx_queues, num_rx_queues); if (dev) { SET_NETDEV_DEV(dev, &wwandev->dev); SET_NETDEV_DEVTYPE(dev, &wwan_type); } out: /* release the reference */ put_device(&wwandev->dev); return dev; } static int wwan_rtnl_newlink(struct net_device *dev, struct rtnl_newlink_params *params, struct netlink_ext_ack *extack) { struct wwan_device *wwandev = wwan_dev_get_by_parent(dev->dev.parent); struct wwan_netdev_priv *priv = netdev_priv(dev); struct nlattr **data = params->data; u32 link_id; int ret; link_id = nla_get_u32(data[IFLA_WWAN_LINK_ID]); if (IS_ERR(wwandev)) return PTR_ERR(wwandev); /* shouldn't have a netdev (left) with us as parent so WARN */ if (WARN_ON(!wwandev->ops)) { ret = -EOPNOTSUPP; goto out; } priv->link_id = link_id; if (wwandev->ops->newlink) ret = wwandev->ops->newlink(wwandev->ops_ctxt, dev, link_id, extack); else ret = register_netdevice(dev); out: /* release the reference */ put_device(&wwandev->dev); return ret; } static void wwan_rtnl_dellink(struct net_device *dev, struct list_head *head) { struct wwan_device *wwandev = wwan_dev_get_by_parent(dev->dev.parent); if (IS_ERR(wwandev)) return; /* shouldn't have a netdev (left) with us as parent so WARN */ if (WARN_ON(!wwandev->ops)) goto out; if (wwandev->ops->dellink) wwandev->ops->dellink(wwandev->ops_ctxt, dev, head); else unregister_netdevice_queue(dev, head); out: /* release the reference */ put_device(&wwandev->dev); } static size_t wwan_rtnl_get_size(const struct net_device *dev) { return nla_total_size(4) + /* IFLA_WWAN_LINK_ID */ 0; } static int wwan_rtnl_fill_info(struct sk_buff *skb, const struct net_device *dev) { struct wwan_netdev_priv *priv = netdev_priv(dev); if (nla_put_u32(skb, IFLA_WWAN_LINK_ID, priv->link_id)) goto nla_put_failure; return 0; nla_put_failure: return -EMSGSIZE; } static const struct nla_policy wwan_rtnl_policy[IFLA_WWAN_MAX + 1] = { [IFLA_WWAN_LINK_ID] = { .type = NLA_U32 }, }; static struct rtnl_link_ops wwan_rtnl_link_ops __read_mostly = { .kind = "wwan", .maxtype = IFLA_WWAN_MAX, .alloc = wwan_rtnl_alloc, .validate = wwan_rtnl_validate, .newlink = wwan_rtnl_newlink, .dellink = wwan_rtnl_dellink, .get_size 
= wwan_rtnl_get_size, .fill_info = wwan_rtnl_fill_info, .policy = wwan_rtnl_policy, }; static void wwan_create_default_link(struct wwan_device *wwandev, u32 def_link_id) { struct nlattr *tb[IFLA_MAX + 1], *linkinfo[IFLA_INFO_MAX + 1]; struct nlattr *data[IFLA_WWAN_MAX + 1]; struct rtnl_newlink_params params = { .src_net = &init_net, .tb = tb, .data = data, }; struct net_device *dev; struct nlmsghdr *nlh; struct sk_buff *msg; /* Forge attributes required to create a WWAN netdev. We first * build a netlink message and then parse it. This looks * odd, but such approach is less error prone. */ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (WARN_ON(!msg)) return; nlh = nlmsg_put(msg, 0, 0, RTM_NEWLINK, 0, 0); if (WARN_ON(!nlh)) goto free_attrs; if (nla_put_string(msg, IFLA_PARENT_DEV_NAME, dev_name(&wwandev->dev))) goto free_attrs; tb[IFLA_LINKINFO] = nla_nest_start(msg, IFLA_LINKINFO); if (!tb[IFLA_LINKINFO]) goto free_attrs; linkinfo[IFLA_INFO_DATA] = nla_nest_start(msg, IFLA_INFO_DATA); if (!linkinfo[IFLA_INFO_DATA]) goto free_attrs; if (nla_put_u32(msg, IFLA_WWAN_LINK_ID, def_link_id)) goto free_attrs; nla_nest_end(msg, linkinfo[IFLA_INFO_DATA]); nla_nest_end(msg, tb[IFLA_LINKINFO]); nlmsg_end(msg, nlh); /* The next three parsing calls can not fail */ nlmsg_parse_deprecated(nlh, 0, tb, IFLA_MAX, NULL, NULL); nla_parse_nested_deprecated(linkinfo, IFLA_INFO_MAX, tb[IFLA_LINKINFO], NULL, NULL); nla_parse_nested_deprecated(data, IFLA_WWAN_MAX, linkinfo[IFLA_INFO_DATA], NULL, NULL); rtnl_lock(); dev = rtnl_create_link(&init_net, "wwan%d", NET_NAME_ENUM, &wwan_rtnl_link_ops, tb, NULL); if (WARN_ON(IS_ERR(dev))) goto unlock; if (WARN_ON(wwan_rtnl_newlink(dev, &params, NULL))) { free_netdev(dev); goto unlock; } rtnl_configure_link(dev, NULL, 0, NULL); /* Link initialized, notify new link */ unlock: rtnl_unlock(); free_attrs: nlmsg_free(msg); } /** * wwan_register_ops - register WWAN device ops * @parent: Device to use as parent and shared by all WWAN ports and * created netdevs * @ops: operations to register * @ctxt: context to pass to operations * @def_link_id: id of the default link that will be automatically created by * the WWAN core for the WWAN device. The default link will not be created * if the passed value is WWAN_NO_DEFAULT_LINK. * * Returns: 0 on success, a negative error code on failure */ int wwan_register_ops(struct device *parent, const struct wwan_ops *ops, void *ctxt, u32 def_link_id) { struct wwan_device *wwandev; if (WARN_ON(!parent || !ops || !ops->setup)) return -EINVAL; wwandev = wwan_create_dev(parent); if (IS_ERR(wwandev)) return PTR_ERR(wwandev); if (WARN_ON(wwandev->ops)) { wwan_remove_dev(wwandev); return -EBUSY; } wwandev->ops = ops; wwandev->ops_ctxt = ctxt; /* NB: we do not abort ops registration in case of default link * creation failure. Link ops is the management interface, while the * default link creation is a service option. And we should not prevent * a user from manually creating a link latter if service option failed * now. 
*/ if (def_link_id != WWAN_NO_DEFAULT_LINK) wwan_create_default_link(wwandev, def_link_id); return 0; } EXPORT_SYMBOL_GPL(wwan_register_ops); /* Enqueue child netdev deletion */ static int wwan_child_dellink(struct device *dev, void *data) { struct list_head *kill_list = data; if (dev->type == &wwan_type) wwan_rtnl_dellink(to_net_dev(dev), kill_list); return 0; } /** * wwan_unregister_ops - remove WWAN device ops * @parent: Device to use as parent and shared by all WWAN ports and * created netdevs */ void wwan_unregister_ops(struct device *parent) { struct wwan_device *wwandev = wwan_dev_get_by_parent(parent); LIST_HEAD(kill_list); if (WARN_ON(IS_ERR(wwandev))) return; if (WARN_ON(!wwandev->ops)) { put_device(&wwandev->dev); return; } /* put the reference obtained by wwan_dev_get_by_parent(), * we should still have one (that the owner is giving back * now) due to the ops being assigned. */ put_device(&wwandev->dev); rtnl_lock(); /* Prevent concurrent netdev(s) creation/destroying */ /* Remove all child netdev(s), using batch removing */ device_for_each_child(&wwandev->dev, &kill_list, wwan_child_dellink); unregister_netdevice_many(&kill_list); wwandev->ops = NULL; /* Finally remove ops */ rtnl_unlock(); wwandev->ops_ctxt = NULL; wwan_remove_dev(wwandev); } EXPORT_SYMBOL_GPL(wwan_unregister_ops); static int __init wwan_init(void) { int err; err = rtnl_link_register(&wwan_rtnl_link_ops); if (err) return err; err = class_register(&wwan_class); if (err) goto unregister; /* chrdev used for wwan ports */ wwan_major = __register_chrdev(0, 0, WWAN_MAX_MINORS, "wwan_port", &wwan_port_fops); if (wwan_major < 0) { err = wwan_major; goto destroy; } #ifdef CONFIG_WWAN_DEBUGFS wwan_debugfs_dir = debugfs_create_dir("wwan", NULL); #endif return 0; destroy: class_unregister(&wwan_class); unregister: rtnl_link_unregister(&wwan_rtnl_link_ops); return err; } static void __exit wwan_exit(void) { debugfs_remove_recursive(wwan_debugfs_dir); __unregister_chrdev(wwan_major, 0, WWAN_MAX_MINORS, "wwan_port"); rtnl_link_unregister(&wwan_rtnl_link_ops); class_unregister(&wwan_class); } module_init(wwan_init); module_exit(wwan_exit); MODULE_AUTHOR("Loic Poulain <loic.poulain@linaro.org>"); MODULE_DESCRIPTION("WWAN core"); MODULE_LICENSE("GPL v2");
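As a rough illustration of the driver side of the port API implemented above, the following hedged sketch shows how a hypothetical modem driver might expose an AT port. my_modem, my_at_*() and my_modem_create_at_port() are invented names; only wwan_create_port(), wwan_port_get_drvdata(), wwan_port_rx(), wwan_remove_port() and struct wwan_port_ops come from the code above and <linux/wwan.h>.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/skbuff.h>
#include <linux/wwan.h>

struct my_modem {
	struct wwan_port *at_port;
};

static int my_at_start(struct wwan_port *port)
{
	/* Bring up the modem-side channel, e.g. submit RX buffers */
	return 0;
}

static void my_at_stop(struct wwan_port *port)
{
	/* Quiesce the channel */
}

static int my_at_tx(struct wwan_port *port, struct sk_buff *skb)
{
	struct my_modem *modem = wwan_port_get_drvdata(port);

	/* Hand the skb to the transport; this sketch just drops it */
	(void)modem;
	kfree_skb(skb);
	return 0;
}

static const struct wwan_port_ops my_at_ops = {
	.start	= my_at_start,
	.stop	= my_at_stop,
	.tx	= my_at_tx,
};

static int my_modem_create_at_port(struct device *parent,
				   struct my_modem *modem)
{
	/* NULL caps: no fragmentation limit and no reserved headroom */
	modem->at_port = wwan_create_port(parent, WWAN_PORT_AT, &my_at_ops,
					  NULL, modem);
	return PTR_ERR_OR_ZERO(modem->at_port);
}

/* Data coming from the modem is queued with wwan_port_rx(modem->at_port, skb);
 * flow control uses wwan_port_txoff()/wwan_port_txon(); teardown calls
 * wwan_remove_port(modem->at_port).
 */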
/* * include/net/tipc.h: Include file for TIPC message header routines * * Copyright (c) 2017 Ericsson AB * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the names of the copyright holders nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */
#ifndef _TIPC_HDR_H #define _TIPC_HDR_H
#include <linux/random.h>
#define KEEPALIVE_MSG_MASK 0x0e080000 /* LINK_PROTOCOL + MSG_IS_KEEPALIVE */
struct tipc_basic_hdr { __be32 w[4]; };
static inline __be32 tipc_hdr_rps_key(struct tipc_basic_hdr *hdr) { u32 w0 = ntohl(hdr->w[0]); bool keepalive_msg = (w0 & KEEPALIVE_MSG_MASK) == KEEPALIVE_MSG_MASK; __be32 key; /* Return source node identity as key */ if (likely(!keepalive_msg)) return hdr->w[3]; /* Spread PROBE/PROBE_REPLY messages across the cores */ get_random_bytes(&key, sizeof(key)); return key; }
#endif
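A short sketch of how tipc_hdr_rps_key() might be consumed by a receive-path steering hook: non-keepalive traffic hashes on the source node identity so a given peer stays on one CPU, while keepalive probes (which get a random key) are spread across CPUs. example_steer_tipc() is a hypothetical helper, not an in-tree user.

#include <linux/skbuff.h>
#include <net/tipc.h>

static void example_steer_tipc(struct sk_buff *skb)
{
	struct tipc_basic_hdr *hdr;

	/* Make sure the basic TIPC header is in the linear area */
	if (!pskb_may_pull(skb, sizeof(*hdr)))
		return;

	hdr = (struct tipc_basic_hdr *)skb->data;
	skb_set_hash(skb, ntohl(tipc_hdr_rps_key(hdr)), PKT_HASH_TYPE_L3);
}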
// SPDX-License-Identifier: GPL-2.0 /* * Implement mseal() syscall. * * Copyright (c) 2023,2024 Google, Inc. * * Author: Jeff Xu <jeffxu@chromium.org> */
#include <linux/mempolicy.h> #include <linux/mman.h> #include <linux/mm.h> #include <linux/mm_inline.h> #include <linux/mmu_context.h> #include <linux/syscalls.h> #include <linux/sched.h> #include "internal.h"
static inline void set_vma_sealed(struct vm_area_struct *vma) { vm_flags_set(vma, VM_SEALED); }
static bool is_madv_discard(int behavior) { switch (behavior) { case MADV_FREE: case MADV_DONTNEED: case MADV_DONTNEED_LOCKED: case MADV_REMOVE: case MADV_DONTFORK: case MADV_WIPEONFORK: case MADV_GUARD_INSTALL: return true; } return false; }
static bool is_ro_anon(struct vm_area_struct *vma) { /* check anonymous mapping. */ if (vma->vm_file || vma->vm_flags & VM_SHARED) return false; /* * check for non-writable: * PROT=RO or PKRU is not writeable. */ if (!(vma->vm_flags & VM_WRITE) || !arch_vma_access_permitted(vma, true, false, false)) return true; return false; }
/* * Check if a vma is allowed to be modified by madvise. */ bool can_modify_vma_madv(struct vm_area_struct *vma, int behavior) { if (!is_madv_discard(behavior)) return true; if (unlikely(!can_modify_vma(vma) && is_ro_anon(vma))) return false; /* Allow by default. */ return true; }
static int mseal_fixup(struct vma_iterator *vmi, struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start, unsigned long end, vm_flags_t newflags) { int ret = 0; vm_flags_t oldflags = vma->vm_flags; if (newflags == oldflags) goto out; vma = vma_modify_flags(vmi, *prev, vma, start, end, newflags); if (IS_ERR(vma)) { ret = PTR_ERR(vma); goto out; } set_vma_sealed(vma); out: *prev = vma; return ret; }
/* * Check for do_mseal: * 1> start is part of a valid vma. * 2> end is part of a valid vma. * 3> No gap (unallocated address) between start and end. * 4> map is sealable. */ static int check_mm_seal(unsigned long start, unsigned long end) { struct vm_area_struct *vma; unsigned long nstart = start; VMA_ITERATOR(vmi, current->mm, start); /* going through each vma to check. */ for_each_vma_range(vmi, vma, end) { if (vma->vm_start > nstart) /* unallocated memory found. */ return -ENOMEM; if (vma->vm_end >= end) return 0; nstart = vma->vm_end; } return -ENOMEM; }
/* * Apply sealing.
*/ static int apply_mm_seal(unsigned long start, unsigned long end) { unsigned long nstart; struct vm_area_struct *vma, *prev; VMA_ITERATOR(vmi, current->mm, start); vma = vma_iter_load(&vmi); /* * Note: check_mm_seal() should have already checked the ENOMEM case, * so vma should not be NULL here, same for the other ENOMEM cases. */ prev = vma_prev(&vmi); if (start > vma->vm_start) prev = vma; nstart = start; for_each_vma_range(vmi, vma, end) { int error; unsigned long tmp; vm_flags_t newflags; newflags = vma->vm_flags | VM_SEALED; tmp = vma->vm_end; if (tmp > end) tmp = end; error = mseal_fixup(&vmi, vma, &prev, nstart, tmp, newflags); if (error) return error; nstart = vma_iter_end(&vmi); } return 0; }
/* * mseal(2) seals the VMA metadata against modification by * selected syscalls. * * addr/len: VM address range. * * The address range given by addr/len must meet: * start (addr) must be in a valid VMA. * end (addr + len) must be in a valid VMA. * no gap (unallocated memory) between start and end. * start (addr) must be page aligned. * * len: len will be page aligned implicitly. * * The VMA operations below are blocked after sealing. * 1> Unmapping, moving to another location, and shrinking * the size, via munmap() and mremap(): these can leave an empty * space that can then be refilled with a VMA carrying a new * set of attributes. * 2> Moving or expanding a different vma into the current location, * via mremap(). * 3> Modifying a VMA via mmap(MAP_FIXED). * 4> Size expansion, via mremap(), does not appear to pose any * specific risks to sealed VMAs. It is included anyway because * the use case is unclear. In any case, users can rely on * merging to expand a sealed VMA. * 5> mprotect and pkey_mprotect. * 6> Some destructive madvise() behavior (e.g. MADV_DONTNEED) * for anonymous memory, when users don't have write permission to the * memory. Those behaviors can alter region contents by discarding pages, * effectively a memset(0) for anonymous memory. * * flags: reserved. * * return values: * zero: success. * -EINVAL: * invalid input flags. * start address is not page aligned. * Address range (start + len) overflows. * -ENOMEM: * addr is not a valid address (not allocated). * end (start + len) is not a valid address. * a gap (unallocated memory) between start and end. * -EPERM: * - On 32-bit architectures, sealing is not supported. * Note: * user can call mseal(2) multiple times; adding a seal on * already sealed memory is a no-op (no error). * * unseal() is not supported. */ int do_mseal(unsigned long start, size_t len_in, unsigned long flags) { size_t len; int ret = 0; unsigned long end; struct mm_struct *mm = current->mm; /* Verify flags not set. */ if (flags) return -EINVAL; start = untagged_addr(start); if (!PAGE_ALIGNED(start)) return -EINVAL; len = PAGE_ALIGN(len_in); /* Check whether len was rounded up from a small negative value to zero. */ if (len_in && !len) return -EINVAL; end = start + len; if (end < start) return -EINVAL; if (end == start) return 0; if (mmap_write_lock_killable(mm)) return -EINTR; /* * First pass: this helps to avoid * partial sealing in case of an error in the input address range, * e.g. an ENOMEM error. */ ret = check_mm_seal(start, end); if (ret) goto out; /* * Second pass: this should succeed, unless there are errors * from vma_modify_flags(), e.g. a merge/split error, or the process * reaching the maximum number of supported VMAs; however, those cases * should be rare.
*/ ret = apply_mm_seal(start, end); out: mmap_write_unlock(current->mm); return ret; } SYSCALL_DEFINE3(mseal, unsigned long, start, size_t, len, unsigned long, flags) { return do_mseal(start, len, flags); }
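From userspace, the syscall implemented above can be exercised roughly as follows. This is a hedged sketch: it assumes a 64-bit kernel providing mseal(2) and uses syscall(2) directly, with 462 as the assumed syscall number for architectures using the common table (adjust if your headers differ).

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef SYS_mseal
#define SYS_mseal 462	/* assumed value; check your arch's syscall table */
#endif

int main(void)
{
	size_t len = (size_t)sysconf(_SC_PAGESIZE);
	void *p = mmap(NULL, len, PROT_READ,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;

	/* flags must be 0 (reserved); addr must be page aligned */
	if (syscall(SYS_mseal, p, len, 0UL) != 0) {
		perror("mseal");
		return 1;
	}

	/* Sealed: mprotect()/munmap()/mremap() on the range now fail (EPERM) */
	if (mprotect(p, len, PROT_READ | PROT_WRITE) != 0)
		perror("mprotect (expected to fail on a sealed mapping)");

	return 0;
}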
/* * Performance events: * * Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de> * Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar * Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra * * Data type definitions, declarations, prototypes.
* * Started by: Thomas Gleixner and Ingo Molnar * * For licencing details see kernel-base/COPYING */ #ifndef _LINUX_PERF_EVENT_H #define _LINUX_PERF_EVENT_H #include <uapi/linux/perf_event.h> #include <uapi/linux/bpf_perf_event.h> /* * Kernel-internal data types and definitions: */ #ifdef CONFIG_PERF_EVENTS # include <asm/perf_event.h> # include <asm/local64.h> #endif #ifdef CONFIG_HAVE_HW_BREAKPOINT # include <linux/rhashtable-types.h> # include <asm/hw_breakpoint.h> #endif #include <linux/list.h> #include <linux/mutex.h> #include <linux/rculist.h> #include <linux/rcupdate.h> #include <linux/spinlock.h> #include <linux/hrtimer.h> #include <linux/fs.h> #include <linux/pid_namespace.h> #include <linux/workqueue.h> #include <linux/ftrace.h> #include <linux/cpu.h> #include <linux/irq_work.h> #include <linux/static_key.h> #include <linux/jump_label_ratelimit.h> #include <linux/atomic.h> #include <linux/sysfs.h> #include <linux/perf_regs.h> #include <linux/cgroup.h> #include <linux/refcount.h> #include <linux/security.h> #include <linux/static_call.h> #include <linux/lockdep.h> #include <asm/local.h> struct perf_callchain_entry { u64 nr; u64 ip[]; /* /proc/sys/kernel/perf_event_max_stack */ }; struct perf_callchain_entry_ctx { struct perf_callchain_entry *entry; u32 max_stack; u32 nr; short contexts; bool contexts_maxed; }; typedef unsigned long (*perf_copy_f)(void *dst, const void *src, unsigned long off, unsigned long len); struct perf_raw_frag { union { struct perf_raw_frag *next; unsigned long pad; }; perf_copy_f copy; void *data; u32 size; } __packed; struct perf_raw_record { struct perf_raw_frag frag; u32 size; }; static __always_inline bool perf_raw_frag_last(const struct perf_raw_frag *frag) { return frag->pad < sizeof(u64); } /* * branch stack layout: * nr: number of taken branches stored in entries[] * hw_idx: The low level index of raw branch records * for the most recent branch. * -1ULL means invalid/unknown. * * Note that nr can vary from sample to sample * branches (to, from) are stored from most recent * to least recent, i.e., entries[0] contains the most * recent branch. * The entries[] is an abstraction of raw branch records, * which may not be stored in age order in HW, e.g. Intel LBR. * The hw_idx is to expose the low level index of raw * branch record for the most recent branch aka entries[0]. * The hw_idx index is between -1 (unknown) and max depth, * which can be retrieved in /sys/devices/cpu/caps/branches. * For the architectures whose raw branch records are * already stored in age order, the hw_idx should be 0. */ struct perf_branch_stack { u64 nr; u64 hw_idx; struct perf_branch_entry entries[]; }; struct task_struct; /* * extra PMU register associated with an event */ struct hw_perf_event_extra { u64 config; /* register value */ unsigned int reg; /* register address or index */ int alloc; /* extra register already allocated */ int idx; /* index in shared_regs->regs[] */ }; /** * hw_perf_event::flag values * * PERF_EVENT_FLAG_ARCH bits are reserved for architecture-specific * usage. 
*/ #define PERF_EVENT_FLAG_ARCH 0x0fffffff #define PERF_EVENT_FLAG_USER_READ_CNT 0x80000000 static_assert((PERF_EVENT_FLAG_USER_READ_CNT & PERF_EVENT_FLAG_ARCH) == 0); /** * struct hw_perf_event - performance event hardware details: */ struct hw_perf_event { #ifdef CONFIG_PERF_EVENTS union { struct { /* hardware */ u64 config; u64 config1; u64 last_tag; u64 dyn_constraint; unsigned long config_base; unsigned long event_base; int event_base_rdpmc; int idx; int last_cpu; int flags; struct hw_perf_event_extra extra_reg; struct hw_perf_event_extra branch_reg; }; struct { /* aux / Intel-PT */ u64 aux_config; /* * For AUX area events, aux_paused cannot be a state * flag because it can be updated asynchronously to * state. */ unsigned int aux_paused; }; struct { /* software */ struct hrtimer hrtimer; }; struct { /* tracepoint */ /* for tp_event->class */ struct list_head tp_list; }; struct { /* amd_power */ u64 pwr_acc; u64 ptsc; }; #ifdef CONFIG_HAVE_HW_BREAKPOINT struct { /* breakpoint */ /* * Crufty hack to avoid the chicken and egg * problem hw_breakpoint has with context * creation and event initalization. */ struct arch_hw_breakpoint info; struct rhlist_head bp_list; }; #endif struct { /* amd_iommu */ u8 iommu_bank; u8 iommu_cntr; u16 padding; u64 conf; u64 conf1; }; }; /* * If the event is a per task event, this will point to the task in * question. See the comment in perf_event_alloc(). */ struct task_struct *target; /* * PMU would store hardware filter configuration * here. */ void *addr_filters; /* Last sync'ed generation of filters */ unsigned long addr_filters_gen; /* * hw_perf_event::state flags; used to track the PERF_EF_* state. */ /* the counter is stopped */ #define PERF_HES_STOPPED 0x01 /* event->count up-to-date */ #define PERF_HES_UPTODATE 0x02 #define PERF_HES_ARCH 0x04 int state; /* * The last observed hardware counter value, updated with a * local64_cmpxchg() such that pmu::read() can be called nested. */ local64_t prev_count; /* * The period to start the next sample with. */ u64 sample_period; union { struct { /* Sampling */ /* * The period we started this sample with. */ u64 last_period; /* * However much is left of the current period; * note that this is a full 64bit value and * allows for generation of periods longer * than hardware might allow. */ local64_t period_left; }; struct { /* Topdown events counting for context switch */ u64 saved_metric; u64 saved_slots; }; }; /* * State for throttling the event, see __perf_event_overflow() and * perf_adjust_freq_unthr_context(). */ u64 interrupts_seq; u64 interrupts; /* * State for freq target events, see __perf_event_overflow() and * perf_adjust_freq_unthr_context(). 
*/ u64 freq_time_stamp; u64 freq_count_stamp; #endif /* CONFIG_PERF_EVENTS */ }; struct perf_event; struct perf_event_pmu_context; /* * Common implementation detail of pmu::{start,commit,cancel}_txn */ /* txn to add/schedule event on PMU */ #define PERF_PMU_TXN_ADD 0x1 /* txn to read event group from PMU */ #define PERF_PMU_TXN_READ 0x2 /** * pmu::capabilities flags */ #define PERF_PMU_CAP_NO_INTERRUPT 0x0001 #define PERF_PMU_CAP_NO_NMI 0x0002 #define PERF_PMU_CAP_AUX_NO_SG 0x0004 #define PERF_PMU_CAP_EXTENDED_REGS 0x0008 #define PERF_PMU_CAP_EXCLUSIVE 0x0010 #define PERF_PMU_CAP_ITRACE 0x0020 #define PERF_PMU_CAP_NO_EXCLUDE 0x0040 #define PERF_PMU_CAP_AUX_OUTPUT 0x0080 #define PERF_PMU_CAP_EXTENDED_HW_TYPE 0x0100 #define PERF_PMU_CAP_AUX_PAUSE 0x0200 #define PERF_PMU_CAP_AUX_PREFER_LARGE 0x0400 /** * pmu::scope */ enum perf_pmu_scope { PERF_PMU_SCOPE_NONE = 0, PERF_PMU_SCOPE_CORE, PERF_PMU_SCOPE_DIE, PERF_PMU_SCOPE_CLUSTER, PERF_PMU_SCOPE_PKG, PERF_PMU_SCOPE_SYS_WIDE, PERF_PMU_MAX_SCOPE, }; struct perf_output_handle; #define PMU_NULL_DEV ((void *)(~0UL)) /** * struct pmu - generic performance monitoring unit */ struct pmu { struct list_head entry; spinlock_t events_lock; struct list_head events; struct module *module; struct device *dev; struct device *parent; const struct attribute_group **attr_groups; const struct attribute_group **attr_update; const char *name; int type; /* * various common per-pmu feature flags */ int capabilities; /* * PMU scope */ unsigned int scope; struct perf_cpu_pmu_context * __percpu *cpu_pmu_context; atomic_t exclusive_cnt; /* < 0: cpu; > 0: tsk */ int task_ctx_nr; int hrtimer_interval_ms; /* number of address filters this PMU can do */ unsigned int nr_addr_filters; /* * Fully disable/enable this PMU, can be used to protect from the PMI * as well as for lazy/batch writing of the MSRs. */ void (*pmu_enable) (struct pmu *pmu); /* optional */ void (*pmu_disable) (struct pmu *pmu); /* optional */ /* * Try and initialize the event for this PMU. * * Returns: * -ENOENT -- @event is not for this PMU * * -ENODEV -- @event is for this PMU but PMU not present * -EBUSY -- @event is for this PMU but PMU temporarily unavailable * -EINVAL -- @event is for this PMU but @event is not valid * -EOPNOTSUPP -- @event is for this PMU, @event is valid, but not supported * -EACCES -- @event is for this PMU, @event is valid, but no privileges * * 0 -- @event is for this PMU and valid * * Other error return values are allowed. */ int (*event_init) (struct perf_event *event); /* * Notification that the event was mapped or unmapped. Called * in the context of the mapping task. */ void (*event_mapped) (struct perf_event *event, struct mm_struct *mm); /* optional */ void (*event_unmapped) (struct perf_event *event, struct mm_struct *mm); /* optional */ /* * Flags for ->add()/->del()/ ->start()/->stop(). There are * matching hw_perf_event::state flags. */ /* start the counter when adding */ #define PERF_EF_START 0x01 /* reload the counter when starting */ #define PERF_EF_RELOAD 0x02 /* update the counter when stopping */ #define PERF_EF_UPDATE 0x04 /* AUX area event, pause tracing */ #define PERF_EF_PAUSE 0x08 /* AUX area event, resume tracing */ #define PERF_EF_RESUME 0x10 /* * Adds/Removes a counter to/from the PMU, can be done inside a * transaction, see the ->*_txn() methods. * * The add/del callbacks will reserve all hardware resources required * to service the event, this includes any counter constraint * scheduling etc. 
* * Called with IRQs disabled and the PMU disabled on the CPU the event * is on. * * ->add() called without PERF_EF_START should result in the same state * as ->add() followed by ->stop(). * * ->del() must always PERF_EF_UPDATE stop an event. If it calls * ->stop() that must deal with already being stopped without * PERF_EF_UPDATE. */ int (*add) (struct perf_event *event, int flags); void (*del) (struct perf_event *event, int flags); /* * Starts/Stops a counter present on the PMU. * * The PMI handler should stop the counter when perf_event_overflow() * returns !0. ->start() will be used to continue. * * Also used to change the sample period. * * Called with IRQs disabled and the PMU disabled on the CPU the event * is on -- will be called from NMI context with the PMU generates * NMIs. * * ->stop() with PERF_EF_UPDATE will read the counter and update * period/count values like ->read() would. * * ->start() with PERF_EF_RELOAD will reprogram the counter * value, must be preceded by a ->stop() with PERF_EF_UPDATE. * * ->stop() with PERF_EF_PAUSE will stop as simply as possible. Will not * overlap another ->stop() with PERF_EF_PAUSE nor ->start() with * PERF_EF_RESUME. * * ->start() with PERF_EF_RESUME will start as simply as possible but * only if the counter is not otherwise stopped. Will not overlap * another ->start() with PERF_EF_RESUME nor ->stop() with * PERF_EF_PAUSE. * * Notably, PERF_EF_PAUSE/PERF_EF_RESUME *can* be concurrent with other * ->stop()/->start() invocations, just not itself. */ void (*start) (struct perf_event *event, int flags); void (*stop) (struct perf_event *event, int flags); /* * Updates the counter value of the event. * * For sampling capable PMUs this will also update the software period * hw_perf_event::period_left field. */ void (*read) (struct perf_event *event); /* * Group events scheduling is treated as a transaction, add * group events as a whole and perform one schedulability test. * If the test fails, roll back the whole group * * Start the transaction, after this ->add() doesn't need to * do schedulability tests. * * Optional. */ void (*start_txn) (struct pmu *pmu, unsigned int txn_flags); /* * If ->start_txn() disabled the ->add() schedulability test * then ->commit_txn() is required to perform one. On success * the transaction is closed. On error the transaction is kept * open until ->cancel_txn() is called. * * Optional. */ int (*commit_txn) (struct pmu *pmu); /* * Will cancel the transaction, assumes ->del() is called * for each successful ->add() during the transaction. * * Optional. */ void (*cancel_txn) (struct pmu *pmu); /* * Will return the value for perf_event_mmap_page::index for this event, * if no implementation is provided it will default to 0 (see * perf_event_idx_default). */ int (*event_idx) (struct perf_event *event); /*optional */ /* * context-switches callback */ void (*sched_task) (struct perf_event_pmu_context *pmu_ctx, struct task_struct *task, bool sched_in); /* * Kmem cache of PMU specific data */ struct kmem_cache *task_ctx_cache; /* * Set up pmu-private data structures for an AUX area */ void *(*setup_aux) (struct perf_event *event, void **pages, int nr_pages, bool overwrite); /* optional */ /* * Free pmu-private AUX data structures */ void (*free_aux) (void *aux); /* optional */ /* * Take a snapshot of the AUX buffer without touching the event * state, so that preempting ->start()/->stop() callbacks does * not interfere with their logic. Called in PMI context. * * Returns the size of AUX data copied to the output handle. 
* * Optional. */ long (*snapshot_aux) (struct perf_event *event, struct perf_output_handle *handle, unsigned long size); /* * Validate address range filters: make sure the HW supports the * requested configuration and number of filters; return 0 if the * supplied filters are valid, -errno otherwise. * * Runs in the context of the ioctl()ing process and is not serialized * with the rest of the PMU callbacks. */ int (*addr_filters_validate) (struct list_head *filters); /* optional */ /* * Synchronize address range filter configuration: * translate hw-agnostic filters into hardware configuration in * event::hw::addr_filters. * * Runs as a part of filter sync sequence that is done in ->start() * callback by calling perf_event_addr_filters_sync(). * * May (and should) traverse event::addr_filters::list, for which its * caller provides necessary serialization. */ void (*addr_filters_sync) (struct perf_event *event); /* optional */ /* * Check if event can be used for aux_output purposes for * events of this PMU. * * Runs from perf_event_open(). Should return 0 for "no match" * or non-zero for "match". */ int (*aux_output_match) (struct perf_event *event); /* optional */ /* * Skip programming this PMU on the given CPU. Typically needed for * big.LITTLE things. */ bool (*filter) (struct pmu *pmu, int cpu); /* optional */ /* * Check period value for PERF_EVENT_IOC_PERIOD ioctl. */ int (*check_period) (struct perf_event *event, u64 value); /* optional */ }; enum perf_addr_filter_action_t { PERF_ADDR_FILTER_ACTION_STOP = 0, PERF_ADDR_FILTER_ACTION_START, PERF_ADDR_FILTER_ACTION_FILTER, }; /** * struct perf_addr_filter - address range filter definition * @entry: event's filter list linkage * @path: object file's path for file-based filters * @offset: filter range offset * @size: filter range size (size==0 means single address trigger) * @action: filter/start/stop * * This is a hardware-agnostic filter configuration as specified by the user. */ struct perf_addr_filter { struct list_head entry; struct path path; unsigned long offset; unsigned long size; enum perf_addr_filter_action_t action; }; /** * struct perf_addr_filters_head - container for address range filters * @list: list of filters for this event * @lock: spinlock that serializes accesses to the @list and event's * (and its children's) filter generations. * @nr_file_filters: number of file-based filters * * A child event will use parent's @list (and therefore @lock), so they are * bundled together; see perf_event_addr_filters(). */ struct perf_addr_filters_head { struct list_head list; raw_spinlock_t lock; unsigned int nr_file_filters; }; struct perf_addr_filter_range { unsigned long start; unsigned long size; }; /** * enum perf_event_state - the states of an event: */ enum perf_event_state { PERF_EVENT_STATE_DEAD = -5, PERF_EVENT_STATE_REVOKED = -4, /* pmu gone, must not touch */ PERF_EVENT_STATE_EXIT = -3, /* task died, still inherit */ PERF_EVENT_STATE_ERROR = -2, /* scheduling error, can enable */ PERF_EVENT_STATE_OFF = -1, PERF_EVENT_STATE_INACTIVE = 0, PERF_EVENT_STATE_ACTIVE = 1, }; struct file; struct perf_sample_data; typedef void (*perf_overflow_handler_t)(struct perf_event *, struct perf_sample_data *, struct pt_regs *regs); /* * Event capabilities. For event_caps and groups caps. * * PERF_EV_CAP_SOFTWARE: Is a software event. * PERF_EV_CAP_READ_ACTIVE_PKG: A CPU event (or cgroup event) that can be read * from any CPU in the package where it is active. 
* PERF_EV_CAP_SIBLING: An event with this flag must be a group sibling and * cannot be a group leader. If an event with this flag is detached from the * group it is scheduled out and moved into an unrecoverable ERROR state. * PERF_EV_CAP_READ_SCOPE: A CPU event that can be read from any CPU of the * PMU scope where it is active. */ #define PERF_EV_CAP_SOFTWARE BIT(0) #define PERF_EV_CAP_READ_ACTIVE_PKG BIT(1) #define PERF_EV_CAP_SIBLING BIT(2) #define PERF_EV_CAP_READ_SCOPE BIT(3) #define SWEVENT_HLIST_BITS 8 #define SWEVENT_HLIST_SIZE (1 << SWEVENT_HLIST_BITS) struct swevent_hlist { struct hlist_head heads[SWEVENT_HLIST_SIZE]; struct rcu_head rcu_head; }; #define PERF_ATTACH_CONTEXT 0x0001 #define PERF_ATTACH_GROUP 0x0002 #define PERF_ATTACH_TASK 0x0004 #define PERF_ATTACH_TASK_DATA 0x0008 #define PERF_ATTACH_GLOBAL_DATA 0x0010 #define PERF_ATTACH_SCHED_CB 0x0020 #define PERF_ATTACH_CHILD 0x0040 #define PERF_ATTACH_EXCLUSIVE 0x0080 #define PERF_ATTACH_CALLCHAIN 0x0100 #define PERF_ATTACH_ITRACE 0x0200 struct bpf_prog; struct perf_cgroup; struct perf_buffer; struct pmu_event_list { raw_spinlock_t lock; struct list_head list; }; /* * event->sibling_list is modified whole holding both ctx->lock and ctx->mutex * as such iteration must hold either lock. However, since ctx->lock is an IRQ * safe lock, and is only held by the CPU doing the modification, having IRQs * disabled is sufficient since it will hold-off the IPIs. */ #ifdef CONFIG_PROVE_LOCKING # define lockdep_assert_event_ctx(event) \ WARN_ON_ONCE(__lockdep_enabled && \ (this_cpu_read(hardirqs_enabled) && \ lockdep_is_held(&(event)->ctx->mutex) != LOCK_STATE_HELD)) #else # define lockdep_assert_event_ctx(event) #endif #define for_each_sibling_event(sibling, event) \ lockdep_assert_event_ctx(event); \ if ((event)->group_leader == (event)) \ list_for_each_entry((sibling), &(event)->sibling_list, sibling_list) /** * struct perf_event - performance event kernel representation: */ struct perf_event { #ifdef CONFIG_PERF_EVENTS /* * entry onto perf_event_context::event_list; * modifications require ctx->lock * RCU safe iterations. */ struct list_head event_entry; /* * Locked for modification by both ctx->mutex and ctx->lock; holding * either sufficies for read. */ struct list_head sibling_list; struct list_head active_list; /* * Node on the pinned or flexible tree located at the event context; */ struct rb_node group_node; u64 group_index; /* * We need storage to track the entries in perf_pmu_migrate_context; we * cannot use the event_entry because of RCU and we want to keep the * group in tact which avoids us using the other two entries. */ struct list_head migrate_entry; struct hlist_node hlist_entry; struct list_head active_entry; int nr_siblings; /* Not serialized. Only written during event initialization. */ int event_caps; /* The cumulative AND of all event_caps for events in this group. */ int group_caps; unsigned int group_generation; struct perf_event *group_leader; /* * event->pmu will always point to pmu in which this event belongs. * Whereas event->pmu_ctx->pmu may point to other pmu when group of * different pmu events is created. */ struct pmu *pmu; void *pmu_private; enum perf_event_state state; unsigned int attach_state; local64_t count; atomic64_t child_count; /* * These are the total time in nanoseconds that the event * has been enabled (i.e. eligible to run, and the task has * been scheduled in, if this is a per-task event) * and running (scheduled onto the CPU), respectively. 
*/ u64 total_time_enabled; u64 total_time_running; u64 tstamp; struct perf_event_attr attr; u16 header_size; u16 id_header_size; u16 read_size; struct hw_perf_event hw; struct perf_event_context *ctx; /* * event->pmu_ctx points to perf_event_pmu_context in which the event * is added. This pmu_ctx can be of other pmu for sw event when that * sw event is part of a group which also contains non-sw events. */ struct perf_event_pmu_context *pmu_ctx; atomic_long_t refcount; /* * These accumulate total time (in nanoseconds) that children * events have been enabled and running, respectively. */ atomic64_t child_total_time_enabled; atomic64_t child_total_time_running; /* * Protect attach/detach and child_list: */ struct mutex child_mutex; struct list_head child_list; struct perf_event *parent; int oncpu; int cpu; struct list_head owner_entry; struct task_struct *owner; /* mmap bits */ struct mutex mmap_mutex; atomic_t mmap_count; struct perf_buffer *rb; struct list_head rb_entry; unsigned long rcu_batches; int rcu_pending; /* poll related */ wait_queue_head_t waitq; struct fasync_struct *fasync; /* delayed work for NMIs and such */ unsigned int pending_wakeup; unsigned int pending_kill; unsigned int pending_disable; unsigned long pending_addr; /* SIGTRAP */ struct irq_work pending_irq; struct irq_work pending_disable_irq; struct callback_head pending_task; unsigned int pending_work; atomic_t event_limit; /* address range filters */ struct perf_addr_filters_head addr_filters; /* vma address array for file-based filders */ struct perf_addr_filter_range *addr_filter_ranges; unsigned long addr_filters_gen; /* for aux_output events */ struct perf_event *aux_event; void (*destroy)(struct perf_event *); struct rcu_head rcu_head; struct pid_namespace *ns; u64 id; atomic64_t lost_samples; u64 (*clock)(void); perf_overflow_handler_t overflow_handler; void *overflow_handler_context; struct bpf_prog *prog; u64 bpf_cookie; #ifdef CONFIG_EVENT_TRACING struct trace_event_call *tp_event; struct event_filter *filter; # ifdef CONFIG_FUNCTION_TRACER struct ftrace_ops ftrace_ops; # endif #endif #ifdef CONFIG_CGROUP_PERF struct perf_cgroup *cgrp; /* cgroup event is attach to */ #endif #ifdef CONFIG_SECURITY void *security; #endif struct list_head sb_list; struct list_head pmu_list; /* * Certain events gets forwarded to another pmu internally by over- * writing kernel copy of event->attr.type without user being aware * of it. event->orig_type contains original 'type' requested by * user. */ u32 orig_type; #endif /* CONFIG_PERF_EVENTS */ }; /* * ,-----------------------[1:n]------------------------. * V V * perf_event_context <-[1:n]-> perf_event_pmu_context <-[1:n]- perf_event * | | * `--[n:1]-> pmu <-[1:n]--' * * * struct perf_event_pmu_context lifetime is refcount based and RCU freed * (similar to perf_event_context). Locking is as if it were a member of * perf_event_context; specifically: * * modification, both: ctx->mutex && ctx->lock * reading, either: ctx->mutex || ctx->lock * * There is one exception to this; namely put_pmu_ctx() isn't always called * with ctx->mutex held; this means that as long as we can guarantee the epc * has events the above rules hold. * * Specificially, sys_perf_event_open()'s group_leader case depends on * ctx->mutex pinning the configuration. Since we hold a reference on * group_leader (through the filedesc) it can't go away, therefore it's * associated pmu_ctx must exist and cannot change due to ctx->mutex. 
* * perf_event holds a refcount on perf_event_context * perf_event holds a refcount on perf_event_pmu_context */ struct perf_event_pmu_context { struct pmu *pmu; struct perf_event_context *ctx; struct list_head pmu_ctx_entry; struct list_head pinned_active; struct list_head flexible_active; /* Used to identify the per-cpu perf_event_pmu_context */ unsigned int embedded : 1; unsigned int nr_events; unsigned int nr_cgroups; unsigned int nr_freq; atomic_t refcount; /* event <-> epc */ struct rcu_head rcu_head; /* * Set when one or more (plausibly active) event can't be scheduled * due to pmu overcommit or pmu constraints, except tolerant to * events not necessary to be active due to scheduling constraints, * such as cgroups. */ int rotate_necessary; }; static inline bool perf_pmu_ctx_is_active(struct perf_event_pmu_context *epc) { return !list_empty(&epc->flexible_active) || !list_empty(&epc->pinned_active); } struct perf_event_groups { struct rb_root tree; u64 index; }; /** * struct perf_event_context - event context structure * * Used as a container for task events and CPU events as well: */ struct perf_event_context { /* * Protect the states of the events in the list, * nr_active, and the list: */ raw_spinlock_t lock; /* * Protect the list of events. Locking either mutex or lock * is sufficient to ensure the list doesn't change; to change * the list you need to lock both the mutex and the spinlock. */ struct mutex mutex; struct list_head pmu_ctx_list; struct perf_event_groups pinned_groups; struct perf_event_groups flexible_groups; struct list_head event_list; int nr_events; int nr_user; int is_active; int nr_stat; int nr_freq; int rotate_disable; refcount_t refcount; /* event <-> ctx */ struct task_struct *task; /* * Context clock, runs when context enabled. */ u64 time; u64 timestamp; u64 timeoffset; /* * These fields let us detect when two contexts have both * been cloned (inherited) from a common ancestor. */ struct perf_event_context *parent_ctx; u64 parent_gen; u64 generation; int pin_count; #ifdef CONFIG_CGROUP_PERF int nr_cgroups; /* cgroup evts */ #endif struct rcu_head rcu_head; /* * The count of events for which using the switch-out fast path * should be avoided. * * Sum (event->pending_work + events with * (attr->inherit && (attr->sample_type & PERF_SAMPLE_READ))) * * The SIGTRAP is targeted at ctx->task, as such it won't do changing * that until the signal is delivered. */ local_t nr_no_switch_fast; }; /** * struct perf_ctx_data - PMU specific data for a task * @rcu_head: To avoid the race on free PMU specific data * @refcount: To track users * @global: To track system-wide users * @ctx_cache: Kmem cache of PMU specific data * @data: PMU specific data * * Currently, the struct is only used in Intel LBR call stack mode to * save/restore the call stack of a task on context switches. * * The rcu_head is used to prevent the race on free the data. * The data only be allocated when Intel LBR call stack mode is enabled. * The data will be freed when the mode is disabled. * The content of the data will only be accessed in context switch, which * should be protected by rcu_read_lock(). * * Because of the alignment requirement of Intel Arch LBR, the Kmem cache * is used to allocate the PMU specific data. The ctx_cache is to track * the Kmem cache. * * Careful: Struct perf_ctx_data is added as a pointer in struct task_struct. * When system-wide Intel LBR call stack mode is enabled, a buffer with * constant size will be allocated for each task. 
* Also, system memory consumption can further grow when the size of * struct perf_ctx_data enlarges. */ struct perf_ctx_data { struct rcu_head rcu_head; refcount_t refcount; int global; struct kmem_cache *ctx_cache; void *data; }; struct perf_cpu_pmu_context { struct perf_event_pmu_context epc; struct perf_event_pmu_context *task_epc; struct list_head sched_cb_entry; int sched_cb_usage; int active_oncpu; int exclusive; int pmu_disable_count; raw_spinlock_t hrtimer_lock; struct hrtimer hrtimer; ktime_t hrtimer_interval; unsigned int hrtimer_active; }; /** * struct perf_event_cpu_context - per cpu event context structure */ struct perf_cpu_context { struct perf_event_context ctx; struct perf_event_context *task_ctx; int online; #ifdef CONFIG_CGROUP_PERF struct perf_cgroup *cgrp; #endif /* * Per-CPU storage for iterators used in visit_groups_merge. The default * storage is of size 2 to hold the CPU and any CPU event iterators. */ int heap_size; struct perf_event **heap; struct perf_event *heap_default[2]; }; struct perf_output_handle { struct perf_event *event; struct perf_buffer *rb; unsigned long wakeup; unsigned long size; union { u64 flags; /* perf_output*() */ u64 aux_flags; /* perf_aux_output*() */ struct { u64 skip_read : 1; }; }; union { void *addr; unsigned long head; }; int page; }; struct bpf_perf_event_data_kern { bpf_user_pt_regs_t *regs; struct perf_sample_data *data; struct perf_event *event; }; #ifdef CONFIG_CGROUP_PERF /* * perf_cgroup_info keeps track of time_enabled for a cgroup. * This is a per-cpu dynamically allocated data structure. */ struct perf_cgroup_info { u64 time; u64 timestamp; u64 timeoffset; int active; }; struct perf_cgroup { struct cgroup_subsys_state css; struct perf_cgroup_info __percpu *info; }; /* * Must ensure cgroup is pinned (css_get) before calling * this function. In other words, we cannot call this function * if there is no cgroup event for the current CPU context. */ static inline struct perf_cgroup * perf_cgroup_from_task(struct task_struct *task, struct perf_event_context *ctx) { return container_of(task_css_check(task, perf_event_cgrp_id, ctx ? 
lockdep_is_held(&ctx->lock) : true), struct perf_cgroup, css); } #endif /* CONFIG_CGROUP_PERF */ #ifdef CONFIG_PERF_EVENTS extern struct perf_event_context *perf_cpu_task_ctx(void); extern void *perf_aux_output_begin(struct perf_output_handle *handle, struct perf_event *event); extern void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size); extern int perf_aux_output_skip(struct perf_output_handle *handle, unsigned long size); extern void *perf_get_aux(struct perf_output_handle *handle); extern void perf_aux_output_flag(struct perf_output_handle *handle, u64 flags); extern void perf_event_itrace_started(struct perf_event *event); extern int perf_pmu_register(struct pmu *pmu, const char *name, int type); extern int perf_pmu_unregister(struct pmu *pmu); extern void __perf_event_task_sched_in(struct task_struct *prev, struct task_struct *task); extern void __perf_event_task_sched_out(struct task_struct *prev, struct task_struct *next); extern int perf_event_init_task(struct task_struct *child, u64 clone_flags); extern void perf_event_exit_task(struct task_struct *child); extern void perf_event_free_task(struct task_struct *task); extern void perf_event_delayed_put(struct task_struct *task); extern struct file *perf_event_get(unsigned int fd); extern const struct perf_event *perf_get_event(struct file *file); extern const struct perf_event_attr *perf_event_attrs(struct perf_event *event); extern void perf_event_print_debug(void); extern void perf_pmu_disable(struct pmu *pmu); extern void perf_pmu_enable(struct pmu *pmu); extern void perf_sched_cb_dec(struct pmu *pmu); extern void perf_sched_cb_inc(struct pmu *pmu); extern int perf_event_task_disable(void); extern int perf_event_task_enable(void); extern void perf_pmu_resched(struct pmu *pmu); extern int perf_event_refresh(struct perf_event *event, int refresh); extern void perf_event_update_userpage(struct perf_event *event); extern int perf_event_release_kernel(struct perf_event *event); extern struct perf_event * perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu, struct task_struct *task, perf_overflow_handler_t callback, void *context); extern void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu); extern int perf_event_read_local(struct perf_event *event, u64 *value, u64 *enabled, u64 *running); extern u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running); extern struct perf_callchain_entry *perf_callchain(struct perf_event *event, struct pt_regs *regs); static inline bool branch_sample_no_flags(const struct perf_event *event) { return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_NO_FLAGS; } static inline bool branch_sample_no_cycles(const struct perf_event *event) { return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_NO_CYCLES; } static inline bool branch_sample_type(const struct perf_event *event) { return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_TYPE_SAVE; } static inline bool branch_sample_hw_index(const struct perf_event *event) { return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_HW_INDEX; } static inline bool branch_sample_priv(const struct perf_event *event) { return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_PRIV_SAVE; } static inline bool branch_sample_counters(const struct perf_event *event) { return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_COUNTERS; } static inline bool branch_sample_call_stack(const struct perf_event *event) { return event->attr.branch_sample_type & 
PERF_SAMPLE_BRANCH_CALL_STACK; } struct perf_sample_data { /* * Fields set by perf_sample_data_init() unconditionally, * group so as to minimize the cachelines touched. */ u64 sample_flags; u64 period; u64 dyn_size; /* * Fields commonly set by __perf_event_header__init_id(), * group so as to minimize the cachelines touched. */ u64 type; struct { u32 pid; u32 tid; } tid_entry; u64 time; u64 id; struct { u32 cpu; u32 reserved; } cpu_entry; /* * The other fields, optionally {set,used} by * perf_{prepare,output}_sample(). */ u64 ip; struct perf_callchain_entry *callchain; struct perf_raw_record *raw; struct perf_branch_stack *br_stack; u64 *br_stack_cntr; union perf_sample_weight weight; union perf_mem_data_src data_src; u64 txn; struct perf_regs regs_user; struct perf_regs regs_intr; u64 stack_user_size; u64 stream_id; u64 cgroup; u64 addr; u64 phys_addr; u64 data_page_size; u64 code_page_size; u64 aux_size; } ____cacheline_aligned; /* default value for data source */ #define PERF_MEM_NA (PERF_MEM_S(OP, NA) |\ PERF_MEM_S(LVL, NA) |\ PERF_MEM_S(SNOOP, NA) |\ PERF_MEM_S(LOCK, NA) |\ PERF_MEM_S(TLB, NA) |\ PERF_MEM_S(LVLNUM, NA)) static inline void perf_sample_data_init(struct perf_sample_data *data, u64 addr, u64 period) { /* remaining struct members initialized in perf_prepare_sample() */ data->sample_flags = PERF_SAMPLE_PERIOD; data->period = period; data->dyn_size = 0; if (addr) { data->addr = addr; data->sample_flags |= PERF_SAMPLE_ADDR; } } static inline void perf_sample_save_callchain(struct perf_sample_data *data, struct perf_event *event, struct pt_regs *regs) { int size = 1; if (!(event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)) return; if (WARN_ON_ONCE(data->sample_flags & PERF_SAMPLE_CALLCHAIN)) return; data->callchain = perf_callchain(event, regs); size += data->callchain->nr; data->dyn_size += size * sizeof(u64); data->sample_flags |= PERF_SAMPLE_CALLCHAIN; } static inline void perf_sample_save_raw_data(struct perf_sample_data *data, struct perf_event *event, struct perf_raw_record *raw) { struct perf_raw_frag *frag = &raw->frag; u32 sum = 0; int size; if (!(event->attr.sample_type & PERF_SAMPLE_RAW)) return; if (WARN_ON_ONCE(data->sample_flags & PERF_SAMPLE_RAW)) return; do { sum += frag->size; if (perf_raw_frag_last(frag)) break; frag = frag->next; } while (1); size = round_up(sum + sizeof(u32), sizeof(u64)); raw->size = size - sizeof(u32); frag->pad = raw->size - sum; data->raw = raw; data->dyn_size += size; data->sample_flags |= PERF_SAMPLE_RAW; } static inline bool has_branch_stack(struct perf_event *event) { return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK; } static inline void perf_sample_save_brstack(struct perf_sample_data *data, struct perf_event *event, struct perf_branch_stack *brs, u64 *brs_cntr) { int size = sizeof(u64); /* nr */ if (!has_branch_stack(event)) return; if (WARN_ON_ONCE(data->sample_flags & PERF_SAMPLE_BRANCH_STACK)) return; if (branch_sample_hw_index(event)) size += sizeof(u64); brs->nr = min_t(u16, event->attr.sample_max_stack, brs->nr); size += brs->nr * sizeof(struct perf_branch_entry); /* * The extension space for counters is appended after the * struct perf_branch_stack. It is used to store the occurrences * of events of each branch. 
*/ if (brs_cntr) size += brs->nr * sizeof(u64); data->br_stack = brs; data->br_stack_cntr = brs_cntr; data->dyn_size += size; data->sample_flags |= PERF_SAMPLE_BRANCH_STACK; } static inline u32 perf_sample_data_size(struct perf_sample_data *data, struct perf_event *event) { u32 size = sizeof(struct perf_event_header); size += event->header_size + event->id_header_size; size += data->dyn_size; return size; } /* * Clear all bitfields in the perf_branch_entry. * The to and from fields are not cleared because they are * systematically modified by caller. */ static inline void perf_clear_branch_entry_bitfields(struct perf_branch_entry *br) { br->mispred = 0; br->predicted = 0; br->in_tx = 0; br->abort = 0; br->cycles = 0; br->type = 0; br->spec = PERF_BR_SPEC_NA; br->reserved = 0; } extern void perf_output_sample(struct perf_output_handle *handle, struct perf_event_header *header, struct perf_sample_data *data, struct perf_event *event); extern void perf_prepare_sample(struct perf_sample_data *data, struct perf_event *event, struct pt_regs *regs); extern void perf_prepare_header(struct perf_event_header *header, struct perf_sample_data *data, struct perf_event *event, struct pt_regs *regs); extern int perf_event_overflow(struct perf_event *event, struct perf_sample_data *data, struct pt_regs *regs); extern void perf_event_output_forward(struct perf_event *event, struct perf_sample_data *data, struct pt_regs *regs); extern void perf_event_output_backward(struct perf_event *event, struct perf_sample_data *data, struct pt_regs *regs); extern int perf_event_output(struct perf_event *event, struct perf_sample_data *data, struct pt_regs *regs); static inline bool is_default_overflow_handler(struct perf_event *event) { perf_overflow_handler_t overflow_handler = event->overflow_handler; if (likely(overflow_handler == perf_event_output_forward)) return true; if (unlikely(overflow_handler == perf_event_output_backward)) return true; return false; } extern void perf_event_header__init_id(struct perf_event_header *header, struct perf_sample_data *data, struct perf_event *event); extern void perf_event__output_id_sample(struct perf_event *event, struct perf_output_handle *handle, struct perf_sample_data *sample); extern void perf_log_lost_samples(struct perf_event *event, u64 lost); static inline bool event_has_any_exclude_flag(struct perf_event *event) { struct perf_event_attr *attr = &event->attr; return attr->exclude_idle || attr->exclude_user || attr->exclude_kernel || attr->exclude_hv || attr->exclude_guest || attr->exclude_host; } static inline bool is_sampling_event(struct perf_event *event) { return event->attr.sample_period != 0; } /* * Return 1 for a software event, 0 for a hardware event */ static inline int is_software_event(struct perf_event *event) { return event->event_caps & PERF_EV_CAP_SOFTWARE; } /* * Return 1 for event in sw context, 0 for event in hw context */ static inline int in_software_context(struct perf_event *event) { return event->pmu_ctx->pmu->task_ctx_nr == perf_sw_context; } static inline int is_exclusive_pmu(struct pmu *pmu) { return pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE; } extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX]; extern void ___perf_sw_event(u32, u64, struct pt_regs *, u64); extern void __perf_sw_event(u32, u64, struct pt_regs *, u64); #ifndef perf_arch_fetch_caller_regs static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { } #endif /* * When generating a perf sample in-line, instead of from an interrupt / * 
exception, we lack a pt_regs. This is typically used from software events * like: SW_CONTEXT_SWITCHES, SW_MIGRATIONS and the tie-in with tracepoints. * * We typically don't need a full set, but (for x86) do require: * - ip for PERF_SAMPLE_IP * - cs for user_mode() tests * - sp for PERF_SAMPLE_CALLCHAIN * - eflags for MISC bits and CALLCHAIN (see: perf_hw_regs()) * * NOTE: assumes @regs is otherwise already 0 filled; this is important for * things like PERF_SAMPLE_REGS_INTR. */ static inline void perf_fetch_caller_regs(struct pt_regs *regs) { perf_arch_fetch_caller_regs(regs, CALLER_ADDR0); } static __always_inline void perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) { if (static_key_false(&perf_swevent_enabled[event_id])) __perf_sw_event(event_id, nr, regs, addr); } DECLARE_PER_CPU(struct pt_regs, __perf_regs[4]); /* * 'Special' version for the scheduler, it hard assumes no recursion, * which is guaranteed by us not actually scheduling inside other swevents * because those disable preemption. */ static __always_inline void __perf_sw_event_sched(u32 event_id, u64 nr, u64 addr) { struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]); perf_fetch_caller_regs(regs); ___perf_sw_event(event_id, nr, regs, addr); } extern struct static_key_false perf_sched_events; static __always_inline bool __perf_sw_enabled(int swevt) { return static_key_false(&perf_swevent_enabled[swevt]); } static inline void perf_event_task_migrate(struct task_struct *task) { if (__perf_sw_enabled(PERF_COUNT_SW_CPU_MIGRATIONS)) task->sched_migrated = 1; } static inline void perf_event_task_sched_in(struct task_struct *prev, struct task_struct *task) { if (static_branch_unlikely(&perf_sched_events)) __perf_event_task_sched_in(prev, task); if (__perf_sw_enabled(PERF_COUNT_SW_CPU_MIGRATIONS) && task->sched_migrated) { __perf_sw_event_sched(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 0); task->sched_migrated = 0; } } static inline void perf_event_task_sched_out(struct task_struct *prev, struct task_struct *next) { if (__perf_sw_enabled(PERF_COUNT_SW_CONTEXT_SWITCHES)) __perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0); #ifdef CONFIG_CGROUP_PERF if (__perf_sw_enabled(PERF_COUNT_SW_CGROUP_SWITCHES) && perf_cgroup_from_task(prev, NULL) != perf_cgroup_from_task(next, NULL)) __perf_sw_event_sched(PERF_COUNT_SW_CGROUP_SWITCHES, 1, 0); #endif if (static_branch_unlikely(&perf_sched_events)) __perf_event_task_sched_out(prev, next); } extern void perf_event_mmap(struct vm_area_struct *vma); extern void perf_event_ksymbol(u16 ksym_type, u64 addr, u32 len, bool unregister, const char *sym); extern void perf_event_bpf_event(struct bpf_prog *prog, enum perf_bpf_event_type type, u16 flags); #define PERF_GUEST_ACTIVE 0x01 #define PERF_GUEST_USER 0x02 struct perf_guest_info_callbacks { unsigned int (*state)(void); unsigned long (*get_ip)(void); unsigned int (*handle_intel_pt_intr)(void); }; #ifdef CONFIG_GUEST_PERF_EVENTS extern struct perf_guest_info_callbacks __rcu *perf_guest_cbs; DECLARE_STATIC_CALL(__perf_guest_state, *perf_guest_cbs->state); DECLARE_STATIC_CALL(__perf_guest_get_ip, *perf_guest_cbs->get_ip); DECLARE_STATIC_CALL(__perf_guest_handle_intel_pt_intr, *perf_guest_cbs->handle_intel_pt_intr); static inline unsigned int perf_guest_state(void) { return static_call(__perf_guest_state)(); } static inline unsigned long perf_guest_get_ip(void) { return static_call(__perf_guest_get_ip)(); } static inline unsigned int perf_guest_handle_intel_pt_intr(void) { return static_call(__perf_guest_handle_intel_pt_intr)(); } extern 
void perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs); extern void perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs); #else /* !CONFIG_GUEST_PERF_EVENTS: */ static inline unsigned int perf_guest_state(void) { return 0; } static inline unsigned long perf_guest_get_ip(void) { return 0; } static inline unsigned int perf_guest_handle_intel_pt_intr(void) { return 0; } #endif /* !CONFIG_GUEST_PERF_EVENTS */ extern void perf_event_exec(void); extern void perf_event_comm(struct task_struct *tsk, bool exec); extern void perf_event_namespaces(struct task_struct *tsk); extern void perf_event_fork(struct task_struct *tsk); extern void perf_event_text_poke(const void *addr, const void *old_bytes, size_t old_len, const void *new_bytes, size_t new_len); /* Callchains */ DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry); extern void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs); extern void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs); extern struct perf_callchain_entry * get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user, u32 max_stack, bool crosstask, bool add_mark); extern int get_callchain_buffers(int max_stack); extern void put_callchain_buffers(void); extern struct perf_callchain_entry *get_callchain_entry(int *rctx); extern void put_callchain_entry(int rctx); extern int sysctl_perf_event_max_stack; extern int sysctl_perf_event_max_contexts_per_stack; static inline int perf_callchain_store_context(struct perf_callchain_entry_ctx *ctx, u64 ip) { if (ctx->contexts < sysctl_perf_event_max_contexts_per_stack) { struct perf_callchain_entry *entry = ctx->entry; entry->ip[entry->nr++] = ip; ++ctx->contexts; return 0; } else { ctx->contexts_maxed = true; return -1; /* no more room, stop walking the stack */ } } static inline int perf_callchain_store(struct perf_callchain_entry_ctx *ctx, u64 ip) { if (ctx->nr < ctx->max_stack && !ctx->contexts_maxed) { struct perf_callchain_entry *entry = ctx->entry; entry->ip[entry->nr++] = ip; ++ctx->nr; return 0; } else { return -1; /* no more room, stop walking the stack */ } } extern int sysctl_perf_event_paranoid; extern int sysctl_perf_event_sample_rate; extern void perf_sample_event_took(u64 sample_len_ns); /* Access to perf_event_open(2) syscall. */ #define PERF_SECURITY_OPEN 0 /* Finer grained perf_event_open(2) access control. 
*/ #define PERF_SECURITY_CPU 1 #define PERF_SECURITY_KERNEL 2 #define PERF_SECURITY_TRACEPOINT 3 static inline int perf_is_paranoid(void) { return sysctl_perf_event_paranoid > -1; } extern int perf_allow_kernel(void); static inline int perf_allow_cpu(void) { if (sysctl_perf_event_paranoid > 0 && !perfmon_capable()) return -EACCES; return security_perf_event_open(PERF_SECURITY_CPU); } static inline int perf_allow_tracepoint(void) { if (sysctl_perf_event_paranoid > -1 && !perfmon_capable()) return -EPERM; return security_perf_event_open(PERF_SECURITY_TRACEPOINT); } extern int perf_exclude_event(struct perf_event *event, struct pt_regs *regs); extern void perf_event_init(void); extern void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size, struct pt_regs *regs, struct hlist_head *head, int rctx, struct task_struct *task); extern void perf_bp_event(struct perf_event *event, void *data); extern unsigned long perf_misc_flags(struct perf_event *event, struct pt_regs *regs); extern unsigned long perf_instruction_pointer(struct perf_event *event, struct pt_regs *regs); #ifndef perf_arch_misc_flags # define perf_arch_misc_flags(regs) \ (user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL) # define perf_arch_instruction_pointer(regs) instruction_pointer(regs) #endif #ifndef perf_arch_bpf_user_pt_regs # define perf_arch_bpf_user_pt_regs(regs) regs #endif #ifndef perf_arch_guest_misc_flags static inline unsigned long perf_arch_guest_misc_flags(struct pt_regs *regs) { unsigned long guest_state = perf_guest_state(); if (!(guest_state & PERF_GUEST_ACTIVE)) return 0; if (guest_state & PERF_GUEST_USER) return PERF_RECORD_MISC_GUEST_USER; else return PERF_RECORD_MISC_GUEST_KERNEL; } # define perf_arch_guest_misc_flags(regs) perf_arch_guest_misc_flags(regs) #endif static inline bool needs_branch_stack(struct perf_event *event) { return event->attr.branch_sample_type != 0; } static inline bool has_aux(struct perf_event *event) { return event->pmu && event->pmu->setup_aux; } static inline bool has_aux_action(struct perf_event *event) { return event->attr.aux_sample_size || event->attr.aux_pause || event->attr.aux_resume; } static inline bool is_write_backward(struct perf_event *event) { return !!event->attr.write_backward; } static inline bool has_addr_filter(struct perf_event *event) { return event->pmu->nr_addr_filters; } /* * An inherited event uses parent's filters */ static inline struct perf_addr_filters_head * perf_event_addr_filters(struct perf_event *event) { struct perf_addr_filters_head *ifh = &event->addr_filters; if (event->parent) ifh = &event->parent->addr_filters; return ifh; } static inline struct fasync_struct **perf_event_fasync(struct perf_event *event) { /* Only the parent has fasync state */ if (event->parent) event = event->parent; return &event->fasync; } extern void perf_event_addr_filters_sync(struct perf_event *event); extern void perf_report_aux_output_id(struct perf_event *event, u64 hw_id); extern int perf_output_begin(struct perf_output_handle *handle, struct perf_sample_data *data, struct perf_event *event, unsigned int size); extern int perf_output_begin_forward(struct perf_output_handle *handle, struct perf_sample_data *data, struct perf_event *event, unsigned int size); extern int perf_output_begin_backward(struct perf_output_handle *handle, struct perf_sample_data *data, struct perf_event *event, unsigned int size); extern void perf_output_end(struct perf_output_handle *handle); extern unsigned int perf_output_copy(struct perf_output_handle 
*handle, const void *buf, unsigned int len); extern unsigned int perf_output_skip(struct perf_output_handle *handle, unsigned int len); extern long perf_output_copy_aux(struct perf_output_handle *aux_handle, struct perf_output_handle *handle, unsigned long from, unsigned long to); extern int perf_swevent_get_recursion_context(void); extern void perf_swevent_put_recursion_context(int rctx); extern u64 perf_swevent_set_period(struct perf_event *event); extern void perf_event_enable(struct perf_event *event); extern void perf_event_disable(struct perf_event *event); extern void perf_event_disable_local(struct perf_event *event); extern void perf_event_disable_inatomic(struct perf_event *event); extern void perf_event_task_tick(void); extern int perf_event_account_interrupt(struct perf_event *event); extern int perf_event_period(struct perf_event *event, u64 value); extern u64 perf_event_pause(struct perf_event *event, bool reset); #else /* !CONFIG_PERF_EVENTS: */ static inline void * perf_aux_output_begin(struct perf_output_handle *handle, struct perf_event *event) { return NULL; } static inline void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size) { } static inline int perf_aux_output_skip(struct perf_output_handle *handle, unsigned long size) { return -EINVAL; } static inline void * perf_get_aux(struct perf_output_handle *handle) { return NULL; } static inline void perf_event_task_migrate(struct task_struct *task) { } static inline void perf_event_task_sched_in(struct task_struct *prev, struct task_struct *task) { } static inline void perf_event_task_sched_out(struct task_struct *prev, struct task_struct *next) { } static inline int perf_event_init_task(struct task_struct *child, u64 clone_flags) { return 0; } static inline void perf_event_exit_task(struct task_struct *child) { } static inline void perf_event_free_task(struct task_struct *task) { } static inline void perf_event_delayed_put(struct task_struct *task) { } static inline struct file *perf_event_get(unsigned int fd) { return ERR_PTR(-EINVAL); } static inline const struct perf_event *perf_get_event(struct file *file) { return ERR_PTR(-EINVAL); } static inline const struct perf_event_attr *perf_event_attrs(struct perf_event *event) { return ERR_PTR(-EINVAL); } static inline int perf_event_read_local(struct perf_event *event, u64 *value, u64 *enabled, u64 *running) { return -EINVAL; } static inline void perf_event_print_debug(void) { } static inline int perf_event_task_disable(void) { return -EINVAL; } static inline int perf_event_task_enable(void) { return -EINVAL; } static inline int perf_event_refresh(struct perf_event *event, int refresh) { return -EINVAL; } static inline void perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) { } static inline void perf_bp_event(struct perf_event *event, void *data) { } static inline void perf_event_mmap(struct vm_area_struct *vma) { } typedef int (perf_ksymbol_get_name_f)(char *name, int name_len, void *data); static inline void perf_event_ksymbol(u16 ksym_type, u64 addr, u32 len, bool unregister, const char *sym) { } static inline void perf_event_bpf_event(struct bpf_prog *prog, enum perf_bpf_event_type type, u16 flags) { } static inline void perf_event_exec(void) { } static inline void perf_event_comm(struct task_struct *tsk, bool exec) { } static inline void perf_event_namespaces(struct task_struct *tsk) { } static inline void perf_event_fork(struct task_struct *tsk) { } static inline void perf_event_text_poke(const void *addr, const void 
*old_bytes, size_t old_len, const void *new_bytes, size_t new_len) { } static inline void perf_event_init(void) { } static inline int perf_swevent_get_recursion_context(void) { return -1; } static inline void perf_swevent_put_recursion_context(int rctx) { } static inline u64 perf_swevent_set_period(struct perf_event *event) { return 0; } static inline void perf_event_enable(struct perf_event *event) { } static inline void perf_event_disable(struct perf_event *event) { } static inline int __perf_event_disable(void *info) { return -1; } static inline void perf_event_task_tick(void) { } static inline int perf_event_release_kernel(struct perf_event *event) { return 0; } static inline int perf_event_period(struct perf_event *event, u64 value) { return -EINVAL; } static inline u64 perf_event_pause(struct perf_event *event, bool reset) { return 0; } static inline int perf_exclude_event(struct perf_event *event, struct pt_regs *regs) { return 0; } #endif /* !CONFIG_PERF_EVENTS */ #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL) extern void perf_restore_debug_store(void); #else static inline void perf_restore_debug_store(void) { } #endif #define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x)) struct perf_pmu_events_attr { struct device_attribute attr; u64 id; const char *event_str; }; struct perf_pmu_events_ht_attr { struct device_attribute attr; u64 id; const char *event_str_ht; const char *event_str_noht; }; struct perf_pmu_events_hybrid_attr { struct device_attribute attr; u64 id; const char *event_str; u64 pmu_type; }; struct perf_pmu_format_hybrid_attr { struct device_attribute attr; u64 pmu_type; }; ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr, char *page); #define PMU_EVENT_ATTR(_name, _var, _id, _show) \ static struct perf_pmu_events_attr _var = { \ .attr = __ATTR(_name, 0444, _show, NULL), \ .id = _id, \ }; #define PMU_EVENT_ATTR_STRING(_name, _var, _str) \ static struct perf_pmu_events_attr _var = { \ .attr = __ATTR(_name, 0444, perf_event_sysfs_show, NULL), \ .id = 0, \ .event_str = _str, \ }; #define PMU_EVENT_ATTR_ID(_name, _show, _id) \ (&((struct perf_pmu_events_attr[]) { \ { .attr = __ATTR(_name, 0444, _show, NULL), \ .id = _id, } \ })[0].attr.attr) #define PMU_FORMAT_ATTR_SHOW(_name, _format) \ static ssize_t \ _name##_show(struct device *dev, \ struct device_attribute *attr, \ char *page) \ { \ BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \ return sprintf(page, _format "\n"); \ } \ #define PMU_FORMAT_ATTR(_name, _format) \ PMU_FORMAT_ATTR_SHOW(_name, _format) \ \ static struct device_attribute format_attr_##_name = __ATTR_RO(_name) /* Performance counter hotplug functions */ #ifdef CONFIG_PERF_EVENTS extern int perf_event_init_cpu(unsigned int cpu); extern int perf_event_exit_cpu(unsigned int cpu); #else # define perf_event_init_cpu NULL # define perf_event_exit_cpu NULL #endif extern void arch_perf_update_userpage(struct perf_event *event, struct perf_event_mmap_page *userpg, u64 now); /* * Snapshot branch stack on software events. * * Branch stack can be very useful in understanding software events. For * example, when a long function, e.g. sys_perf_event_open, returns an * errno, it is not obvious why the function failed. Branch stack could * provide very helpful information in this type of scenarios. * * On software event, it is necessary to stop the hardware branch recorder * fast. Otherwise, the hardware register/buffer will be flushed with * entries of the triggering event. 
Therefore, static call is used to * stop the hardware recorder. */ /* * cnt is the number of entries allocated for entries[]. * Returns the number of entries copied into entries[]. */ typedef int (perf_snapshot_branch_stack_t)(struct perf_branch_entry *entries, unsigned int cnt); DECLARE_STATIC_CALL(perf_snapshot_branch_stack, perf_snapshot_branch_stack_t); #ifndef PERF_NEEDS_LOPWR_CB static inline void perf_lopwr_cb(bool mode) { } #endif #endif /* _LINUX_PERF_EVENT_H */
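The struct pmu documentation above spells out the callback contract (event_init/add/del/start/stop/read plus the PERF_EF_* and PERF_HES_* flags). The following is a minimal sketch of a counting-only PMU that follows that contract; every "dummy_" identifier is hypothetical and the hardware access is elided, so this illustrates the interface rather than any existing driver.

#include <linux/perf_event.h>
#include <linux/bug.h>
#include <linux/errno.h>

static void dummy_read(struct perf_event *event)
{
	/*
	 * A real driver would read its hardware counter here, compute the
	 * delta against event->hw.prev_count and local64_add() it into
	 * event->count.
	 */
}

static void dummy_start(struct perf_event *event, int flags)
{
	/* PERF_EF_RELOAD is only valid after a ->stop() with PERF_EF_UPDATE. */
	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
	event->hw.state = 0;
}

static void dummy_stop(struct perf_event *event, int flags)
{
	if (event->hw.state & PERF_HES_STOPPED)
		return;
	event->hw.state = PERF_HES_STOPPED;
	if (flags & PERF_EF_UPDATE) {
		dummy_read(event);		/* fold in the final count */
		event->hw.state |= PERF_HES_UPTODATE;
	}
}

static int dummy_add(struct perf_event *event, int flags)
{
	event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		dummy_start(event, PERF_EF_RELOAD);
	return 0;
}

static void dummy_del(struct perf_event *event, int flags)
{
	/* ->del() must always stop with PERF_EF_UPDATE semantics. */
	dummy_stop(event, PERF_EF_UPDATE);
}

static int dummy_event_init(struct perf_event *event)
{
	/* Only claim events that were opened against this PMU's type. */
	if (event->attr.type != event->pmu->type)
		return -ENOENT;
	if (is_sampling_event(event))		/* counting only */
		return -EOPNOTSUPP;
	return 0;
}

static struct pmu dummy_pmu = {
	.task_ctx_nr	= perf_sw_context,
	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT | PERF_PMU_CAP_NO_NMI,
	.event_init	= dummy_event_init,
	.add		= dummy_add,
	.del		= dummy_del,
	.start		= dummy_start,
	.stop		= dummy_stop,
	.read		= dummy_read,
};

/* Registration would be: perf_pmu_register(&dummy_pmu, "dummy", -1); */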
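On the consumer side, the header also exports a kernel-internal counter API (perf_event_create_kernel_counter(), perf_event_read_value(), perf_event_release_kernel()). Below is a hedged sketch, assuming CONFIG_PERF_EVENTS, of counting CPU cycles on one CPU from kernel code; the example_* names are hypothetical and error handling is kept to a minimum.

#include <linux/perf_event.h>
#include <linux/err.h>
#include <linux/printk.h>

static struct perf_event *cycles_event;	/* hypothetical example state */

static int example_counter_start(void)
{
	struct perf_event_attr attr = {
		.type		= PERF_TYPE_HARDWARE,
		.size		= sizeof(attr),
		.config		= PERF_COUNT_HW_CPU_CYCLES,
		.pinned		= 1,
	};

	/* cpu == 0, task == NULL: a CPU-bound counter on CPU 0, no overflow handler. */
	cycles_event = perf_event_create_kernel_counter(&attr, 0, NULL,
							NULL, NULL);
	return PTR_ERR_OR_ZERO(cycles_event);
}

static void example_counter_report_and_stop(void)
{
	u64 enabled, running, count;

	count = perf_event_read_value(cycles_event, &enabled, &running);
	pr_info("cycles: %llu (enabled %llu ns, running %llu ns)\n",
		count, enabled, running);
	perf_event_release_kernel(cycles_event);
}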
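The sampling-side contract documented for pmu::start()/stop() ("the PMI handler should stop the counter when perf_event_overflow() returns !0") typically takes the shape sketched below inside a driver's interrupt handler. The handler name is an assumption; only perf_sample_data_init() and perf_event_overflow() come from the header above.

#include <linux/perf_event.h>

/* Hypothetical per-event overflow handling inside a PMU interrupt handler. */
static void hypothetical_handle_overflow(struct perf_event *event,
					 struct pt_regs *regs)
{
	struct perf_sample_data data;

	/* Start from the period the expired sample was programmed with. */
	perf_sample_data_init(&data, 0, event->hw.last_period);

	/*
	 * The sample is prepared and written out inside perf_event_overflow()
	 * via the event's overflow handler; a non-zero return means the event
	 * got throttled and must be stopped until ->start() resumes it.
	 */
	if (perf_event_overflow(event, &data, regs))
		event->pmu->stop(event, 0);
}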
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * linux/drivers/net/netconsole.c
 *
 * Copyright (C) 2001 Ingo Molnar <mingo@redhat.com>
 *
 * This file contains the implementation of an IRQ-safe, crash-safe
 * kernel console implementation that outputs kernel messages to the
 * network.
 *
 * Modification history:
 *
 * 2001-09-17 started by Ingo Molnar.
 * 2003-08-11 2.6 port by Matt Mackall
 *            simplified options
 *            generic card hooks
 *            works non-modular
 * 2003-09-07 rewritten with netpoll api
 */

/****************************************************************
 *
 ****************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/console.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/netpoll.h>
#include <linux/inet.h>
#include <linux/configfs.h>
#include <linux/etherdevice.h>
#include <linux/u64_stats_sync.h>
#include <linux/utsname.h>
#include <linux/rtnetlink.h>

MODULE_AUTHOR("Matt Mackall <mpm@selenic.com>");
MODULE_DESCRIPTION("Console driver for network interfaces");
MODULE_LICENSE("GPL");

#define MAX_PARAM_LENGTH	256
#define MAX_EXTRADATA_ENTRY_LEN	256
#define MAX_EXTRADATA_VALUE_LEN	200
/* The number 3 comes from userdata entry format characters (' ', '=', '\n') */
#define MAX_EXTRADATA_NAME_LEN	(MAX_EXTRADATA_ENTRY_LEN - \
				 MAX_EXTRADATA_VALUE_LEN - 3)
#define MAX_EXTRADATA_ITEMS	16
#define MAX_PRINT_CHUNK		1000

static char config[MAX_PARAM_LENGTH];
module_param_string(netconsole, config, MAX_PARAM_LENGTH, 0);
MODULE_PARM_DESC(netconsole, " netconsole=[src-port]@[src-ip]/[dev],[tgt-port]@<tgt-ip>/[tgt-macaddr]");

static bool oops_only;
module_param(oops_only, bool, 0600);
MODULE_PARM_DESC(oops_only, "Only log oops messages");

#define NETCONSOLE_PARAM_TARGET_PREFIX "cmdline"

#ifndef MODULE
static int __init option_setup(char *opt)
{
	strscpy(config, opt, MAX_PARAM_LENGTH);
	return 1;
}
__setup("netconsole=", option_setup);
#endif	/* MODULE */

/* Linked list of all configured targets */
static LIST_HEAD(target_list);
/* target_cleanup_list is used to track targets that need to be cleaned outside
 * of target_list_lock. It should be cleaned in the same function it is
 * populated.
 */
static LIST_HEAD(target_cleanup_list);

/* This needs to be a spinlock because write_msg() cannot sleep */
static DEFINE_SPINLOCK(target_list_lock);
/* This needs to be a mutex because netpoll_cleanup might sleep */
static DEFINE_MUTEX(target_cleanup_list_lock);

/*
 * Console driver for extended netconsoles. Registered on the first use to
 * avoid unnecessarily enabling ext message formatting.
*/ static struct console netconsole_ext; struct netconsole_target_stats { u64_stats_t xmit_drop_count; u64_stats_t enomem_count; struct u64_stats_sync syncp; }; /* Features enabled in sysdata. Contrary to userdata, this data is populated by * the kernel. The fields are designed as bitwise flags, allowing multiple * features to be set in sysdata_fields. */ enum sysdata_feature { /* Populate the CPU that sends the message */ SYSDATA_CPU_NR = BIT(0), /* Populate the task name (as in current->comm) in sysdata */ SYSDATA_TASKNAME = BIT(1), /* Kernel release/version as part of sysdata */ SYSDATA_RELEASE = BIT(2), }; /** * struct netconsole_target - Represents a configured netconsole target. * @list: Links this target into the target_list. * @group: Links us into the configfs subsystem hierarchy. * @userdata_group: Links to the userdata configfs hierarchy * @extradata_complete: Cached, formatted string of append * @userdata_length: String length of usedata in extradata_complete. * @sysdata_fields: Sysdata features enabled. * @stats: Packet send stats for the target. Used for debugging. * @enabled: On / off knob to enable / disable target. * Visible from userspace (read-write). * We maintain a strict 1:1 correspondence between this and * whether the corresponding netpoll is active or inactive. * Also, other parameters of a target may be modified at * runtime only when it is disabled (enabled == 0). * @extended: Denotes whether console is extended or not. * @release: Denotes whether kernel release version should be prepended * to the message. Depends on extended console. * @np: The netpoll structure for this target. * Contains the other userspace visible parameters: * dev_name (read-write) * local_port (read-write) * remote_port (read-write) * local_ip (read-write) * remote_ip (read-write) * local_mac (read-only) * remote_mac (read-write) * @buf: The buffer used to send the full msg to the network stack */ struct netconsole_target { struct list_head list; #ifdef CONFIG_NETCONSOLE_DYNAMIC struct config_group group; struct config_group userdata_group; char extradata_complete[MAX_EXTRADATA_ENTRY_LEN * MAX_EXTRADATA_ITEMS]; size_t userdata_length; /* bit-wise with sysdata_feature bits */ u32 sysdata_fields; #endif struct netconsole_target_stats stats; bool enabled; bool extended; bool release; struct netpoll np; /* protected by target_list_lock */ char buf[MAX_PRINT_CHUNK]; }; #ifdef CONFIG_NETCONSOLE_DYNAMIC static struct configfs_subsystem netconsole_subsys; static DEFINE_MUTEX(dynamic_netconsole_mutex); static int __init dynamic_netconsole_init(void) { config_group_init(&netconsole_subsys.su_group); mutex_init(&netconsole_subsys.su_mutex); return configfs_register_subsystem(&netconsole_subsys); } static void __exit dynamic_netconsole_exit(void) { configfs_unregister_subsystem(&netconsole_subsys); } /* * Targets that were created by parsing the boot/module option string * do not exist in the configfs hierarchy (and have NULL names) and will * never go away, so make these a no-op for them. 
*/ static void netconsole_target_get(struct netconsole_target *nt) { if (config_item_name(&nt->group.cg_item)) config_group_get(&nt->group); } static void netconsole_target_put(struct netconsole_target *nt) { if (config_item_name(&nt->group.cg_item)) config_group_put(&nt->group); } #else /* !CONFIG_NETCONSOLE_DYNAMIC */ static int __init dynamic_netconsole_init(void) { return 0; } static void __exit dynamic_netconsole_exit(void) { } /* * No danger of targets going away from under us when dynamic * reconfigurability is off. */ static void netconsole_target_get(struct netconsole_target *nt) { } static void netconsole_target_put(struct netconsole_target *nt) { } static void populate_configfs_item(struct netconsole_target *nt, int cmdline_count) { } #endif /* CONFIG_NETCONSOLE_DYNAMIC */ /* Allocate and initialize with defaults. * Note that these targets get their config_item fields zeroed-out. */ static struct netconsole_target *alloc_and_init(void) { struct netconsole_target *nt; nt = kzalloc(sizeof(*nt), GFP_KERNEL); if (!nt) return nt; if (IS_ENABLED(CONFIG_NETCONSOLE_EXTENDED_LOG)) nt->extended = true; if (IS_ENABLED(CONFIG_NETCONSOLE_PREPEND_RELEASE)) nt->release = true; nt->np.name = "netconsole"; strscpy(nt->np.dev_name, "eth0", IFNAMSIZ); nt->np.local_port = 6665; nt->np.remote_port = 6666; eth_broadcast_addr(nt->np.remote_mac); return nt; } /* Clean up every target in the cleanup_list and move the clean targets back to * the main target_list. */ static void netconsole_process_cleanups_core(void) { struct netconsole_target *nt, *tmp; unsigned long flags; /* The cleanup needs RTNL locked */ ASSERT_RTNL(); mutex_lock(&target_cleanup_list_lock); list_for_each_entry_safe(nt, tmp, &target_cleanup_list, list) { /* all entries in the cleanup_list needs to be disabled */ WARN_ON_ONCE(nt->enabled); do_netpoll_cleanup(&nt->np); /* moved the cleaned target to target_list. Need to hold both * locks */ spin_lock_irqsave(&target_list_lock, flags); list_move(&nt->list, &target_list); spin_unlock_irqrestore(&target_list_lock, flags); } WARN_ON_ONCE(!list_empty(&target_cleanup_list)); mutex_unlock(&target_cleanup_list_lock); } #ifdef CONFIG_NETCONSOLE_DYNAMIC /* * Our subsystem hierarchy is: * * /sys/kernel/config/netconsole/ * | * <target>/ * | enabled * | release * | dev_name * | local_port * | remote_port * | local_ip * | remote_ip * | local_mac * | remote_mac * | transmit_errors * | userdata/ * | <key>/ * | value * | ... * | * <target>/... */ static struct netconsole_target *to_target(struct config_item *item) { struct config_group *cfg_group; cfg_group = to_config_group(item); if (!cfg_group) return NULL; return container_of(to_config_group(item), struct netconsole_target, group); } /* Do the list cleanup with the rtnl lock hold. rtnl lock is necessary because * netdev might be cleaned-up by calling __netpoll_cleanup(), */ static void netconsole_process_cleanups(void) { /* rtnl lock is called here, because it has precedence over * target_cleanup_list_lock mutex and target_cleanup_list */ rtnl_lock(); netconsole_process_cleanups_core(); rtnl_unlock(); } /* Get rid of possible trailing newline, returning the new length */ static void trim_newline(char *s, size_t maxlen) { size_t len; len = strnlen(s, maxlen); if (s[len - 1] == '\n') s[len - 1] = '\0'; } /* * Attribute operations for netconsole_target. 
 */

static ssize_t enabled_show(struct config_item *item, char *buf)
{
	return sysfs_emit(buf, "%d\n", to_target(item)->enabled);
}

static ssize_t extended_show(struct config_item *item, char *buf)
{
	return sysfs_emit(buf, "%d\n", to_target(item)->extended);
}

static ssize_t release_show(struct config_item *item, char *buf)
{
	return sysfs_emit(buf, "%d\n", to_target(item)->release);
}

static ssize_t dev_name_show(struct config_item *item, char *buf)
{
	return sysfs_emit(buf, "%s\n", to_target(item)->np.dev_name);
}

static ssize_t local_port_show(struct config_item *item, char *buf)
{
	return sysfs_emit(buf, "%d\n", to_target(item)->np.local_port);
}

static ssize_t remote_port_show(struct config_item *item, char *buf)
{
	return sysfs_emit(buf, "%d\n", to_target(item)->np.remote_port);
}

static ssize_t local_ip_show(struct config_item *item, char *buf)
{
	struct netconsole_target *nt = to_target(item);

	if (nt->np.ipv6)
		return sysfs_emit(buf, "%pI6c\n", &nt->np.local_ip.in6);
	else
		return sysfs_emit(buf, "%pI4\n", &nt->np.local_ip);
}

static ssize_t remote_ip_show(struct config_item *item, char *buf)
{
	struct netconsole_target *nt = to_target(item);

	if (nt->np.ipv6)
		return sysfs_emit(buf, "%pI6c\n", &nt->np.remote_ip.in6);
	else
		return sysfs_emit(buf, "%pI4\n", &nt->np.remote_ip);
}

static ssize_t local_mac_show(struct config_item *item, char *buf)
{
	struct net_device *dev = to_target(item)->np.dev;
	static const u8 bcast[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	return sysfs_emit(buf, "%pM\n", dev ? dev->dev_addr : bcast);
}

static ssize_t remote_mac_show(struct config_item *item, char *buf)
{
	return sysfs_emit(buf, "%pM\n", to_target(item)->np.remote_mac);
}

static ssize_t transmit_errors_show(struct config_item *item, char *buf)
{
	struct netconsole_target *nt = to_target(item);
	u64 xmit_drop_count, enomem_count;
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&nt->stats.syncp);
		xmit_drop_count = u64_stats_read(&nt->stats.xmit_drop_count);
		enomem_count = u64_stats_read(&nt->stats.enomem_count);
	} while (u64_stats_fetch_retry(&nt->stats.syncp, start));

	return sysfs_emit(buf, "%llu\n", xmit_drop_count + enomem_count);
}

/* configfs helper to display if cpu_nr sysdata feature is enabled */
static ssize_t sysdata_cpu_nr_enabled_show(struct config_item *item, char *buf)
{
	struct netconsole_target *nt = to_target(item->ci_parent);
	bool cpu_nr_enabled;

	mutex_lock(&dynamic_netconsole_mutex);
	cpu_nr_enabled = !!(nt->sysdata_fields & SYSDATA_CPU_NR);
	mutex_unlock(&dynamic_netconsole_mutex);

	return sysfs_emit(buf, "%d\n", cpu_nr_enabled);
}

/* configfs helper to display if taskname sysdata feature is enabled */
static ssize_t sysdata_taskname_enabled_show(struct config_item *item, char *buf)
{
	struct netconsole_target *nt = to_target(item->ci_parent);
	bool taskname_enabled;

	mutex_lock(&dynamic_netconsole_mutex);
	taskname_enabled = !!(nt->sysdata_fields & SYSDATA_TASKNAME);
	mutex_unlock(&dynamic_netconsole_mutex);

	return sysfs_emit(buf, "%d\n", taskname_enabled);
}

/* configfs helper to display if release sysdata feature is enabled */
static ssize_t sysdata_release_enabled_show(struct config_item *item, char *buf)
{
	struct netconsole_target *nt = to_target(item->ci_parent);
	bool release_enabled;

	mutex_lock(&dynamic_netconsole_mutex);
	release_enabled = !!(nt->sysdata_fields & SYSDATA_RELEASE);
	mutex_unlock(&dynamic_netconsole_mutex);

	return sysfs_emit(buf, "%d\n", release_enabled);
}

/*
 * This one is special -- targets created through the configfs interface
 * are not enabled (and the corresponding netpoll activated) by default.
* The user is expected to set the desired parameters first (which * would enable him to dynamically add new netpoll targets for new * network interfaces as and when they come up). */ static ssize_t enabled_store(struct config_item *item, const char *buf, size_t count) { struct netconsole_target *nt = to_target(item); unsigned long flags; bool enabled; ssize_t ret; mutex_lock(&dynamic_netconsole_mutex); ret = kstrtobool(buf, &enabled); if (ret) goto out_unlock; ret = -EINVAL; if (enabled == nt->enabled) { pr_info("network logging has already %s\n", nt->enabled ? "started" : "stopped"); goto out_unlock; } if (enabled) { /* true */ if (nt->release && !nt->extended) { pr_err("Not enabling netconsole. Release feature requires extended log message"); goto out_unlock; } if (nt->extended && !console_is_registered(&netconsole_ext)) register_console(&netconsole_ext); /* * Skip netpoll_parse_options() -- all the attributes are * already configured via configfs. Just print them out. */ netpoll_print_options(&nt->np); ret = netpoll_setup(&nt->np); if (ret) goto out_unlock; nt->enabled = true; pr_info("network logging started\n"); } else { /* false */ /* We need to disable the netconsole before cleaning it up * otherwise we might end up in write_msg() with * nt->np.dev == NULL and nt->enabled == true */ mutex_lock(&target_cleanup_list_lock); spin_lock_irqsave(&target_list_lock, flags); nt->enabled = false; /* Remove the target from the list, while holding * target_list_lock */ list_move(&nt->list, &target_cleanup_list); spin_unlock_irqrestore(&target_list_lock, flags); mutex_unlock(&target_cleanup_list_lock); } ret = strnlen(buf, count); /* Deferred cleanup */ netconsole_process_cleanups(); out_unlock: mutex_unlock(&dynamic_netconsole_mutex); return ret; } static ssize_t release_store(struct config_item *item, const char *buf, size_t count) { struct netconsole_target *nt = to_target(item); bool release; ssize_t ret; mutex_lock(&dynamic_netconsole_mutex); if (nt->enabled) { pr_err("target (%s) is enabled, disable to update parameters\n", config_item_name(&nt->group.cg_item)); ret = -EINVAL; goto out_unlock; } ret = kstrtobool(buf, &release); if (ret) goto out_unlock; nt->release = release; ret = strnlen(buf, count); out_unlock: mutex_unlock(&dynamic_netconsole_mutex); return ret; } static ssize_t extended_store(struct config_item *item, const char *buf, size_t count) { struct netconsole_target *nt = to_target(item); bool extended; ssize_t ret; mutex_lock(&dynamic_netconsole_mutex); if (nt->enabled) { pr_err("target (%s) is enabled, disable to update parameters\n", config_item_name(&nt->group.cg_item)); ret = -EINVAL; goto out_unlock; } ret = kstrtobool(buf, &extended); if (ret) goto out_unlock; nt->extended = extended; ret = strnlen(buf, count); out_unlock: mutex_unlock(&dynamic_netconsole_mutex); return ret; } static ssize_t dev_name_store(struct config_item *item, const char *buf, size_t count) { struct netconsole_target *nt = to_target(item); mutex_lock(&dynamic_netconsole_mutex); if (nt->enabled) { pr_err("target (%s) is enabled, disable to update parameters\n", config_item_name(&nt->group.cg_item)); mutex_unlock(&dynamic_netconsole_mutex); return -EINVAL; } strscpy(nt->np.dev_name, buf, IFNAMSIZ); trim_newline(nt->np.dev_name, IFNAMSIZ); mutex_unlock(&dynamic_netconsole_mutex); return strnlen(buf, count); } static ssize_t local_port_store(struct config_item *item, const char *buf, size_t count) { struct netconsole_target *nt = to_target(item); ssize_t ret = -EINVAL; 
mutex_lock(&dynamic_netconsole_mutex); if (nt->enabled) { pr_err("target (%s) is enabled, disable to update parameters\n", config_item_name(&nt->group.cg_item)); goto out_unlock; } ret = kstrtou16(buf, 10, &nt->np.local_port); if (ret < 0) goto out_unlock; ret = strnlen(buf, count); out_unlock: mutex_unlock(&dynamic_netconsole_mutex); return ret; } static ssize_t remote_port_store(struct config_item *item, const char *buf, size_t count) { struct netconsole_target *nt = to_target(item); ssize_t ret = -EINVAL; mutex_lock(&dynamic_netconsole_mutex); if (nt->enabled) { pr_err("target (%s) is enabled, disable to update parameters\n", config_item_name(&nt->group.cg_item)); goto out_unlock; } ret = kstrtou16(buf, 10, &nt->np.remote_port); if (ret < 0) goto out_unlock; ret = strnlen(buf, count); out_unlock: mutex_unlock(&dynamic_netconsole_mutex); return ret; } static ssize_t local_ip_store(struct config_item *item, const char *buf, size_t count) { struct netconsole_target *nt = to_target(item); ssize_t ret = -EINVAL; mutex_lock(&dynamic_netconsole_mutex); if (nt->enabled) { pr_err("target (%s) is enabled, disable to update parameters\n", config_item_name(&nt->group.cg_item)); goto out_unlock; } if (strnchr(buf, count, ':')) { const char *end; if (in6_pton(buf, count, nt->np.local_ip.in6.s6_addr, -1, &end) > 0) { if (*end && *end != '\n') { pr_err("invalid IPv6 address at: <%c>\n", *end); goto out_unlock; } nt->np.ipv6 = true; } else goto out_unlock; } else { if (!nt->np.ipv6) nt->np.local_ip.ip = in_aton(buf); else goto out_unlock; } ret = strnlen(buf, count); out_unlock: mutex_unlock(&dynamic_netconsole_mutex); return ret; } static ssize_t remote_ip_store(struct config_item *item, const char *buf, size_t count) { struct netconsole_target *nt = to_target(item); ssize_t ret = -EINVAL; mutex_lock(&dynamic_netconsole_mutex); if (nt->enabled) { pr_err("target (%s) is enabled, disable to update parameters\n", config_item_name(&nt->group.cg_item)); goto out_unlock; } if (strnchr(buf, count, ':')) { const char *end; if (in6_pton(buf, count, nt->np.remote_ip.in6.s6_addr, -1, &end) > 0) { if (*end && *end != '\n') { pr_err("invalid IPv6 address at: <%c>\n", *end); goto out_unlock; } nt->np.ipv6 = true; } else goto out_unlock; } else { if (!nt->np.ipv6) nt->np.remote_ip.ip = in_aton(buf); else goto out_unlock; } ret = strnlen(buf, count); out_unlock: mutex_unlock(&dynamic_netconsole_mutex); return ret; } /* Count number of entries we have in extradata. * This is important because the extradata_complete only supports * MAX_EXTRADATA_ITEMS entries. Before enabling any new {user,sys}data * feature, number of entries needs to checked for available space. 
*/ static size_t count_extradata_entries(struct netconsole_target *nt) { size_t entries; /* Userdata entries */ entries = list_count_nodes(&nt->userdata_group.cg_children); /* Plus sysdata entries */ if (nt->sysdata_fields & SYSDATA_CPU_NR) entries += 1; if (nt->sysdata_fields & SYSDATA_TASKNAME) entries += 1; if (nt->sysdata_fields & SYSDATA_RELEASE) entries += 1; return entries; } static ssize_t remote_mac_store(struct config_item *item, const char *buf, size_t count) { struct netconsole_target *nt = to_target(item); u8 remote_mac[ETH_ALEN]; ssize_t ret = -EINVAL; mutex_lock(&dynamic_netconsole_mutex); if (nt->enabled) { pr_err("target (%s) is enabled, disable to update parameters\n", config_item_name(&nt->group.cg_item)); goto out_unlock; } if (!mac_pton(buf, remote_mac)) goto out_unlock; if (buf[MAC_ADDR_STR_LEN] && buf[MAC_ADDR_STR_LEN] != '\n') goto out_unlock; memcpy(nt->np.remote_mac, remote_mac, ETH_ALEN); ret = strnlen(buf, count); out_unlock: mutex_unlock(&dynamic_netconsole_mutex); return ret; } struct userdatum { struct config_item item; char value[MAX_EXTRADATA_VALUE_LEN]; }; static struct userdatum *to_userdatum(struct config_item *item) { return container_of(item, struct userdatum, item); } struct userdata { struct config_group group; }; static struct userdata *to_userdata(struct config_item *item) { return container_of(to_config_group(item), struct userdata, group); } static struct netconsole_target *userdata_to_target(struct userdata *ud) { struct config_group *netconsole_group; netconsole_group = to_config_group(ud->group.cg_item.ci_parent); return to_target(&netconsole_group->cg_item); } static ssize_t userdatum_value_show(struct config_item *item, char *buf) { return sysfs_emit(buf, "%s\n", &(to_userdatum(item)->value[0])); } static void update_userdata(struct netconsole_target *nt) { int complete_idx = 0, child_count = 0; struct list_head *entry; /* Clear the current string in case the last userdatum was deleted */ nt->userdata_length = 0; nt->extradata_complete[0] = 0; list_for_each(entry, &nt->userdata_group.cg_children) { struct userdatum *udm_item; struct config_item *item; if (WARN_ON_ONCE(child_count >= MAX_EXTRADATA_ITEMS)) break; child_count++; item = container_of(entry, struct config_item, ci_entry); udm_item = to_userdatum(item); /* Skip userdata with no value set */ if (strnlen(udm_item->value, MAX_EXTRADATA_VALUE_LEN) == 0) continue; /* This doesn't overflow extradata_complete since it will write * one entry length (1/MAX_EXTRADATA_ITEMS long), entry count is * checked to not exceed MAX items with child_count above */ complete_idx += scnprintf(&nt->extradata_complete[complete_idx], MAX_EXTRADATA_ENTRY_LEN, " %s=%s\n", item->ci_name, udm_item->value); } nt->userdata_length = strnlen(nt->extradata_complete, sizeof(nt->extradata_complete)); } static ssize_t userdatum_value_store(struct config_item *item, const char *buf, size_t count) { struct userdatum *udm = to_userdatum(item); struct netconsole_target *nt; struct userdata *ud; ssize_t ret; if (count > MAX_EXTRADATA_VALUE_LEN) return -EMSGSIZE; mutex_lock(&dynamic_netconsole_mutex); ret = strscpy(udm->value, buf, sizeof(udm->value)); if (ret < 0) goto out_unlock; trim_newline(udm->value, sizeof(udm->value)); ud = to_userdata(item->ci_parent); nt = userdata_to_target(ud); update_userdata(nt); ret = count; out_unlock: mutex_unlock(&dynamic_netconsole_mutex); return ret; } /* disable_sysdata_feature - Disable sysdata feature and clean sysdata * @nt: target that is disabling the feature * @feature: feature 
being disabled */ static void disable_sysdata_feature(struct netconsole_target *nt, enum sysdata_feature feature) { nt->sysdata_fields &= ~feature; nt->extradata_complete[nt->userdata_length] = 0; } static ssize_t sysdata_release_enabled_store(struct config_item *item, const char *buf, size_t count) { struct netconsole_target *nt = to_target(item->ci_parent); bool release_enabled, curr; ssize_t ret; ret = kstrtobool(buf, &release_enabled); if (ret) return ret; mutex_lock(&dynamic_netconsole_mutex); curr = !!(nt->sysdata_fields & SYSDATA_RELEASE); if (release_enabled == curr) goto unlock_ok; if (release_enabled && count_extradata_entries(nt) >= MAX_EXTRADATA_ITEMS) { ret = -ENOSPC; goto unlock; } if (release_enabled) nt->sysdata_fields |= SYSDATA_RELEASE; else disable_sysdata_feature(nt, SYSDATA_RELEASE); unlock_ok: ret = strnlen(buf, count); unlock: mutex_unlock(&dynamic_netconsole_mutex); return ret; } static ssize_t sysdata_taskname_enabled_store(struct config_item *item, const char *buf, size_t count) { struct netconsole_target *nt = to_target(item->ci_parent); bool taskname_enabled, curr; ssize_t ret; ret = kstrtobool(buf, &taskname_enabled); if (ret) return ret; mutex_lock(&dynamic_netconsole_mutex); curr = !!(nt->sysdata_fields & SYSDATA_TASKNAME); if (taskname_enabled == curr) goto unlock_ok; if (taskname_enabled && count_extradata_entries(nt) >= MAX_EXTRADATA_ITEMS) { ret = -ENOSPC; goto unlock; } if (taskname_enabled) nt->sysdata_fields |= SYSDATA_TASKNAME; else disable_sysdata_feature(nt, SYSDATA_TASKNAME); unlock_ok: ret = strnlen(buf, count); unlock: mutex_unlock(&dynamic_netconsole_mutex); return ret; } /* configfs helper to sysdata cpu_nr feature */ static ssize_t sysdata_cpu_nr_enabled_store(struct config_item *item, const char *buf, size_t count) { struct netconsole_target *nt = to_target(item->ci_parent); bool cpu_nr_enabled, curr; ssize_t ret; ret = kstrtobool(buf, &cpu_nr_enabled); if (ret) return ret; mutex_lock(&dynamic_netconsole_mutex); curr = !!(nt->sysdata_fields & SYSDATA_CPU_NR); if (cpu_nr_enabled == curr) /* no change requested */ goto unlock_ok; if (cpu_nr_enabled && count_extradata_entries(nt) >= MAX_EXTRADATA_ITEMS) { /* user wants the new feature, but there is no space in the * buffer. */ ret = -ENOSPC; goto unlock; } if (cpu_nr_enabled) nt->sysdata_fields |= SYSDATA_CPU_NR; else /* This is special because extradata_complete might have * remaining data from previous sysdata, and it needs to be * cleaned. 
*/ disable_sysdata_feature(nt, SYSDATA_CPU_NR); unlock_ok: ret = strnlen(buf, count); unlock: mutex_unlock(&dynamic_netconsole_mutex); return ret; } CONFIGFS_ATTR(userdatum_, value); CONFIGFS_ATTR(sysdata_, cpu_nr_enabled); CONFIGFS_ATTR(sysdata_, taskname_enabled); CONFIGFS_ATTR(sysdata_, release_enabled); static struct configfs_attribute *userdatum_attrs[] = { &userdatum_attr_value, NULL, }; static void userdatum_release(struct config_item *item) { kfree(to_userdatum(item)); } static struct configfs_item_operations userdatum_ops = { .release = userdatum_release, }; static const struct config_item_type userdatum_type = { .ct_item_ops = &userdatum_ops, .ct_attrs = userdatum_attrs, .ct_owner = THIS_MODULE, }; static struct config_item *userdatum_make_item(struct config_group *group, const char *name) { struct netconsole_target *nt; struct userdatum *udm; struct userdata *ud; if (strlen(name) > MAX_EXTRADATA_NAME_LEN) return ERR_PTR(-ENAMETOOLONG); ud = to_userdata(&group->cg_item); nt = userdata_to_target(ud); if (count_extradata_entries(nt) >= MAX_EXTRADATA_ITEMS) return ERR_PTR(-ENOSPC); udm = kzalloc(sizeof(*udm), GFP_KERNEL); if (!udm) return ERR_PTR(-ENOMEM); config_item_init_type_name(&udm->item, name, &userdatum_type); return &udm->item; } static void userdatum_drop(struct config_group *group, struct config_item *item) { struct netconsole_target *nt; struct userdata *ud; ud = to_userdata(&group->cg_item); nt = userdata_to_target(ud); mutex_lock(&dynamic_netconsole_mutex); update_userdata(nt); config_item_put(item); mutex_unlock(&dynamic_netconsole_mutex); } static struct configfs_attribute *userdata_attrs[] = { &sysdata_attr_cpu_nr_enabled, &sysdata_attr_taskname_enabled, &sysdata_attr_release_enabled, NULL, }; static struct configfs_group_operations userdata_ops = { .make_item = userdatum_make_item, .drop_item = userdatum_drop, }; static const struct config_item_type userdata_type = { .ct_item_ops = &userdatum_ops, .ct_group_ops = &userdata_ops, .ct_attrs = userdata_attrs, .ct_owner = THIS_MODULE, }; CONFIGFS_ATTR(, enabled); CONFIGFS_ATTR(, extended); CONFIGFS_ATTR(, dev_name); CONFIGFS_ATTR(, local_port); CONFIGFS_ATTR(, remote_port); CONFIGFS_ATTR(, local_ip); CONFIGFS_ATTR(, remote_ip); CONFIGFS_ATTR_RO(, local_mac); CONFIGFS_ATTR(, remote_mac); CONFIGFS_ATTR(, release); CONFIGFS_ATTR_RO(, transmit_errors); static struct configfs_attribute *netconsole_target_attrs[] = { &attr_enabled, &attr_extended, &attr_release, &attr_dev_name, &attr_local_port, &attr_remote_port, &attr_local_ip, &attr_remote_ip, &attr_local_mac, &attr_remote_mac, &attr_transmit_errors, NULL, }; /* * Item operations and type for netconsole_target. 
*/ static void netconsole_target_release(struct config_item *item) { kfree(to_target(item)); } static struct configfs_item_operations netconsole_target_item_ops = { .release = netconsole_target_release, }; static const struct config_item_type netconsole_target_type = { .ct_attrs = netconsole_target_attrs, .ct_item_ops = &netconsole_target_item_ops, .ct_owner = THIS_MODULE, }; static void init_target_config_group(struct netconsole_target *nt, const char *name) { config_group_init_type_name(&nt->group, name, &netconsole_target_type); config_group_init_type_name(&nt->userdata_group, "userdata", &userdata_type); configfs_add_default_group(&nt->userdata_group, &nt->group); } static struct netconsole_target *find_cmdline_target(const char *name) { struct netconsole_target *nt, *ret = NULL; unsigned long flags; spin_lock_irqsave(&target_list_lock, flags); list_for_each_entry(nt, &target_list, list) { if (!strcmp(nt->group.cg_item.ci_name, name)) { ret = nt; break; } } spin_unlock_irqrestore(&target_list_lock, flags); return ret; } /* * Group operations and type for netconsole_subsys. */ static struct config_group *make_netconsole_target(struct config_group *group, const char *name) { struct netconsole_target *nt; unsigned long flags; /* Checking if a target by this name was created at boot time. If so, * attach a configfs entry to that target. This enables dynamic * control. */ if (!strncmp(name, NETCONSOLE_PARAM_TARGET_PREFIX, strlen(NETCONSOLE_PARAM_TARGET_PREFIX))) { nt = find_cmdline_target(name); if (nt) { init_target_config_group(nt, name); return &nt->group; } } nt = alloc_and_init(); if (!nt) return ERR_PTR(-ENOMEM); /* Initialize the config_group member */ init_target_config_group(nt, name); /* Adding, but it is disabled */ spin_lock_irqsave(&target_list_lock, flags); list_add(&nt->list, &target_list); spin_unlock_irqrestore(&target_list_lock, flags); return &nt->group; } static void drop_netconsole_target(struct config_group *group, struct config_item *item) { unsigned long flags; struct netconsole_target *nt = to_target(item); spin_lock_irqsave(&target_list_lock, flags); list_del(&nt->list); spin_unlock_irqrestore(&target_list_lock, flags); /* * The target may have never been enabled, or was manually disabled * before being removed so netpoll may have already been cleaned up. 
*/ if (nt->enabled) netpoll_cleanup(&nt->np); config_item_put(&nt->group.cg_item); } static struct configfs_group_operations netconsole_subsys_group_ops = { .make_group = make_netconsole_target, .drop_item = drop_netconsole_target, }; static const struct config_item_type netconsole_subsys_type = { .ct_group_ops = &netconsole_subsys_group_ops, .ct_owner = THIS_MODULE, }; /* The netconsole configfs subsystem */ static struct configfs_subsystem netconsole_subsys = { .su_group = { .cg_item = { .ci_namebuf = "netconsole", .ci_type = &netconsole_subsys_type, }, }, }; static void populate_configfs_item(struct netconsole_target *nt, int cmdline_count) { char target_name[16]; snprintf(target_name, sizeof(target_name), "%s%d", NETCONSOLE_PARAM_TARGET_PREFIX, cmdline_count); init_target_config_group(nt, target_name); } static int sysdata_append_cpu_nr(struct netconsole_target *nt, int offset) { /* Append cpu=%d at extradata_complete after userdata str */ return scnprintf(&nt->extradata_complete[offset], MAX_EXTRADATA_ENTRY_LEN, " cpu=%u\n", raw_smp_processor_id()); } static int sysdata_append_taskname(struct netconsole_target *nt, int offset) { return scnprintf(&nt->extradata_complete[offset], MAX_EXTRADATA_ENTRY_LEN, " taskname=%s\n", current->comm); } static int sysdata_append_release(struct netconsole_target *nt, int offset) { return scnprintf(&nt->extradata_complete[offset], MAX_EXTRADATA_ENTRY_LEN, " release=%s\n", init_utsname()->release); } /* * prepare_extradata - append sysdata at extradata_complete in runtime * @nt: target to send message to */ static int prepare_extradata(struct netconsole_target *nt) { int extradata_len; /* userdata was appended when configfs write helper was called * by update_userdata(). */ extradata_len = nt->userdata_length; if (!nt->sysdata_fields) goto out; if (nt->sysdata_fields & SYSDATA_CPU_NR) extradata_len += sysdata_append_cpu_nr(nt, extradata_len); if (nt->sysdata_fields & SYSDATA_TASKNAME) extradata_len += sysdata_append_taskname(nt, extradata_len); if (nt->sysdata_fields & SYSDATA_RELEASE) extradata_len += sysdata_append_release(nt, extradata_len); WARN_ON_ONCE(extradata_len > MAX_EXTRADATA_ENTRY_LEN * MAX_EXTRADATA_ITEMS); out: return extradata_len; } #else /* CONFIG_NETCONSOLE_DYNAMIC not set */ static int prepare_extradata(struct netconsole_target *nt) { return 0; } #endif /* CONFIG_NETCONSOLE_DYNAMIC */ /* Handle network interface device notifications */ static int netconsole_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { unsigned long flags; struct netconsole_target *nt, *tmp; struct net_device *dev = netdev_notifier_info_to_dev(ptr); bool stopped = false; if (!(event == NETDEV_CHANGENAME || event == NETDEV_UNREGISTER || event == NETDEV_RELEASE || event == NETDEV_JOIN)) goto done; mutex_lock(&target_cleanup_list_lock); spin_lock_irqsave(&target_list_lock, flags); list_for_each_entry_safe(nt, tmp, &target_list, list) { netconsole_target_get(nt); if (nt->np.dev == dev) { switch (event) { case NETDEV_CHANGENAME: strscpy(nt->np.dev_name, dev->name, IFNAMSIZ); break; case NETDEV_RELEASE: case NETDEV_JOIN: case NETDEV_UNREGISTER: nt->enabled = false; list_move(&nt->list, &target_cleanup_list); stopped = true; } } netconsole_target_put(nt); } spin_unlock_irqrestore(&target_list_lock, flags); mutex_unlock(&target_cleanup_list_lock); if (stopped) { const char *msg = "had an event"; switch (event) { case NETDEV_UNREGISTER: msg = "unregistered"; break; case NETDEV_RELEASE: msg = "released slaves"; break; case NETDEV_JOIN: msg = "is 
joining a master device"; break; } pr_info("network logging stopped on interface %s as it %s\n", dev->name, msg); } /* Process target_cleanup_list entries. By the end, target_cleanup_list * should be empty */ netconsole_process_cleanups_core(); done: return NOTIFY_DONE; } static struct notifier_block netconsole_netdev_notifier = { .notifier_call = netconsole_netdev_event, }; /** * send_udp - Wrapper for netpoll_send_udp that counts errors * @nt: target to send message to * @msg: message to send * @len: length of message * * Calls netpoll_send_udp and classifies the return value. If an error * occurred it increments statistics in nt->stats accordingly. * Only calls netpoll_send_udp if CONFIG_NETCONSOLE_DYNAMIC is disabled. */ static void send_udp(struct netconsole_target *nt, const char *msg, int len) { int result = netpoll_send_udp(&nt->np, msg, len); if (IS_ENABLED(CONFIG_NETCONSOLE_DYNAMIC)) { if (result == NET_XMIT_DROP) { u64_stats_update_begin(&nt->stats.syncp); u64_stats_inc(&nt->stats.xmit_drop_count); u64_stats_update_end(&nt->stats.syncp); } else if (result == -ENOMEM) { u64_stats_update_begin(&nt->stats.syncp); u64_stats_inc(&nt->stats.enomem_count); u64_stats_update_end(&nt->stats.syncp); } } } static void send_msg_no_fragmentation(struct netconsole_target *nt, const char *msg, int msg_len, int release_len) { const char *extradata = NULL; const char *release; #ifdef CONFIG_NETCONSOLE_DYNAMIC extradata = nt->extradata_complete; #endif if (release_len) { release = init_utsname()->release; scnprintf(nt->buf, MAX_PRINT_CHUNK, "%s,%s", release, msg); msg_len += release_len; } else { memcpy(nt->buf, msg, msg_len); } if (extradata) msg_len += scnprintf(&nt->buf[msg_len], MAX_PRINT_CHUNK - msg_len, "%s", extradata); send_udp(nt, nt->buf, msg_len); } static void append_release(char *buf) { const char *release; release = init_utsname()->release; scnprintf(buf, MAX_PRINT_CHUNK, "%s,", release); } static void send_fragmented_body(struct netconsole_target *nt, const char *msgbody, int header_len, int msgbody_len, int extradata_len) { int sent_extradata, preceding_bytes; const char *extradata = NULL; int body_len, offset = 0; #ifdef CONFIG_NETCONSOLE_DYNAMIC extradata = nt->extradata_complete; #endif /* body_len represents the number of bytes that will be sent. This is * bigger than MAX_PRINT_CHUNK, thus, it will be split in multiple * packets */ body_len = msgbody_len + extradata_len; /* In each iteration of the while loop below, we send a packet * containing the header and a portion of the body. The body is * composed of two parts: msgbody and extradata. We keep track of how * many bytes have been sent so far using the offset variable, which * ranges from 0 to the total length of the body. */ while (offset < body_len) { int this_header = header_len; bool msgbody_written = false; int this_offset = 0; int this_chunk = 0; this_header += scnprintf(nt->buf + this_header, MAX_PRINT_CHUNK - this_header, ",ncfrag=%d/%d;", offset, body_len); /* Not all msgbody data has been written yet */ if (offset < msgbody_len) { this_chunk = min(msgbody_len - offset, MAX_PRINT_CHUNK - this_header); if (WARN_ON_ONCE(this_chunk <= 0)) return; memcpy(nt->buf + this_header, msgbody + offset, this_chunk); this_offset += this_chunk; } /* msgbody was finally written, either in the previous * messages and/or in the current buf. Time to write * the extradata. 
*/ msgbody_written |= offset + this_offset >= msgbody_len; /* Msg body is fully written and there is pending extradata to * write, append extradata in this chunk */ if (msgbody_written && offset + this_offset < body_len) { /* Track how much user data was already sent. First * time here, sent_userdata is zero */ sent_extradata = (offset + this_offset) - msgbody_len; /* offset of bytes used in current buf */ preceding_bytes = this_chunk + this_header; if (WARN_ON_ONCE(sent_extradata < 0)) return; this_chunk = min(extradata_len - sent_extradata, MAX_PRINT_CHUNK - preceding_bytes); if (WARN_ON_ONCE(this_chunk < 0)) /* this_chunk could be zero if all the previous * message used all the buffer. This is not a * problem, extradata will be sent in the next * iteration */ return; memcpy(nt->buf + this_header + this_offset, extradata + sent_extradata, this_chunk); this_offset += this_chunk; } send_udp(nt, nt->buf, this_header + this_offset); offset += this_offset; } } static void send_msg_fragmented(struct netconsole_target *nt, const char *msg, int msg_len, int release_len, int extradata_len) { int header_len, msgbody_len; const char *msgbody; /* need to insert extra header fields, detect header and msgbody */ msgbody = memchr(msg, ';', msg_len); if (WARN_ON_ONCE(!msgbody)) return; header_len = msgbody - msg; msgbody_len = msg_len - header_len - 1; msgbody++; /* * Transfer multiple chunks with the following extra header. * "ncfrag=<byte-offset>/<total-bytes>" */ if (release_len) append_release(nt->buf); /* Copy the header into the buffer */ memcpy(nt->buf + release_len, msg, header_len); header_len += release_len; /* for now on, the header will be persisted, and the msgbody * will be replaced */ send_fragmented_body(nt, msgbody, header_len, msgbody_len, extradata_len); } /** * send_ext_msg_udp - send extended log message to target * @nt: target to send message to * @msg: extended log message to send * @msg_len: length of message * * Transfer extended log @msg to @nt. If @msg is longer than * MAX_PRINT_CHUNK, it'll be split and transmitted in multiple chunks with * ncfrag header field added to identify them. 
*/ static void send_ext_msg_udp(struct netconsole_target *nt, const char *msg, int msg_len) { int release_len = 0; int extradata_len; extradata_len = prepare_extradata(nt); if (nt->release) release_len = strlen(init_utsname()->release) + 1; if (msg_len + release_len + extradata_len <= MAX_PRINT_CHUNK) return send_msg_no_fragmentation(nt, msg, msg_len, release_len); return send_msg_fragmented(nt, msg, msg_len, release_len, extradata_len); } static void write_ext_msg(struct console *con, const char *msg, unsigned int len) { struct netconsole_target *nt; unsigned long flags; if ((oops_only && !oops_in_progress) || list_empty(&target_list)) return; spin_lock_irqsave(&target_list_lock, flags); list_for_each_entry(nt, &target_list, list) if (nt->extended && nt->enabled && netif_running(nt->np.dev)) send_ext_msg_udp(nt, msg, len); spin_unlock_irqrestore(&target_list_lock, flags); } static void write_msg(struct console *con, const char *msg, unsigned int len) { int frag, left; unsigned long flags; struct netconsole_target *nt; const char *tmp; if (oops_only && !oops_in_progress) return; /* Avoid taking lock and disabling interrupts unnecessarily */ if (list_empty(&target_list)) return; spin_lock_irqsave(&target_list_lock, flags); list_for_each_entry(nt, &target_list, list) { if (!nt->extended && nt->enabled && netif_running(nt->np.dev)) { /* * We nest this inside the for-each-target loop above * so that we're able to get as much logging out to * at least one target if we die inside here, instead * of unnecessarily keeping all targets in lock-step. */ tmp = msg; for (left = len; left;) { frag = min(left, MAX_PRINT_CHUNK); send_udp(nt, tmp, frag); tmp += frag; left -= frag; } } } spin_unlock_irqrestore(&target_list_lock, flags); } /* Allocate new target (from boot/module param) and setup netpoll for it */ static struct netconsole_target *alloc_param_target(char *target_config, int cmdline_count) { struct netconsole_target *nt; int err; nt = alloc_and_init(); if (!nt) { err = -ENOMEM; goto fail; } if (*target_config == '+') { nt->extended = true; target_config++; } if (*target_config == 'r') { if (!nt->extended) { pr_err("Netconsole configuration error. Release feature requires extended log message"); err = -EINVAL; goto fail; } nt->release = true; target_config++; } /* Parse parameters and setup netpoll */ err = netpoll_parse_options(&nt->np, target_config); if (err) goto fail; err = netpoll_setup(&nt->np); if (err) { pr_err("Not enabling netconsole for %s%d. Netpoll setup failed\n", NETCONSOLE_PARAM_TARGET_PREFIX, cmdline_count); if (!IS_ENABLED(CONFIG_NETCONSOLE_DYNAMIC)) /* only fail if dynamic reconfiguration is set, * otherwise, keep the target in the list, but disabled. 
*/ goto fail; } else { nt->enabled = true; } populate_configfs_item(nt, cmdline_count); return nt; fail: kfree(nt); return ERR_PTR(err); } /* Cleanup netpoll for given target (from boot/module param) and free it */ static void free_param_target(struct netconsole_target *nt) { netpoll_cleanup(&nt->np); kfree(nt); } static struct console netconsole_ext = { .name = "netcon_ext", .flags = CON_ENABLED | CON_EXTENDED, .write = write_ext_msg, }; static struct console netconsole = { .name = "netcon", .flags = CON_ENABLED, .write = write_msg, }; static int __init init_netconsole(void) { int err; struct netconsole_target *nt, *tmp; unsigned int count = 0; bool extended = false; unsigned long flags; char *target_config; char *input = config; if (strnlen(input, MAX_PARAM_LENGTH)) { while ((target_config = strsep(&input, ";"))) { nt = alloc_param_target(target_config, count); if (IS_ERR(nt)) { if (IS_ENABLED(CONFIG_NETCONSOLE_DYNAMIC)) continue; err = PTR_ERR(nt); goto fail; } /* Dump existing printks when we register */ if (nt->extended) { extended = true; netconsole_ext.flags |= CON_PRINTBUFFER; } else { netconsole.flags |= CON_PRINTBUFFER; } spin_lock_irqsave(&target_list_lock, flags); list_add(&nt->list, &target_list); spin_unlock_irqrestore(&target_list_lock, flags); count++; } } err = register_netdevice_notifier(&netconsole_netdev_notifier); if (err) goto fail; err = dynamic_netconsole_init(); if (err) goto undonotifier; if (extended) register_console(&netconsole_ext); register_console(&netconsole); pr_info("network logging started\n"); return err; undonotifier: unregister_netdevice_notifier(&netconsole_netdev_notifier); fail: pr_err("cleaning up\n"); /* * Remove all targets and destroy them (only targets created * from the boot/module option exist here). Skipping the list * lock is safe here, and netpoll_cleanup() will sleep. */ list_for_each_entry_safe(nt, tmp, &target_list, list) { list_del(&nt->list); free_param_target(nt); } return err; } static void __exit cleanup_netconsole(void) { struct netconsole_target *nt, *tmp; if (console_is_registered(&netconsole_ext)) unregister_console(&netconsole_ext); unregister_console(&netconsole); dynamic_netconsole_exit(); unregister_netdevice_notifier(&netconsole_netdev_notifier); /* * Targets created via configfs pin references on our module * and would first be rmdir(2)'ed from userspace. We reach * here only when they are already destroyed, and only those * created from the boot/module option are left, so remove and * destroy them. Skipping the list lock is safe here, and * netpoll_cleanup() will sleep. */ list_for_each_entry_safe(nt, tmp, &target_list, list) { list_del(&nt->list); free_param_target(nt); } } /* * Use late_initcall to ensure netconsole is * initialized after network device driver if built-in. * * late_initcall() and module_init() are identical if built as module. */ late_initcall(init_netconsole); module_exit(cleanup_netconsole);
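
The fragmentation path above (send_msg_fragmented() feeding send_fragmented_body()) splits an oversized extended log line into chunks and stamps each outgoing datagram with an extra ",ncfrag=<byte-offset>/<total-bytes>;" header field so the receiver can reassemble the body. As a rough illustration of what a collector has to undo, here is a minimal userspace-side sketch; it is not part of the driver, and the function name parse_ncfrag, the sample datagram, and its header values are illustrative assumptions only:

#include <stdio.h>
#include <string.h>

/*
 * Sketch only: pull the byte offset and total body length out of one
 * extended-netconsole datagram carrying an "ncfrag" header field.
 * Returns a pointer to the chunk payload (after the terminating ';'),
 * or NULL if the datagram is not fragmented or looks malformed.
 */
static const char *parse_ncfrag(const char *dgram, int *offset, int *total)
{
	const char *frag = strstr(dgram, ",ncfrag=");

	if (!frag)
		return NULL;			/* unfragmented message */
	if (sscanf(frag, ",ncfrag=%d/%d", offset, total) != 2)
		return NULL;			/* malformed header */
	frag = strchr(frag, ';');		/* payload starts after ';' */
	return frag ? frag + 1 : NULL;
}

int main(void)
{
	/* Datagram shaped like the driver's output; values are made up. */
	const char *dgram = "6,100,12345,-,ncfrag=0/2000;first part of the body";
	int offset, total;
	const char *payload = parse_ncfrag(dgram, &offset, &total);

	if (payload)
		printf("chunk at offset %d of %d bytes: %s\n",
		       offset, total, payload);
	return 0;
}

A receiver would typically keep appending each chunk at its reported offset until offset plus chunk length reaches the advertised total.
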
// SPDX-License-Identifier: GPL-2.0
/*
 * (C) 2001 Clemson University and The University of Chicago
 *
 * See COPYING in top-level directory.
 */

#include "protocol.h"
#include "orangefs-kernel.h"

/* tags assigned to kernel upcall operations */
static __u64 next_tag_value;
static DEFINE_SPINLOCK(next_tag_value_lock);

/* the orangefs memory caches */

/* a cache for orangefs upcall/downcall operations */
static struct kmem_cache *op_cache;

int op_cache_initialize(void)
{
	op_cache = kmem_cache_create("orangefs_op_cache",
				     sizeof(struct orangefs_kernel_op_s),
				     0, 0, NULL);

	if (!op_cache) {
		gossip_err("Cannot create orangefs_op_cache\n");
		return -ENOMEM;
	}

	/* initialize our atomic tag counter */
	spin_lock(&next_tag_value_lock);
	next_tag_value = 100;
	spin_unlock(&next_tag_value_lock);
	return 0;
}

int op_cache_finalize(void)
{
	kmem_cache_destroy(op_cache);
	return 0;
}

char *get_opname_string(struct orangefs_kernel_op_s *new_op)
{
	if (new_op) {
		__s32 type = new_op->upcall.type;

		if (type == ORANGEFS_VFS_OP_FILE_IO)
			return "OP_FILE_IO";
		else if (type == ORANGEFS_VFS_OP_LOOKUP)
			return "OP_LOOKUP";
		else if (type == ORANGEFS_VFS_OP_CREATE)
			return "OP_CREATE";
		else if (type == ORANGEFS_VFS_OP_GETATTR)
			return "OP_GETATTR";
		else if (type == ORANGEFS_VFS_OP_REMOVE)
			return "OP_REMOVE";
		else if (type == ORANGEFS_VFS_OP_MKDIR)
			return "OP_MKDIR";
		else if (type == ORANGEFS_VFS_OP_READDIR)
			return "OP_READDIR";
		else if (type == ORANGEFS_VFS_OP_READDIRPLUS)
			return "OP_READDIRPLUS";
		else if (type == ORANGEFS_VFS_OP_SETATTR)
			return "OP_SETATTR";
		else if (type == ORANGEFS_VFS_OP_SYMLINK)
			return "OP_SYMLINK";
		else if (type == ORANGEFS_VFS_OP_RENAME)
			return "OP_RENAME";
		else if (type == ORANGEFS_VFS_OP_STATFS)
			return "OP_STATFS";
		else if (type == ORANGEFS_VFS_OP_TRUNCATE)
			return "OP_TRUNCATE";
		else if (type == ORANGEFS_VFS_OP_RA_FLUSH)
			return "OP_RA_FLUSH";
		else if (type == ORANGEFS_VFS_OP_FS_MOUNT)
			return "OP_FS_MOUNT";
		else if (type == ORANGEFS_VFS_OP_FS_UMOUNT)
			return "OP_FS_UMOUNT";
		else if (type == ORANGEFS_VFS_OP_GETXATTR)
			return "OP_GETXATTR";
		else if (type == ORANGEFS_VFS_OP_SETXATTR)
			return "OP_SETXATTR";
		else if (type == ORANGEFS_VFS_OP_LISTXATTR)
			return "OP_LISTXATTR";
		else if (type == ORANGEFS_VFS_OP_REMOVEXATTR)
			return "OP_REMOVEXATTR";
		else if (type == ORANGEFS_VFS_OP_PARAM)
			return "OP_PARAM";
		else if (type == ORANGEFS_VFS_OP_PERF_COUNT)
			return "OP_PERF_COUNT";
		else if (type == ORANGEFS_VFS_OP_CANCEL)
			return "OP_CANCEL";
		else if (type == ORANGEFS_VFS_OP_FSYNC)
			return "OP_FSYNC";
		else if (type == ORANGEFS_VFS_OP_FSKEY)
			return "OP_FSKEY";
		else if (type == ORANGEFS_VFS_OP_FEATURES)
			return "OP_FEATURES";
	}
	return "OP_UNKNOWN?";
}

void orangefs_new_tag(struct orangefs_kernel_op_s *op)
{
	spin_lock(&next_tag_value_lock);
	op->tag = next_tag_value++;
	if (next_tag_value == 0)
		next_tag_value = 100;
	spin_unlock(&next_tag_value_lock);
}

struct orangefs_kernel_op_s *op_alloc(__s32 type)
{
	struct orangefs_kernel_op_s *new_op = NULL;

	new_op = kmem_cache_zalloc(op_cache,
GFP_KERNEL); if (new_op) { INIT_LIST_HEAD(&new_op->list); spin_lock_init(&new_op->lock); init_completion(&new_op->waitq); new_op->upcall.type = ORANGEFS_VFS_OP_INVALID; new_op->downcall.type = ORANGEFS_VFS_OP_INVALID; new_op->downcall.status = -1; new_op->op_state = OP_VFS_STATE_UNKNOWN; /* initialize the op specific tag and upcall credentials */ orangefs_new_tag(new_op); new_op->upcall.type = type; new_op->attempts = 0; gossip_debug(GOSSIP_CACHE_DEBUG, "Alloced OP (%p: %llu %s)\n", new_op, llu(new_op->tag), get_opname_string(new_op)); new_op->upcall.uid = from_kuid(&init_user_ns, current_fsuid()); new_op->upcall.gid = from_kgid(&init_user_ns, current_fsgid()); } else { gossip_err("op_alloc: kmem_cache_zalloc failed!\n"); } return new_op; } void op_release(struct orangefs_kernel_op_s *orangefs_op) { if (orangefs_op) { gossip_debug(GOSSIP_CACHE_DEBUG, "Releasing OP (%p: %llu)\n", orangefs_op, llu(orangefs_op->tag)); kmem_cache_free(op_cache, orangefs_op); } else { gossip_err("NULL pointer in op_release\n"); } }
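
op_alloc() stamps every upcall with a tag from orangefs_new_tag(): a monotonically increasing 64-bit counter seeded at 100 that, if it ever wraps to zero, is re-seeded to 100, so tag values below 100 are never handed out. Below is a standalone sketch of that wrap-around rule only; it is user-space C with a single-threaded caller assumed, so the spinlock protecting next_tag_value is omitted, and next_tag() is an illustrative name:

#include <stdint.h>
#include <stdio.h>

/* Sketch of the tag policy in orangefs_new_tag(): tags increase
 * monotonically and restart at 100 on wrap-around, keeping 0..99 unused.
 */
static uint64_t next_tag_value = 100;

static uint64_t next_tag(void)
{
	uint64_t tag = next_tag_value++;

	if (next_tag_value == 0)	/* wrapped past UINT64_MAX */
		next_tag_value = 100;
	return tag;
}

int main(void)
{
	/* Force the counter to the wrap point to show the re-seed. */
	next_tag_value = UINT64_MAX;
	printf("%llu\n", (unsigned long long)next_tag());	/* UINT64_MAX */
	printf("%llu\n", (unsigned long long)next_tag());	/* 100 */
	return 0;
}
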
// SPDX-License-Identifier: GPL-2.0-only
/*
 * GCM: Galois/Counter Mode.
 *
 * Copyright (c) 2007 Nokia Siemens Networks - Mikko Herranen <mh1@iki.fi>
 */

#include <crypto/gf128mul.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <crypto/gcm.h>
#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>

struct gcm_instance_ctx {
	struct crypto_skcipher_spawn ctr;
	struct crypto_ahash_spawn ghash;
};

struct crypto_gcm_ctx {
	struct crypto_skcipher *ctr;
	struct crypto_ahash *ghash;
};

struct crypto_rfc4106_ctx {
	struct crypto_aead *child;
	u8 nonce[4];
};

struct crypto_rfc4106_req_ctx {
	struct scatterlist src[3];
	struct scatterlist dst[3];
	struct aead_request subreq;
};

struct crypto_rfc4543_instance_ctx {
	struct crypto_aead_spawn aead;
};

struct crypto_rfc4543_ctx {
	struct crypto_aead *child;
	u8 nonce[4];
};

struct crypto_rfc4543_req_ctx {
	struct aead_request subreq;
};

struct crypto_gcm_ghash_ctx {
	unsigned int cryptlen;
	struct scatterlist *src;
	int (*complete)(struct aead_request *req, u32 flags);
};

struct crypto_gcm_req_priv_ctx {
	u8 iv[16];
	u8 auth_tag[16];
	u8 iauth_tag[16];
	struct scatterlist src[3];
	struct scatterlist dst[3];
	struct scatterlist sg;
	struct crypto_gcm_ghash_ctx ghash_ctx;
	union {
		struct ahash_request ahreq;
		struct skcipher_request skreq;
	} u;
};

static struct {
	u8 buf[16];
	struct scatterlist sg;
} *gcm_zeroes;

static inline struct crypto_gcm_req_priv_ctx *crypto_gcm_reqctx(
	struct aead_request *req)
{
	unsigned long align = crypto_aead_alignmask(crypto_aead_reqtfm(req));

	return (void *)PTR_ALIGN((u8 *)aead_request_ctx(req), align + 1);
}

static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
			     unsigned int keylen)
{
	struct crypto_gcm_ctx *ctx = crypto_aead_ctx(aead);
	struct crypto_ahash *ghash = ctx->ghash;
	struct crypto_skcipher *ctr = ctx->ctr;
	struct {
		be128 hash;
		u8 iv[16];
		struct crypto_wait wait;
		struct scatterlist sg[1];
		struct skcipher_request req;
	} *data;
	int err;

	crypto_skcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(ctr, crypto_aead_get_flags(aead) &
				       CRYPTO_TFM_REQ_MASK);
	err = crypto_skcipher_setkey(ctr, key, keylen);
	if (err)
		return err;

	data = kzalloc(sizeof(*data) + crypto_skcipher_reqsize(ctr),
		       GFP_KERNEL);
	if (!data)
		return -ENOMEM;
crypto_init_wait(&data->wait); sg_init_one(data->sg, &data->hash, sizeof(data->hash)); skcipher_request_set_tfm(&data->req, ctr); skcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP | CRYPTO_TFM_REQ_MAY_BACKLOG, crypto_req_done, &data->wait); skcipher_request_set_crypt(&data->req, data->sg, data->sg, sizeof(data->hash), data->iv); err = crypto_wait_req(crypto_skcipher_encrypt(&data->req), &data->wait); if (err) goto out; crypto_ahash_clear_flags(ghash, CRYPTO_TFM_REQ_MASK); crypto_ahash_set_flags(ghash, crypto_aead_get_flags(aead) & CRYPTO_TFM_REQ_MASK); err = crypto_ahash_setkey(ghash, (u8 *)&data->hash, sizeof(be128)); out: kfree_sensitive(data); return err; } static int crypto_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize) { return crypto_gcm_check_authsize(authsize); } static void crypto_gcm_init_common(struct aead_request *req) { struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); __be32 counter = cpu_to_be32(1); struct scatterlist *sg; memset(pctx->auth_tag, 0, sizeof(pctx->auth_tag)); memcpy(pctx->iv, req->iv, GCM_AES_IV_SIZE); memcpy(pctx->iv + GCM_AES_IV_SIZE, &counter, 4); sg_init_table(pctx->src, 3); sg_set_buf(pctx->src, pctx->auth_tag, sizeof(pctx->auth_tag)); sg = scatterwalk_ffwd(pctx->src + 1, req->src, req->assoclen); if (sg != pctx->src + 1) sg_chain(pctx->src, 2, sg); if (req->src != req->dst) { sg_init_table(pctx->dst, 3); sg_set_buf(pctx->dst, pctx->auth_tag, sizeof(pctx->auth_tag)); sg = scatterwalk_ffwd(pctx->dst + 1, req->dst, req->assoclen); if (sg != pctx->dst + 1) sg_chain(pctx->dst, 2, sg); } } static void crypto_gcm_init_crypt(struct aead_request *req, unsigned int cryptlen) { struct crypto_aead *aead = crypto_aead_reqtfm(req); struct crypto_gcm_ctx *ctx = crypto_aead_ctx(aead); struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); struct skcipher_request *skreq = &pctx->u.skreq; struct scatterlist *dst; dst = req->src == req->dst ? pctx->src : pctx->dst; skcipher_request_set_tfm(skreq, ctx->ctr); skcipher_request_set_crypt(skreq, pctx->src, dst, cryptlen + sizeof(pctx->auth_tag), pctx->iv); } static inline unsigned int gcm_remain(unsigned int len) { len &= 0xfU; return len ? 
16 - len : 0; } static void gcm_hash_len_done(void *data, int err); static int gcm_hash_update(struct aead_request *req, crypto_completion_t compl, struct scatterlist *src, unsigned int len, u32 flags) { struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); struct ahash_request *ahreq = &pctx->u.ahreq; ahash_request_set_callback(ahreq, flags, compl, req); ahash_request_set_crypt(ahreq, src, NULL, len); return crypto_ahash_update(ahreq); } static int gcm_hash_remain(struct aead_request *req, unsigned int remain, crypto_completion_t compl, u32 flags) { return gcm_hash_update(req, compl, &gcm_zeroes->sg, remain, flags); } static int gcm_hash_len(struct aead_request *req, u32 flags) { struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); struct ahash_request *ahreq = &pctx->u.ahreq; struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; be128 lengths; lengths.a = cpu_to_be64(req->assoclen * 8); lengths.b = cpu_to_be64(gctx->cryptlen * 8); memcpy(pctx->iauth_tag, &lengths, 16); sg_init_one(&pctx->sg, pctx->iauth_tag, 16); ahash_request_set_callback(ahreq, flags, gcm_hash_len_done, req); ahash_request_set_crypt(ahreq, &pctx->sg, pctx->iauth_tag, sizeof(lengths)); return crypto_ahash_finup(ahreq); } static int gcm_hash_len_continue(struct aead_request *req, u32 flags) { struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; return gctx->complete(req, flags); } static void gcm_hash_len_done(void *data, int err) { struct aead_request *req = data; if (err) goto out; err = gcm_hash_len_continue(req, 0); if (err == -EINPROGRESS) return; out: aead_request_complete(req, err); } static int gcm_hash_crypt_remain_continue(struct aead_request *req, u32 flags) { return gcm_hash_len(req, flags) ?: gcm_hash_len_continue(req, flags); } static void gcm_hash_crypt_remain_done(void *data, int err) { struct aead_request *req = data; if (err) goto out; err = gcm_hash_crypt_remain_continue(req, 0); if (err == -EINPROGRESS) return; out: aead_request_complete(req, err); } static int gcm_hash_crypt_continue(struct aead_request *req, u32 flags) { struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; unsigned int remain; remain = gcm_remain(gctx->cryptlen); if (remain) return gcm_hash_remain(req, remain, gcm_hash_crypt_remain_done, flags) ?: gcm_hash_crypt_remain_continue(req, flags); return gcm_hash_crypt_remain_continue(req, flags); } static void gcm_hash_crypt_done(void *data, int err) { struct aead_request *req = data; if (err) goto out; err = gcm_hash_crypt_continue(req, 0); if (err == -EINPROGRESS) return; out: aead_request_complete(req, err); } static int gcm_hash_assoc_remain_continue(struct aead_request *req, u32 flags) { struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; if (gctx->cryptlen) return gcm_hash_update(req, gcm_hash_crypt_done, gctx->src, gctx->cryptlen, flags) ?: gcm_hash_crypt_continue(req, flags); return gcm_hash_crypt_remain_continue(req, flags); } static void gcm_hash_assoc_remain_done(void *data, int err) { struct aead_request *req = data; if (err) goto out; err = gcm_hash_assoc_remain_continue(req, 0); if (err == -EINPROGRESS) return; out: aead_request_complete(req, err); } static int gcm_hash_assoc_continue(struct aead_request *req, u32 flags) { unsigned int remain; remain = gcm_remain(req->assoclen); if (remain) return gcm_hash_remain(req, remain, gcm_hash_assoc_remain_done, flags) ?: 
gcm_hash_assoc_remain_continue(req, flags); return gcm_hash_assoc_remain_continue(req, flags); } static void gcm_hash_assoc_done(void *data, int err) { struct aead_request *req = data; if (err) goto out; err = gcm_hash_assoc_continue(req, 0); if (err == -EINPROGRESS) return; out: aead_request_complete(req, err); } static int gcm_hash_init_continue(struct aead_request *req, u32 flags) { if (req->assoclen) return gcm_hash_update(req, gcm_hash_assoc_done, req->src, req->assoclen, flags) ?: gcm_hash_assoc_continue(req, flags); return gcm_hash_assoc_remain_continue(req, flags); } static void gcm_hash_init_done(void *data, int err) { struct aead_request *req = data; if (err) goto out; err = gcm_hash_init_continue(req, 0); if (err == -EINPROGRESS) return; out: aead_request_complete(req, err); } static int gcm_hash(struct aead_request *req, u32 flags) { struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); struct ahash_request *ahreq = &pctx->u.ahreq; struct crypto_gcm_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); ahash_request_set_tfm(ahreq, ctx->ghash); ahash_request_set_callback(ahreq, flags, gcm_hash_init_done, req); return crypto_ahash_init(ahreq) ?: gcm_hash_init_continue(req, flags); } static int gcm_enc_copy_hash(struct aead_request *req, u32 flags) { struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); struct crypto_aead *aead = crypto_aead_reqtfm(req); u8 *auth_tag = pctx->auth_tag; crypto_xor(auth_tag, pctx->iauth_tag, 16); scatterwalk_map_and_copy(auth_tag, req->dst, req->assoclen + req->cryptlen, crypto_aead_authsize(aead), 1); return 0; } static int gcm_encrypt_continue(struct aead_request *req, u32 flags) { struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; gctx->src = sg_next(req->src == req->dst ? pctx->src : pctx->dst); gctx->cryptlen = req->cryptlen; gctx->complete = gcm_enc_copy_hash; return gcm_hash(req, flags); } static void gcm_encrypt_done(void *data, int err) { struct aead_request *req = data; if (err) goto out; err = gcm_encrypt_continue(req, 0); if (err == -EINPROGRESS) return; out: aead_request_complete(req, err); } static int crypto_gcm_encrypt(struct aead_request *req) { struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); struct skcipher_request *skreq = &pctx->u.skreq; u32 flags = aead_request_flags(req); crypto_gcm_init_common(req); crypto_gcm_init_crypt(req, req->cryptlen); skcipher_request_set_callback(skreq, flags, gcm_encrypt_done, req); return crypto_skcipher_encrypt(skreq) ?: gcm_encrypt_continue(req, flags); } static int crypto_gcm_verify(struct aead_request *req) { struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); struct crypto_aead *aead = crypto_aead_reqtfm(req); u8 *auth_tag = pctx->auth_tag; u8 *iauth_tag = pctx->iauth_tag; unsigned int authsize = crypto_aead_authsize(aead); unsigned int cryptlen = req->cryptlen - authsize; crypto_xor(auth_tag, iauth_tag, 16); scatterwalk_map_and_copy(iauth_tag, req->src, req->assoclen + cryptlen, authsize, 0); return crypto_memneq(iauth_tag, auth_tag, authsize) ? 
-EBADMSG : 0; } static void gcm_decrypt_done(void *data, int err) { struct aead_request *req = data; if (!err) err = crypto_gcm_verify(req); aead_request_complete(req, err); } static int gcm_dec_hash_continue(struct aead_request *req, u32 flags) { struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); struct skcipher_request *skreq = &pctx->u.skreq; struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; crypto_gcm_init_crypt(req, gctx->cryptlen); skcipher_request_set_callback(skreq, flags, gcm_decrypt_done, req); return crypto_skcipher_decrypt(skreq) ?: crypto_gcm_verify(req); } static int crypto_gcm_decrypt(struct aead_request *req) { struct crypto_aead *aead = crypto_aead_reqtfm(req); struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; unsigned int authsize = crypto_aead_authsize(aead); unsigned int cryptlen = req->cryptlen; u32 flags = aead_request_flags(req); cryptlen -= authsize; crypto_gcm_init_common(req); gctx->src = sg_next(pctx->src); gctx->cryptlen = cryptlen; gctx->complete = gcm_dec_hash_continue; return gcm_hash(req, flags); } static int crypto_gcm_init_tfm(struct crypto_aead *tfm) { struct aead_instance *inst = aead_alg_instance(tfm); struct gcm_instance_ctx *ictx = aead_instance_ctx(inst); struct crypto_gcm_ctx *ctx = crypto_aead_ctx(tfm); struct crypto_skcipher *ctr; struct crypto_ahash *ghash; unsigned long align; int err; ghash = crypto_spawn_ahash(&ictx->ghash); if (IS_ERR(ghash)) return PTR_ERR(ghash); ctr = crypto_spawn_skcipher(&ictx->ctr); err = PTR_ERR(ctr); if (IS_ERR(ctr)) goto err_free_hash; ctx->ctr = ctr; ctx->ghash = ghash; align = crypto_aead_alignmask(tfm); align &= ~(crypto_tfm_ctx_alignment() - 1); crypto_aead_set_reqsize(tfm, align + offsetof(struct crypto_gcm_req_priv_ctx, u) + max(sizeof(struct skcipher_request) + crypto_skcipher_reqsize(ctr), sizeof(struct ahash_request) + crypto_ahash_reqsize(ghash))); return 0; err_free_hash: crypto_free_ahash(ghash); return err; } static void crypto_gcm_exit_tfm(struct crypto_aead *tfm) { struct crypto_gcm_ctx *ctx = crypto_aead_ctx(tfm); crypto_free_ahash(ctx->ghash); crypto_free_skcipher(ctx->ctr); } static void crypto_gcm_free(struct aead_instance *inst) { struct gcm_instance_ctx *ctx = aead_instance_ctx(inst); crypto_drop_skcipher(&ctx->ctr); crypto_drop_ahash(&ctx->ghash); kfree(inst); } static int crypto_gcm_create_common(struct crypto_template *tmpl, struct rtattr **tb, const char *ctr_name, const char *ghash_name) { struct skcipher_alg_common *ctr; u32 mask; struct aead_instance *inst; struct gcm_instance_ctx *ctx; struct hash_alg_common *ghash; int err; err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask); if (err) return err; inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); if (!inst) return -ENOMEM; ctx = aead_instance_ctx(inst); err = crypto_grab_ahash(&ctx->ghash, aead_crypto_instance(inst), ghash_name, 0, mask); if (err) goto err_free_inst; ghash = crypto_spawn_ahash_alg(&ctx->ghash); err = -EINVAL; if (strcmp(ghash->base.cra_name, "ghash") != 0 || ghash->digestsize != 16) goto err_free_inst; err = crypto_grab_skcipher(&ctx->ctr, aead_crypto_instance(inst), ctr_name, 0, mask); if (err) goto err_free_inst; ctr = crypto_spawn_skcipher_alg_common(&ctx->ctr); /* The skcipher algorithm must be CTR mode, using 16-byte blocks. 
*/ err = -EINVAL; if (strncmp(ctr->base.cra_name, "ctr(", 4) != 0 || ctr->ivsize != 16 || ctr->base.cra_blocksize != 1) goto err_free_inst; err = -ENAMETOOLONG; if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, "gcm(%s", ctr->base.cra_name + 4) >= CRYPTO_MAX_ALG_NAME) goto err_free_inst; if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "gcm_base(%s,%s)", ctr->base.cra_driver_name, ghash->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) goto err_free_inst; inst->alg.base.cra_priority = (ghash->base.cra_priority + ctr->base.cra_priority) / 2; inst->alg.base.cra_blocksize = 1; inst->alg.base.cra_alignmask = ctr->base.cra_alignmask; inst->alg.base.cra_ctxsize = sizeof(struct crypto_gcm_ctx); inst->alg.ivsize = GCM_AES_IV_SIZE; inst->alg.chunksize = ctr->chunksize; inst->alg.maxauthsize = 16; inst->alg.init = crypto_gcm_init_tfm; inst->alg.exit = crypto_gcm_exit_tfm; inst->alg.setkey = crypto_gcm_setkey; inst->alg.setauthsize = crypto_gcm_setauthsize; inst->alg.encrypt = crypto_gcm_encrypt; inst->alg.decrypt = crypto_gcm_decrypt; inst->free = crypto_gcm_free; err = aead_register_instance(tmpl, inst); if (err) { err_free_inst: crypto_gcm_free(inst); } return err; } static int crypto_gcm_create(struct crypto_template *tmpl, struct rtattr **tb) { const char *cipher_name; char ctr_name[CRYPTO_MAX_ALG_NAME]; cipher_name = crypto_attr_alg_name(tb[1]); if (IS_ERR(cipher_name)) return PTR_ERR(cipher_name); if (snprintf(ctr_name, CRYPTO_MAX_ALG_NAME, "ctr(%s)", cipher_name) >= CRYPTO_MAX_ALG_NAME) return -ENAMETOOLONG; return crypto_gcm_create_common(tmpl, tb, ctr_name, "ghash"); } static int crypto_gcm_base_create(struct crypto_template *tmpl, struct rtattr **tb) { const char *ctr_name; const char *ghash_name; ctr_name = crypto_attr_alg_name(tb[1]); if (IS_ERR(ctr_name)) return PTR_ERR(ctr_name); ghash_name = crypto_attr_alg_name(tb[2]); if (IS_ERR(ghash_name)) return PTR_ERR(ghash_name); return crypto_gcm_create_common(tmpl, tb, ctr_name, ghash_name); } static int crypto_rfc4106_setkey(struct crypto_aead *parent, const u8 *key, unsigned int keylen) { struct crypto_rfc4106_ctx *ctx = crypto_aead_ctx(parent); struct crypto_aead *child = ctx->child; if (keylen < 4) return -EINVAL; keylen -= 4; memcpy(ctx->nonce, key + keylen, 4); crypto_aead_clear_flags(child, CRYPTO_TFM_REQ_MASK); crypto_aead_set_flags(child, crypto_aead_get_flags(parent) & CRYPTO_TFM_REQ_MASK); return crypto_aead_setkey(child, key, keylen); } static int crypto_rfc4106_setauthsize(struct crypto_aead *parent, unsigned int authsize) { struct crypto_rfc4106_ctx *ctx = crypto_aead_ctx(parent); int err; err = crypto_rfc4106_check_authsize(authsize); if (err) return err; return crypto_aead_setauthsize(ctx->child, authsize); } static struct aead_request *crypto_rfc4106_crypt(struct aead_request *req) { struct crypto_rfc4106_req_ctx *rctx = aead_request_ctx(req); struct crypto_aead *aead = crypto_aead_reqtfm(req); struct crypto_rfc4106_ctx *ctx = crypto_aead_ctx(aead); struct aead_request *subreq = &rctx->subreq; struct crypto_aead *child = ctx->child; struct scatterlist *sg; u8 *iv = PTR_ALIGN((u8 *)(subreq + 1) + crypto_aead_reqsize(child), crypto_aead_alignmask(child) + 1); scatterwalk_map_and_copy(iv + GCM_AES_IV_SIZE, req->src, 0, req->assoclen - 8, 0); memcpy(iv, ctx->nonce, 4); memcpy(iv + 4, req->iv, 8); sg_init_table(rctx->src, 3); sg_set_buf(rctx->src, iv + GCM_AES_IV_SIZE, req->assoclen - 8); sg = scatterwalk_ffwd(rctx->src + 1, req->src, req->assoclen); if (sg != rctx->src + 1) sg_chain(rctx->src, 2, sg); if 
(req->src != req->dst) { sg_init_table(rctx->dst, 3); sg_set_buf(rctx->dst, iv + GCM_AES_IV_SIZE, req->assoclen - 8); sg = scatterwalk_ffwd(rctx->dst + 1, req->dst, req->assoclen); if (sg != rctx->dst + 1) sg_chain(rctx->dst, 2, sg); } aead_request_set_tfm(subreq, child); aead_request_set_callback(subreq, req->base.flags, req->base.complete, req->base.data); aead_request_set_crypt(subreq, rctx->src, req->src == req->dst ? rctx->src : rctx->dst, req->cryptlen, iv); aead_request_set_ad(subreq, req->assoclen - 8); return subreq; } static int crypto_rfc4106_encrypt(struct aead_request *req) { int err; err = crypto_ipsec_check_assoclen(req->assoclen); if (err) return err; req = crypto_rfc4106_crypt(req); return crypto_aead_encrypt(req); } static int crypto_rfc4106_decrypt(struct aead_request *req) { int err; err = crypto_ipsec_check_assoclen(req->assoclen); if (err) return err; req = crypto_rfc4106_crypt(req); return crypto_aead_decrypt(req); } static int crypto_rfc4106_init_tfm(struct crypto_aead *tfm) { struct aead_instance *inst = aead_alg_instance(tfm); struct crypto_aead_spawn *spawn = aead_instance_ctx(inst); struct crypto_rfc4106_ctx *ctx = crypto_aead_ctx(tfm); struct crypto_aead *aead; unsigned long align; aead = crypto_spawn_aead(spawn); if (IS_ERR(aead)) return PTR_ERR(aead); ctx->child = aead; align = crypto_aead_alignmask(aead); align &= ~(crypto_tfm_ctx_alignment() - 1); crypto_aead_set_reqsize( tfm, sizeof(struct crypto_rfc4106_req_ctx) + ALIGN(crypto_aead_reqsize(aead), crypto_tfm_ctx_alignment()) + align + 24); return 0; } static void crypto_rfc4106_exit_tfm(struct crypto_aead *tfm) { struct crypto_rfc4106_ctx *ctx = crypto_aead_ctx(tfm); crypto_free_aead(ctx->child); } static void crypto_rfc4106_free(struct aead_instance *inst) { crypto_drop_aead(aead_instance_ctx(inst)); kfree(inst); } static int crypto_rfc4106_create(struct crypto_template *tmpl, struct rtattr **tb) { u32 mask; struct aead_instance *inst; struct crypto_aead_spawn *spawn; struct aead_alg *alg; int err; err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask); if (err) return err; inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); if (!inst) return -ENOMEM; spawn = aead_instance_ctx(inst); err = crypto_grab_aead(spawn, aead_crypto_instance(inst), crypto_attr_alg_name(tb[1]), 0, mask); if (err) goto err_free_inst; alg = crypto_spawn_aead_alg(spawn); err = -EINVAL; /* Underlying IV size must be 12. */ if (crypto_aead_alg_ivsize(alg) != GCM_AES_IV_SIZE) goto err_free_inst; /* Not a stream cipher? 
*/ if (alg->base.cra_blocksize != 1) goto err_free_inst; err = -ENAMETOOLONG; if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, "rfc4106(%s)", alg->base.cra_name) >= CRYPTO_MAX_ALG_NAME || snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "rfc4106(%s)", alg->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) goto err_free_inst; inst->alg.base.cra_priority = alg->base.cra_priority; inst->alg.base.cra_blocksize = 1; inst->alg.base.cra_alignmask = alg->base.cra_alignmask; inst->alg.base.cra_ctxsize = sizeof(struct crypto_rfc4106_ctx); inst->alg.ivsize = GCM_RFC4106_IV_SIZE; inst->alg.chunksize = crypto_aead_alg_chunksize(alg); inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg); inst->alg.init = crypto_rfc4106_init_tfm; inst->alg.exit = crypto_rfc4106_exit_tfm; inst->alg.setkey = crypto_rfc4106_setkey; inst->alg.setauthsize = crypto_rfc4106_setauthsize; inst->alg.encrypt = crypto_rfc4106_encrypt; inst->alg.decrypt = crypto_rfc4106_decrypt; inst->free = crypto_rfc4106_free; err = aead_register_instance(tmpl, inst); if (err) { err_free_inst: crypto_rfc4106_free(inst); } return err; } static int crypto_rfc4543_setkey(struct crypto_aead *parent, const u8 *key, unsigned int keylen) { struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(parent); struct crypto_aead *child = ctx->child; if (keylen < 4) return -EINVAL; keylen -= 4; memcpy(ctx->nonce, key + keylen, 4); crypto_aead_clear_flags(child, CRYPTO_TFM_REQ_MASK); crypto_aead_set_flags(child, crypto_aead_get_flags(parent) & CRYPTO_TFM_REQ_MASK); return crypto_aead_setkey(child, key, keylen); } static int crypto_rfc4543_setauthsize(struct crypto_aead *parent, unsigned int authsize) { struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(parent); if (authsize != 16) return -EINVAL; return crypto_aead_setauthsize(ctx->child, authsize); } static int crypto_rfc4543_crypt(struct aead_request *req, bool enc) { struct crypto_aead *aead = crypto_aead_reqtfm(req); struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(aead); struct crypto_rfc4543_req_ctx *rctx = aead_request_ctx(req); struct aead_request *subreq = &rctx->subreq; unsigned int authsize = crypto_aead_authsize(aead); u8 *iv = PTR_ALIGN((u8 *)(rctx + 1) + crypto_aead_reqsize(ctx->child), crypto_aead_alignmask(ctx->child) + 1); if (req->src != req->dst) { unsigned int nbytes = req->assoclen + req->cryptlen - (enc ? 0 : authsize); memcpy_sglist(req->dst, req->src, nbytes); } memcpy(iv, ctx->nonce, 4); memcpy(iv + 4, req->iv, 8); aead_request_set_tfm(subreq, ctx->child); aead_request_set_callback(subreq, req->base.flags, req->base.complete, req->base.data); aead_request_set_crypt(subreq, req->src, req->dst, enc ? 0 : authsize, iv); aead_request_set_ad(subreq, req->assoclen + req->cryptlen - subreq->cryptlen); return enc ? 
crypto_aead_encrypt(subreq) : crypto_aead_decrypt(subreq); } static int crypto_rfc4543_encrypt(struct aead_request *req) { return crypto_ipsec_check_assoclen(req->assoclen) ?: crypto_rfc4543_crypt(req, true); } static int crypto_rfc4543_decrypt(struct aead_request *req) { return crypto_ipsec_check_assoclen(req->assoclen) ?: crypto_rfc4543_crypt(req, false); } static int crypto_rfc4543_init_tfm(struct crypto_aead *tfm) { struct aead_instance *inst = aead_alg_instance(tfm); struct crypto_rfc4543_instance_ctx *ictx = aead_instance_ctx(inst); struct crypto_aead_spawn *spawn = &ictx->aead; struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(tfm); struct crypto_aead *aead; unsigned long align; aead = crypto_spawn_aead(spawn); if (IS_ERR(aead)) return PTR_ERR(aead); ctx->child = aead; align = crypto_aead_alignmask(aead); align &= ~(crypto_tfm_ctx_alignment() - 1); crypto_aead_set_reqsize( tfm, sizeof(struct crypto_rfc4543_req_ctx) + ALIGN(crypto_aead_reqsize(aead), crypto_tfm_ctx_alignment()) + align + GCM_AES_IV_SIZE); return 0; } static void crypto_rfc4543_exit_tfm(struct crypto_aead *tfm) { struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(tfm); crypto_free_aead(ctx->child); } static void crypto_rfc4543_free(struct aead_instance *inst) { struct crypto_rfc4543_instance_ctx *ctx = aead_instance_ctx(inst); crypto_drop_aead(&ctx->aead); kfree(inst); } static int crypto_rfc4543_create(struct crypto_template *tmpl, struct rtattr **tb) { u32 mask; struct aead_instance *inst; struct aead_alg *alg; struct crypto_rfc4543_instance_ctx *ctx; int err; err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask); if (err) return err; inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); if (!inst) return -ENOMEM; ctx = aead_instance_ctx(inst); err = crypto_grab_aead(&ctx->aead, aead_crypto_instance(inst), crypto_attr_alg_name(tb[1]), 0, mask); if (err) goto err_free_inst; alg = crypto_spawn_aead_alg(&ctx->aead); err = -EINVAL; /* Underlying IV size must be 12. */ if (crypto_aead_alg_ivsize(alg) != GCM_AES_IV_SIZE) goto err_free_inst; /* Not a stream cipher? 
*/ if (alg->base.cra_blocksize != 1) goto err_free_inst; err = -ENAMETOOLONG; if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, "rfc4543(%s)", alg->base.cra_name) >= CRYPTO_MAX_ALG_NAME || snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "rfc4543(%s)", alg->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) goto err_free_inst; inst->alg.base.cra_priority = alg->base.cra_priority; inst->alg.base.cra_blocksize = 1; inst->alg.base.cra_alignmask = alg->base.cra_alignmask; inst->alg.base.cra_ctxsize = sizeof(struct crypto_rfc4543_ctx); inst->alg.ivsize = GCM_RFC4543_IV_SIZE; inst->alg.chunksize = crypto_aead_alg_chunksize(alg); inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg); inst->alg.init = crypto_rfc4543_init_tfm; inst->alg.exit = crypto_rfc4543_exit_tfm; inst->alg.setkey = crypto_rfc4543_setkey; inst->alg.setauthsize = crypto_rfc4543_setauthsize; inst->alg.encrypt = crypto_rfc4543_encrypt; inst->alg.decrypt = crypto_rfc4543_decrypt; inst->free = crypto_rfc4543_free; err = aead_register_instance(tmpl, inst); if (err) { err_free_inst: crypto_rfc4543_free(inst); } return err; } static struct crypto_template crypto_gcm_tmpls[] = { { .name = "gcm_base", .create = crypto_gcm_base_create, .module = THIS_MODULE, }, { .name = "gcm", .create = crypto_gcm_create, .module = THIS_MODULE, }, { .name = "rfc4106", .create = crypto_rfc4106_create, .module = THIS_MODULE, }, { .name = "rfc4543", .create = crypto_rfc4543_create, .module = THIS_MODULE, }, }; static int __init crypto_gcm_module_init(void) { int err; gcm_zeroes = kzalloc(sizeof(*gcm_zeroes), GFP_KERNEL); if (!gcm_zeroes) return -ENOMEM; sg_init_one(&gcm_zeroes->sg, gcm_zeroes->buf, sizeof(gcm_zeroes->buf)); err = crypto_register_templates(crypto_gcm_tmpls, ARRAY_SIZE(crypto_gcm_tmpls)); if (err) kfree(gcm_zeroes); return err; } static void __exit crypto_gcm_module_exit(void) { kfree(gcm_zeroes); crypto_unregister_templates(crypto_gcm_tmpls, ARRAY_SIZE(crypto_gcm_tmpls)); } module_init(crypto_gcm_module_init); module_exit(crypto_gcm_module_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Galois/Counter Mode"); MODULE_AUTHOR("Mikko Herranen <mh1@iki.fi>"); MODULE_ALIAS_CRYPTO("gcm_base"); MODULE_ALIAS_CRYPTO("rfc4106"); MODULE_ALIAS_CRYPTO("rfc4543"); MODULE_ALIAS_CRYPTO("gcm");
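The templates registered above ("gcm", "gcm_base", "rfc4106", "rfc4543") are instantiated by name on first use. The sketch below is not part of gcm.c; it shows one way a kernel caller could drive the resulting "gcm(aes)" AEAD through the same request API this file uses internally. The key, nonce, buffer contents and the function name are made-up example values and error handling is kept minimal.

/* Illustrative only: not part of crypto/gcm.c. */
#include <crypto/aead.h>
#include <crypto/gcm.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int gcm_aes_example(void)
{
	static const u8 key[16];		/* example AES-128 key (all zero) */
	u8 iv[GCM_AES_IV_SIZE] = {};		/* 96-bit nonce, example only */
	struct crypto_aead *tfm;
	struct aead_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	u8 *buf;
	int err;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* 16 bytes of plaintext followed by room for the 16-byte tag. */
	buf = kzalloc(32, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	err = crypto_aead_setkey(tfm, key, sizeof(key)) ?:
	      crypto_aead_setauthsize(tfm, 16);
	if (err)
		goto out_free_buf;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_buf;
	}

	/* In-place encrypt, no associated data; the tag lands after the data. */
	sg_init_one(&sg, buf, 32);
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
				  CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &wait);
	aead_request_set_ad(req, 0);
	aead_request_set_crypt(req, &sg, &sg, 16, iv);

	err = crypto_wait_req(crypto_aead_encrypt(req), &wait);

	aead_request_free(req);
out_free_buf:
	kfree(buf);
out_free_tfm:
	crypto_free_aead(tfm);
	return err;
}

The same transformation is also reachable from user space through AF_ALG under the algorithm name "gcm(aes)".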
// SPDX-License-Identifier: GPL-2.0-or-later /* * ASIX AX88172A based USB 2.0 Ethernet Devices * Copyright (C) 2012 OMICRON electronics GmbH * * Supports external PHYs via phylib. Based on the driver for the * AX88772. Original copyrights follow: * * Copyright (C) 2003-2006 David Hollis <dhollis@davehollis.com> * Copyright (C) 2005 Phil Chang <pchang23@sbcglobal.net> * Copyright (C) 2006 James Painter <jamie.painter@iname.com> * Copyright (c) 2002-2003 TiVo Inc.
*/ #include "asix.h" #include <linux/phy.h> struct ax88172a_private { struct mii_bus *mdio; struct phy_device *phydev; char phy_name[PHY_ID_SIZE]; u8 phy_addr; u16 oldmode; int use_embdphy; struct asix_rx_fixup_info rx_fixup_info; }; /* set MAC link settings according to information from phylib */ static void ax88172a_adjust_link(struct net_device *netdev) { struct phy_device *phydev = netdev->phydev; struct usbnet *dev = netdev_priv(netdev); struct ax88172a_private *priv = dev->driver_priv; u16 mode = 0; if (phydev->link) { mode = AX88772_MEDIUM_DEFAULT; if (phydev->duplex == DUPLEX_HALF) mode &= ~AX_MEDIUM_FD; if (phydev->speed != SPEED_100) mode &= ~AX_MEDIUM_PS; } if (mode != priv->oldmode) { asix_write_medium_mode(dev, mode, 0); priv->oldmode = mode; netdev_dbg(netdev, "speed %u duplex %d, setting mode to 0x%04x\n", phydev->speed, phydev->duplex, mode); phy_print_status(phydev); } } static void ax88172a_status(struct usbnet *dev, struct urb *urb) { /* link changes are detected by polling the phy */ } /* use phylib infrastructure */ static int ax88172a_init_mdio(struct usbnet *dev) { struct ax88172a_private *priv = dev->driver_priv; int ret; priv->mdio = mdiobus_alloc(); if (!priv->mdio) { netdev_err(dev->net, "Could not allocate MDIO bus\n"); return -ENOMEM; } priv->mdio->priv = (void *)dev; priv->mdio->read = &asix_mdio_bus_read; priv->mdio->write = &asix_mdio_bus_write; priv->mdio->name = "Asix MDIO Bus"; /* mii bus name is usb-<usb bus number>-<usb device number> */ snprintf(priv->mdio->id, MII_BUS_ID_SIZE, "usb-%03d:%03d", dev->udev->bus->busnum, dev->udev->devnum); ret = mdiobus_register(priv->mdio); if (ret) { netdev_err(dev->net, "Could not register MDIO bus\n"); goto mfree; } netdev_info(dev->net, "registered mdio bus %s\n", priv->mdio->id); return 0; mfree: mdiobus_free(priv->mdio); return ret; } static void ax88172a_remove_mdio(struct usbnet *dev) { struct ax88172a_private *priv = dev->driver_priv; netdev_info(dev->net, "deregistering mdio bus %s\n", priv->mdio->id); mdiobus_unregister(priv->mdio); mdiobus_free(priv->mdio); } static const struct net_device_ops ax88172a_netdev_ops = { .ndo_open = usbnet_open, .ndo_stop = usbnet_stop, .ndo_start_xmit = usbnet_start_xmit, .ndo_tx_timeout = usbnet_tx_timeout, .ndo_change_mtu = usbnet_change_mtu, .ndo_get_stats64 = dev_get_tstats64, .ndo_set_mac_address = asix_set_mac_address, .ndo_validate_addr = eth_validate_addr, .ndo_eth_ioctl = phy_do_ioctl_running, .ndo_set_rx_mode = asix_set_multicast, }; static const struct ethtool_ops ax88172a_ethtool_ops = { .get_drvinfo = asix_get_drvinfo, .get_link = usbnet_get_link, .get_msglevel = usbnet_get_msglevel, .set_msglevel = usbnet_set_msglevel, .get_wol = asix_get_wol, .set_wol = asix_set_wol, .get_eeprom_len = asix_get_eeprom_len, .get_eeprom = asix_get_eeprom, .set_eeprom = asix_set_eeprom, .nway_reset = phy_ethtool_nway_reset, .get_link_ksettings = phy_ethtool_get_link_ksettings, .set_link_ksettings = phy_ethtool_set_link_ksettings, }; static int ax88172a_reset_phy(struct usbnet *dev, int embd_phy) { int ret; ret = asix_sw_reset(dev, AX_SWRESET_IPPD, 0); if (ret < 0) goto err; msleep(150); ret = asix_sw_reset(dev, AX_SWRESET_CLEAR, 0); if (ret < 0) goto err; msleep(150); ret = asix_sw_reset(dev, embd_phy ? 
AX_SWRESET_IPRL : AX_SWRESET_IPPD, 0); if (ret < 0) goto err; return 0; err: return ret; } static int ax88172a_bind(struct usbnet *dev, struct usb_interface *intf) { int ret; u8 buf[ETH_ALEN]; struct ax88172a_private *priv; ret = usbnet_get_endpoints(dev, intf); if (ret) return ret; priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; dev->driver_priv = priv; /* Get the MAC address */ ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf, 0); if (ret < ETH_ALEN) { netdev_err(dev->net, "Failed to read MAC address: %d\n", ret); ret = -EIO; goto free; } eth_hw_addr_set(dev->net, buf); dev->net->netdev_ops = &ax88172a_netdev_ops; dev->net->ethtool_ops = &ax88172a_ethtool_ops; /* are we using the internal or the external phy? */ ret = asix_read_cmd(dev, AX_CMD_SW_PHY_STATUS, 0, 0, 1, buf, 0); if (ret < 0) { netdev_err(dev->net, "Failed to read software interface selection register: %d\n", ret); goto free; } netdev_dbg(dev->net, "AX_CMD_SW_PHY_STATUS = 0x%02x\n", buf[0]); switch (buf[0] & AX_PHY_SELECT_MASK) { case AX_PHY_SELECT_INTERNAL: netdev_dbg(dev->net, "use internal phy\n"); priv->use_embdphy = 1; break; case AX_PHY_SELECT_EXTERNAL: netdev_dbg(dev->net, "use external phy\n"); priv->use_embdphy = 0; break; default: netdev_err(dev->net, "Interface mode not supported by driver\n"); ret = -ENOTSUPP; goto free; } ret = asix_read_phy_addr(dev, priv->use_embdphy); if (ret < 0) goto free; if (ret >= PHY_MAX_ADDR) { netdev_err(dev->net, "Invalid PHY address %#x\n", ret); ret = -ENODEV; goto free; } priv->phy_addr = ret; ax88172a_reset_phy(dev, priv->use_embdphy); /* Asix framing packs multiple eth frames into a 2K usb bulk transfer */ if (dev->driver_info->flags & FLAG_FRAMING_AX) { /* hard_mtu is still the default - the device does not support jumbo eth frames */ dev->rx_urb_size = 2048; } /* init MDIO bus */ ret = ax88172a_init_mdio(dev); if (ret) goto free; return 0; free: kfree(priv); return ret; } static int ax88172a_stop(struct usbnet *dev) { struct ax88172a_private *priv = dev->driver_priv; netdev_dbg(dev->net, "Stopping interface\n"); if (priv->phydev) { netdev_info(dev->net, "Disconnecting from phy %s\n", priv->phy_name); phy_stop(priv->phydev); phy_disconnect(priv->phydev); } return 0; } static void ax88172a_unbind(struct usbnet *dev, struct usb_interface *intf) { struct ax88172a_private *priv = dev->driver_priv; ax88172a_remove_mdio(dev); kfree(priv); } static int ax88172a_reset(struct usbnet *dev) { struct asix_data *data = (struct asix_data *)&dev->data; struct ax88172a_private *priv = dev->driver_priv; int ret; u16 rx_ctl; ax88172a_reset_phy(dev, priv->use_embdphy); msleep(150); rx_ctl = asix_read_rx_ctl(dev, 0); netdev_dbg(dev->net, "RX_CTL is 0x%04x after software reset\n", rx_ctl); ret = asix_write_rx_ctl(dev, 0x0000, 0); if (ret < 0) goto out; rx_ctl = asix_read_rx_ctl(dev, 0); netdev_dbg(dev->net, "RX_CTL is 0x%04x setting to 0x0000\n", rx_ctl); msleep(150); ret = asix_write_cmd(dev, AX_CMD_WRITE_IPG0, AX88772_IPG0_DEFAULT | AX88772_IPG1_DEFAULT, AX88772_IPG2_DEFAULT, 0, NULL, 0); if (ret < 0) { netdev_err(dev->net, "Write IPG,IPG1,IPG2 failed: %d\n", ret); goto out; } /* Rewrite MAC address */ memcpy(data->mac_addr, dev->net->dev_addr, ETH_ALEN); ret = asix_write_cmd(dev, AX_CMD_WRITE_NODE_ID, 0, 0, ETH_ALEN, data->mac_addr, 0); if (ret < 0) goto out; /* Set RX_CTL to default values with 2k buffer, and enable cactus */ ret = asix_write_rx_ctl(dev, AX_DEFAULT_RX_CTL, 0); if (ret < 0) goto out; rx_ctl = asix_read_rx_ctl(dev, 0); netdev_dbg(dev->net, 
"RX_CTL is 0x%04x after all initializations\n", rx_ctl); rx_ctl = asix_read_medium_status(dev, 0); netdev_dbg(dev->net, "Medium Status is 0x%04x after all initializations\n", rx_ctl); /* Connect to PHY */ snprintf(priv->phy_name, sizeof(priv->phy_name), PHY_ID_FMT, priv->mdio->id, priv->phy_addr); priv->phydev = phy_connect(dev->net, priv->phy_name, &ax88172a_adjust_link, PHY_INTERFACE_MODE_MII); if (IS_ERR(priv->phydev)) { netdev_err(dev->net, "Could not connect to PHY device %s\n", priv->phy_name); ret = PTR_ERR(priv->phydev); goto out; } netdev_info(dev->net, "Connected to phy %s\n", priv->phy_name); /* During power-up, the AX88172A set the power down (BMCR_PDOWN) * bit of the PHY. Bring the PHY up again. */ genphy_resume(priv->phydev); phy_start(priv->phydev); return 0; out: return ret; } static int ax88172a_rx_fixup(struct usbnet *dev, struct sk_buff *skb) { struct ax88172a_private *dp = dev->driver_priv; struct asix_rx_fixup_info *rx = &dp->rx_fixup_info; return asix_rx_fixup_internal(dev, skb, rx); } const struct driver_info ax88172a_info = { .description = "ASIX AX88172A USB 2.0 Ethernet", .bind = ax88172a_bind, .reset = ax88172a_reset, .stop = ax88172a_stop, .unbind = ax88172a_unbind, .status = ax88172a_status, .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR | FLAG_MULTI_PACKET, .rx_fixup = ax88172a_rx_fixup, .tx_fixup = asix_tx_fixup, };
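The driver_info above is only the usbnet glue for the AX88172A; binding it to actual hardware happens through a USB device ID table, which in the mainline tree lives in the companion asix_devices.c (with ax88172a_info declared in asix.h). A rough sketch of that hookup follows; the vendor/product IDs and the example_* names are placeholders for illustration, not the real ASIX IDs.

/* Sketch only: the real ID table lives in asix_devices.c. */
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb/usbnet.h>
#include "asix.h"			/* assumed to declare ax88172a_info */

static const struct usb_device_id example_asix_ids[] = {
	{
		/* Placeholder vendor/product IDs for this sketch. */
		USB_DEVICE(0x1234, 0x5678),
		.driver_info = (unsigned long)&ax88172a_info,
	},
	{ }	/* terminating entry */
};
MODULE_DEVICE_TABLE(usb, example_asix_ids);

static struct usb_driver example_asix_driver = {
	.name		= "asix_example",
	.id_table	= example_asix_ids,
	.probe		= usbnet_probe,
	.disconnect	= usbnet_disconnect,
	.suspend	= usbnet_suspend,
	.resume		= usbnet_resume,
};
module_usb_driver(example_asix_driver);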
#include <linux/gfp.h> #include <linux/initrd.h> #include <linux/ioport.h> #include <linux/swap.h> #include <linux/memblock.h> #include <linux/swapfile.h> #include <linux/swapops.h> #include <linux/kmemleak.h> #include <linux/sched/task.h> #include <linux/execmem.h> #include <asm/set_memory.h> #include <asm/cpu_device_id.h> #include <asm/e820/api.h> #include <asm/init.h> #include <asm/page.h> #include <asm/page_types.h> #include <asm/sections.h> #include <asm/setup.h> #include <asm/tlbflush.h> #include <asm/tlb.h> #include <asm/proto.h> #include <asm/dma.h> /* for MAX_DMA_PFN */ #include <asm/kaslr.h> #include <asm/hypervisor.h> #include <asm/cpufeature.h> #include <asm/pti.h> #include <asm/text-patching.h> #include <asm/memtype.h> #include <asm/paravirt.h> #include <asm/mmu_context.h> /* * We need to define the tracepoints somewhere, and tlb.c * is only compiled when SMP=y. */ #include <trace/events/tlb.h> #include "mm_internal.h" /* * Tables translating between page_cache_type_t and pte encoding. * * The default values are defined statically as minimal supported mode; * WC and WT fall back to UC-. pat_init() updates these values to support * more cache modes, WC and WT, when it is safe to do so. See pat_init() * for the details. Note, __early_ioremap() used during early boot-time * takes pgprot_t (pte encoding) and does not use these tables. * * Index into __cachemode2pte_tbl[] is the cachemode. * * Index into __pte2cachemode_tbl[] are the caching attribute bits of the pte * (_PAGE_PWT, _PAGE_PCD, _PAGE_PAT) at index bit positions 0, 1, 2.
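 *
 * For example, with the static defaults below _PAGE_CACHE_MODE_UC translates
 * to (_PAGE_PWT | _PAGE_PCD), i.e. __pte2cm_idx() index 3, and
 * __pte2cachemode_tbl[3] translates back to _PAGE_CACHE_MODE_UC.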
*/ static uint16_t __cachemode2pte_tbl[_PAGE_CACHE_MODE_NUM] = { [_PAGE_CACHE_MODE_WB ] = 0 | 0 , [_PAGE_CACHE_MODE_WC ] = 0 | _PAGE_PCD, [_PAGE_CACHE_MODE_UC_MINUS] = 0 | _PAGE_PCD, [_PAGE_CACHE_MODE_UC ] = _PAGE_PWT | _PAGE_PCD, [_PAGE_CACHE_MODE_WT ] = 0 | _PAGE_PCD, [_PAGE_CACHE_MODE_WP ] = 0 | _PAGE_PCD, }; unsigned long cachemode2protval(enum page_cache_mode pcm) { if (likely(pcm == 0)) return 0; return __cachemode2pte_tbl[pcm]; } EXPORT_SYMBOL(cachemode2protval); static uint8_t __pte2cachemode_tbl[8] = { [__pte2cm_idx( 0 | 0 | 0 )] = _PAGE_CACHE_MODE_WB, [__pte2cm_idx(_PAGE_PWT | 0 | 0 )] = _PAGE_CACHE_MODE_UC_MINUS, [__pte2cm_idx( 0 | _PAGE_PCD | 0 )] = _PAGE_CACHE_MODE_UC_MINUS, [__pte2cm_idx(_PAGE_PWT | _PAGE_PCD | 0 )] = _PAGE_CACHE_MODE_UC, [__pte2cm_idx( 0 | 0 | _PAGE_PAT)] = _PAGE_CACHE_MODE_WB, [__pte2cm_idx(_PAGE_PWT | 0 | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC_MINUS, [__pte2cm_idx(0 | _PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC_MINUS, [__pte2cm_idx(_PAGE_PWT | _PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC, }; /* * Check that the write-protect PAT entry is set for write-protect. * To do this without making assumptions how PAT has been set up (Xen has * another layout than the kernel), translate the _PAGE_CACHE_MODE_WP cache * mode via the __cachemode2pte_tbl[] into protection bits (those protection * bits will select a cache mode of WP or better), and then translate the * protection bits back into the cache mode using __pte2cm_idx() and the * __pte2cachemode_tbl[] array. This will return the really used cache mode. */ bool x86_has_pat_wp(void) { uint16_t prot = __cachemode2pte_tbl[_PAGE_CACHE_MODE_WP]; return __pte2cachemode_tbl[__pte2cm_idx(prot)] == _PAGE_CACHE_MODE_WP; } enum page_cache_mode pgprot2cachemode(pgprot_t pgprot) { unsigned long masked; masked = pgprot_val(pgprot) & _PAGE_CACHE_MASK; if (likely(masked == 0)) return 0; return __pte2cachemode_tbl[__pte2cm_idx(masked)]; } static unsigned long __initdata pgt_buf_start; static unsigned long __initdata pgt_buf_end; static unsigned long __initdata pgt_buf_top; static unsigned long min_pfn_mapped; static bool __initdata can_use_brk_pgt = true; /* * Pages returned are already directly mapped. * * Changing that is likely to break Xen, see commit: * * 279b706 x86,xen: introduce x86_init.mapping.pagetable_reserve * * for detailed information. */ __ref void *alloc_low_pages(unsigned int num) { unsigned long pfn; int i; if (after_bootmem) { unsigned int order; order = get_order((unsigned long)num << PAGE_SHIFT); return (void *)__get_free_pages(GFP_ATOMIC | __GFP_ZERO, order); } if ((pgt_buf_end + num) > pgt_buf_top || !can_use_brk_pgt) { unsigned long ret = 0; if (min_pfn_mapped < max_pfn_mapped) { ret = memblock_phys_alloc_range( PAGE_SIZE * num, PAGE_SIZE, min_pfn_mapped << PAGE_SHIFT, max_pfn_mapped << PAGE_SHIFT); } if (!ret && can_use_brk_pgt) ret = __pa(extend_brk(PAGE_SIZE * num, PAGE_SIZE)); if (!ret) panic("alloc_low_pages: can not alloc memory"); pfn = ret >> PAGE_SHIFT; } else { pfn = pgt_buf_end; pgt_buf_end += num; } for (i = 0; i < num; i++) { void *adr; adr = __va((pfn + i) << PAGE_SHIFT); clear_page(adr); } return __va(pfn << PAGE_SHIFT); } /* * By default need to be able to allocate page tables below PGD firstly for * the 0-ISA_END_ADDRESS range and secondly for the initial PMD_SIZE mapping. * With KASLR memory randomization, depending on the machine e820 memory and the * PUD alignment, twice that many pages may be needed when KASLR memory * randomization is enabled. 
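 *
 * Concretely, with the constants below this reserves 8 pages (32 KiB) of brk
 * space when CONFIG_RANDOMIZE_MEMORY is off and 16 pages (64 KiB) when it is
 * on.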
*/ #define INIT_PGD_PAGE_TABLES 4 #ifndef CONFIG_RANDOMIZE_MEMORY #define INIT_PGD_PAGE_COUNT (2 * INIT_PGD_PAGE_TABLES) #else #define INIT_PGD_PAGE_COUNT (4 * INIT_PGD_PAGE_TABLES) #endif #define INIT_PGT_BUF_SIZE (INIT_PGD_PAGE_COUNT * PAGE_SIZE) RESERVE_BRK(early_pgt_alloc, INIT_PGT_BUF_SIZE); void __init early_alloc_pgt_buf(void) { unsigned long tables = INIT_PGT_BUF_SIZE; phys_addr_t base; base = __pa(extend_brk(tables, PAGE_SIZE)); pgt_buf_start = base >> PAGE_SHIFT; pgt_buf_end = pgt_buf_start; pgt_buf_top = pgt_buf_start + (tables >> PAGE_SHIFT); } int after_bootmem; early_param_on_off("gbpages", "nogbpages", direct_gbpages, CONFIG_X86_DIRECT_GBPAGES); struct map_range { unsigned long start; unsigned long end; unsigned page_size_mask; }; static int page_size_mask; /* * Save some of cr4 feature set we're using (e.g. Pentium 4MB * enable and PPro Global page enable), so that any CPU's that boot * up after us can get the correct flags. Invoked on the boot CPU. */ static inline void cr4_set_bits_and_update_boot(unsigned long mask) { mmu_cr4_features |= mask; if (trampoline_cr4_features) *trampoline_cr4_features = mmu_cr4_features; cr4_set_bits(mask); } static void __init probe_page_size_mask(void) { /* * For pagealloc debugging, identity mapping will use small pages. * This will simplify cpa(), which otherwise needs to support splitting * large pages into small in interrupt context, etc. */ if (boot_cpu_has(X86_FEATURE_PSE) && !debug_pagealloc_enabled()) page_size_mask |= 1 << PG_LEVEL_2M; else direct_gbpages = 0; /* Enable PSE if available */ if (boot_cpu_has(X86_FEATURE_PSE)) cr4_set_bits_and_update_boot(X86_CR4_PSE); /* Enable PGE if available */ __supported_pte_mask &= ~_PAGE_GLOBAL; if (boot_cpu_has(X86_FEATURE_PGE)) { cr4_set_bits_and_update_boot(X86_CR4_PGE); __supported_pte_mask |= _PAGE_GLOBAL; } /* By the default is everything supported: */ __default_kernel_pte_mask = __supported_pte_mask; /* Except when with PTI where the kernel is mostly non-Global: */ if (cpu_feature_enabled(X86_FEATURE_PTI)) __default_kernel_pte_mask &= ~_PAGE_GLOBAL; /* Enable 1 GB linear kernel mappings if available: */ if (direct_gbpages && boot_cpu_has(X86_FEATURE_GBPAGES)) { printk(KERN_INFO "Using GB pages for direct mapping\n"); page_size_mask |= 1 << PG_LEVEL_1G; } else { direct_gbpages = 0; } } /* * INVLPG may not properly flush Global entries on * these CPUs. New microcode fixes the issue. */ static const struct x86_cpu_id invlpg_miss_ids[] = { X86_MATCH_VFM(INTEL_ALDERLAKE, 0x2e), X86_MATCH_VFM(INTEL_ALDERLAKE_L, 0x42c), X86_MATCH_VFM(INTEL_ATOM_GRACEMONT, 0x11), X86_MATCH_VFM(INTEL_RAPTORLAKE, 0x118), X86_MATCH_VFM(INTEL_RAPTORLAKE_P, 0x4117), X86_MATCH_VFM(INTEL_RAPTORLAKE_S, 0x2e), {} }; static void setup_pcid(void) { const struct x86_cpu_id *invlpg_miss_match; if (!IS_ENABLED(CONFIG_X86_64)) return; if (!boot_cpu_has(X86_FEATURE_PCID)) return; invlpg_miss_match = x86_match_cpu(invlpg_miss_ids); if (invlpg_miss_match && boot_cpu_data.microcode < invlpg_miss_match->driver_data) { pr_info("Incomplete global flushes, disabling PCID"); setup_clear_cpu_cap(X86_FEATURE_PCID); return; } if (boot_cpu_has(X86_FEATURE_PGE)) { /* * This can't be cr4_set_bits_and_update_boot() -- the * trampoline code can't handle CR4.PCIDE and it wouldn't * do any good anyway. Despite the name, * cr4_set_bits_and_update_boot() doesn't actually cause * the bits in question to remain set all the way through * the secondary boot asm. * * Instead, we brute-force it and set CR4.PCIDE manually in * start_secondary(). 
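 *
 * (CR4.PCIDE can only be set in long mode, which is one reason this function
 * bails out early unless CONFIG_X86_64 is enabled.)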
*/ cr4_set_bits(X86_CR4_PCIDE); } else { /* * flush_tlb_all(), as currently implemented, won't work if * PCID is on but PGE is not. Since that combination * doesn't exist on real hardware, there's no reason to try * to fully support it, but it's polite to avoid corrupting * data if we're on an improperly configured VM. */ setup_clear_cpu_cap(X86_FEATURE_PCID); } } #ifdef CONFIG_X86_32 #define NR_RANGE_MR 3 #else /* CONFIG_X86_64 */ #define NR_RANGE_MR 5 #endif static int __meminit save_mr(struct map_range *mr, int nr_range, unsigned long start_pfn, unsigned long end_pfn, unsigned long page_size_mask) { if (start_pfn < end_pfn) { if (nr_range >= NR_RANGE_MR) panic("run out of range for init_memory_mapping\n"); mr[nr_range].start = start_pfn<<PAGE_SHIFT; mr[nr_range].end = end_pfn<<PAGE_SHIFT; mr[nr_range].page_size_mask = page_size_mask; nr_range++; } return nr_range; } /* * adjust the page_size_mask for small range to go with * big page size instead small one if nearby are ram too. */ static void __ref adjust_range_page_size_mask(struct map_range *mr, int nr_range) { int i; for (i = 0; i < nr_range; i++) { if ((page_size_mask & (1<<PG_LEVEL_2M)) && !(mr[i].page_size_mask & (1<<PG_LEVEL_2M))) { unsigned long start = round_down(mr[i].start, PMD_SIZE); unsigned long end = round_up(mr[i].end, PMD_SIZE); #ifdef CONFIG_X86_32 if ((end >> PAGE_SHIFT) > max_low_pfn) continue; #endif if (memblock_is_region_memory(start, end - start)) mr[i].page_size_mask |= 1<<PG_LEVEL_2M; } if ((page_size_mask & (1<<PG_LEVEL_1G)) && !(mr[i].page_size_mask & (1<<PG_LEVEL_1G))) { unsigned long start = round_down(mr[i].start, PUD_SIZE); unsigned long end = round_up(mr[i].end, PUD_SIZE); if (memblock_is_region_memory(start, end - start)) mr[i].page_size_mask |= 1<<PG_LEVEL_1G; } } } static const char *page_size_string(struct map_range *mr) { static const char str_1g[] = "1G"; static const char str_2m[] = "2M"; static const char str_4m[] = "4M"; static const char str_4k[] = "4k"; if (mr->page_size_mask & (1<<PG_LEVEL_1G)) return str_1g; /* * 32-bit without PAE has a 4M large page size. * PG_LEVEL_2M is misnamed, but we can at least * print out the right size in the string. */ if (IS_ENABLED(CONFIG_X86_32) && !IS_ENABLED(CONFIG_X86_PAE) && mr->page_size_mask & (1<<PG_LEVEL_2M)) return str_4m; if (mr->page_size_mask & (1<<PG_LEVEL_2M)) return str_2m; return str_4k; } static int __meminit split_mem_range(struct map_range *mr, int nr_range, unsigned long start, unsigned long end) { unsigned long start_pfn, end_pfn, limit_pfn; unsigned long pfn; int i; limit_pfn = PFN_DOWN(end); /* head if not big page alignment ? */ pfn = start_pfn = PFN_DOWN(start); #ifdef CONFIG_X86_32 /* * Don't use a large page for the first 2/4MB of memory * because there are often fixed size MTRRs in there * and overlapping MTRRs into large pages can cause * slowdowns. 
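 * (Hence the head range below is recorded via save_mr() with a zero
 * page-size mask.)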
*/ if (pfn == 0) end_pfn = PFN_DOWN(PMD_SIZE); else end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE)); #else /* CONFIG_X86_64 */ end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE)); #endif if (end_pfn > limit_pfn) end_pfn = limit_pfn; if (start_pfn < end_pfn) { nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0); pfn = end_pfn; } /* big page (2M) range */ start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE)); #ifdef CONFIG_X86_32 end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE)); #else /* CONFIG_X86_64 */ end_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE)); if (end_pfn > round_down(limit_pfn, PFN_DOWN(PMD_SIZE))) end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE)); #endif if (start_pfn < end_pfn) { nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, page_size_mask & (1<<PG_LEVEL_2M)); pfn = end_pfn; } #ifdef CONFIG_X86_64 /* big page (1G) range */ start_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE)); end_pfn = round_down(limit_pfn, PFN_DOWN(PUD_SIZE)); if (start_pfn < end_pfn) { nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, page_size_mask & ((1<<PG_LEVEL_2M)|(1<<PG_LEVEL_1G))); pfn = end_pfn; } /* tail is not big page (1G) alignment */ start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE)); end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE)); if (start_pfn < end_pfn) { nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, page_size_mask & (1<<PG_LEVEL_2M)); pfn = end_pfn; } #endif /* tail is not big page (2M) alignment */ start_pfn = pfn; end_pfn = limit_pfn; nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0); if (!after_bootmem) adjust_range_page_size_mask(mr, nr_range); /* try to merge same page size and continuous */ for (i = 0; nr_range > 1 && i < nr_range - 1; i++) { unsigned long old_start; if (mr[i].end != mr[i+1].start || mr[i].page_size_mask != mr[i+1].page_size_mask) continue; /* move it */ old_start = mr[i].start; memmove(&mr[i], &mr[i+1], (nr_range - 1 - i) * sizeof(struct map_range)); mr[i--].start = old_start; nr_range--; } for (i = 0; i < nr_range; i++) pr_debug(" [mem %#010lx-%#010lx] page %s\n", mr[i].start, mr[i].end - 1, page_size_string(&mr[i])); return nr_range; } struct range pfn_mapped[E820_MAX_ENTRIES]; int nr_pfn_mapped; static void add_pfn_range_mapped(unsigned long start_pfn, unsigned long end_pfn) { nr_pfn_mapped = add_range_with_merge(pfn_mapped, E820_MAX_ENTRIES, nr_pfn_mapped, start_pfn, end_pfn); nr_pfn_mapped = clean_sort_range(pfn_mapped, E820_MAX_ENTRIES); max_pfn_mapped = max(max_pfn_mapped, end_pfn); if (start_pfn < (1UL<<(32-PAGE_SHIFT))) max_low_pfn_mapped = max(max_low_pfn_mapped, min(end_pfn, 1UL<<(32-PAGE_SHIFT))); } bool pfn_range_is_mapped(unsigned long start_pfn, unsigned long end_pfn) { int i; for (i = 0; i < nr_pfn_mapped; i++) if ((start_pfn >= pfn_mapped[i].start) && (end_pfn <= pfn_mapped[i].end)) return true; return false; } /* * Setup the direct mapping of the physical memory at PAGE_OFFSET. * This runs before bootmem is initialized and gets pages directly from * the physical memory. To access them they are temporarily mapped. 
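 *
 * For example, on 64-bit with 2M and 1G pages enabled, a request to map
 * [1 MiB, 3 GiB) is split by split_mem_range() into a 4k range up to 2 MiB,
 * a 2M range up to 1 GiB and a 1G range for the remainder.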
*/ unsigned long __ref init_memory_mapping(unsigned long start, unsigned long end, pgprot_t prot) { struct map_range mr[NR_RANGE_MR]; unsigned long ret = 0; int nr_range, i; pr_debug("init_memory_mapping: [mem %#010lx-%#010lx]\n", start, end - 1); memset(mr, 0, sizeof(mr)); nr_range = split_mem_range(mr, 0, start, end); for (i = 0; i < nr_range; i++) ret = kernel_physical_mapping_init(mr[i].start, mr[i].end, mr[i].page_size_mask, prot); add_pfn_range_mapped(start >> PAGE_SHIFT, ret >> PAGE_SHIFT); return ret >> PAGE_SHIFT; } /* * We need to iterate through the E820 memory map and create direct mappings * for only E820_TYPE_RAM and E820_KERN_RESERVED regions. We cannot simply * create direct mappings for all pfns from [0 to max_low_pfn) and * [4GB to max_pfn) because of possible memory holes in high addresses * that cannot be marked as UC by fixed/variable range MTRRs. * Depending on the alignment of E820 ranges, this may possibly result * in using smaller size (i.e. 4K instead of 2M or 1G) page tables. * * init_mem_mapping() calls init_range_memory_mapping() with big range. * That range would have hole in the middle or ends, and only ram parts * will be mapped in init_range_memory_mapping(). */ static unsigned long __init init_range_memory_mapping( unsigned long r_start, unsigned long r_end) { unsigned long start_pfn, end_pfn; unsigned long mapped_ram_size = 0; int i; for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) { u64 start = clamp_val(PFN_PHYS(start_pfn), r_start, r_end); u64 end = clamp_val(PFN_PHYS(end_pfn), r_start, r_end); if (start >= end) continue; /* * if it is overlapping with brk pgt, we need to * alloc pgt buf from memblock instead. */ can_use_brk_pgt = max(start, (u64)pgt_buf_end<<PAGE_SHIFT) >= min(end, (u64)pgt_buf_top<<PAGE_SHIFT); init_memory_mapping(start, end, PAGE_KERNEL); mapped_ram_size += end - start; can_use_brk_pgt = true; } return mapped_ram_size; } static unsigned long __init get_new_step_size(unsigned long step_size) { /* * Initial mapped size is PMD_SIZE (2M). * We can not set step_size to be PUD_SIZE (1G) yet. * In worse case, when we cross the 1G boundary, and * PG_LEVEL_2M is not set, we will need 1+1+512 pages (2M + 8k) * to map 1G range with PTE. Hence we use one less than the * difference of page table level shifts. * * Don't need to worry about overflow in the top-down case, on 32bit, * when step_size is 0, round_down() returns 0 for start, and that * turns it into 0x100000000ULL. * In the bottom-up case, round_up(x, 0) returns 0 though too, which * needs to be taken into consideration by the code below. */ return step_size << (PMD_SHIFT - PAGE_SHIFT - 1); } /** * memory_map_top_down - Map [map_start, map_end) top down * @map_start: start address of the target memory range * @map_end: end address of the target memory range * * This function will setup direct mapping for memory range * [map_start, map_end) in top-down. That said, the page tables * will be allocated at the end of the memory, and we map the * memory in top-down. */ static void __init memory_map_top_down(unsigned long map_start, unsigned long map_end) { unsigned long real_end, last_start; unsigned long step_size; unsigned long addr; unsigned long mapped_ram_size = 0; /* * Systems that have many reserved areas near top of the memory, * e.g. QEMU with less than 1G RAM and EFI enabled, or Xen, will * require lots of 4K mappings which may exhaust pgt_buf. 
* Start with top-most PMD_SIZE range aligned at PMD_SIZE to ensure * there is enough mapped memory that can be allocated from * memblock. */ addr = memblock_phys_alloc_range(PMD_SIZE, PMD_SIZE, map_start, map_end); if (!addr) { pr_warn("Failed to release memory for alloc_low_pages()"); real_end = max(map_start, ALIGN_DOWN(map_end, PMD_SIZE)); } else { memblock_phys_free(addr, PMD_SIZE); real_end = addr + PMD_SIZE; } /* step_size need to be small so pgt_buf from BRK could cover it */ step_size = PMD_SIZE; max_pfn_mapped = 0; /* will get exact value next */ min_pfn_mapped = real_end >> PAGE_SHIFT; last_start = real_end; /* * We start from the top (end of memory) and go to the bottom. * The memblock_find_in_range() gets us a block of RAM from the * end of RAM in [min_pfn_mapped, max_pfn_mapped) used as new pages * for page table. */ while (last_start > map_start) { unsigned long start; if (last_start > step_size) { start = round_down(last_start - 1, step_size); if (start < map_start) start = map_start; } else start = map_start; mapped_ram_size += init_range_memory_mapping(start, last_start); last_start = start; min_pfn_mapped = last_start >> PAGE_SHIFT; if (mapped_ram_size >= step_size) step_size = get_new_step_size(step_size); } if (real_end < map_end) init_range_memory_mapping(real_end, map_end); } /** * memory_map_bottom_up - Map [map_start, map_end) bottom up * @map_start: start address of the target memory range * @map_end: end address of the target memory range * * This function will setup direct mapping for memory range * [map_start, map_end) in bottom-up. Since we have limited the * bottom-up allocation above the kernel, the page tables will * be allocated just above the kernel and we map the memory * in [map_start, map_end) in bottom-up. */ static void __init memory_map_bottom_up(unsigned long map_start, unsigned long map_end) { unsigned long next, start; unsigned long mapped_ram_size = 0; /* step_size need to be small so pgt_buf from BRK could cover it */ unsigned long step_size = PMD_SIZE; start = map_start; min_pfn_mapped = start >> PAGE_SHIFT; /* * We start from the bottom (@map_start) and go to the top (@map_end). * The memblock_find_in_range() gets us a block of RAM from the * end of RAM in [min_pfn_mapped, max_pfn_mapped) used as new pages * for page table. */ while (start < map_end) { if (step_size && map_end - start > step_size) { next = round_up(start + 1, step_size); if (next > map_end) next = map_end; } else { next = map_end; } mapped_ram_size += init_range_memory_mapping(start, next); start = next; if (mapped_ram_size >= step_size) step_size = get_new_step_size(step_size); } } /* * The real mode trampoline, which is required for bootstrapping CPUs * occupies only a small area under the low 1MB. See reserve_real_mode() * for details. * * If KASLR is disabled the first PGD entry of the direct mapping is copied * to map the real mode trampoline. * * If KASLR is enabled, copy only the PUD which covers the low 1MB * area. This limits the randomization granularity to 1GB for both 4-level * and 5-level paging. */ static void __init init_trampoline(void) { #ifdef CONFIG_X86_64 /* * The code below will alias kernel page-tables in the user-range of the * address space, including the Global bit. So global TLB entries will * be created when using the trampoline page-table. 
*/ if (!kaslr_memory_enabled()) trampoline_pgd_entry = init_top_pgt[pgd_index(__PAGE_OFFSET)]; else init_trampoline_kaslr(); #endif } void __init init_mem_mapping(void) { unsigned long end; pti_check_boottime_disable(); probe_page_size_mask(); setup_pcid(); #ifdef CONFIG_X86_64 end = max_pfn << PAGE_SHIFT; #else end = max_low_pfn << PAGE_SHIFT; #endif /* the ISA range is always mapped regardless of memory holes */ init_memory_mapping(0, ISA_END_ADDRESS, PAGE_KERNEL); /* Init the trampoline, possibly with KASLR memory offset */ init_trampoline(); /* * If the allocation is in bottom-up direction, we setup direct mapping * in bottom-up, otherwise we setup direct mapping in top-down. */ if (memblock_bottom_up()) { unsigned long kernel_end = __pa_symbol(_end); /* * we need two separate calls here. This is because we want to * allocate page tables above the kernel. So we first map * [kernel_end, end) to make memory above the kernel be mapped * as soon as possible. And then use page tables allocated above * the kernel to map [ISA_END_ADDRESS, kernel_end). */ memory_map_bottom_up(kernel_end, end); memory_map_bottom_up(ISA_END_ADDRESS, kernel_end); } else { memory_map_top_down(ISA_END_ADDRESS, end); } #ifdef CONFIG_X86_64 if (max_pfn > max_low_pfn) { /* can we preserve max_low_pfn ?*/ max_low_pfn = max_pfn; } #else early_ioremap_page_table_range_init(); #endif load_cr3(swapper_pg_dir); __flush_tlb_all(); x86_init.hyper.init_mem_mapping(); early_memtest(0, max_pfn_mapped << PAGE_SHIFT); } /* * Initialize an mm_struct to be used during poking and a pointer to be used * during patching. */ void __init poking_init(void) { spinlock_t *ptl; pte_t *ptep; text_poke_mm = mm_alloc(); BUG_ON(!text_poke_mm); /* Xen PV guests need the PGD to be pinned. */ paravirt_enter_mmap(text_poke_mm); set_notrack_mm(text_poke_mm); /* * Randomize the poking address, but make sure that the following page * will be mapped at the same PMD. We need 2 pages, so find space for 3, * and adjust the address if the PMD ends after the first one. */ text_poke_mm_addr = TASK_UNMAPPED_BASE; if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) text_poke_mm_addr += (kaslr_get_random_long("Poking") & PAGE_MASK) % (TASK_SIZE - TASK_UNMAPPED_BASE - 3 * PAGE_SIZE); if (((text_poke_mm_addr + PAGE_SIZE) & ~PMD_MASK) == 0) text_poke_mm_addr += PAGE_SIZE; /* * We need to trigger the allocation of the page-tables that will be * needed for poking now. Later, poking may be performed in an atomic * section, which might cause allocation to fail. */ ptep = get_locked_pte(text_poke_mm, text_poke_mm_addr, &ptl); BUG_ON(!ptep); pte_unmap_unlock(ptep, ptl); } /* * devmem_is_allowed() checks to see if /dev/mem access to a certain address * is valid. The argument is a physical page number. * * On x86, access has to be given to the first megabyte of RAM because that * area traditionally contains BIOS code and data regions used by X, dosemu, * and similar apps. Since they map the entire memory range, the whole range * must be allowed (for mapping), but any areas that would otherwise be * disallowed are flagged as being "zero filled" instead of rejected. * Access has to be given to non-kernel-ram areas as well, these contain the * PCI mmio resources as well as potential bios/acpi data regions. */ int devmem_is_allowed(unsigned long pagenr) { if (region_intersects(PFN_PHYS(pagenr), PAGE_SIZE, IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE) != REGION_DISJOINT) { /* * For disallowed memory regions in the low 1MB range, * request that the page be shown as all zeros. 
*/ if (pagenr < 256) return 2; return 0; } /* * This must follow RAM test, since System RAM is considered a * restricted resource under CONFIG_STRICT_DEVMEM. */ if (iomem_is_exclusive(pagenr << PAGE_SHIFT)) { /* Low 1MB bypasses iomem restrictions. */ if (pagenr < 256) return 1; return 0; } return 1; } void free_init_pages(const char *what, unsigned long begin, unsigned long end) { unsigned long begin_aligned, end_aligned; /* Make sure boundaries are page aligned */ begin_aligned = PAGE_ALIGN(begin); end_aligned = end & PAGE_MASK; if (WARN_ON(begin_aligned != begin || end_aligned != end)) { begin = begin_aligned; end = end_aligned; } if (begin >= end) return; /* * If debugging page accesses then do not free this memory but * mark them not present - any buggy init-section access will * create a kernel page fault: */ if (debug_pagealloc_enabled()) { pr_info("debug: unmapping init [mem %#010lx-%#010lx]\n", begin, end - 1); /* * Inform kmemleak about the hole in the memory since the * corresponding pages will be unmapped. */ kmemleak_free_part((void *)begin, end - begin); set_memory_np(begin, (end - begin) >> PAGE_SHIFT); } else { /* * We just marked the kernel text read only above, now that * we are going to free part of that, we need to make that * writeable and non-executable first. */ set_memory_nx(begin, (end - begin) >> PAGE_SHIFT); set_memory_rw(begin, (end - begin) >> PAGE_SHIFT); free_reserved_area((void *)begin, (void *)end, POISON_FREE_INITMEM, what); } } /* * begin/end can be in the direct map or the "high kernel mapping" * used for the kernel image only. free_init_pages() will do the * right thing for either kind of address. */ void free_kernel_image_pages(const char *what, void *begin, void *end) { unsigned long begin_ul = (unsigned long)begin; unsigned long end_ul = (unsigned long)end; unsigned long len_pages = (end_ul - begin_ul) >> PAGE_SHIFT; free_init_pages(what, begin_ul, end_ul); /* * PTI maps some of the kernel into userspace. For performance, * this includes some kernel areas that do not contain secrets. * Those areas might be adjacent to the parts of the kernel image * being freed, which may contain secrets. Remove the "high kernel * image mapping" for these freed areas, ensuring they are not even * potentially vulnerable to Meltdown regardless of the specific * optimizations PTI is currently using. * * The "noalias" prevents unmapping the direct map alias which is * needed to access the freed pages. * * This is only valid for 64bit kernels. 32bit has only one mapping * which can't be treated in this way for obvious reasons. 
*/ if (IS_ENABLED(CONFIG_X86_64) && cpu_feature_enabled(X86_FEATURE_PTI)) set_memory_np_noalias(begin_ul, len_pages); } void __ref free_initmem(void) { e820__reallocate_tables(); mem_encrypt_free_decrypted_mem(); free_kernel_image_pages("unused kernel image (initmem)", &__init_begin, &__init_end); } #ifdef CONFIG_BLK_DEV_INITRD void __init free_initrd_mem(unsigned long start, unsigned long end) { /* * end could be not aligned, and We can not align that, * decompressor could be confused by aligned initrd_end * We already reserve the end partial page before in * - i386_start_kernel() * - x86_64_start_kernel() * - relocate_initrd() * So here We can do PAGE_ALIGN() safely to get partial page to be freed */ free_init_pages("initrd", start, PAGE_ALIGN(end)); } #endif void __init zone_sizes_init(void) { unsigned long max_zone_pfns[MAX_NR_ZONES]; memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); #ifdef CONFIG_ZONE_DMA max_zone_pfns[ZONE_DMA] = min(MAX_DMA_PFN, max_low_pfn); #endif #ifdef CONFIG_ZONE_DMA32 max_zone_pfns[ZONE_DMA32] = min(MAX_DMA32_PFN, max_low_pfn); #endif max_zone_pfns[ZONE_NORMAL] = max_low_pfn; #ifdef CONFIG_HIGHMEM max_zone_pfns[ZONE_HIGHMEM] = max_pfn; #endif free_area_init(max_zone_pfns); } __visible DEFINE_PER_CPU_ALIGNED(struct tlb_state, cpu_tlbstate) = { .loaded_mm = &init_mm, .next_asid = 1, .cr4 = ~0UL, /* fail hard if we screw up cr4 shadow initialization */ }; #ifdef CONFIG_ADDRESS_MASKING DEFINE_PER_CPU(u64, tlbstate_untag_mask); EXPORT_PER_CPU_SYMBOL(tlbstate_untag_mask); #endif void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache) { /* entry 0 MUST be WB (hardwired to speed up translations) */ BUG_ON(!entry && cache != _PAGE_CACHE_MODE_WB); __cachemode2pte_tbl[cache] = __cm_idx2pte(entry); __pte2cachemode_tbl[entry] = cache; } #ifdef CONFIG_SWAP unsigned long arch_max_swapfile_size(void) { unsigned long pages; pages = generic_max_swapfile_size(); if (boot_cpu_has_bug(X86_BUG_L1TF) && l1tf_mitigation != L1TF_MITIGATION_OFF) { /* Limit the swap file size to MAX_PA/2 for L1TF workaround */ unsigned long long l1tf_limit = l1tf_pfn_limit(); /* * We encode swap offsets also with 3 bits below those for pfn * which makes the usable limit higher. */ #if CONFIG_PGTABLE_LEVELS > 2 l1tf_limit <<= PAGE_SHIFT - SWP_OFFSET_FIRST_BIT; #endif pages = min_t(unsigned long long, l1tf_limit, pages); } return pages; } #endif #ifdef CONFIG_EXECMEM static struct execmem_info execmem_info __ro_after_init; #ifdef CONFIG_ARCH_HAS_EXECMEM_ROX void execmem_fill_trapping_insns(void *ptr, size_t size, bool writeable) { /* fill memory with INT3 instructions */ if (writeable) memset(ptr, INT3_INSN_OPCODE, size); else text_poke_set(ptr, INT3_INSN_OPCODE, size); } #endif struct execmem_info __init *execmem_arch_setup(void) { unsigned long start, offset = 0; enum execmem_range_flags flags; pgprot_t pgprot; if (kaslr_enabled()) offset = get_random_u32_inclusive(1, 1024) * PAGE_SIZE; start = MODULES_VADDR + offset; if (IS_ENABLED(CONFIG_ARCH_HAS_EXECMEM_ROX) && cpu_feature_enabled(X86_FEATURE_PSE)) { pgprot = PAGE_KERNEL_ROX; flags = EXECMEM_KASAN_SHADOW | EXECMEM_ROX_CACHE; } else { pgprot = PAGE_KERNEL; flags = EXECMEM_KASAN_SHADOW; } execmem_info = (struct execmem_info){ .ranges = { [EXECMEM_MODULE_TEXT] = { .flags = flags, .start = start, .end = MODULES_END, .pgprot = pgprot, .alignment = MODULE_ALIGN, }, [EXECMEM_KPROBES ... 
EXECMEM_BPF] = { .flags = EXECMEM_KASAN_SHADOW, .start = start, .end = MODULES_END, .pgprot = PAGE_KERNEL, .alignment = MODULE_ALIGN, }, [EXECMEM_MODULE_DATA] = { .flags = EXECMEM_KASAN_SHADOW, .start = start, .end = MODULES_END, .pgprot = PAGE_KERNEL, .alignment = MODULE_ALIGN, }, }, }; return &execmem_info; } #endif /* CONFIG_EXECMEM */
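A rough worked illustration of the step-size growth used by memory_map_top_down() and memory_map_bottom_up() above, assuming x86-64 with 4 KiB pages (PMD_SHIFT 21, PAGE_SHIFT 12); the helper name example_step_progression() is made up for illustration and is not part of the file:

/* Hypothetical sketch, not part of the arch/x86 code above: get_new_step_size()
 * shifts by PMD_SHIFT - PAGE_SHIFT - 1 == 8, so each round grows the mapping
 * window by 256x: 2 MiB -> 512 MiB -> 128 GiB -> ... */
static unsigned long example_step_progression(unsigned long step_size, int rounds)
{
	while (rounds-- > 0)
		step_size <<= PMD_SHIFT - PAGE_SHIFT - 1;	/* == 8 with 4 KiB pages */
	return step_size;
}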
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_TC_PED_H
#define __NET_TC_PED_H

#include <net/act_api.h>
#include <linux/tc_act/tc_pedit.h>
#include <linux/types.h>

struct tcf_pedit_key_ex {
	enum pedit_header_type htype;
	enum pedit_cmd cmd;
};

struct tcf_pedit_parms {
	struct tc_pedit_key *tcfp_keys;
	struct tcf_pedit_key_ex *tcfp_keys_ex;
	u32 tcfp_off_max_hint;
	unsigned char tcfp_nkeys;
	unsigned char tcfp_flags;
	struct rcu_head rcu;
};

struct tcf_pedit {
	struct tc_action common;
	struct tcf_pedit_parms __rcu *parms;
};

#define to_pedit(a) ((struct tcf_pedit *)a)
#define to_pedit_parms(a) (rcu_dereference(to_pedit(a)->parms))

static inline bool is_tcf_pedit(const struct tc_action *a)
{
#ifdef CONFIG_NET_CLS_ACT
	if (a->ops && a->ops->id == TCA_ID_PEDIT)
		return true;
#endif
	return false;
}

static inline int tcf_pedit_nkeys(const struct tc_action *a)
{
	struct tcf_pedit_parms *parms;
	int nkeys;

	rcu_read_lock();
	parms = to_pedit_parms(a);
	nkeys = parms->tcfp_nkeys;
	rcu_read_unlock();

	return nkeys;
}

static inline u32 tcf_pedit_htype(const struct tc_action *a, int index)
{
	u32 htype = TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK;
	struct tcf_pedit_parms *parms;

	rcu_read_lock();
	parms = to_pedit_parms(a);
	if (parms->tcfp_keys_ex)
		htype = parms->tcfp_keys_ex[index].htype;
	rcu_read_unlock();

	return htype;
}

static inline u32 tcf_pedit_cmd(const struct tc_action *a, int index)
{
	struct tcf_pedit_parms *parms;
	u32 cmd = __PEDIT_CMD_MAX;

	rcu_read_lock();
	parms = to_pedit_parms(a);
	if (parms->tcfp_keys_ex)
		cmd = parms->tcfp_keys_ex[index].cmd;
	rcu_read_unlock();

	return cmd;
}

static inline u32 tcf_pedit_mask(const struct tc_action *a, int index)
{
	struct tcf_pedit_parms *parms;
	u32 mask;

	rcu_read_lock();
	parms = to_pedit_parms(a);
	mask = parms->tcfp_keys[index].mask;
	rcu_read_unlock();

	return mask;
}

static inline u32 tcf_pedit_val(const struct tc_action *a, int index)
{
	struct tcf_pedit_parms *parms;
	u32 val;

	rcu_read_lock();
	parms = to_pedit_parms(a);
	val = parms->tcfp_keys[index].val;
	rcu_read_unlock();

	return val;
}

static inline u32 tcf_pedit_offset(const struct tc_action *a, int index)
{
	struct tcf_pedit_parms *parms;
	u32 off;

	rcu_read_lock();
	parms = to_pedit_parms(a);
	off = parms->tcfp_keys[index].off;
	rcu_read_unlock();

	return off;
}
#endif /* __NET_TC_PED_H */
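A minimal sketch, assuming a consumer that has already verified the action with is_tcf_pedit(): it walks the keys of a pedit action using only the RCU-safe accessors declared above. The function name example_dump_pedit() and the pr_debug() format are illustrative, not part of this header.

/* Hypothetical consumer: iterate the keys of a pedit action via the
 * accessors above (assumes 'a' was already checked with is_tcf_pedit()). */
static void example_dump_pedit(const struct tc_action *a)
{
	int i, nkeys = tcf_pedit_nkeys(a);

	for (i = 0; i < nkeys; i++)
		pr_debug("key %d: htype %u cmd %u off %u val %#x mask %#x\n",
			 i, tcf_pedit_htype(a, i), tcf_pedit_cmd(a, i),
			 tcf_pedit_offset(a, i), tcf_pedit_val(a, i),
			 tcf_pedit_mask(a, i));
}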
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _LINUX_KCOV_IOCTLS_H
#define _LINUX_KCOV_IOCTLS_H

#include <linux/types.h>

/*
 * Argument for KCOV_REMOTE_ENABLE ioctl, see Documentation/dev-tools/kcov.rst
 * and the comment before kcov_remote_start() for usage details.
 */
struct kcov_remote_arg {
	__u32		trace_mode;	/* KCOV_TRACE_PC or KCOV_TRACE_CMP */
	__u32		area_size;	/* Length of coverage buffer in words */
	__u32		num_handles;	/* Size of handles array */
	__aligned_u64	common_handle;
	__aligned_u64	handles[];
};

#define KCOV_REMOTE_MAX_HANDLES		0x100

#define KCOV_INIT_TRACE			_IOR('c', 1, unsigned long)
#define KCOV_ENABLE			_IO('c', 100)
#define KCOV_DISABLE			_IO('c', 101)
#define KCOV_REMOTE_ENABLE		_IOW('c', 102, struct kcov_remote_arg)

enum {
	/*
	 * Tracing coverage collection mode.
	 * Covered PCs are collected in a per-task buffer.
	 * In new KCOV version the mode is chosen by calling
	 * ioctl(fd, KCOV_ENABLE, mode). In older versions the mode argument
	 * was supposed to be 0 in such a call. So, for reasons of backward
	 * compatibility, we have chosen the value KCOV_TRACE_PC to be 0.
	 */
	KCOV_TRACE_PC = 0,
	/* Collecting comparison operands mode. */
	KCOV_TRACE_CMP = 1,
};

/*
 * The format for the types of collected comparisons.
 *
 * Bit 0 shows whether one of the arguments is a compile-time constant.
 * Bits 1 & 2 contain log2 of the argument size, up to 8 bytes.
 */
#define KCOV_CMP_CONST		(1 << 0)
#define KCOV_CMP_SIZE(n)	((n) << 1)
#define KCOV_CMP_MASK		KCOV_CMP_SIZE(3)

#define KCOV_SUBSYSTEM_COMMON	(0x00ull << 56)
#define KCOV_SUBSYSTEM_USB	(0x01ull << 56)

#define KCOV_SUBSYSTEM_MASK	(0xffull << 56)
#define KCOV_INSTANCE_MASK	(0xffffffffull)

static inline __u64 kcov_remote_handle(__u64 subsys, __u64 inst)
{
	if (subsys & ~KCOV_SUBSYSTEM_MASK || inst & ~KCOV_INSTANCE_MASK)
		return 0;
	return subsys | inst;
}

#endif /* _LINUX_KCOV_IOCTLS_H */
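A minimal sketch of how the encodings above fit together; example_usb_handle() and example_cmp_operand_size() are hypothetical names, not part of this UAPI header.

/* Hypothetical illustration: compose a remote-coverage handle for USB bus
 * 'bus_nr' and decode the operand size of a KCOV_TRACE_CMP type word. */
static inline __u64 example_usb_handle(__u32 bus_nr)
{
	/* kcov_remote_handle() returns 0 if bus_nr overflows KCOV_INSTANCE_MASK. */
	return kcov_remote_handle(KCOV_SUBSYSTEM_USB, bus_nr);
}

static inline unsigned int example_cmp_operand_size(__u64 type)
{
	/* Bits 1-2 hold log2(size), so KCOV_CMP_SIZE(3) means 8-byte operands. */
	return 1u << ((type & KCOV_CMP_MASK) >> 1);
}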
// SPDX-License-Identifier: GPL-2.0-or-later
/* mpi-sub-ui.c - Subtract an unsigned integer from an MPI.
 *
 * Copyright 1991, 1993, 1994, 1996, 1999-2002, 2004, 2012, 2013, 2015
 * Free Software Foundation, Inc.
 *
 * This file was based on the GNU MP Library source file:
 * https://gmplib.org/repo/gmp-6.2/file/510b83519d1c/mpz/aors_ui.h
 *
 * The GNU MP Library is free software; you can redistribute it and/or modify
 * it under the terms of either:
 *
 *   * the GNU Lesser General Public License as published by the Free
 *     Software Foundation; either version 3 of the License, or (at your
 *     option) any later version.
 *
 * or
 *
 *   * the GNU General Public License as published by the Free Software
 *     Foundation; either version 2 of the License, or (at your option) any
 *     later version.
 *
 * or both in parallel, as here.
 *
 * The GNU MP Library is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * You should have received copies of the GNU General Public License and the
 * GNU Lesser General Public License along with the GNU MP Library. If not,
 * see https://www.gnu.org/licenses/.
 */

#include "mpi-internal.h"

int mpi_sub_ui(MPI w, MPI u, unsigned long vval)
{
	if (u->nlimbs == 0) {
		if (mpi_resize(w, 1) < 0)
			return -ENOMEM;
		w->d[0] = vval;
		w->nlimbs = (vval != 0);
		w->sign = (vval != 0);
		return 0;
	}

	/* If not space for W (and possible carry), increase space. */
	if (mpi_resize(w, u->nlimbs + 1))
		return -ENOMEM;

	if (u->sign) {
		mpi_limb_t cy;

		cy = mpihelp_add_1(w->d, u->d, u->nlimbs, (mpi_limb_t) vval);
		w->d[u->nlimbs] = cy;
		w->nlimbs = u->nlimbs + cy;
		w->sign = 1;
	} else {
		/* The signs are different. Need exact comparison to determine
		 * which operand to subtract from which.
		 */
		if (u->nlimbs == 1 && u->d[0] < vval) {
			w->d[0] = vval - u->d[0];
			w->nlimbs = 1;
			w->sign = 1;
		} else {
			mpihelp_sub_1(w->d, u->d, u->nlimbs, (mpi_limb_t) vval);
			/* Size can decrease with at most one limb. */
			w->nlimbs = (u->nlimbs - (w->d[u->nlimbs - 1] == 0));
			w->sign = 0;
		}
	}

	mpi_normalize(w);
	return 0;
}
EXPORT_SYMBOL_GPL(mpi_sub_ui);
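A short worked check of mpi_sub_ui()'s sign handling, using single-limb values for illustration: with u = -5 (nlimbs == 1, sign == 1) and vval = 3, the u->sign branch adds the magnitudes, giving |w| = 8 with sign == 1, i.e. w = -8 = (-5) - 3; with u = 2 and vval = 5, the single-limb comparison branch stores w->d[0] = 5 - 2 = 3 with sign == 1, i.e. w = -3 = 2 - 5; with u = 7 and vval = 5, mpihelp_sub_1() leaves w = 2 with sign == 0.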
// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) "IPsec: " fmt

#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in6.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/udp.h>
#include <net/tcp.h>
#include <net/espintcp.h>
#include <linux/skbuff_ref.h>
#include <linux/highmem.h>

struct esp_skb_cb {
	struct xfrm_skb_cb xfrm;
	void *tmp;
};

struct esp_output_extra {
	__be32 seqhi;
	u32 esphoff;
};

#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))

/*
 * Allocate an AEAD request structure with extra space for SG and IV.
 *
 * For alignment considerations the IV is placed at the front, followed
 * by the request and finally the SG list.
 *
 * TODO: Use spare space in skb for this where possible.
 */
static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int extralen)
{
	unsigned int len;

	len = extralen;

	len += crypto_aead_ivsize(aead);

	if (len) {
		len += crypto_aead_alignmask(aead) &
		       ~(crypto_tfm_ctx_alignment() - 1);
		len = ALIGN(len, crypto_tfm_ctx_alignment());
	}

	len += sizeof(struct aead_request) + crypto_aead_reqsize(aead);
	len = ALIGN(len, __alignof__(struct scatterlist));

	len += sizeof(struct scatterlist) * nfrags;

	return kmalloc(len, GFP_ATOMIC);
}

static inline void *esp_tmp_extra(void *tmp)
{
	return PTR_ALIGN(tmp, __alignof__(struct esp_output_extra));
}

static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int extralen)
{
	return crypto_aead_ivsize(aead) ?
PTR_ALIGN((u8 *)tmp + extralen, crypto_aead_alignmask(aead) + 1) : tmp + extralen; } static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv) { struct aead_request *req; req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead), crypto_tfm_ctx_alignment()); aead_request_set_tfm(req, aead); return req; } static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead, struct aead_request *req) { return (void *)ALIGN((unsigned long)(req + 1) + crypto_aead_reqsize(aead), __alignof__(struct scatterlist)); } static void esp_ssg_unref(struct xfrm_state *x, void *tmp, struct sk_buff *skb) { struct crypto_aead *aead = x->data; int extralen = 0; u8 *iv; struct aead_request *req; struct scatterlist *sg; if (x->props.flags & XFRM_STATE_ESN) extralen += sizeof(struct esp_output_extra); iv = esp_tmp_iv(aead, tmp, extralen); req = esp_tmp_req(aead, iv); /* Unref skb_frag_pages in the src scatterlist if necessary. * Skip the first sg which comes from skb->data. */ if (req->src != req->dst) for (sg = sg_next(req->src); sg; sg = sg_next(sg)) skb_page_unref(page_to_netmem(sg_page(sg)), skb->pp_recycle); } #ifdef CONFIG_INET_ESPINTCP static struct sock *esp_find_tcp_sk(struct xfrm_state *x) { struct xfrm_encap_tmpl *encap = x->encap; struct net *net = xs_net(x); __be16 sport, dport; struct sock *sk; spin_lock_bh(&x->lock); sport = encap->encap_sport; dport = encap->encap_dport; spin_unlock_bh(&x->lock); sk = inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo, x->id.daddr.a4, dport, x->props.saddr.a4, sport, 0); if (!sk) return ERR_PTR(-ENOENT); if (!tcp_is_ulp_esp(sk)) { sock_put(sk); return ERR_PTR(-EINVAL); } return sk; } static int esp_output_tcp_finish(struct xfrm_state *x, struct sk_buff *skb) { struct sock *sk; int err; rcu_read_lock(); sk = esp_find_tcp_sk(x); err = PTR_ERR_OR_ZERO(sk); if (err) { kfree_skb(skb); goto out; } bh_lock_sock(sk); if (sock_owned_by_user(sk)) err = espintcp_queue_out(sk, skb); else err = espintcp_push_skb(sk, skb); bh_unlock_sock(sk); sock_put(sk); out: rcu_read_unlock(); return err; } static int esp_output_tcp_encap_cb(struct net *net, struct sock *sk, struct sk_buff *skb) { struct dst_entry *dst = skb_dst(skb); struct xfrm_state *x = dst->xfrm; return esp_output_tcp_finish(x, skb); } static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb) { int err; local_bh_disable(); err = xfrm_trans_queue_net(xs_net(x), skb, esp_output_tcp_encap_cb); local_bh_enable(); /* EINPROGRESS just happens to do the right thing. It * actually means that the skb has been consumed and * isn't coming back. 
*/ return err ?: -EINPROGRESS; } #else static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb) { WARN_ON(1); return -EOPNOTSUPP; } #endif static void esp_output_done(void *data, int err) { struct sk_buff *skb = data; struct xfrm_offload *xo = xfrm_offload(skb); void *tmp; struct xfrm_state *x; if (xo && (xo->flags & XFRM_DEV_RESUME)) { struct sec_path *sp = skb_sec_path(skb); x = sp->xvec[sp->len - 1]; } else { x = skb_dst(skb)->xfrm; } tmp = ESP_SKB_CB(skb)->tmp; esp_ssg_unref(x, tmp, skb); kfree(tmp); if (xo && (xo->flags & XFRM_DEV_RESUME)) { if (err) { XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR); kfree_skb(skb); return; } skb_push(skb, skb->data - skb_mac_header(skb)); secpath_reset(skb); xfrm_dev_resume(skb); } else { if (!err && x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP) esp_output_tail_tcp(x, skb); else xfrm_output_resume(skb_to_full_sk(skb), skb, err); } } /* Move ESP header back into place. */ static void esp_restore_header(struct sk_buff *skb, unsigned int offset) { struct ip_esp_hdr *esph = (void *)(skb->data + offset); void *tmp = ESP_SKB_CB(skb)->tmp; __be32 *seqhi = esp_tmp_extra(tmp); esph->seq_no = esph->spi; esph->spi = *seqhi; } static void esp_output_restore_header(struct sk_buff *skb) { void *tmp = ESP_SKB_CB(skb)->tmp; struct esp_output_extra *extra = esp_tmp_extra(tmp); esp_restore_header(skb, skb_transport_offset(skb) + extra->esphoff - sizeof(__be32)); } static struct ip_esp_hdr *esp_output_set_extra(struct sk_buff *skb, struct xfrm_state *x, struct ip_esp_hdr *esph, struct esp_output_extra *extra) { /* For ESN we move the header forward by 4 bytes to * accommodate the high bits. We will move it back after * encryption. */ if ((x->props.flags & XFRM_STATE_ESN)) { __u32 seqhi; struct xfrm_offload *xo = xfrm_offload(skb); if (xo) seqhi = xo->seq.hi; else seqhi = XFRM_SKB_CB(skb)->seq.output.hi; extra->esphoff = (unsigned char *)esph - skb_transport_header(skb); esph = (struct ip_esp_hdr *)((unsigned char *)esph - 4); extra->seqhi = esph->spi; esph->seq_no = htonl(seqhi); } esph->spi = x->id.spi; return esph; } static void esp_output_done_esn(void *data, int err) { struct sk_buff *skb = data; esp_output_restore_header(skb); esp_output_done(data, err); } static struct ip_esp_hdr *esp_output_udp_encap(struct sk_buff *skb, int encap_type, struct esp_info *esp, __be16 sport, __be16 dport) { struct udphdr *uh; unsigned int len; struct xfrm_offload *xo = xfrm_offload(skb); len = skb->len + esp->tailen - skb_transport_offset(skb); if (len + sizeof(struct iphdr) > IP_MAX_MTU) return ERR_PTR(-EMSGSIZE); uh = (struct udphdr *)esp->esph; uh->source = sport; uh->dest = dport; uh->len = htons(len); uh->check = 0; /* For IPv4 ESP with UDP encapsulation, if xo is not null, the skb is in the crypto offload * data path, which means that esp_output_udp_encap is called outside of the XFRM stack. * In this case, the mac header doesn't point to the IPv4 protocol field, so don't set it. 
*/ if (!xo || encap_type != UDP_ENCAP_ESPINUDP) *skb_mac_header(skb) = IPPROTO_UDP; return (struct ip_esp_hdr *)(uh + 1); } #ifdef CONFIG_INET_ESPINTCP static struct ip_esp_hdr *esp_output_tcp_encap(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp) { __be16 *lenp = (void *)esp->esph; struct ip_esp_hdr *esph; unsigned int len; struct sock *sk; len = skb->len + esp->tailen - skb_transport_offset(skb); if (len > IP_MAX_MTU) return ERR_PTR(-EMSGSIZE); rcu_read_lock(); sk = esp_find_tcp_sk(x); rcu_read_unlock(); if (IS_ERR(sk)) return ERR_CAST(sk); sock_put(sk); *lenp = htons(len); esph = (struct ip_esp_hdr *)(lenp + 1); return esph; } #else static struct ip_esp_hdr *esp_output_tcp_encap(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp) { return ERR_PTR(-EOPNOTSUPP); } #endif static int esp_output_encap(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp) { struct xfrm_encap_tmpl *encap = x->encap; struct ip_esp_hdr *esph; __be16 sport, dport; int encap_type; spin_lock_bh(&x->lock); sport = encap->encap_sport; dport = encap->encap_dport; encap_type = encap->encap_type; spin_unlock_bh(&x->lock); switch (encap_type) { default: case UDP_ENCAP_ESPINUDP: esph = esp_output_udp_encap(skb, encap_type, esp, sport, dport); break; case TCP_ENCAP_ESPINTCP: esph = esp_output_tcp_encap(x, skb, esp); break; } if (IS_ERR(esph)) return PTR_ERR(esph); esp->esph = esph; return 0; } int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp) { u8 *tail; int nfrags; int esph_offset; struct page *page; struct sk_buff *trailer; int tailen = esp->tailen; /* this is non-NULL only with TCP/UDP Encapsulation */ if (x->encap) { int err = esp_output_encap(x, skb, esp); if (err < 0) return err; } if (ALIGN(tailen, L1_CACHE_BYTES) > PAGE_SIZE || ALIGN(skb->data_len, L1_CACHE_BYTES) > PAGE_SIZE) goto cow; if (!skb_cloned(skb)) { if (tailen <= skb_tailroom(skb)) { nfrags = 1; trailer = skb; tail = skb_tail_pointer(trailer); goto skip_cow; } else if ((skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS) && !skb_has_frag_list(skb)) { int allocsize; struct sock *sk = skb->sk; struct page_frag *pfrag = &x->xfrag; esp->inplace = false; allocsize = ALIGN(tailen, L1_CACHE_BYTES); spin_lock_bh(&x->lock); if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) { spin_unlock_bh(&x->lock); goto cow; } page = pfrag->page; get_page(page); tail = page_address(page) + pfrag->offset; esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto); nfrags = skb_shinfo(skb)->nr_frags; __skb_fill_page_desc(skb, nfrags, page, pfrag->offset, tailen); skb_shinfo(skb)->nr_frags = ++nfrags; pfrag->offset = pfrag->offset + allocsize; spin_unlock_bh(&x->lock); nfrags++; skb_len_add(skb, tailen); if (sk && sk_fullsock(sk)) refcount_add(tailen, &sk->sk_wmem_alloc); goto out; } } cow: esph_offset = (unsigned char *)esp->esph - skb_transport_header(skb); nfrags = skb_cow_data(skb, tailen, &trailer); if (nfrags < 0) goto out; tail = skb_tail_pointer(trailer); esp->esph = (struct ip_esp_hdr *)(skb_transport_header(skb) + esph_offset); skip_cow: esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto); pskb_put(skb, trailer, tailen); out: return nfrags; } EXPORT_SYMBOL_GPL(esp_output_head); int esp_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp) { u8 *iv; int alen; void *tmp; int ivlen; int assoclen; int extralen; struct page *page; struct ip_esp_hdr *esph; struct crypto_aead *aead; struct aead_request *req; struct scatterlist *sg, *dsg; struct 
esp_output_extra *extra; int err = -ENOMEM; assoclen = sizeof(struct ip_esp_hdr); extralen = 0; if (x->props.flags & XFRM_STATE_ESN) { extralen += sizeof(*extra); assoclen += sizeof(__be32); } aead = x->data; alen = crypto_aead_authsize(aead); ivlen = crypto_aead_ivsize(aead); tmp = esp_alloc_tmp(aead, esp->nfrags + 2, extralen); if (!tmp) goto error; extra = esp_tmp_extra(tmp); iv = esp_tmp_iv(aead, tmp, extralen); req = esp_tmp_req(aead, iv); sg = esp_req_sg(aead, req); if (esp->inplace) dsg = sg; else dsg = &sg[esp->nfrags]; esph = esp_output_set_extra(skb, x, esp->esph, extra); esp->esph = esph; sg_init_table(sg, esp->nfrags); err = skb_to_sgvec(skb, sg, (unsigned char *)esph - skb->data, assoclen + ivlen + esp->clen + alen); if (unlikely(err < 0)) goto error_free; if (!esp->inplace) { int allocsize; struct page_frag *pfrag = &x->xfrag; allocsize = ALIGN(skb->data_len, L1_CACHE_BYTES); spin_lock_bh(&x->lock); if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) { spin_unlock_bh(&x->lock); goto error_free; } skb_shinfo(skb)->nr_frags = 1; page = pfrag->page; get_page(page); /* replace page frags in skb with new page */ __skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len); pfrag->offset = pfrag->offset + allocsize; spin_unlock_bh(&x->lock); sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1); err = skb_to_sgvec(skb, dsg, (unsigned char *)esph - skb->data, assoclen + ivlen + esp->clen + alen); if (unlikely(err < 0)) goto error_free; } if ((x->props.flags & XFRM_STATE_ESN)) aead_request_set_callback(req, 0, esp_output_done_esn, skb); else aead_request_set_callback(req, 0, esp_output_done, skb); aead_request_set_crypt(req, sg, dsg, ivlen + esp->clen, iv); aead_request_set_ad(req, assoclen); memset(iv, 0, ivlen); memcpy(iv + ivlen - min(ivlen, 8), (u8 *)&esp->seqno + 8 - min(ivlen, 8), min(ivlen, 8)); ESP_SKB_CB(skb)->tmp = tmp; err = crypto_aead_encrypt(req); switch (err) { case -EINPROGRESS: goto error; case -ENOSPC: err = NET_XMIT_DROP; break; case 0: if ((x->props.flags & XFRM_STATE_ESN)) esp_output_restore_header(skb); } if (sg != dsg) esp_ssg_unref(x, tmp, skb); if (!err && x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP) err = esp_output_tail_tcp(x, skb); error_free: kfree(tmp); error: return err; } EXPORT_SYMBOL_GPL(esp_output_tail); static int esp_output(struct xfrm_state *x, struct sk_buff *skb) { int alen; int blksize; struct ip_esp_hdr *esph; struct crypto_aead *aead; struct esp_info esp; esp.inplace = true; esp.proto = *skb_mac_header(skb); *skb_mac_header(skb) = IPPROTO_ESP; /* skb is pure payload to encrypt */ aead = x->data; alen = crypto_aead_authsize(aead); esp.tfclen = 0; if (x->tfcpad) { struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb); u32 padto; padto = min(x->tfcpad, xfrm_state_mtu(x, dst->child_mtu_cached)); if (skb->len < padto) esp.tfclen = padto - skb->len; } blksize = ALIGN(crypto_aead_blocksize(aead), 4); esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize); esp.plen = esp.clen - skb->len - esp.tfclen; esp.tailen = esp.tfclen + esp.plen + alen; esp.esph = ip_esp_hdr(skb); esp.nfrags = esp_output_head(x, skb, &esp); if (esp.nfrags < 0) return esp.nfrags; esph = esp.esph; esph->spi = x->id.spi; esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low); esp.seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low + ((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32)); skb_push(skb, -skb_network_offset(skb)); return esp_output_tail(x, skb, &esp); } static inline int esp_remove_trailer(struct sk_buff *skb) { struct xfrm_state *x = 
xfrm_input_state(skb); struct crypto_aead *aead = x->data; int alen, hlen, elen; int padlen, trimlen; __wsum csumdiff; u8 nexthdr[2]; int ret; alen = crypto_aead_authsize(aead); hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead); elen = skb->len - hlen; if (skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2)) BUG(); ret = -EINVAL; padlen = nexthdr[0]; if (padlen + 2 + alen >= elen) { net_dbg_ratelimited("ipsec esp packet is garbage padlen=%d, elen=%d\n", padlen + 2, elen - alen); goto out; } trimlen = alen + padlen + 2; if (skb->ip_summed == CHECKSUM_COMPLETE) { csumdiff = skb_checksum(skb, skb->len - trimlen, trimlen, 0); skb->csum = csum_block_sub(skb->csum, csumdiff, skb->len - trimlen); } ret = pskb_trim(skb, skb->len - trimlen); if (unlikely(ret)) return ret; ret = nexthdr[1]; out: return ret; } int esp_input_done2(struct sk_buff *skb, int err) { const struct iphdr *iph; struct xfrm_state *x = xfrm_input_state(skb); struct xfrm_offload *xo = xfrm_offload(skb); struct crypto_aead *aead = x->data; int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead); int ihl; if (!xo || !(xo->flags & CRYPTO_DONE)) kfree(ESP_SKB_CB(skb)->tmp); if (unlikely(err)) goto out; err = esp_remove_trailer(skb); if (unlikely(err < 0)) goto out; iph = ip_hdr(skb); ihl = iph->ihl * 4; if (x->encap) { struct xfrm_encap_tmpl *encap = x->encap; struct tcphdr *th = (void *)(skb_network_header(skb) + ihl); struct udphdr *uh = (void *)(skb_network_header(skb) + ihl); __be16 source; switch (x->encap->encap_type) { case TCP_ENCAP_ESPINTCP: source = th->source; break; case UDP_ENCAP_ESPINUDP: source = uh->source; break; default: WARN_ON_ONCE(1); err = -EINVAL; goto out; } /* * 1) if the NAT-T peer's IP or port changed then * advertise the change to the keying daemon. * This is an inbound SA, so just compare * SRC ports. */ if (iph->saddr != x->props.saddr.a4 || source != encap->encap_sport) { xfrm_address_t ipaddr; ipaddr.a4 = iph->saddr; km_new_mapping(x, &ipaddr, source); /* XXX: perhaps add an extra * policy check here, to see * if we should allow or * reject a packet from a * different source * address/port. */ } /* * 2) ignore UDP/TCP checksums in case * of NAT-T in Transport Mode, or * perform other post-processing fixes * as per draft-ietf-ipsec-udp-encaps-06, * section 3.1.2 */ if (x->props.mode == XFRM_MODE_TRANSPORT) skb->ip_summed = CHECKSUM_UNNECESSARY; } skb_pull_rcsum(skb, hlen); if (x->props.mode == XFRM_MODE_TUNNEL || x->props.mode == XFRM_MODE_IPTFS) skb_reset_transport_header(skb); else skb_set_transport_header(skb, -ihl); /* RFC4303: Drop dummy packets without any error */ if (err == IPPROTO_NONE) err = -EINVAL; out: return err; } EXPORT_SYMBOL_GPL(esp_input_done2); static void esp_input_done(void *data, int err) { struct sk_buff *skb = data; xfrm_input_resume(skb, esp_input_done2(skb, err)); } static void esp_input_restore_header(struct sk_buff *skb) { esp_restore_header(skb, 0); __skb_pull(skb, 4); } static void esp_input_set_header(struct sk_buff *skb, __be32 *seqhi) { struct xfrm_state *x = xfrm_input_state(skb); struct ip_esp_hdr *esph; /* For ESN we move the header forward by 4 bytes to * accommodate the high bits. We will move it back after * decryption. 
*/ if ((x->props.flags & XFRM_STATE_ESN)) { esph = skb_push(skb, 4); *seqhi = esph->spi; esph->spi = esph->seq_no; esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi; } } static void esp_input_done_esn(void *data, int err) { struct sk_buff *skb = data; esp_input_restore_header(skb); esp_input_done(data, err); } /* * Note: detecting truncated vs. non-truncated authentication data is very * expensive, so we only support truncated data, which is the recommended * and common case. */ static int esp_input(struct xfrm_state *x, struct sk_buff *skb) { struct crypto_aead *aead = x->data; struct aead_request *req; struct sk_buff *trailer; int ivlen = crypto_aead_ivsize(aead); int elen = skb->len - sizeof(struct ip_esp_hdr) - ivlen; int nfrags; int assoclen; int seqhilen; __be32 *seqhi; void *tmp; u8 *iv; struct scatterlist *sg; int err = -EINVAL; if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + ivlen)) goto out; if (elen <= 0) goto out; assoclen = sizeof(struct ip_esp_hdr); seqhilen = 0; if (x->props.flags & XFRM_STATE_ESN) { seqhilen += sizeof(__be32); assoclen += seqhilen; } if (!skb_cloned(skb)) { if (!skb_is_nonlinear(skb)) { nfrags = 1; goto skip_cow; } else if (!skb_has_frag_list(skb)) { nfrags = skb_shinfo(skb)->nr_frags; nfrags++; goto skip_cow; } } err = skb_cow_data(skb, 0, &trailer); if (err < 0) goto out; nfrags = err; skip_cow: err = -ENOMEM; tmp = esp_alloc_tmp(aead, nfrags, seqhilen); if (!tmp) goto out; ESP_SKB_CB(skb)->tmp = tmp; seqhi = esp_tmp_extra(tmp); iv = esp_tmp_iv(aead, tmp, seqhilen); req = esp_tmp_req(aead, iv); sg = esp_req_sg(aead, req); esp_input_set_header(skb, seqhi); sg_init_table(sg, nfrags); err = skb_to_sgvec(skb, sg, 0, skb->len); if (unlikely(err < 0)) { kfree(tmp); goto out; } skb->ip_summed = CHECKSUM_NONE; if ((x->props.flags & XFRM_STATE_ESN)) aead_request_set_callback(req, 0, esp_input_done_esn, skb); else aead_request_set_callback(req, 0, esp_input_done, skb); aead_request_set_crypt(req, sg, sg, elen + ivlen, iv); aead_request_set_ad(req, assoclen); err = crypto_aead_decrypt(req); if (err == -EINPROGRESS) goto out; if ((x->props.flags & XFRM_STATE_ESN)) esp_input_restore_header(skb); err = esp_input_done2(skb, err); out: return err; } static int esp4_err(struct sk_buff *skb, u32 info) { struct net *net = dev_net(skb->dev); const struct iphdr *iph = (const struct iphdr *)skb->data; struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data+(iph->ihl<<2)); struct xfrm_state *x; switch (icmp_hdr(skb)->type) { case ICMP_DEST_UNREACH: if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED) return 0; break; case ICMP_REDIRECT: break; default: return 0; } x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr, esph->spi, IPPROTO_ESP, AF_INET); if (!x) return 0; if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) ipv4_update_pmtu(skb, net, info, 0, IPPROTO_ESP); else ipv4_redirect(skb, net, 0, IPPROTO_ESP); xfrm_state_put(x); return 0; } static void esp_destroy(struct xfrm_state *x) { struct crypto_aead *aead = x->data; if (!aead) return; crypto_free_aead(aead); } static int esp_init_aead(struct xfrm_state *x, struct netlink_ext_ack *extack) { char aead_name[CRYPTO_MAX_ALG_NAME]; struct crypto_aead *aead; int err; if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)", x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME) { NL_SET_ERR_MSG(extack, "Algorithm name is too long"); return -ENAMETOOLONG; } aead = crypto_alloc_aead(aead_name, 0, 0); err = PTR_ERR(aead); if (IS_ERR(aead)) goto error; x->data = aead; err = crypto_aead_setkey(aead, x->aead->alg_key, 
(x->aead->alg_key_len + 7) / 8); if (err) goto error; err = crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8); if (err) goto error; return 0; error: NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations"); return err; } static int esp_init_authenc(struct xfrm_state *x, struct netlink_ext_ack *extack) { struct crypto_aead *aead; struct crypto_authenc_key_param *param; struct rtattr *rta; char *key; char *p; char authenc_name[CRYPTO_MAX_ALG_NAME]; unsigned int keylen; int err; err = -ENAMETOOLONG; if ((x->props.flags & XFRM_STATE_ESN)) { if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME, "%s%sauthencesn(%s,%s)%s", x->geniv ?: "", x->geniv ? "(" : "", x->aalg ? x->aalg->alg_name : "digest_null", x->ealg->alg_name, x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME) { NL_SET_ERR_MSG(extack, "Algorithm name is too long"); goto error; } } else { if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME, "%s%sauthenc(%s,%s)%s", x->geniv ?: "", x->geniv ? "(" : "", x->aalg ? x->aalg->alg_name : "digest_null", x->ealg->alg_name, x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME) { NL_SET_ERR_MSG(extack, "Algorithm name is too long"); goto error; } } aead = crypto_alloc_aead(authenc_name, 0, 0); err = PTR_ERR(aead); if (IS_ERR(aead)) { NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations"); goto error; } x->data = aead; keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) + (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param)); err = -ENOMEM; key = kmalloc(keylen, GFP_KERNEL); if (!key) goto error; p = key; rta = (void *)p; rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM; rta->rta_len = RTA_LENGTH(sizeof(*param)); param = RTA_DATA(rta); p += RTA_SPACE(sizeof(*param)); if (x->aalg) { struct xfrm_algo_desc *aalg_desc; memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8); p += (x->aalg->alg_key_len + 7) / 8; aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0); BUG_ON(!aalg_desc); err = -EINVAL; if (aalg_desc->uinfo.auth.icv_fullbits / 8 != crypto_aead_authsize(aead)) { NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations"); goto free_key; } err = crypto_aead_setauthsize( aead, x->aalg->alg_trunc_len / 8); if (err) { NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations"); goto free_key; } } param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8); memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8); err = crypto_aead_setkey(aead, key, keylen); free_key: kfree_sensitive(key); error: return err; } static int esp_init_state(struct xfrm_state *x, struct netlink_ext_ack *extack) { struct crypto_aead *aead; u32 align; int err; x->data = NULL; if (x->aead) { err = esp_init_aead(x, extack); } else if (x->ealg) { err = esp_init_authenc(x, extack); } else { NL_SET_ERR_MSG(extack, "ESP: AEAD or CRYPT must be provided"); err = -EINVAL; } if (err) goto error; aead = x->data; x->props.header_len = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead); if (x->props.mode == XFRM_MODE_TUNNEL) x->props.header_len += sizeof(struct iphdr); else if (x->props.mode == XFRM_MODE_BEET && x->sel.family != AF_INET6) x->props.header_len += IPV4_BEET_PHMAXLEN; if (x->encap) { struct xfrm_encap_tmpl *encap = x->encap; switch (encap->encap_type) { default: NL_SET_ERR_MSG(extack, "Unsupported encapsulation type for ESP"); err = -EINVAL; goto error; case UDP_ENCAP_ESPINUDP: x->props.header_len += sizeof(struct udphdr); break; #ifdef CONFIG_INET_ESPINTCP case TCP_ENCAP_ESPINTCP: /* only the length field, TCP encap is done 
by * the socket */ x->props.header_len += 2; break; #endif } } align = ALIGN(crypto_aead_blocksize(aead), 4); x->props.trailer_len = align + 1 + crypto_aead_authsize(aead); error: return err; } static int esp4_rcv_cb(struct sk_buff *skb, int err) { return 0; } static const struct xfrm_type esp_type = { .owner = THIS_MODULE, .proto = IPPROTO_ESP, .flags = XFRM_TYPE_REPLAY_PROT, .init_state = esp_init_state, .destructor = esp_destroy, .input = esp_input, .output = esp_output, }; static struct xfrm4_protocol esp4_protocol = { .handler = xfrm4_rcv, .input_handler = xfrm_input, .cb_handler = esp4_rcv_cb, .err_handler = esp4_err, .priority = 0, }; static int __init esp4_init(void) { if (xfrm_register_type(&esp_type, AF_INET) < 0) { pr_info("%s: can't add xfrm type\n", __func__); return -EAGAIN; } if (xfrm4_protocol_register(&esp4_protocol, IPPROTO_ESP) < 0) { pr_info("%s: can't add protocol\n", __func__); xfrm_unregister_type(&esp_type, AF_INET); return -EAGAIN; } return 0; } static void __exit esp4_fini(void) { if (xfrm4_protocol_deregister(&esp4_protocol, IPPROTO_ESP) < 0) pr_info("%s: can't remove protocol\n", __func__); xfrm_unregister_type(&esp_type, AF_INET); } module_init(esp4_init); module_exit(esp4_fini); MODULE_DESCRIPTION("IPv4 ESP transformation library"); MODULE_LICENSE("GPL"); MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_ESP);
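A rough worked example of the trailer sizing done in esp_output() above, assuming a 100-byte payload, a 16-byte cipher block size, a 12-byte ICV and no TFC padding; example_esp_trailer_sizes() is a hypothetical name, not part of esp4.c.

/* Hypothetical sketch: reproduces the clen/plen/tailen arithmetic from
 * esp_output() for the assumed parameters above. */
static void example_esp_trailer_sizes(void)
{
	unsigned int len = 100, tfclen = 0, alen = 12;
	unsigned int blksize = ALIGN(16, 4);			/* 16 */
	unsigned int clen = ALIGN(len + 2 + tfclen, blksize);	/* ALIGN(102, 16) == 112 */
	unsigned int plen = clen - len - tfclen;		/* 12: 10 pad bytes + pad length + next header */
	unsigned int tailen = tfclen + plen + alen;		/* 24 bytes appended by esp_output_head() */

	pr_info("clen=%u plen=%u tailen=%u\n", clen, plen, tailen);
}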
// SPDX-License-Identifier: GPL-2.0-or-later
/* audit.c -- Auditing support
 * Gateway between the kernel (e.g., selinux) and the user-space audit daemon.
 * System-call specific features have moved to auditsc.c
 *
 * Copyright 2003-2007 Red Hat Inc., Durham, North Carolina.
 * All Rights Reserved.
 *
 * Written by Rickard E. (Rik) Faith <faith@redhat.com>
 *
 * Goals: 1) Integrate fully with Security Modules.
 *	  2) Minimal run-time overhead:
 *	     a) Minimal when syscall auditing is disabled (audit_enable=0).
 *	     b) Small when syscall auditing is enabled and no audit record
 *		is generated (defer as much work as possible to record
 *		generation time):
 *		i) context is allocated,
 *		ii) names from getname are stored without a copy, and
 *		iii) inode information stored from path_lookup.
 *	  3) Ability to disable syscall auditing at boot time (audit=0).
 *	  4) Usable by other parts of the kernel (if audit_log* is called,
 *	     then a syscall record will be generated automatically for the
 *	     current syscall).
 *	  5) Netlink interface to user-space.
 *	  6) Support low-overhead kernel-based filtering to minimize the
 *	     information that must be passed to user-space.
 *
 * Audit userspace, documentation, tests, and bug/issue trackers:
 *	https://github.com/linux-audit
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/file.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/pid.h>

#include <linux/audit.h>

#include <net/sock.h>
#include <net/netlink.h>
#include <linux/skbuff.h>
#include <linux/security.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <net/netns/generic.h>

#include "audit.h"

/* No auditing will take place until audit_initialized == AUDIT_INITIALIZED.
 * (Initialization happens after skb_init is called.) */
#define AUDIT_DISABLED		-1
#define AUDIT_UNINITIALIZED	0
#define AUDIT_INITIALIZED	1
static int	audit_initialized = AUDIT_UNINITIALIZED;

u32		audit_enabled = AUDIT_OFF;
bool		audit_ever_enabled = !!AUDIT_OFF;

EXPORT_SYMBOL_GPL(audit_enabled);

/* Default state when kernel boots without any parameters.
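 * Booting with "audit=1" selects AUDIT_ON via audit_enable() below, while
 * "audit=0" selects AUDIT_OFF and additionally flips audit_initialized to
 * AUDIT_DISABLED so that audit_init() becomes a no-op.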
*/ static u32 audit_default = AUDIT_OFF; /* If auditing cannot proceed, audit_failure selects what happens. */ static u32 audit_failure = AUDIT_FAIL_PRINTK; /* private audit network namespace index */ static unsigned int audit_net_id; /** * struct audit_net - audit private network namespace data * @sk: communication socket */ struct audit_net { struct sock *sk; }; /** * struct auditd_connection - kernel/auditd connection state * @pid: auditd PID * @portid: netlink portid * @net: the associated network namespace * @rcu: RCU head * * Description: * This struct is RCU protected; you must either hold the RCU lock for reading * or the associated spinlock for writing. */ struct auditd_connection { struct pid *pid; u32 portid; struct net *net; struct rcu_head rcu; }; static struct auditd_connection __rcu *auditd_conn; static DEFINE_SPINLOCK(auditd_conn_lock); /* If audit_rate_limit is non-zero, limit the rate of sending audit records * to that number per second. This prevents DoS attacks, but results in * audit records being dropped. */ static u32 audit_rate_limit; /* Number of outstanding audit_buffers allowed. * When set to zero, this means unlimited. */ static u32 audit_backlog_limit = 64; #define AUDIT_BACKLOG_WAIT_TIME (60 * HZ) static u32 audit_backlog_wait_time = AUDIT_BACKLOG_WAIT_TIME; /* The identity of the user shutting down the audit system. */ static kuid_t audit_sig_uid = INVALID_UID; static pid_t audit_sig_pid = -1; static struct lsm_prop audit_sig_lsm; /* Records can be lost in several ways: 0) [suppressed in audit_alloc] 1) out of memory in audit_log_start [kmalloc of struct audit_buffer] 2) out of memory in audit_log_move [alloc_skb] 3) suppressed due to audit_rate_limit 4) suppressed due to audit_backlog_limit */ static atomic_t audit_lost = ATOMIC_INIT(0); /* Monotonically increasing sum of time the kernel has spent * waiting while the backlog limit is exceeded. */ static atomic_t audit_backlog_wait_time_actual = ATOMIC_INIT(0); /* Hash for inode-based rules */ struct list_head audit_inode_hash[AUDIT_INODE_BUCKETS]; static struct kmem_cache *audit_buffer_cache; /* queue msgs to send via kauditd_task */ static struct sk_buff_head audit_queue; /* queue msgs due to temporary unicast send problems */ static struct sk_buff_head audit_retry_queue; /* queue msgs waiting for new auditd connection */ static struct sk_buff_head audit_hold_queue; /* queue servicing thread */ static struct task_struct *kauditd_task; static DECLARE_WAIT_QUEUE_HEAD(kauditd_wait); /* waitqueue for callers who are blocked on the audit backlog */ static DECLARE_WAIT_QUEUE_HEAD(audit_backlog_wait); static struct audit_features af = {.vers = AUDIT_FEATURE_VERSION, .mask = -1, .features = 0, .lock = 0,}; static char *audit_feature_names[2] = { "only_unset_loginuid", "loginuid_immutable", }; /** * struct audit_ctl_mutex - serialize requests from userspace * @lock: the mutex used for locking * @owner: the task which owns the lock * * Description: * This is the lock struct used to ensure we only process userspace requests * in an orderly fashion. We can't simply use a mutex/lock here because we * need to track lock ownership so we don't end up blocking the lock owner in * audit_log_start() or similar. */ static struct audit_ctl_mutex { struct mutex lock; void *owner; } audit_cmd_mutex; /* AUDIT_BUFSIZ is the size of the temporary buffer used for formatting * audit records. Since printk uses a 1024 byte buffer, this buffer * should be at least that large. 
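 * Records are not truncated at this size: if formatting overflows the
 * available tailroom, audit_log_vformat() calls audit_expand() to grow the
 * underlying skb and retries the vsnprintf().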
*/ #define AUDIT_BUFSIZ 1024 /* The audit_buffer is used when formatting an audit record. The caller * locks briefly to get the record off the freelist or to allocate the * buffer, and locks briefly to send the buffer to the netlink layer or * to place it on a transmit queue. Multiple audit_buffers can be in * use simultaneously. */ struct audit_buffer { struct sk_buff *skb; /* formatted skb ready to send */ struct audit_context *ctx; /* NULL or associated context */ gfp_t gfp_mask; }; struct audit_reply { __u32 portid; struct net *net; struct sk_buff *skb; }; /** * auditd_test_task - Check to see if a given task is an audit daemon * @task: the task to check * * Description: * Return 1 if the task is a registered audit daemon, 0 otherwise. */ int auditd_test_task(struct task_struct *task) { int rc; struct auditd_connection *ac; rcu_read_lock(); ac = rcu_dereference(auditd_conn); rc = (ac && ac->pid == task_tgid(task) ? 1 : 0); rcu_read_unlock(); return rc; } /** * audit_ctl_lock - Take the audit control lock */ void audit_ctl_lock(void) { mutex_lock(&audit_cmd_mutex.lock); audit_cmd_mutex.owner = current; } /** * audit_ctl_unlock - Drop the audit control lock */ void audit_ctl_unlock(void) { audit_cmd_mutex.owner = NULL; mutex_unlock(&audit_cmd_mutex.lock); } /** * audit_ctl_owner_current - Test to see if the current task owns the lock * * Description: * Return true if the current task owns the audit control lock, false if it * doesn't own the lock. */ static bool audit_ctl_owner_current(void) { return (current == audit_cmd_mutex.owner); } /** * auditd_pid_vnr - Return the auditd PID relative to the namespace * * Description: * Returns the PID in relation to the namespace, 0 on failure. */ static pid_t auditd_pid_vnr(void) { pid_t pid; const struct auditd_connection *ac; rcu_read_lock(); ac = rcu_dereference(auditd_conn); if (!ac || !ac->pid) pid = 0; else pid = pid_vnr(ac->pid); rcu_read_unlock(); return pid; } /** * audit_get_sk - Return the audit socket for the given network namespace * @net: the destination network namespace * * Description: * Returns the sock pointer if valid, NULL otherwise. The caller must ensure * that a reference is held for the network namespace while the sock is in use. */ static struct sock *audit_get_sk(const struct net *net) { struct audit_net *aunet; if (!net) return NULL; aunet = net_generic(net, audit_net_id); return aunet->sk; } void audit_panic(const char *message) { switch (audit_failure) { case AUDIT_FAIL_SILENT: break; case AUDIT_FAIL_PRINTK: if (printk_ratelimit()) pr_err("%s\n", message); break; case AUDIT_FAIL_PANIC: panic("audit: %s\n", message); break; } } static inline int audit_rate_check(void) { static unsigned long last_check = 0; static int messages = 0; static DEFINE_SPINLOCK(lock); unsigned long flags; unsigned long now; int retval = 0; if (!audit_rate_limit) return 1; spin_lock_irqsave(&lock, flags); if (++messages < audit_rate_limit) { retval = 1; } else { now = jiffies; if (time_after(now, last_check + HZ)) { last_check = now; messages = 0; retval = 1; } } spin_unlock_irqrestore(&lock, flags); return retval; } /** * audit_log_lost - conditionally log lost audit message event * @message: the message stating reason for lost audit message * * Emit at least 1 message per second, even if audit_rate_check is * throttling. * Always increment the lost messages counter. 
*/ void audit_log_lost(const char *message) { static unsigned long last_msg = 0; static DEFINE_SPINLOCK(lock); unsigned long flags; unsigned long now; int print; atomic_inc(&audit_lost); print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit); if (!print) { spin_lock_irqsave(&lock, flags); now = jiffies; if (time_after(now, last_msg + HZ)) { print = 1; last_msg = now; } spin_unlock_irqrestore(&lock, flags); } if (print) { if (printk_ratelimit()) pr_warn("audit_lost=%u audit_rate_limit=%u audit_backlog_limit=%u\n", atomic_read(&audit_lost), audit_rate_limit, audit_backlog_limit); audit_panic(message); } } static int audit_log_config_change(char *function_name, u32 new, u32 old, int allow_changes) { struct audit_buffer *ab; int rc = 0; ab = audit_log_start(audit_context(), GFP_KERNEL, AUDIT_CONFIG_CHANGE); if (unlikely(!ab)) return rc; audit_log_format(ab, "op=set %s=%u old=%u ", function_name, new, old); audit_log_session_info(ab); rc = audit_log_task_context(ab); if (rc) allow_changes = 0; /* Something weird, deny request */ audit_log_format(ab, " res=%d", allow_changes); audit_log_end(ab); return rc; } static int audit_do_config_change(char *function_name, u32 *to_change, u32 new) { int allow_changes, rc = 0; u32 old = *to_change; /* check if we are locked */ if (audit_enabled == AUDIT_LOCKED) allow_changes = 0; else allow_changes = 1; if (audit_enabled != AUDIT_OFF) { rc = audit_log_config_change(function_name, new, old, allow_changes); if (rc) allow_changes = 0; } /* If we are allowed, make the change */ if (allow_changes == 1) *to_change = new; /* Not allowed, update reason */ else if (rc == 0) rc = -EPERM; return rc; } static int audit_set_rate_limit(u32 limit) { return audit_do_config_change("audit_rate_limit", &audit_rate_limit, limit); } static int audit_set_backlog_limit(u32 limit) { return audit_do_config_change("audit_backlog_limit", &audit_backlog_limit, limit); } static int audit_set_backlog_wait_time(u32 timeout) { return audit_do_config_change("audit_backlog_wait_time", &audit_backlog_wait_time, timeout); } static int audit_set_enabled(u32 state) { int rc; if (state > AUDIT_LOCKED) return -EINVAL; rc = audit_do_config_change("audit_enabled", &audit_enabled, state); if (!rc) audit_ever_enabled |= !!state; return rc; } static int audit_set_failure(u32 state) { if (state != AUDIT_FAIL_SILENT && state != AUDIT_FAIL_PRINTK && state != AUDIT_FAIL_PANIC) return -EINVAL; return audit_do_config_change("audit_failure", &audit_failure, state); } /** * auditd_conn_free - RCU helper to release an auditd connection struct * @rcu: RCU head * * Description: * Drop any references inside the auditd connection tracking struct and free * the memory. */ static void auditd_conn_free(struct rcu_head *rcu) { struct auditd_connection *ac; ac = container_of(rcu, struct auditd_connection, rcu); put_pid(ac->pid); put_net(ac->net); kfree(ac); } /** * auditd_set - Set/Reset the auditd connection state * @pid: auditd PID * @portid: auditd netlink portid * @net: auditd network namespace pointer * @skb: the netlink command from the audit daemon * @ack: netlink ack flag, cleared if ack'd here * * Description: * This function will obtain and drop network namespace references as * necessary. Returns zero on success, negative values on failure. 
*/ static int auditd_set(struct pid *pid, u32 portid, struct net *net, struct sk_buff *skb, bool *ack) { unsigned long flags; struct auditd_connection *ac_old, *ac_new; struct nlmsghdr *nlh; if (!pid || !net) return -EINVAL; ac_new = kzalloc(sizeof(*ac_new), GFP_KERNEL); if (!ac_new) return -ENOMEM; ac_new->pid = get_pid(pid); ac_new->portid = portid; ac_new->net = get_net(net); /* send the ack now to avoid a race with the queue backlog */ if (*ack) { nlh = nlmsg_hdr(skb); netlink_ack(skb, nlh, 0, NULL); *ack = false; } spin_lock_irqsave(&auditd_conn_lock, flags); ac_old = rcu_dereference_protected(auditd_conn, lockdep_is_held(&auditd_conn_lock)); rcu_assign_pointer(auditd_conn, ac_new); spin_unlock_irqrestore(&auditd_conn_lock, flags); if (ac_old) call_rcu(&ac_old->rcu, auditd_conn_free); return 0; } /** * kauditd_printk_skb - Print the audit record to the ring buffer * @skb: audit record * * Whatever the reason, this packet may not make it to the auditd connection * so write it via printk so the information isn't completely lost. */ static void kauditd_printk_skb(struct sk_buff *skb) { struct nlmsghdr *nlh = nlmsg_hdr(skb); char *data = nlmsg_data(nlh); if (nlh->nlmsg_type != AUDIT_EOE && printk_ratelimit()) pr_notice("type=%d %s\n", nlh->nlmsg_type, data); } /** * kauditd_rehold_skb - Handle a audit record send failure in the hold queue * @skb: audit record * @error: error code (unused) * * Description: * This should only be used by the kauditd_thread when it fails to flush the * hold queue. */ static void kauditd_rehold_skb(struct sk_buff *skb, __always_unused int error) { /* put the record back in the queue */ skb_queue_tail(&audit_hold_queue, skb); } /** * kauditd_hold_skb - Queue an audit record, waiting for auditd * @skb: audit record * @error: error code * * Description: * Queue the audit record, waiting for an instance of auditd. When this * function is called we haven't given up yet on sending the record, but things * are not looking good. The first thing we want to do is try to write the * record via printk and then see if we want to try and hold on to the record * and queue it, if we have room. If we want to hold on to the record, but we * don't have room, record a record lost message. */ static void kauditd_hold_skb(struct sk_buff *skb, int error) { /* at this point it is uncertain if we will ever send this to auditd so * try to send the message via printk before we go any further */ kauditd_printk_skb(skb); /* can we just silently drop the message? 
*/ if (!audit_default) goto drop; /* the hold queue is only for when the daemon goes away completely, * not -EAGAIN failures; if we are in a -EAGAIN state requeue the * record on the retry queue unless it's full, in which case drop it */ if (error == -EAGAIN) { if (!audit_backlog_limit || skb_queue_len(&audit_retry_queue) < audit_backlog_limit) { skb_queue_tail(&audit_retry_queue, skb); return; } audit_log_lost("kauditd retry queue overflow"); goto drop; } /* if we have room in the hold queue, queue the message */ if (!audit_backlog_limit || skb_queue_len(&audit_hold_queue) < audit_backlog_limit) { skb_queue_tail(&audit_hold_queue, skb); return; } /* we have no other options - drop the message */ audit_log_lost("kauditd hold queue overflow"); drop: kfree_skb(skb); } /** * kauditd_retry_skb - Queue an audit record, attempt to send again to auditd * @skb: audit record * @error: error code (unused) * * Description: * Not as serious as kauditd_hold_skb() as we still have a connected auditd, * but for some reason we are having problems sending it audit records so * queue the given record and attempt to resend. */ static void kauditd_retry_skb(struct sk_buff *skb, __always_unused int error) { if (!audit_backlog_limit || skb_queue_len(&audit_retry_queue) < audit_backlog_limit) { skb_queue_tail(&audit_retry_queue, skb); return; } /* we have to drop the record, send it via printk as a last effort */ kauditd_printk_skb(skb); audit_log_lost("kauditd retry queue overflow"); kfree_skb(skb); } /** * auditd_reset - Disconnect the auditd connection * @ac: auditd connection state * * Description: * Break the auditd/kauditd connection and move all the queued records into the * hold queue in case auditd reconnects. It is important to note that the @ac * pointer should never be dereferenced inside this function as it may be NULL * or invalid, you can only compare the memory address! If @ac is NULL then * the connection will always be reset. */ static void auditd_reset(const struct auditd_connection *ac) { unsigned long flags; struct sk_buff *skb; struct auditd_connection *ac_old; /* if it isn't already broken, break the connection */ spin_lock_irqsave(&auditd_conn_lock, flags); ac_old = rcu_dereference_protected(auditd_conn, lockdep_is_held(&auditd_conn_lock)); if (ac && ac != ac_old) { /* someone already registered a new auditd connection */ spin_unlock_irqrestore(&auditd_conn_lock, flags); return; } rcu_assign_pointer(auditd_conn, NULL); spin_unlock_irqrestore(&auditd_conn_lock, flags); if (ac_old) call_rcu(&ac_old->rcu, auditd_conn_free); /* flush the retry queue to the hold queue, but don't touch the main * queue since we need to process that normally for multicast */ while ((skb = skb_dequeue(&audit_retry_queue))) kauditd_hold_skb(skb, -ECONNREFUSED); } /** * auditd_send_unicast_skb - Send a record via unicast to auditd * @skb: audit record * * Description: * Send a skb to the audit daemon, returns positive/zero values on success and * negative values on failure; in all cases the skb will be consumed by this * function. If the send results in -ECONNREFUSED the connection with auditd * will be reset. This function may sleep so callers should not hold any locks * where this would cause a problem. 
*/ static int auditd_send_unicast_skb(struct sk_buff *skb) { int rc; u32 portid; struct net *net; struct sock *sk; struct auditd_connection *ac; /* NOTE: we can't call netlink_unicast while in the RCU section so * take a reference to the network namespace and grab local * copies of the namespace, the sock, and the portid; the * namespace and sock aren't going to go away while we hold a * reference and if the portid does become invalid after the RCU * section netlink_unicast() should safely return an error */ rcu_read_lock(); ac = rcu_dereference(auditd_conn); if (!ac) { rcu_read_unlock(); kfree_skb(skb); rc = -ECONNREFUSED; goto err; } net = get_net(ac->net); sk = audit_get_sk(net); portid = ac->portid; rcu_read_unlock(); rc = netlink_unicast(sk, skb, portid, 0); put_net(net); if (rc < 0) goto err; return rc; err: if (ac && rc == -ECONNREFUSED) auditd_reset(ac); return rc; } /** * kauditd_send_queue - Helper for kauditd_thread to flush skb queues * @sk: the sending sock * @portid: the netlink destination * @queue: the skb queue to process * @retry_limit: limit on number of netlink unicast failures * @skb_hook: per-skb hook for additional processing * @err_hook: hook called if the skb fails the netlink unicast send * * Description: * Run through the given queue and attempt to send the audit records to auditd, * returns zero on success, negative values on failure. It is up to the caller * to ensure that the @sk is valid for the duration of this function. * */ static int kauditd_send_queue(struct sock *sk, u32 portid, struct sk_buff_head *queue, unsigned int retry_limit, void (*skb_hook)(struct sk_buff *skb), void (*err_hook)(struct sk_buff *skb, int error)) { int rc = 0; struct sk_buff *skb = NULL; struct sk_buff *skb_tail; unsigned int failed = 0; /* NOTE: kauditd_thread takes care of all our locking, we just use * the netlink info passed to us (e.g. sk and portid) */ skb_tail = skb_peek_tail(queue); while ((skb != skb_tail) && (skb = skb_dequeue(queue))) { /* call the skb_hook for each skb we touch */ if (skb_hook) (*skb_hook)(skb); /* can we send to anyone via unicast? */ if (!sk) { if (err_hook) (*err_hook)(skb, -ECONNREFUSED); continue; } retry: /* grab an extra skb reference in case of error */ skb_get(skb); rc = netlink_unicast(sk, skb, portid, 0); if (rc < 0) { /* send failed - try a few times unless fatal error */ if (++failed >= retry_limit || rc == -ECONNREFUSED || rc == -EPERM) { sk = NULL; if (err_hook) (*err_hook)(skb, rc); if (rc == -EAGAIN) rc = 0; /* continue to drain the queue */ continue; } else goto retry; } else { /* skb sent - drop the extra reference and continue */ consume_skb(skb); failed = 0; } } return (rc >= 0 ? 0 : rc); } /* * kauditd_send_multicast_skb - Send a record to any multicast listeners * @skb: audit record * * Description: * Write a multicast message to anyone listening in the initial network * namespace. This function doesn't consume an skb as might be expected since * it has to copy it anyways. */ static void kauditd_send_multicast_skb(struct sk_buff *skb) { struct sk_buff *copy; struct sock *sock = audit_get_sk(&init_net); struct nlmsghdr *nlh; /* NOTE: we are not taking an additional reference for init_net since * we don't have to worry about it going away */ if (!netlink_has_listeners(sock, AUDIT_NLGRP_READLOG)) return; /* * The seemingly wasteful skb_copy() rather than bumping the refcount * using skb_get() is necessary because non-standard mods are made to * the skb by the original kaudit unicast socket send routine. 
The * existing auditd daemon assumes this breakage. Fixing this would * require co-ordinating a change in the established protocol between * the kaudit kernel subsystem and the auditd userspace code. There is * no reason for new multicast clients to continue with this * non-compliance. */ copy = skb_copy(skb, GFP_KERNEL); if (!copy) return; nlh = nlmsg_hdr(copy); nlh->nlmsg_len = skb->len; nlmsg_multicast(sock, copy, 0, AUDIT_NLGRP_READLOG, GFP_KERNEL); } /** * kauditd_thread - Worker thread to send audit records to userspace * @dummy: unused */ static int kauditd_thread(void *dummy) { int rc; u32 portid = 0; struct net *net = NULL; struct sock *sk = NULL; struct auditd_connection *ac; #define UNICAST_RETRIES 5 set_freezable(); while (!kthread_should_stop()) { /* NOTE: see the lock comments in auditd_send_unicast_skb() */ rcu_read_lock(); ac = rcu_dereference(auditd_conn); if (!ac) { rcu_read_unlock(); goto main_queue; } net = get_net(ac->net); sk = audit_get_sk(net); portid = ac->portid; rcu_read_unlock(); /* attempt to flush the hold queue */ rc = kauditd_send_queue(sk, portid, &audit_hold_queue, UNICAST_RETRIES, NULL, kauditd_rehold_skb); if (rc < 0) { sk = NULL; auditd_reset(ac); goto main_queue; } /* attempt to flush the retry queue */ rc = kauditd_send_queue(sk, portid, &audit_retry_queue, UNICAST_RETRIES, NULL, kauditd_hold_skb); if (rc < 0) { sk = NULL; auditd_reset(ac); goto main_queue; } main_queue: /* process the main queue - do the multicast send and attempt * unicast, dump failed record sends to the retry queue; if * sk == NULL due to previous failures we will just do the * multicast send and move the record to the hold queue */ rc = kauditd_send_queue(sk, portid, &audit_queue, 1, kauditd_send_multicast_skb, (sk ? kauditd_retry_skb : kauditd_hold_skb)); if (ac && rc < 0) auditd_reset(ac); sk = NULL; /* drop our netns reference, no auditd sends past this line */ if (net) { put_net(net); net = NULL; } /* we have processed all the queues so wake everyone */ wake_up(&audit_backlog_wait); /* NOTE: we want to wake up if there is anything on the queue, * regardless of if an auditd is connected, as we need to * do the multicast send and rotate records from the * main queue to the retry/hold queues */ wait_event_freezable(kauditd_wait, (skb_queue_len(&audit_queue) ? 1 : 0)); } return 0; } int audit_send_list_thread(void *_dest) { struct audit_netlink_list *dest = _dest; struct sk_buff *skb; struct sock *sk = audit_get_sk(dest->net); /* wait for parent to finish and send an ACK */ audit_ctl_lock(); audit_ctl_unlock(); while ((skb = __skb_dequeue(&dest->q)) != NULL) netlink_unicast(sk, skb, dest->portid, 0); put_net(dest->net); kfree(dest); return 0; } struct sk_buff *audit_make_reply(int seq, int type, int done, int multi, const void *payload, int size) { struct sk_buff *skb; struct nlmsghdr *nlh; void *data; int flags = multi ? NLM_F_MULTI : 0; int t = done ? NLMSG_DONE : type; skb = nlmsg_new(size, GFP_KERNEL); if (!skb) return NULL; nlh = nlmsg_put(skb, 0, seq, t, size, flags); if (!nlh) goto out_kfree_skb; data = nlmsg_data(nlh); memcpy(data, payload, size); return skb; out_kfree_skb: kfree_skb(skb); return NULL; } static void audit_free_reply(struct audit_reply *reply) { if (!reply) return; kfree_skb(reply->skb); if (reply->net) put_net(reply->net); kfree(reply); } static int audit_send_reply_thread(void *arg) { struct audit_reply *reply = (struct audit_reply *)arg; audit_ctl_lock(); audit_ctl_unlock(); /* Ignore failure. 
It'll only happen if the sender goes away, because our timeout is set to infinite. */ netlink_unicast(audit_get_sk(reply->net), reply->skb, reply->portid, 0); reply->skb = NULL; audit_free_reply(reply); return 0; } /** * audit_send_reply - send an audit reply message via netlink * @request_skb: skb of request we are replying to (used to target the reply) * @seq: sequence number * @type: audit message type * @done: done (last) flag * @multi: multi-part message flag * @payload: payload data * @size: payload size * * Allocates a skb, builds the netlink message, and sends it to the port id. */ static void audit_send_reply(struct sk_buff *request_skb, int seq, int type, int done, int multi, const void *payload, int size) { struct task_struct *tsk; struct audit_reply *reply; reply = kzalloc(sizeof(*reply), GFP_KERNEL); if (!reply) return; reply->skb = audit_make_reply(seq, type, done, multi, payload, size); if (!reply->skb) goto err; reply->net = get_net(sock_net(NETLINK_CB(request_skb).sk)); reply->portid = NETLINK_CB(request_skb).portid; tsk = kthread_run(audit_send_reply_thread, reply, "audit_send_reply"); if (IS_ERR(tsk)) goto err; return; err: audit_free_reply(reply); } /* * Check for appropriate CAP_AUDIT_ capabilities on incoming audit * control messages. */ static int audit_netlink_ok(struct sk_buff *skb, u16 msg_type) { int err = 0; /* Only support initial user namespace for now. */ /* * We return ECONNREFUSED because it tricks userspace into thinking * that audit was not configured into the kernel. Lots of users * configure their PAM stack (because that's what the distro does) * to reject login if unable to send messages to audit. If we return * ECONNREFUSED the PAM stack thinks the kernel does not have audit * configured in and will let login proceed. If we return EPERM * userspace will reject all logins. This should be removed when we * support non init namespaces!! */ if (current_user_ns() != &init_user_ns) return -ECONNREFUSED; switch (msg_type) { case AUDIT_LIST: case AUDIT_ADD: case AUDIT_DEL: return -EOPNOTSUPP; case AUDIT_GET: case AUDIT_SET: case AUDIT_GET_FEATURE: case AUDIT_SET_FEATURE: case AUDIT_LIST_RULES: case AUDIT_ADD_RULE: case AUDIT_DEL_RULE: case AUDIT_SIGNAL_INFO: case AUDIT_TTY_GET: case AUDIT_TTY_SET: case AUDIT_TRIM: case AUDIT_MAKE_EQUIV: /* Only support auditd and auditctl in initial pid namespace * for now. */ if (task_active_pid_ns(current) != &init_pid_ns) return -EPERM; if (!netlink_capable(skb, CAP_AUDIT_CONTROL)) err = -EPERM; break; case AUDIT_USER: case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG: case AUDIT_FIRST_USER_MSG2 ... 
AUDIT_LAST_USER_MSG2: if (!netlink_capable(skb, CAP_AUDIT_WRITE)) err = -EPERM; break; default: /* bad msg */ err = -EINVAL; } return err; } static void audit_log_common_recv_msg(struct audit_context *context, struct audit_buffer **ab, u16 msg_type) { uid_t uid = from_kuid(&init_user_ns, current_uid()); pid_t pid = task_tgid_nr(current); if (!audit_enabled && msg_type != AUDIT_USER_AVC) { *ab = NULL; return; } *ab = audit_log_start(context, GFP_KERNEL, msg_type); if (unlikely(!*ab)) return; audit_log_format(*ab, "pid=%d uid=%u ", pid, uid); audit_log_session_info(*ab); audit_log_task_context(*ab); } static inline void audit_log_user_recv_msg(struct audit_buffer **ab, u16 msg_type) { audit_log_common_recv_msg(NULL, ab, msg_type); } static int is_audit_feature_set(int i) { return af.features & AUDIT_FEATURE_TO_MASK(i); } static int audit_get_feature(struct sk_buff *skb) { u32 seq; seq = nlmsg_hdr(skb)->nlmsg_seq; audit_send_reply(skb, seq, AUDIT_GET_FEATURE, 0, 0, &af, sizeof(af)); return 0; } static void audit_log_feature_change(int which, u32 old_feature, u32 new_feature, u32 old_lock, u32 new_lock, int res) { struct audit_buffer *ab; if (audit_enabled == AUDIT_OFF) return; ab = audit_log_start(audit_context(), GFP_KERNEL, AUDIT_FEATURE_CHANGE); if (!ab) return; audit_log_task_info(ab); audit_log_format(ab, " feature=%s old=%u new=%u old_lock=%u new_lock=%u res=%d", audit_feature_names[which], !!old_feature, !!new_feature, !!old_lock, !!new_lock, res); audit_log_end(ab); } static int audit_set_feature(struct audit_features *uaf) { int i; BUILD_BUG_ON(AUDIT_LAST_FEATURE + 1 > ARRAY_SIZE(audit_feature_names)); /* if there is ever a version 2 we should handle that here */ for (i = 0; i <= AUDIT_LAST_FEATURE; i++) { u32 feature = AUDIT_FEATURE_TO_MASK(i); u32 old_feature, new_feature, old_lock, new_lock; /* if we are not changing this feature, move along */ if (!(feature & uaf->mask)) continue; old_feature = af.features & feature; new_feature = uaf->features & feature; new_lock = (uaf->lock | af.lock) & feature; old_lock = af.lock & feature; /* are we changing a locked feature? 
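 * (a feature is locked once its bit is set in af.lock, e.g. by a previous
 * AUDIT_SET_FEATURE request; flipping a locked feature such as
 * loginuid_immutable is refused with -EPERM and logged with res=0)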
*/ if (old_lock && (new_feature != old_feature)) { audit_log_feature_change(i, old_feature, new_feature, old_lock, new_lock, 0); return -EPERM; } } /* nothing invalid, do the changes */ for (i = 0; i <= AUDIT_LAST_FEATURE; i++) { u32 feature = AUDIT_FEATURE_TO_MASK(i); u32 old_feature, new_feature, old_lock, new_lock; /* if we are not changing this feature, move along */ if (!(feature & uaf->mask)) continue; old_feature = af.features & feature; new_feature = uaf->features & feature; old_lock = af.lock & feature; new_lock = (uaf->lock | af.lock) & feature; if (new_feature != old_feature) audit_log_feature_change(i, old_feature, new_feature, old_lock, new_lock, 1); if (new_feature) af.features |= feature; else af.features &= ~feature; af.lock |= new_lock; } return 0; } static int audit_replace(struct pid *pid) { pid_t pvnr; struct sk_buff *skb; pvnr = pid_vnr(pid); skb = audit_make_reply(0, AUDIT_REPLACE, 0, 0, &pvnr, sizeof(pvnr)); if (!skb) return -ENOMEM; return auditd_send_unicast_skb(skb); } static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh, bool *ack) { u32 seq; void *data; int data_len; int err; struct audit_buffer *ab; u16 msg_type = nlh->nlmsg_type; struct audit_sig_info *sig_data; struct lsm_context lsmctx = { NULL, 0, 0 }; err = audit_netlink_ok(skb, msg_type); if (err) return err; seq = nlh->nlmsg_seq; data = nlmsg_data(nlh); data_len = nlmsg_len(nlh); switch (msg_type) { case AUDIT_GET: { struct audit_status s; memset(&s, 0, sizeof(s)); s.enabled = audit_enabled; s.failure = audit_failure; /* NOTE: use pid_vnr() so the PID is relative to the current * namespace */ s.pid = auditd_pid_vnr(); s.rate_limit = audit_rate_limit; s.backlog_limit = audit_backlog_limit; s.lost = atomic_read(&audit_lost); s.backlog = skb_queue_len(&audit_queue); s.feature_bitmap = AUDIT_FEATURE_BITMAP_ALL; s.backlog_wait_time = audit_backlog_wait_time; s.backlog_wait_time_actual = atomic_read(&audit_backlog_wait_time_actual); audit_send_reply(skb, seq, AUDIT_GET, 0, 0, &s, sizeof(s)); break; } case AUDIT_SET: { struct audit_status s; memset(&s, 0, sizeof(s)); /* guard against past and future API changes */ memcpy(&s, data, min_t(size_t, sizeof(s), data_len)); if (s.mask & AUDIT_STATUS_ENABLED) { err = audit_set_enabled(s.enabled); if (err < 0) return err; } if (s.mask & AUDIT_STATUS_FAILURE) { err = audit_set_failure(s.failure); if (err < 0) return err; } if (s.mask & AUDIT_STATUS_PID) { /* NOTE: we are using the vnr PID functions below * because the s.pid value is relative to the * namespace of the caller; at present this * doesn't matter much since you can really only * run auditd from the initial pid namespace, but * something to keep in mind if this changes */ pid_t new_pid = s.pid; pid_t auditd_pid; struct pid *req_pid = task_tgid(current); /* Sanity check - PID values must match. Setting * pid to 0 is how auditd ends auditing. 
*/ if (new_pid && (new_pid != pid_vnr(req_pid))) return -EINVAL; /* test the auditd connection */ audit_replace(req_pid); auditd_pid = auditd_pid_vnr(); if (auditd_pid) { /* replacing a healthy auditd is not allowed */ if (new_pid) { audit_log_config_change("audit_pid", new_pid, auditd_pid, 0); return -EEXIST; } /* only current auditd can unregister itself */ if (pid_vnr(req_pid) != auditd_pid) { audit_log_config_change("audit_pid", new_pid, auditd_pid, 0); return -EACCES; } } if (new_pid) { /* register a new auditd connection */ err = auditd_set(req_pid, NETLINK_CB(skb).portid, sock_net(NETLINK_CB(skb).sk), skb, ack); if (audit_enabled != AUDIT_OFF) audit_log_config_change("audit_pid", new_pid, auditd_pid, err ? 0 : 1); if (err) return err; /* try to process any backlog */ wake_up_interruptible(&kauditd_wait); } else { if (audit_enabled != AUDIT_OFF) audit_log_config_change("audit_pid", new_pid, auditd_pid, 1); /* unregister the auditd connection */ auditd_reset(NULL); } } if (s.mask & AUDIT_STATUS_RATE_LIMIT) { err = audit_set_rate_limit(s.rate_limit); if (err < 0) return err; } if (s.mask & AUDIT_STATUS_BACKLOG_LIMIT) { err = audit_set_backlog_limit(s.backlog_limit); if (err < 0) return err; } if (s.mask & AUDIT_STATUS_BACKLOG_WAIT_TIME) { if (sizeof(s) > (size_t)nlh->nlmsg_len) return -EINVAL; if (s.backlog_wait_time > 10*AUDIT_BACKLOG_WAIT_TIME) return -EINVAL; err = audit_set_backlog_wait_time(s.backlog_wait_time); if (err < 0) return err; } if (s.mask == AUDIT_STATUS_LOST) { u32 lost = atomic_xchg(&audit_lost, 0); audit_log_config_change("lost", 0, lost, 1); return lost; } if (s.mask == AUDIT_STATUS_BACKLOG_WAIT_TIME_ACTUAL) { u32 actual = atomic_xchg(&audit_backlog_wait_time_actual, 0); audit_log_config_change("backlog_wait_time_actual", 0, actual, 1); return actual; } break; } case AUDIT_GET_FEATURE: err = audit_get_feature(skb); if (err) return err; break; case AUDIT_SET_FEATURE: if (data_len < sizeof(struct audit_features)) return -EINVAL; err = audit_set_feature(data); if (err) return err; break; case AUDIT_USER: case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG: case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2: if (!audit_enabled && msg_type != AUDIT_USER_AVC) return 0; /* exit early if there isn't at least one character to print */ if (data_len < 2) return -EINVAL; err = audit_filter(msg_type, AUDIT_FILTER_USER); if (err == 1) { /* match or error */ char *str = data; err = 0; if (msg_type == AUDIT_USER_TTY) { err = tty_audit_push(); if (err) break; } audit_log_user_recv_msg(&ab, msg_type); if (msg_type != AUDIT_USER_TTY) { /* ensure NULL termination */ str[data_len - 1] = '\0'; audit_log_format(ab, " msg='%.*s'", AUDIT_MESSAGE_TEXT_MAX, str); } else { audit_log_format(ab, " data="); if (str[data_len - 1] == '\0') data_len--; audit_log_n_untrustedstring(ab, str, data_len); } audit_log_end(ab); } break; case AUDIT_ADD_RULE: case AUDIT_DEL_RULE: if (data_len < sizeof(struct audit_rule_data)) return -EINVAL; if (audit_enabled == AUDIT_LOCKED) { audit_log_common_recv_msg(audit_context(), &ab, AUDIT_CONFIG_CHANGE); audit_log_format(ab, " op=%s audit_enabled=%d res=0", msg_type == AUDIT_ADD_RULE ? 
"add_rule" : "remove_rule", audit_enabled); audit_log_end(ab); return -EPERM; } err = audit_rule_change(msg_type, seq, data, data_len); break; case AUDIT_LIST_RULES: err = audit_list_rules_send(skb, seq); break; case AUDIT_TRIM: audit_trim_trees(); audit_log_common_recv_msg(audit_context(), &ab, AUDIT_CONFIG_CHANGE); audit_log_format(ab, " op=trim res=1"); audit_log_end(ab); break; case AUDIT_MAKE_EQUIV: { void *bufp = data; u32 sizes[2]; size_t msglen = data_len; char *old, *new; err = -EINVAL; if (msglen < 2 * sizeof(u32)) break; memcpy(sizes, bufp, 2 * sizeof(u32)); bufp += 2 * sizeof(u32); msglen -= 2 * sizeof(u32); old = audit_unpack_string(&bufp, &msglen, sizes[0]); if (IS_ERR(old)) { err = PTR_ERR(old); break; } new = audit_unpack_string(&bufp, &msglen, sizes[1]); if (IS_ERR(new)) { err = PTR_ERR(new); kfree(old); break; } /* OK, here comes... */ err = audit_tag_tree(old, new); audit_log_common_recv_msg(audit_context(), &ab, AUDIT_CONFIG_CHANGE); audit_log_format(ab, " op=make_equiv old="); audit_log_untrustedstring(ab, old); audit_log_format(ab, " new="); audit_log_untrustedstring(ab, new); audit_log_format(ab, " res=%d", !err); audit_log_end(ab); kfree(old); kfree(new); break; } case AUDIT_SIGNAL_INFO: if (lsmprop_is_set(&audit_sig_lsm)) { err = security_lsmprop_to_secctx(&audit_sig_lsm, &lsmctx); if (err < 0) return err; } sig_data = kmalloc(struct_size(sig_data, ctx, lsmctx.len), GFP_KERNEL); if (!sig_data) { if (lsmprop_is_set(&audit_sig_lsm)) security_release_secctx(&lsmctx); return -ENOMEM; } sig_data->uid = from_kuid(&init_user_ns, audit_sig_uid); sig_data->pid = audit_sig_pid; if (lsmprop_is_set(&audit_sig_lsm)) { memcpy(sig_data->ctx, lsmctx.context, lsmctx.len); security_release_secctx(&lsmctx); } audit_send_reply(skb, seq, AUDIT_SIGNAL_INFO, 0, 0, sig_data, struct_size(sig_data, ctx, lsmctx.len)); kfree(sig_data); break; case AUDIT_TTY_GET: { struct audit_tty_status s; unsigned int t; t = READ_ONCE(current->signal->audit_tty); s.enabled = t & AUDIT_TTY_ENABLE; s.log_passwd = !!(t & AUDIT_TTY_LOG_PASSWD); audit_send_reply(skb, seq, AUDIT_TTY_GET, 0, 0, &s, sizeof(s)); break; } case AUDIT_TTY_SET: { struct audit_tty_status s, old; struct audit_buffer *ab; unsigned int t; memset(&s, 0, sizeof(s)); /* guard against past and future API changes */ memcpy(&s, data, min_t(size_t, sizeof(s), data_len)); /* check if new data is valid */ if ((s.enabled != 0 && s.enabled != 1) || (s.log_passwd != 0 && s.log_passwd != 1)) err = -EINVAL; if (err) t = READ_ONCE(current->signal->audit_tty); else { t = s.enabled | (-s.log_passwd & AUDIT_TTY_LOG_PASSWD); t = xchg(&current->signal->audit_tty, t); } old.enabled = t & AUDIT_TTY_ENABLE; old.log_passwd = !!(t & AUDIT_TTY_LOG_PASSWD); audit_log_common_recv_msg(audit_context(), &ab, AUDIT_CONFIG_CHANGE); audit_log_format(ab, " op=tty_set old-enabled=%d new-enabled=%d" " old-log_passwd=%d new-log_passwd=%d res=%d", old.enabled, s.enabled, old.log_passwd, s.log_passwd, !err); audit_log_end(ab); break; } default: err = -EINVAL; break; } return err < 0 ? err : 0; } /** * audit_receive - receive messages from a netlink control socket * @skb: the message buffer * * Parse the provided skb and deal with any messages that may be present, * malformed skbs are discarded. 
*/ static void audit_receive(struct sk_buff *skb) { struct nlmsghdr *nlh; bool ack; /* * len MUST be signed for nlmsg_next to be able to dec it below 0 * if the nlmsg_len was not aligned */ int len; int err; nlh = nlmsg_hdr(skb); len = skb->len; audit_ctl_lock(); while (nlmsg_ok(nlh, len)) { ack = nlh->nlmsg_flags & NLM_F_ACK; err = audit_receive_msg(skb, nlh, &ack); /* send an ack if the user asked for one and audit_receive_msg * didn't already do it, or if there was an error. */ if (ack || err) netlink_ack(skb, nlh, err, NULL); nlh = nlmsg_next(nlh, &len); } audit_ctl_unlock(); /* can't block with the ctrl lock, so penalize the sender now */ if (audit_backlog_limit && (skb_queue_len(&audit_queue) > audit_backlog_limit)) { DECLARE_WAITQUEUE(wait, current); /* wake kauditd to try and flush the queue */ wake_up_interruptible(&kauditd_wait); add_wait_queue_exclusive(&audit_backlog_wait, &wait); set_current_state(TASK_UNINTERRUPTIBLE); schedule_timeout(audit_backlog_wait_time); remove_wait_queue(&audit_backlog_wait, &wait); } } /* Log information about who is connecting to the audit multicast socket */ static void audit_log_multicast(int group, const char *op, int err) { const struct cred *cred; struct tty_struct *tty; char comm[sizeof(current->comm)]; struct audit_buffer *ab; if (!audit_enabled) return; ab = audit_log_start(audit_context(), GFP_KERNEL, AUDIT_EVENT_LISTENER); if (!ab) return; cred = current_cred(); tty = audit_get_tty(); audit_log_format(ab, "pid=%u uid=%u auid=%u tty=%s ses=%u", task_tgid_nr(current), from_kuid(&init_user_ns, cred->uid), from_kuid(&init_user_ns, audit_get_loginuid(current)), tty ? tty_name(tty) : "(none)", audit_get_sessionid(current)); audit_put_tty(tty); audit_log_task_context(ab); /* subj= */ audit_log_format(ab, " comm="); audit_log_untrustedstring(ab, get_task_comm(comm, current)); audit_log_d_path_exe(ab, current->mm); /* exe= */ audit_log_format(ab, " nl-mcgrp=%d op=%s res=%d", group, op, !err); audit_log_end(ab); } /* Run custom bind function on netlink socket group connect or bind requests. 
*/ static int audit_multicast_bind(struct net *net, int group) { int err = 0; if (!capable(CAP_AUDIT_READ)) err = -EPERM; audit_log_multicast(group, "connect", err); return err; } static void audit_multicast_unbind(struct net *net, int group) { audit_log_multicast(group, "disconnect", 0); } static int __net_init audit_net_init(struct net *net) { struct netlink_kernel_cfg cfg = { .input = audit_receive, .bind = audit_multicast_bind, .unbind = audit_multicast_unbind, .flags = NL_CFG_F_NONROOT_RECV, .groups = AUDIT_NLGRP_MAX, }; struct audit_net *aunet = net_generic(net, audit_net_id); aunet->sk = netlink_kernel_create(net, NETLINK_AUDIT, &cfg); if (aunet->sk == NULL) { audit_panic("cannot initialize netlink socket in namespace"); return -ENOMEM; } /* limit the timeout in case auditd is blocked/stopped */ aunet->sk->sk_sndtimeo = HZ / 10; return 0; } static void __net_exit audit_net_exit(struct net *net) { struct audit_net *aunet = net_generic(net, audit_net_id); /* NOTE: you would think that we would want to check the auditd * connection and potentially reset it here if it lives in this * namespace, but since the auditd connection tracking struct holds a * reference to this namespace (see auditd_set()) we are only ever * going to get here after that connection has been released */ netlink_kernel_release(aunet->sk); } static struct pernet_operations audit_net_ops __net_initdata = { .init = audit_net_init, .exit = audit_net_exit, .id = &audit_net_id, .size = sizeof(struct audit_net), }; /* Initialize audit support at boot time. */ static int __init audit_init(void) { int i; if (audit_initialized == AUDIT_DISABLED) return 0; audit_buffer_cache = KMEM_CACHE(audit_buffer, SLAB_PANIC); skb_queue_head_init(&audit_queue); skb_queue_head_init(&audit_retry_queue); skb_queue_head_init(&audit_hold_queue); for (i = 0; i < AUDIT_INODE_BUCKETS; i++) INIT_LIST_HEAD(&audit_inode_hash[i]); mutex_init(&audit_cmd_mutex.lock); audit_cmd_mutex.owner = NULL; pr_info("initializing netlink subsys (%s)\n", str_enabled_disabled(audit_default)); register_pernet_subsys(&audit_net_ops); audit_initialized = AUDIT_INITIALIZED; kauditd_task = kthread_run(kauditd_thread, NULL, "kauditd"); if (IS_ERR(kauditd_task)) { int err = PTR_ERR(kauditd_task); panic("audit: failed to start the kauditd thread (%d)\n", err); } audit_log(NULL, GFP_KERNEL, AUDIT_KERNEL, "state=initialized audit_enabled=%u res=1", audit_enabled); return 0; } postcore_initcall(audit_init); /* * Process kernel command-line parameter at boot time. * audit={0|off} or audit={1|on}. */ static int __init audit_enable(char *str) { if (!strcasecmp(str, "off") || !strcmp(str, "0")) audit_default = AUDIT_OFF; else if (!strcasecmp(str, "on") || !strcmp(str, "1")) audit_default = AUDIT_ON; else { pr_err("audit: invalid 'audit' parameter value (%s)\n", str); audit_default = AUDIT_ON; } if (audit_default == AUDIT_OFF) audit_initialized = AUDIT_DISABLED; if (audit_set_enabled(audit_default)) pr_err("audit: error setting audit state (%d)\n", audit_default); pr_info("%s\n", audit_default ? "enabled (after initialization)" : "disabled (until reboot)"); return 1; } __setup("audit=", audit_enable); /* Process kernel command-line parameter at boot time. 
* audit_backlog_limit=<n> */ static int __init audit_backlog_limit_set(char *str) { u32 audit_backlog_limit_arg; pr_info("audit_backlog_limit: "); if (kstrtouint(str, 0, &audit_backlog_limit_arg)) { pr_cont("using default of %u, unable to parse %s\n", audit_backlog_limit, str); return 1; } audit_backlog_limit = audit_backlog_limit_arg; pr_cont("%d\n", audit_backlog_limit); return 1; } __setup("audit_backlog_limit=", audit_backlog_limit_set); static void audit_buffer_free(struct audit_buffer *ab) { if (!ab) return; kfree_skb(ab->skb); kmem_cache_free(audit_buffer_cache, ab); } static struct audit_buffer *audit_buffer_alloc(struct audit_context *ctx, gfp_t gfp_mask, int type) { struct audit_buffer *ab; ab = kmem_cache_alloc(audit_buffer_cache, gfp_mask); if (!ab) return NULL; ab->skb = nlmsg_new(AUDIT_BUFSIZ, gfp_mask); if (!ab->skb) goto err; if (!nlmsg_put(ab->skb, 0, 0, type, 0, 0)) goto err; ab->ctx = ctx; ab->gfp_mask = gfp_mask; return ab; err: audit_buffer_free(ab); return NULL; } /** * audit_serial - compute a serial number for the audit record * * Compute a serial number for the audit record. Audit records are * written to user-space as soon as they are generated, so a complete * audit record may be written in several pieces. The timestamp of the * record and this serial number are used by the user-space tools to * determine which pieces belong to the same audit record. The * (timestamp,serial) tuple is unique for each syscall and is live from * syscall entry to syscall exit. * * NOTE: Another possibility is to store the formatted records off the * audit context (for those records that have a context), and emit them * all at syscall exit. However, this could delay the reporting of * significant errors until syscall exit (or never, if the system * halts). */ unsigned int audit_serial(void) { static atomic_t serial = ATOMIC_INIT(0); return atomic_inc_return(&serial); } static inline void audit_get_stamp(struct audit_context *ctx, struct timespec64 *t, unsigned int *serial) { if (!ctx || !auditsc_get_stamp(ctx, t, serial)) { ktime_get_coarse_real_ts64(t); *serial = audit_serial(); } } /** * audit_log_start - obtain an audit buffer * @ctx: audit_context (may be NULL) * @gfp_mask: type of allocation * @type: audit message type * * Returns audit_buffer pointer on success or NULL on error. * * Obtain an audit buffer. This routine does locking to obtain the * audit buffer, but then no locking is required for calls to * audit_log_*format. If the task (ctx) is a task that is currently in a * syscall, then the syscall is marked as auditable and an audit record * will be written at syscall exit. If there is no associated task, then * task context (ctx) should be NULL. */ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask, int type) { struct audit_buffer *ab; struct timespec64 t; unsigned int serial; if (audit_initialized != AUDIT_INITIALIZED) return NULL; if (unlikely(!audit_filter(type, AUDIT_FILTER_EXCLUDE))) return NULL; /* NOTE: don't ever fail/sleep on these two conditions: * 1. auditd generated record - since we need auditd to drain the * queue; also, when we are checking for auditd, compare PIDs using * task_tgid_vnr() since auditd_pid is set in audit_receive_msg() * using a PID anchored in the caller's namespace * 2. 
generator holding the audit_cmd_mutex - we don't want to block * while holding the mutex, although we do penalize the sender * later in audit_receive() when it is safe to block */ if (!(auditd_test_task(current) || audit_ctl_owner_current())) { long stime = audit_backlog_wait_time; while (audit_backlog_limit && (skb_queue_len(&audit_queue) > audit_backlog_limit)) { /* wake kauditd to try and flush the queue */ wake_up_interruptible(&kauditd_wait); /* sleep if we are allowed and we haven't exhausted our * backlog wait limit */ if (gfpflags_allow_blocking(gfp_mask) && (stime > 0)) { long rtime = stime; DECLARE_WAITQUEUE(wait, current); add_wait_queue_exclusive(&audit_backlog_wait, &wait); set_current_state(TASK_UNINTERRUPTIBLE); stime = schedule_timeout(rtime); atomic_add(rtime - stime, &audit_backlog_wait_time_actual); remove_wait_queue(&audit_backlog_wait, &wait); } else { if (audit_rate_check() && printk_ratelimit()) pr_warn("audit_backlog=%d > audit_backlog_limit=%d\n", skb_queue_len(&audit_queue), audit_backlog_limit); audit_log_lost("backlog limit exceeded"); return NULL; } } } ab = audit_buffer_alloc(ctx, gfp_mask, type); if (!ab) { audit_log_lost("out of memory in audit_log_start"); return NULL; } audit_get_stamp(ab->ctx, &t, &serial); /* cancel dummy context to enable supporting records */ if (ctx) ctx->dummy = 0; audit_log_format(ab, "audit(%llu.%03lu:%u): ", (unsigned long long)t.tv_sec, t.tv_nsec/1000000, serial); return ab; } /** * audit_expand - expand skb in the audit buffer * @ab: audit_buffer * @extra: space to add at tail of the skb * * Returns 0 (no space) on failed expansion, or available space if * successful. */ static inline int audit_expand(struct audit_buffer *ab, int extra) { struct sk_buff *skb = ab->skb; int oldtail = skb_tailroom(skb); int ret = pskb_expand_head(skb, 0, extra, ab->gfp_mask); int newtail = skb_tailroom(skb); if (ret < 0) { audit_log_lost("out of memory in audit_expand"); return 0; } skb->truesize += newtail - oldtail; return newtail; } /* * Format an audit message into the audit buffer. If there isn't enough * room in the audit buffer, more room will be allocated and vsnprint * will be called a second time. Currently, we assume that a printk * can't format message larger than 1024 bytes, so we don't either. */ static __printf(2, 0) void audit_log_vformat(struct audit_buffer *ab, const char *fmt, va_list args) { int len, avail; struct sk_buff *skb; va_list args2; if (!ab) return; BUG_ON(!ab->skb); skb = ab->skb; avail = skb_tailroom(skb); if (avail == 0) { avail = audit_expand(ab, AUDIT_BUFSIZ); if (!avail) goto out; } va_copy(args2, args); len = vsnprintf(skb_tail_pointer(skb), avail, fmt, args); if (len >= avail) { /* The printk buffer is 1024 bytes long, so if we get * here and AUDIT_BUFSIZ is at least 1024, then we can * log everything that printk could have logged. */ avail = audit_expand(ab, max_t(unsigned, AUDIT_BUFSIZ, 1+len-avail)); if (!avail) goto out_va_end; len = vsnprintf(skb_tail_pointer(skb), avail, fmt, args2); } if (len > 0) skb_put(skb, len); out_va_end: va_end(args2); out: return; } /** * audit_log_format - format a message into the audit buffer. * @ab: audit_buffer * @fmt: format string * @...: optional parameters matching @fmt string * * All the work is done in audit_log_vformat. */ void audit_log_format(struct audit_buffer *ab, const char *fmt, ...) 
{ va_list args; if (!ab) return; va_start(args, fmt); audit_log_vformat(ab, fmt, args); va_end(args); } /** * audit_log_n_hex - convert a buffer to hex and append it to the audit skb * @ab: the audit_buffer * @buf: buffer to convert to hex * @len: length of @buf to be converted * * No return value; failure to expand is silently ignored. * * This function will take the passed buf and convert it into a string of * ascii hex digits. The new string is placed onto the skb. */ void audit_log_n_hex(struct audit_buffer *ab, const unsigned char *buf, size_t len) { int i, avail, new_len; unsigned char *ptr; struct sk_buff *skb; if (!ab) return; BUG_ON(!ab->skb); skb = ab->skb; avail = skb_tailroom(skb); new_len = len<<1; if (new_len >= avail) { /* Round the buffer request up to the next multiple */ new_len = AUDIT_BUFSIZ*(((new_len-avail)/AUDIT_BUFSIZ) + 1); avail = audit_expand(ab, new_len); if (!avail) return; } ptr = skb_tail_pointer(skb); for (i = 0; i < len; i++) ptr = hex_byte_pack_upper(ptr, buf[i]); *ptr = 0; skb_put(skb, len << 1); /* new string is twice the old string */ } /* * Format a string of no more than slen characters into the audit buffer, * enclosed in quote marks. */ void audit_log_n_string(struct audit_buffer *ab, const char *string, size_t slen) { int avail, new_len; unsigned char *ptr; struct sk_buff *skb; if (!ab) return; BUG_ON(!ab->skb); skb = ab->skb; avail = skb_tailroom(skb); new_len = slen + 3; /* enclosing quotes + null terminator */ if (new_len > avail) { avail = audit_expand(ab, new_len); if (!avail) return; } ptr = skb_tail_pointer(skb); *ptr++ = '"'; memcpy(ptr, string, slen); ptr += slen; *ptr++ = '"'; *ptr = 0; skb_put(skb, slen + 2); /* don't include null terminator */ } /** * audit_string_contains_control - does a string need to be logged in hex * @string: string to be checked * @len: max length of the string to check */ bool audit_string_contains_control(const char *string, size_t len) { const unsigned char *p; for (p = string; p < (const unsigned char *)string + len; p++) { if (*p == '"' || *p < 0x21 || *p > 0x7e) return true; } return false; } /** * audit_log_n_untrustedstring - log a string that may contain random characters * @ab: audit_buffer * @string: string to be logged * @len: length of string (not including trailing null) * * This code will escape a string that is passed to it if the string * contains a control character, unprintable character, double quote mark, * or a space. Unescaped strings will start and end with a double quote mark. * Strings that are escaped are printed in hex (2 digits per char). * * The caller specifies the number of characters in the string to log, which may * or may not be the entire string. */ void audit_log_n_untrustedstring(struct audit_buffer *ab, const char *string, size_t len) { if (audit_string_contains_control(string, len)) audit_log_n_hex(ab, string, len); else audit_log_n_string(ab, string, len); } /** * audit_log_untrustedstring - log a string that may contain random characters * @ab: audit_buffer * @string: string to be logged * * Same as audit_log_n_untrustedstring(), except that strlen is used to * determine string length. 
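 *
 * For example, a value like "vmlinux" is emitted quoted as "vmlinux", while a
 * string containing a double quote, a space or any byte outside 0x21-0x7e is
 * emitted as uppercase hex, e.g. "hello world" becomes
 * 68656C6C6F20776F726C64.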
*/ void audit_log_untrustedstring(struct audit_buffer *ab, const char *string) { audit_log_n_untrustedstring(ab, string, strlen(string)); } /* This is a helper-function to print the escaped d_path */ void audit_log_d_path(struct audit_buffer *ab, const char *prefix, const struct path *path) { char *p, *pathname; if (prefix) audit_log_format(ab, "%s", prefix); /* We will allow 11 spaces for ' (deleted)' to be appended */ pathname = kmalloc(PATH_MAX+11, ab->gfp_mask); if (!pathname) { audit_log_format(ab, "\"<no_memory>\""); return; } p = d_path(path, pathname, PATH_MAX+11); if (IS_ERR(p)) { /* Should never happen since we send PATH_MAX */ /* FIXME: can we save some information here? */ audit_log_format(ab, "\"<too_long>\""); } else audit_log_untrustedstring(ab, p); kfree(pathname); } void audit_log_session_info(struct audit_buffer *ab) { unsigned int sessionid = audit_get_sessionid(current); uid_t auid = from_kuid(&init_user_ns, audit_get_loginuid(current)); audit_log_format(ab, "auid=%u ses=%u", auid, sessionid); } void audit_log_key(struct audit_buffer *ab, char *key) { audit_log_format(ab, " key="); if (key) audit_log_untrustedstring(ab, key); else audit_log_format(ab, "(null)"); } int audit_log_task_context(struct audit_buffer *ab) { struct lsm_prop prop; struct lsm_context ctx; int error; security_current_getlsmprop_subj(&prop); if (!lsmprop_is_set(&prop)) return 0; error = security_lsmprop_to_secctx(&prop, &ctx); if (error < 0) { if (error != -EINVAL) goto error_path; return 0; } audit_log_format(ab, " subj=%s", ctx.context); security_release_secctx(&ctx); return 0; error_path: audit_panic("error in audit_log_task_context"); return error; } EXPORT_SYMBOL(audit_log_task_context); void audit_log_d_path_exe(struct audit_buffer *ab, struct mm_struct *mm) { struct file *exe_file; if (!mm) goto out_null; exe_file = get_mm_exe_file(mm); if (!exe_file) goto out_null; audit_log_d_path(ab, " exe=", &exe_file->f_path); fput(exe_file); return; out_null: audit_log_format(ab, " exe=(null)"); } struct tty_struct *audit_get_tty(void) { struct tty_struct *tty = NULL; unsigned long flags; spin_lock_irqsave(&current->sighand->siglock, flags); if (current->signal) tty = tty_kref_get(current->signal->tty); spin_unlock_irqrestore(&current->sighand->siglock, flags); return tty; } void audit_put_tty(struct tty_struct *tty) { tty_kref_put(tty); } void audit_log_task_info(struct audit_buffer *ab) { const struct cred *cred; char comm[sizeof(current->comm)]; struct tty_struct *tty; if (!ab) return; cred = current_cred(); tty = audit_get_tty(); audit_log_format(ab, " ppid=%d pid=%d auid=%u uid=%u gid=%u" " euid=%u suid=%u fsuid=%u" " egid=%u sgid=%u fsgid=%u tty=%s ses=%u", task_ppid_nr(current), task_tgid_nr(current), from_kuid(&init_user_ns, audit_get_loginuid(current)), from_kuid(&init_user_ns, cred->uid), from_kgid(&init_user_ns, cred->gid), from_kuid(&init_user_ns, cred->euid), from_kuid(&init_user_ns, cred->suid), from_kuid(&init_user_ns, cred->fsuid), from_kgid(&init_user_ns, cred->egid), from_kgid(&init_user_ns, cred->sgid), from_kgid(&init_user_ns, cred->fsgid), tty ? 
tty_name(tty) : "(none)", audit_get_sessionid(current)); audit_put_tty(tty); audit_log_format(ab, " comm="); audit_log_untrustedstring(ab, get_task_comm(comm, current)); audit_log_d_path_exe(ab, current->mm); audit_log_task_context(ab); } EXPORT_SYMBOL(audit_log_task_info); /** * audit_log_path_denied - report a path restriction denial * @type: audit message type (AUDIT_ANOM_LINK, AUDIT_ANOM_CREAT, etc) * @operation: specific operation name */ void audit_log_path_denied(int type, const char *operation) { struct audit_buffer *ab; if (!audit_enabled) return; /* Generate log with subject, operation, outcome. */ ab = audit_log_start(audit_context(), GFP_KERNEL, type); if (!ab) return; audit_log_format(ab, "op=%s", operation); audit_log_task_info(ab); audit_log_format(ab, " res=0"); audit_log_end(ab); } /* global counter which is incremented every time something logs in */ static atomic_t session_id = ATOMIC_INIT(0); static int audit_set_loginuid_perm(kuid_t loginuid) { /* if we are unset, we don't need privs */ if (!audit_loginuid_set(current)) return 0; /* if AUDIT_FEATURE_LOGINUID_IMMUTABLE means never ever allow a change*/ if (is_audit_feature_set(AUDIT_FEATURE_LOGINUID_IMMUTABLE)) return -EPERM; /* it is set, you need permission */ if (!capable(CAP_AUDIT_CONTROL)) return -EPERM; /* reject if this is not an unset and we don't allow that */ if (is_audit_feature_set(AUDIT_FEATURE_ONLY_UNSET_LOGINUID) && uid_valid(loginuid)) return -EPERM; return 0; } static void audit_log_set_loginuid(kuid_t koldloginuid, kuid_t kloginuid, unsigned int oldsessionid, unsigned int sessionid, int rc) { struct audit_buffer *ab; uid_t uid, oldloginuid, loginuid; struct tty_struct *tty; if (!audit_enabled) return; ab = audit_log_start(audit_context(), GFP_KERNEL, AUDIT_LOGIN); if (!ab) return; uid = from_kuid(&init_user_ns, task_uid(current)); oldloginuid = from_kuid(&init_user_ns, koldloginuid); loginuid = from_kuid(&init_user_ns, kloginuid); tty = audit_get_tty(); audit_log_format(ab, "pid=%d uid=%u", task_tgid_nr(current), uid); audit_log_task_context(ab); audit_log_format(ab, " old-auid=%u auid=%u tty=%s old-ses=%u ses=%u res=%d", oldloginuid, loginuid, tty ? tty_name(tty) : "(none)", oldsessionid, sessionid, !rc); audit_put_tty(tty); audit_log_end(ab); } /** * audit_set_loginuid - set current task's loginuid * @loginuid: loginuid value * * Returns 0. * * Called (set) from fs/proc/base.c::proc_loginuid_write(). */ int audit_set_loginuid(kuid_t loginuid) { unsigned int oldsessionid, sessionid = AUDIT_SID_UNSET; kuid_t oldloginuid; int rc; oldloginuid = audit_get_loginuid(current); oldsessionid = audit_get_sessionid(current); rc = audit_set_loginuid_perm(loginuid); if (rc) goto out; /* are we setting or clearing? */ if (uid_valid(loginuid)) { sessionid = (unsigned int)atomic_inc_return(&session_id); if (unlikely(sessionid == AUDIT_SID_UNSET)) sessionid = (unsigned int)atomic_inc_return(&session_id); } current->sessionid = sessionid; current->loginuid = loginuid; out: audit_log_set_loginuid(oldloginuid, loginuid, oldsessionid, sessionid, rc); return rc; } /** * audit_signal_info - record signal info for shutting down audit subsystem * @sig: signal value * @t: task being signaled * * If the audit subsystem is being terminated, record the task (pid) * and uid that is doing that. 
*/ int audit_signal_info(int sig, struct task_struct *t) { kuid_t uid = current_uid(), auid; if (auditd_test_task(t) && (sig == SIGTERM || sig == SIGHUP || sig == SIGUSR1 || sig == SIGUSR2)) { audit_sig_pid = task_tgid_nr(current); auid = audit_get_loginuid(current); if (uid_valid(auid)) audit_sig_uid = auid; else audit_sig_uid = uid; security_current_getlsmprop_subj(&audit_sig_lsm); } return audit_signal_info_syscall(t); } /** * audit_log_end - end one audit record * @ab: the audit_buffer * * We can not do a netlink send inside an irq context because it blocks (last * arg, flags, is not set to MSG_DONTWAIT), so the audit buffer is placed on a * queue and a kthread is scheduled to remove them from the queue outside the * irq context. May be called in any context. */ void audit_log_end(struct audit_buffer *ab) { struct sk_buff *skb; struct nlmsghdr *nlh; if (!ab) return; if (audit_rate_check()) { skb = ab->skb; ab->skb = NULL; /* setup the netlink header, see the comments in * kauditd_send_multicast_skb() for length quirks */ nlh = nlmsg_hdr(skb); nlh->nlmsg_len = skb->len - NLMSG_HDRLEN; /* queue the netlink packet and poke the kauditd thread */ skb_queue_tail(&audit_queue, skb); wake_up_interruptible(&kauditd_wait); } else audit_log_lost("rate limit exceeded"); audit_buffer_free(ab); } /** * audit_log - Log an audit record * @ctx: audit context * @gfp_mask: type of allocation * @type: audit message type * @fmt: format string to use * @...: variable parameters matching the format string * * This is a convenience function that calls audit_log_start, * audit_log_vformat, and audit_log_end. It may be called * in any context. */ void audit_log(struct audit_context *ctx, gfp_t gfp_mask, int type, const char *fmt, ...) { struct audit_buffer *ab; va_list args; ab = audit_log_start(ctx, gfp_mask, type); if (ab) { va_start(args, fmt); audit_log_vformat(ab, fmt, args); va_end(args); audit_log_end(ab); } } EXPORT_SYMBOL(audit_log_start); EXPORT_SYMBOL(audit_log_end); EXPORT_SYMBOL(audit_log_format); EXPORT_SYMBOL(audit_log);
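/*
 * Illustrative sketch (not kernel code): the same two-pass vsnprintf idiom
 * that audit_log_vformat() uses above -- format into whatever tail room is
 * available, and only if the result would not fit, grow the buffer and
 * format again from a va_copy of the argument list.  The struct and
 * function names here are invented for the example.
 */
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

struct example_buf {
	char *data;
	size_t len;	/* bytes used */
	size_t cap;	/* bytes allocated */
};

static int example_vappendf(struct example_buf *b, const char *fmt, va_list args)
{
	va_list args2;
	size_t avail = b->cap - b->len;
	int len;

	va_copy(args2, args);
	/* first pass: format into the space that is left */
	len = vsnprintf(b->data + b->len, avail, fmt, args);
	if (len >= 0 && (size_t)len >= avail) {
		/* second pass: grow to the exact size and retry from the copy */
		char *tmp = realloc(b->data, b->len + len + 1);

		if (!tmp) {
			va_end(args2);
			return -1;
		}
		b->data = tmp;
		b->cap = b->len + len + 1;
		len = vsnprintf(b->data + b->len, len + 1, fmt, args2);
	}
	va_end(args2);
	if (len > 0)
		b->len += len;
	return len;
}

/* variadic wrapper, mirroring how audit_log_format() wraps audit_log_vformat() */
static int example_appendf(struct example_buf *b, const char *fmt, ...)
{
	va_list args;
	int len;

	va_start(args, fmt);
	len = example_vappendf(b, fmt, args);
	va_end(args);
	return len;
}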
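/*
 * Illustrative sketch (not kernel code): the escaping rule implemented by
 * audit_string_contains_control() and audit_log_n_untrustedstring() above.
 * A value is emitted as "quoted text" only when every byte is printable and
 * is not a double quote; otherwise the whole value is emitted as uppercase
 * hex, two digits per byte.  Function names are invented for the example.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static bool example_needs_hex(const unsigned char *s, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++) {
		if (s[i] == '"' || s[i] < 0x21 || s[i] > 0x7e)
			return true;
	}
	return false;
}

static void example_log_untrusted(const unsigned char *s, size_t len)
{
	size_t i;

	if (example_needs_hex(s, len)) {
		for (i = 0; i < len; i++)
			printf("%02X", s[i]);
	} else {
		printf("\"%.*s\"", (int)len, (const char *)s);
	}
	putchar('\n');
}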
2 1 1 1 2 2 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 // SPDX-License-Identifier: GPL-2.0-only #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <linux/netfilter.h> #include <linux/rhashtable.h> #include <net/netfilter/nf_flow_table.h> #include <net/netfilter/nf_tables.h> #include <linux/if_vlan.h> static unsigned int nf_flow_offload_inet_hook(void *priv, struct sk_buff *skb, const struct nf_hook_state *state) { struct vlan_ethhdr *veth; __be16 proto; switch (skb->protocol) { case htons(ETH_P_8021Q): if (!pskb_may_pull(skb, skb_mac_offset(skb) + sizeof(*veth))) return NF_ACCEPT; veth = (struct vlan_ethhdr *)skb_mac_header(skb); proto = veth->h_vlan_encapsulated_proto; break; case htons(ETH_P_PPP_SES): if (!nf_flow_pppoe_proto(skb, &proto)) return NF_ACCEPT; break; default: proto = skb->protocol; break; } switch (proto) { case htons(ETH_P_IP): return nf_flow_offload_ip_hook(priv, skb, state); case htons(ETH_P_IPV6): return nf_flow_offload_ipv6_hook(priv, skb, state); } return NF_ACCEPT; } static int nf_flow_rule_route_inet(struct net *net, struct flow_offload *flow, enum flow_offload_tuple_dir dir, struct nf_flow_rule *flow_rule) { const struct flow_offload_tuple *flow_tuple = &flow->tuplehash[dir].tuple; int err; switch (flow_tuple->l3proto) { case NFPROTO_IPV4: err = nf_flow_rule_route_ipv4(net, flow, dir, flow_rule); break; case NFPROTO_IPV6: err = nf_flow_rule_route_ipv6(net, flow, dir, flow_rule); break; default: err = -1; break; } return err; } static struct nf_flowtable_type flowtable_inet = { .family = NFPROTO_INET, .init = nf_flow_table_init, .setup = nf_flow_table_offload_setup, .action = nf_flow_rule_route_inet, .free = nf_flow_table_free, .hook = nf_flow_offload_inet_hook, .owner = THIS_MODULE, }; static struct nf_flowtable_type flowtable_ipv4 = { .family = NFPROTO_IPV4, .init = nf_flow_table_init, .setup = nf_flow_table_offload_setup, .action = nf_flow_rule_route_ipv4, .free = nf_flow_table_free, .hook = nf_flow_offload_ip_hook, .owner = THIS_MODULE, }; static struct nf_flowtable_type flowtable_ipv6 = { .family = NFPROTO_IPV6, .init = nf_flow_table_init, .setup = nf_flow_table_offload_setup, .action = nf_flow_rule_route_ipv6, .free = nf_flow_table_free, .hook = nf_flow_offload_ipv6_hook, .owner = THIS_MODULE, }; static int __init nf_flow_inet_module_init(void) { nft_register_flowtable_type(&flowtable_ipv4); nft_register_flowtable_type(&flowtable_ipv6); nft_register_flowtable_type(&flowtable_inet); return 0; } static void __exit nf_flow_inet_module_exit(void) { nft_unregister_flowtable_type(&flowtable_inet); nft_unregister_flowtable_type(&flowtable_ipv6); nft_unregister_flowtable_type(&flowtable_ipv4); } module_init(nf_flow_inet_module_init); module_exit(nf_flow_inet_module_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>"); MODULE_ALIAS_NF_FLOWTABLE(AF_INET); MODULE_ALIAS_NF_FLOWTABLE(AF_INET6); MODULE_ALIAS_NF_FLOWTABLE(1); /* NFPROTO_INET */ MODULE_DESCRIPTION("Netfilter flow table mixed IPv4/IPv6 module");
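/*
 * Illustrative sketch (not kernel code): the dispatch performed by
 * nf_flow_offload_inet_hook() above -- look through a VLAN or PPPoE header
 * to find the encapsulated EtherType, then hand the packet to the IPv4 or
 * IPv6 path.  The enum, struct, and constants are invented for the example;
 * a real implementation parses the headers out of the frame itself.
 */
#include <stdint.h>

#define EX_ETH_P_IP	0x0800
#define EX_ETH_P_IPV6	0x86DD
#define EX_ETH_P_8021Q	0x8100
#define EX_ETH_P_PPP_SES 0x8864

enum ex_verdict { EX_ACCEPT, EX_HANDLED_V4, EX_HANDLED_V6 };

struct ex_frame {
	uint16_t ethertype;		/* outer EtherType */
	uint16_t encapsulated_proto;	/* inner EtherType for VLAN/PPPoE */
};

static enum ex_verdict ex_flow_inet_dispatch(const struct ex_frame *f)
{
	uint16_t proto = f->ethertype;

	/* look through one level of VLAN or PPPoE encapsulation */
	if (proto == EX_ETH_P_8021Q || proto == EX_ETH_P_PPP_SES)
		proto = f->encapsulated_proto;

	switch (proto) {
	case EX_ETH_P_IP:
		return EX_HANDLED_V4;	/* would go to the IPv4 hook */
	case EX_ETH_P_IPV6:
		return EX_HANDLED_V6;	/* would go to the IPv6 hook */
	default:
		return EX_ACCEPT;	/* not ours, let the stack handle it */
	}
}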
// SPDX-License-Identifier: GPL-2.0
/*
 * High memory handling common code and variables.
 *
 * (C) 1999 Andrea Arcangeli, SuSE GmbH, andrea@suse.de
 *          Gerhard Wichert, Siemens AG, Gerhard.Wichert@pdb.siemens.de
 *
 *
 * Redesigned the x86 32-bit VM architecture to deal with
 * 64-bit physical space.
With current x86 CPUs this * means up to 64 Gigabytes physical RAM. * * Rewrote high memory support to move the page cache into * high memory. Implemented permanent (schedulable) kmaps * based on Linus' idea. * * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com> */ #include <linux/mm.h> #include <linux/export.h> #include <linux/swap.h> #include <linux/bio.h> #include <linux/pagemap.h> #include <linux/mempool.h> #include <linux/init.h> #include <linux/hash.h> #include <linux/highmem.h> #include <linux/kgdb.h> #include <asm/tlbflush.h> #include <linux/vmalloc.h> #ifdef CONFIG_KMAP_LOCAL static inline int kmap_local_calc_idx(int idx) { return idx + KM_MAX_IDX * smp_processor_id(); } #ifndef arch_kmap_local_map_idx #define arch_kmap_local_map_idx(idx, pfn) kmap_local_calc_idx(idx) #endif #endif /* CONFIG_KMAP_LOCAL */ /* * Virtual_count is not a pure "count". * 0 means that it is not mapped, and has not been mapped * since a TLB flush - it is usable. * 1 means that there are no users, but it has been mapped * since the last TLB flush - so we can't use it. * n means that there are (n-1) current users of it. */ #ifdef CONFIG_HIGHMEM /* * Architecture with aliasing data cache may define the following family of * helper functions in its asm/highmem.h to control cache color of virtual * addresses where physical memory pages are mapped by kmap. */ #ifndef get_pkmap_color /* * Determine color of virtual address where the page should be mapped. */ static inline unsigned int get_pkmap_color(struct page *page) { return 0; } #define get_pkmap_color get_pkmap_color /* * Get next index for mapping inside PKMAP region for page with given color. */ static inline unsigned int get_next_pkmap_nr(unsigned int color) { static unsigned int last_pkmap_nr; last_pkmap_nr = (last_pkmap_nr + 1) & LAST_PKMAP_MASK; return last_pkmap_nr; } /* * Determine if page index inside PKMAP region (pkmap_nr) of given color * has wrapped around PKMAP region end. When this happens an attempt to * flush all unused PKMAP slots is made. */ static inline int no_more_pkmaps(unsigned int pkmap_nr, unsigned int color) { return pkmap_nr == 0; } /* * Get the number of PKMAP entries of the given color. If no free slot is * found after checking that many entries, kmap will sleep waiting for * someone to call kunmap and free PKMAP slot. */ static inline int get_pkmap_entries_count(unsigned int color) { return LAST_PKMAP; } /* * Get head of a wait queue for PKMAP entries of the given color. * Wait queues for different mapping colors should be independent to avoid * unnecessary wakeups caused by freeing of slots of other colors. */ static inline wait_queue_head_t *get_pkmap_wait_queue_head(unsigned int color) { static DECLARE_WAIT_QUEUE_HEAD(pkmap_map_wait); return &pkmap_map_wait; } #endif unsigned long __nr_free_highpages(void) { unsigned long pages = 0; struct zone *zone; for_each_populated_zone(zone) { if (is_highmem(zone)) pages += zone_page_state(zone, NR_FREE_PAGES); } return pages; } unsigned long __totalhigh_pages(void) { unsigned long pages = 0; struct zone *zone; for_each_populated_zone(zone) { if (is_highmem(zone)) pages += zone_managed_pages(zone); } return pages; } EXPORT_SYMBOL(__totalhigh_pages); static int pkmap_count[LAST_PKMAP]; static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kmap_lock); pte_t *pkmap_page_table; /* * Most architectures have no use for kmap_high_get(), so let's abstract * the disabling of IRQ out of the locking in that case to save on a * potential useless overhead. 
*/ #ifdef ARCH_NEEDS_KMAP_HIGH_GET #define lock_kmap() spin_lock_irq(&kmap_lock) #define unlock_kmap() spin_unlock_irq(&kmap_lock) #define lock_kmap_any(flags) spin_lock_irqsave(&kmap_lock, flags) #define unlock_kmap_any(flags) spin_unlock_irqrestore(&kmap_lock, flags) #else #define lock_kmap() spin_lock(&kmap_lock) #define unlock_kmap() spin_unlock(&kmap_lock) #define lock_kmap_any(flags) \ do { spin_lock(&kmap_lock); (void)(flags); } while (0) #define unlock_kmap_any(flags) \ do { spin_unlock(&kmap_lock); (void)(flags); } while (0) #endif struct page *__kmap_to_page(void *vaddr) { unsigned long base = (unsigned long) vaddr & PAGE_MASK; struct kmap_ctrl *kctrl = &current->kmap_ctrl; unsigned long addr = (unsigned long)vaddr; int i; /* kmap() mappings */ if (WARN_ON_ONCE(addr >= PKMAP_ADDR(0) && addr < PKMAP_ADDR(LAST_PKMAP))) return pte_page(ptep_get(&pkmap_page_table[PKMAP_NR(addr)])); /* kmap_local_page() mappings */ if (WARN_ON_ONCE(base >= __fix_to_virt(FIX_KMAP_END) && base < __fix_to_virt(FIX_KMAP_BEGIN))) { for (i = 0; i < kctrl->idx; i++) { unsigned long base_addr; int idx; idx = arch_kmap_local_map_idx(i, pte_pfn(pteval)); base_addr = __fix_to_virt(FIX_KMAP_BEGIN + idx); if (base_addr == base) return pte_page(kctrl->pteval[i]); } } return virt_to_page(vaddr); } EXPORT_SYMBOL(__kmap_to_page); static void flush_all_zero_pkmaps(void) { int i; int need_flush = 0; flush_cache_kmaps(); for (i = 0; i < LAST_PKMAP; i++) { struct page *page; pte_t ptent; /* * zero means we don't have anything to do, * >1 means that it is still in use. Only * a count of 1 means that it is free but * needs to be unmapped */ if (pkmap_count[i] != 1) continue; pkmap_count[i] = 0; /* sanity check */ ptent = ptep_get(&pkmap_page_table[i]); BUG_ON(pte_none(ptent)); /* * Don't need an atomic fetch-and-clear op here; * no-one has the page mapped, and cannot get at * its virtual address (and hence PTE) without first * getting the kmap_lock (which is held here). * So no dangers, even with speculative execution. 
*/ page = pte_page(ptent); pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]); set_page_address(page, NULL); need_flush = 1; } if (need_flush) flush_tlb_kernel_range(PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP)); } void __kmap_flush_unused(void) { lock_kmap(); flush_all_zero_pkmaps(); unlock_kmap(); } static inline unsigned long map_new_virtual(struct page *page) { unsigned long vaddr; int count; unsigned int last_pkmap_nr; unsigned int color = get_pkmap_color(page); start: count = get_pkmap_entries_count(color); /* Find an empty entry */ for (;;) { last_pkmap_nr = get_next_pkmap_nr(color); if (no_more_pkmaps(last_pkmap_nr, color)) { flush_all_zero_pkmaps(); count = get_pkmap_entries_count(color); } if (!pkmap_count[last_pkmap_nr]) break; /* Found a usable entry */ if (--count) continue; /* * Sleep for somebody else to unmap their entries */ { DECLARE_WAITQUEUE(wait, current); wait_queue_head_t *pkmap_map_wait = get_pkmap_wait_queue_head(color); __set_current_state(TASK_UNINTERRUPTIBLE); add_wait_queue(pkmap_map_wait, &wait); unlock_kmap(); schedule(); remove_wait_queue(pkmap_map_wait, &wait); lock_kmap(); /* Somebody else might have mapped it while we slept */ if (page_address(page)) return (unsigned long)page_address(page); /* Re-start */ goto start; } } vaddr = PKMAP_ADDR(last_pkmap_nr); set_pte_at(&init_mm, vaddr, &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot)); pkmap_count[last_pkmap_nr] = 1; set_page_address(page, (void *)vaddr); return vaddr; } /** * kmap_high - map a highmem page into memory * @page: &struct page to map * * Returns the page's virtual memory address. * * We cannot call this from interrupts, as it may block. */ void *kmap_high(struct page *page) { unsigned long vaddr; /* * For highmem pages, we can't trust "virtual" until * after we have the lock. */ lock_kmap(); vaddr = (unsigned long)page_address(page); if (!vaddr) vaddr = map_new_virtual(page); pkmap_count[PKMAP_NR(vaddr)]++; BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 2); unlock_kmap(); return (void *) vaddr; } EXPORT_SYMBOL(kmap_high); #ifdef ARCH_NEEDS_KMAP_HIGH_GET /** * kmap_high_get - pin a highmem page into memory * @page: &struct page to pin * * Returns the page's current virtual memory address, or NULL if no mapping * exists. If and only if a non null address is returned then a * matching call to kunmap_high() is necessary. * * This can be called from any context. */ void *kmap_high_get(struct page *page) { unsigned long vaddr, flags; lock_kmap_any(flags); vaddr = (unsigned long)page_address(page); if (vaddr) { BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 1); pkmap_count[PKMAP_NR(vaddr)]++; } unlock_kmap_any(flags); return (void *) vaddr; } #endif /** * kunmap_high - unmap a highmem page into memory * @page: &struct page to unmap * * If ARCH_NEEDS_KMAP_HIGH_GET is not defined then this may be called * only from user context. */ void kunmap_high(struct page *page) { unsigned long vaddr; unsigned long nr; unsigned long flags; int need_wakeup; unsigned int color = get_pkmap_color(page); wait_queue_head_t *pkmap_map_wait; lock_kmap_any(flags); vaddr = (unsigned long)page_address(page); BUG_ON(!vaddr); nr = PKMAP_NR(vaddr); /* * A count must never go down to zero * without a TLB flush! */ need_wakeup = 0; switch (--pkmap_count[nr]) { case 0: BUG(); case 1: /* * Avoid an unnecessary wake_up() function call. * The common case is pkmap_count[] == 1, but * no waiters. * The tasks queued in the wait-queue are guarded * by both the lock in the wait-queue-head and by * the kmap_lock. 
As the kmap_lock is held here, * no need for the wait-queue-head's lock. Simply * test if the queue is empty. */ pkmap_map_wait = get_pkmap_wait_queue_head(color); need_wakeup = waitqueue_active(pkmap_map_wait); } unlock_kmap_any(flags); /* do wake-up, if needed, race-free outside of the spin lock */ if (need_wakeup) wake_up(pkmap_map_wait); } EXPORT_SYMBOL(kunmap_high); void zero_user_segments(struct page *page, unsigned start1, unsigned end1, unsigned start2, unsigned end2) { unsigned int i; BUG_ON(end1 > page_size(page) || end2 > page_size(page)); if (start1 >= end1) start1 = end1 = 0; if (start2 >= end2) start2 = end2 = 0; for (i = 0; i < compound_nr(page); i++) { void *kaddr = NULL; if (start1 >= PAGE_SIZE) { start1 -= PAGE_SIZE; end1 -= PAGE_SIZE; } else { unsigned this_end = min_t(unsigned, end1, PAGE_SIZE); if (end1 > start1) { kaddr = kmap_local_page(page + i); memset(kaddr + start1, 0, this_end - start1); } end1 -= this_end; start1 = 0; } if (start2 >= PAGE_SIZE) { start2 -= PAGE_SIZE; end2 -= PAGE_SIZE; } else { unsigned this_end = min_t(unsigned, end2, PAGE_SIZE); if (end2 > start2) { if (!kaddr) kaddr = kmap_local_page(page + i); memset(kaddr + start2, 0, this_end - start2); } end2 -= this_end; start2 = 0; } if (kaddr) { kunmap_local(kaddr); flush_dcache_page(page + i); } if (!end1 && !end2) break; } BUG_ON((start1 | start2 | end1 | end2) != 0); } EXPORT_SYMBOL(zero_user_segments); #endif /* CONFIG_HIGHMEM */ #ifdef CONFIG_KMAP_LOCAL #include <asm/kmap_size.h> /* * With DEBUG_KMAP_LOCAL the stack depth is doubled and every second * slot is unused which acts as a guard page */ #ifdef CONFIG_DEBUG_KMAP_LOCAL # define KM_INCR 2 #else # define KM_INCR 1 #endif static inline int kmap_local_idx_push(void) { WARN_ON_ONCE(in_hardirq() && !irqs_disabled()); current->kmap_ctrl.idx += KM_INCR; BUG_ON(current->kmap_ctrl.idx >= KM_MAX_IDX); return current->kmap_ctrl.idx - 1; } static inline int kmap_local_idx(void) { return current->kmap_ctrl.idx - 1; } static inline void kmap_local_idx_pop(void) { current->kmap_ctrl.idx -= KM_INCR; BUG_ON(current->kmap_ctrl.idx < 0); } #ifndef arch_kmap_local_post_map # define arch_kmap_local_post_map(vaddr, pteval) do { } while (0) #endif #ifndef arch_kmap_local_pre_unmap # define arch_kmap_local_pre_unmap(vaddr) do { } while (0) #endif #ifndef arch_kmap_local_post_unmap # define arch_kmap_local_post_unmap(vaddr) do { } while (0) #endif #ifndef arch_kmap_local_unmap_idx #define arch_kmap_local_unmap_idx(idx, vaddr) kmap_local_calc_idx(idx) #endif #ifndef arch_kmap_local_high_get static inline void *arch_kmap_local_high_get(struct page *page) { return NULL; } #endif #ifndef arch_kmap_local_set_pte #define arch_kmap_local_set_pte(mm, vaddr, ptep, ptev) \ set_pte_at(mm, vaddr, ptep, ptev) #endif /* Unmap a local mapping which was obtained by kmap_high_get() */ static inline bool kmap_high_unmap_local(unsigned long vaddr) { #ifdef ARCH_NEEDS_KMAP_HIGH_GET if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) { kunmap_high(pte_page(ptep_get(&pkmap_page_table[PKMAP_NR(vaddr)]))); return true; } #endif return false; } static pte_t *__kmap_pte; static pte_t *kmap_get_pte(unsigned long vaddr, int idx) { if (IS_ENABLED(CONFIG_KMAP_LOCAL_NON_LINEAR_PTE_ARRAY)) /* * Set by the arch if __kmap_pte[-idx] does not produce * the correct entry. 
*/ return virt_to_kpte(vaddr); if (!__kmap_pte) __kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN)); return &__kmap_pte[-idx]; } void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot) { pte_t pteval, *kmap_pte; unsigned long vaddr; int idx; /* * Disable migration so resulting virtual address is stable * across preemption. */ migrate_disable(); preempt_disable(); idx = arch_kmap_local_map_idx(kmap_local_idx_push(), pfn); vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); kmap_pte = kmap_get_pte(vaddr, idx); BUG_ON(!pte_none(ptep_get(kmap_pte))); pteval = pfn_pte(pfn, prot); arch_kmap_local_set_pte(&init_mm, vaddr, kmap_pte, pteval); arch_kmap_local_post_map(vaddr, pteval); current->kmap_ctrl.pteval[kmap_local_idx()] = pteval; preempt_enable(); return (void *)vaddr; } EXPORT_SYMBOL_GPL(__kmap_local_pfn_prot); void *__kmap_local_page_prot(struct page *page, pgprot_t prot) { void *kmap; /* * To broaden the usage of the actual kmap_local() machinery always map * pages when debugging is enabled and the architecture has no problems * with alias mappings. */ if (!IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP) && !PageHighMem(page)) return page_address(page); /* Try kmap_high_get() if architecture has it enabled */ kmap = arch_kmap_local_high_get(page); if (kmap) return kmap; return __kmap_local_pfn_prot(page_to_pfn(page), prot); } EXPORT_SYMBOL(__kmap_local_page_prot); void kunmap_local_indexed(const void *vaddr) { unsigned long addr = (unsigned long) vaddr & PAGE_MASK; pte_t *kmap_pte; int idx; if (addr < __fix_to_virt(FIX_KMAP_END) || addr > __fix_to_virt(FIX_KMAP_BEGIN)) { if (IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP)) { /* This _should_ never happen! See above. */ WARN_ON_ONCE(1); return; } /* * Handle mappings which were obtained by kmap_high_get() * first as the virtual address of such mappings is below * PAGE_OFFSET. Warn for all other addresses which are in * the user space part of the virtual address space. */ if (!kmap_high_unmap_local(addr)) WARN_ON_ONCE(addr < PAGE_OFFSET); return; } preempt_disable(); idx = arch_kmap_local_unmap_idx(kmap_local_idx(), addr); WARN_ON_ONCE(addr != __fix_to_virt(FIX_KMAP_BEGIN + idx)); kmap_pte = kmap_get_pte(addr, idx); arch_kmap_local_pre_unmap(addr); pte_clear(&init_mm, addr, kmap_pte); arch_kmap_local_post_unmap(addr); current->kmap_ctrl.pteval[kmap_local_idx()] = __pte(0); kmap_local_idx_pop(); preempt_enable(); migrate_enable(); } EXPORT_SYMBOL(kunmap_local_indexed); /* * Invoked before switch_to(). This is safe even when during or after * clearing the maps an interrupt which needs a kmap_local happens because * the task::kmap_ctrl.idx is not modified by the unmapping code so a * nested kmap_local will use the next unused index and restore the index * on unmap. The already cleared kmaps of the outgoing task are irrelevant * because the interrupt context does not know about them. The same applies * when scheduling back in for an interrupt which happens before the * restore is complete. */ void __kmap_local_sched_out(void) { struct task_struct *tsk = current; pte_t *kmap_pte; int i; /* Clear kmaps */ for (i = 0; i < tsk->kmap_ctrl.idx; i++) { pte_t pteval = tsk->kmap_ctrl.pteval[i]; unsigned long addr; int idx; /* With debug all even slots are unmapped and act as guard */ if (IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL) && !(i & 0x01)) { WARN_ON_ONCE(pte_val(pteval) != 0); continue; } if (WARN_ON_ONCE(pte_none(pteval))) continue; /* * This is a horrible hack for XTENSA to calculate the * coloured PTE index. 
Uses the PFN encoded into the pteval * and the map index calculation because the actual mapped * virtual address is not stored in task::kmap_ctrl. * For any sane architecture this is optimized out. */ idx = arch_kmap_local_map_idx(i, pte_pfn(pteval)); addr = __fix_to_virt(FIX_KMAP_BEGIN + idx); kmap_pte = kmap_get_pte(addr, idx); arch_kmap_local_pre_unmap(addr); pte_clear(&init_mm, addr, kmap_pte); arch_kmap_local_post_unmap(addr); } } void __kmap_local_sched_in(void) { struct task_struct *tsk = current; pte_t *kmap_pte; int i; /* Restore kmaps */ for (i = 0; i < tsk->kmap_ctrl.idx; i++) { pte_t pteval = tsk->kmap_ctrl.pteval[i]; unsigned long addr; int idx; /* With debug all even slots are unmapped and act as guard */ if (IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL) && !(i & 0x01)) { WARN_ON_ONCE(pte_val(pteval) != 0); continue; } if (WARN_ON_ONCE(pte_none(pteval))) continue; /* See comment in __kmap_local_sched_out() */ idx = arch_kmap_local_map_idx(i, pte_pfn(pteval)); addr = __fix_to_virt(FIX_KMAP_BEGIN + idx); kmap_pte = kmap_get_pte(addr, idx); set_pte_at(&init_mm, addr, kmap_pte, pteval); arch_kmap_local_post_map(addr, pteval); } } void kmap_local_fork(struct task_struct *tsk) { if (WARN_ON_ONCE(tsk->kmap_ctrl.idx)) memset(&tsk->kmap_ctrl, 0, sizeof(tsk->kmap_ctrl)); } #endif #if defined(HASHED_PAGE_VIRTUAL) #define PA_HASH_ORDER 7 /* * Describes one page->virtual association */ struct page_address_map { struct page *page; void *virtual; struct list_head list; }; static struct page_address_map page_address_maps[LAST_PKMAP]; /* * Hash table bucket */ static struct page_address_slot { struct list_head lh; /* List of page_address_maps */ spinlock_t lock; /* Protect this bucket's list */ } ____cacheline_aligned_in_smp page_address_htable[1<<PA_HASH_ORDER]; static struct page_address_slot *page_slot(const struct page *page) { return &page_address_htable[hash_ptr(page, PA_HASH_ORDER)]; } /** * page_address - get the mapped virtual address of a page * @page: &struct page to get the virtual address of * * Returns the page's virtual address. 
*/ void *page_address(const struct page *page) { unsigned long flags; void *ret; struct page_address_slot *pas; if (!PageHighMem(page)) return lowmem_page_address(page); pas = page_slot(page); ret = NULL; spin_lock_irqsave(&pas->lock, flags); if (!list_empty(&pas->lh)) { struct page_address_map *pam; list_for_each_entry(pam, &pas->lh, list) { if (pam->page == page) { ret = pam->virtual; break; } } } spin_unlock_irqrestore(&pas->lock, flags); return ret; } EXPORT_SYMBOL(page_address); /** * set_page_address - set a page's virtual address * @page: &struct page to set * @virtual: virtual address to use */ void set_page_address(struct page *page, void *virtual) { unsigned long flags; struct page_address_slot *pas; struct page_address_map *pam; BUG_ON(!PageHighMem(page)); pas = page_slot(page); if (virtual) { /* Add */ pam = &page_address_maps[PKMAP_NR((unsigned long)virtual)]; pam->page = page; pam->virtual = virtual; spin_lock_irqsave(&pas->lock, flags); list_add_tail(&pam->list, &pas->lh); spin_unlock_irqrestore(&pas->lock, flags); } else { /* Remove */ spin_lock_irqsave(&pas->lock, flags); list_for_each_entry(pam, &pas->lh, list) { if (pam->page == page) { list_del(&pam->list); break; } } spin_unlock_irqrestore(&pas->lock, flags); } } void __init page_address_init(void) { int i; for (i = 0; i < ARRAY_SIZE(page_address_htable); i++) { INIT_LIST_HEAD(&page_address_htable[i].lh); spin_lock_init(&page_address_htable[i].lock); } } #endif /* defined(HASHED_PAGE_VIRTUAL) */
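/*
 * Illustrative sketch (not kernel code): the pkmap_count state machine
 * described in the comments above.  0 means the slot is unmapped and safe
 * to reuse, 1 means no users remain but the old translation may still be
 * cached in TLBs, and n > 1 means n-1 active users.  The helpers below are
 * invented for the example and only model the counter transitions.
 */
static int ex_pkmap_count[8];	/* stand-in for pkmap_count[LAST_PKMAP] */

/* kmap_high(): a new user of an already-established mapping */
static void ex_pkmap_get(int slot)
{
	ex_pkmap_count[slot]++;		/* becomes >= 2 while in use */
}

/* kunmap_high(): drop a user; the mapping itself stays until a flush */
static void ex_pkmap_put(int slot)
{
	ex_pkmap_count[slot]--;		/* reaching 1 means "unused but not flushed" */
}

/* flush_all_zero_pkmaps(): reclaim slots whose count fell back to 1 */
static int ex_pkmap_reclaim(int nr_slots)
{
	int i, reclaimed = 0;

	for (i = 0; i < nr_slots; i++) {
		if (ex_pkmap_count[i] != 1)
			continue;
		ex_pkmap_count[i] = 0;	/* reusable once the TLB range is flushed */
		reclaimed++;
	}
	return reclaimed;		/* caller would flush the TLB if > 0 */
}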
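/*
 * Illustrative sketch (not kernel code): the HASHED_PAGE_VIRTUAL scheme used
 * by page_address()/set_page_address() above -- a small hash table keyed on
 * the struct page pointer, where each bucket holds a list of
 * page -> virtual-address associations.  All types and names are invented
 * for the example; the kernel version also takes a per-bucket spinlock and
 * supports removal.
 */
#include <stddef.h>
#include <stdint.h>

#define EX_PA_HASH_ORDER 7
#define EX_PA_BUCKETS (1 << EX_PA_HASH_ORDER)

struct ex_page;				/* opaque stand-in for struct page */

struct ex_page_address_map {
	const struct ex_page *page;
	void *virtual;
	struct ex_page_address_map *next;
};

static struct ex_page_address_map *ex_buckets[EX_PA_BUCKETS];

static unsigned int ex_page_slot(const struct ex_page *page)
{
	/* crude pointer hash standing in for hash_ptr() */
	uintptr_t v = (uintptr_t)page;

	return (unsigned int)((v >> 4) ^ (v >> 12)) & (EX_PA_BUCKETS - 1);
}

static void *ex_page_address(const struct ex_page *page)
{
	struct ex_page_address_map *pam;

	for (pam = ex_buckets[ex_page_slot(page)]; pam; pam = pam->next) {
		if (pam->page == page)
			return pam->virtual;
	}
	return NULL;			/* the page is not currently mapped */
}

static void ex_set_page_address(const struct ex_page *page, void *virtual,
				struct ex_page_address_map *pam)
{
	unsigned int slot = ex_page_slot(page);

	/* add an association; caller supplies the map entry storage */
	pam->page = page;
	pam->virtual = virtual;
	pam->next = ex_buckets[slot];
	ex_buckets[slot] = pam;
}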
/* SPDX-License-Identifier: GPL-2.0 */
/* taskstats_kern.h - kernel header for per-task statistics interface
 *
 * Copyright (C) Shailabh Nagar, IBM Corp. 2006
 *           (C) Balbir Singh,   IBM Corp. 2006
 */

#ifndef _LINUX_TASKSTATS_KERN_H
#define _LINUX_TASKSTATS_KERN_H

#include <linux/taskstats.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>

#ifdef CONFIG_TASKSTATS
extern struct kmem_cache *taskstats_cache;
extern struct mutex taskstats_exit_mutex;

static inline void taskstats_tgid_free(struct signal_struct *sig)
{
	if (sig->stats)
		kmem_cache_free(taskstats_cache, sig->stats);
}

extern void taskstats_exit(struct task_struct *, int group_dead);
extern void taskstats_init_early(void);
#else
static inline void taskstats_exit(struct task_struct *tsk, int group_dead)
{}
static inline void taskstats_tgid_free(struct signal_struct *sig)
{}
static inline void taskstats_init_early(void)
{}
#endif /* CONFIG_TASKSTATS */

#endif
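/*
 * Illustrative sketch (not kernel code): the compile-time stub pattern used
 * by taskstats_kern.h above.  When the feature is configured out, callers
 * still compile against the same function names, but every call collapses
 * to an empty inline stub.  EXAMPLE_FEATURE and the function names are
 * invented for the example.
 */
#ifdef EXAMPLE_FEATURE
void example_feature_exit(int code);		/* real implementation elsewhere */
void example_feature_init_early(void);
#else
static inline void example_feature_exit(int code) {}
static inline void example_feature_init_early(void) {}
#endif /* EXAMPLE_FEATURE */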
// SPDX-License-Identifier: GPL-2.0-only
/*
 * drivers/dma-buf/sync_file.c
 *
 * Copyright (C) 2012 Google, Inc.
 */

#include <linux/dma-fence-unwrap.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/anon_inodes.h>
#include <linux/sync_file.h>
#include <uapi/linux/sync_file.h>

static const struct file_operations sync_file_fops;

static struct sync_file *sync_file_alloc(void)
{
	struct sync_file *sync_file;

	sync_file = kzalloc(sizeof(*sync_file), GFP_KERNEL);
	if (!sync_file)
		return NULL;

	sync_file->file = anon_inode_getfile("sync_file", &sync_file_fops,
					     sync_file, 0);
	if (IS_ERR(sync_file->file))
		goto err;

	init_waitqueue_head(&sync_file->wq);

	INIT_LIST_HEAD(&sync_file->cb.node);

	return sync_file;

err:
	kfree(sync_file);
	return NULL;
}

static void fence_check_cb_func(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct sync_file *sync_file;

	sync_file = container_of(cb, struct sync_file, cb);

	wake_up_all(&sync_file->wq);
}

/**
 * sync_file_create() - creates a sync file
 * @fence: fence to add to the sync_fence
 *
 * Creates a sync_file containing @fence. This function acquires an additional
 * reference of @fence for the newly-created &sync_file, if it succeeds. The
 * sync_file can be released with fput(sync_file->file). Returns the
 * sync_file or NULL in case of error.
*/ struct sync_file *sync_file_create(struct dma_fence *fence) { struct sync_file *sync_file; sync_file = sync_file_alloc(); if (!sync_file) return NULL; sync_file->fence = dma_fence_get(fence); return sync_file; } EXPORT_SYMBOL(sync_file_create); static struct sync_file *sync_file_fdget(int fd) { struct file *file = fget(fd); if (!file) return NULL; if (file->f_op != &sync_file_fops) goto err; return file->private_data; err: fput(file); return NULL; } /** * sync_file_get_fence - get the fence related to the sync_file fd * @fd: sync_file fd to get the fence from * * Ensures @fd references a valid sync_file and returns a fence that * represents all fence in the sync_file. On error NULL is returned. */ struct dma_fence *sync_file_get_fence(int fd) { struct sync_file *sync_file; struct dma_fence *fence; sync_file = sync_file_fdget(fd); if (!sync_file) return NULL; fence = dma_fence_get(sync_file->fence); fput(sync_file->file); return fence; } EXPORT_SYMBOL(sync_file_get_fence); /** * sync_file_get_name - get the name of the sync_file * @sync_file: sync_file to get the fence from * @buf: destination buffer to copy sync_file name into * @len: available size of destination buffer. * * Each sync_file may have a name assigned either by the user (when merging * sync_files together) or created from the fence it contains. In the latter * case construction of the name is deferred until use, and so requires * sync_file_get_name(). * * Returns: a string representing the name. */ char *sync_file_get_name(struct sync_file *sync_file, char *buf, int len) { if (sync_file->user_name[0]) { strscpy(buf, sync_file->user_name, len); } else { struct dma_fence *fence = sync_file->fence; snprintf(buf, len, "%s-%s%llu-%lld", fence->ops->get_driver_name(fence), fence->ops->get_timeline_name(fence), fence->context, fence->seqno); } return buf; } /** * sync_file_merge() - merge two sync_files * @name: name of new fence * @a: sync_file a * @b: sync_file b * * Creates a new sync_file which contains copies of all the fences in both * @a and @b. @a and @b remain valid, independent sync_file. Returns the * new merged sync_file or NULL in case of error. */ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a, struct sync_file *b) { struct sync_file *sync_file; struct dma_fence *fence; sync_file = sync_file_alloc(); if (!sync_file) return NULL; fence = dma_fence_unwrap_merge(a->fence, b->fence); if (!fence) { fput(sync_file->file); return NULL; } sync_file->fence = fence; strscpy(sync_file->user_name, name, sizeof(sync_file->user_name)); return sync_file; } static int sync_file_release(struct inode *inode, struct file *file) { struct sync_file *sync_file = file->private_data; if (test_bit(POLL_ENABLED, &sync_file->flags)) dma_fence_remove_callback(sync_file->fence, &sync_file->cb); dma_fence_put(sync_file->fence); kfree(sync_file); return 0; } static __poll_t sync_file_poll(struct file *file, poll_table *wait) { struct sync_file *sync_file = file->private_data; poll_wait(file, &sync_file->wq, wait); if (list_empty(&sync_file->cb.node) && !test_and_set_bit(POLL_ENABLED, &sync_file->flags)) { if (dma_fence_add_callback(sync_file->fence, &sync_file->cb, fence_check_cb_func) < 0) wake_up_all(&sync_file->wq); } return dma_fence_is_signaled(sync_file->fence) ? 
EPOLLIN : 0; } static long sync_file_ioctl_merge(struct sync_file *sync_file, unsigned long arg) { int fd = get_unused_fd_flags(O_CLOEXEC); int err; struct sync_file *fence2, *fence3; struct sync_merge_data data; if (fd < 0) return fd; if (copy_from_user(&data, (void __user *)arg, sizeof(data))) { err = -EFAULT; goto err_put_fd; } if (data.flags || data.pad) { err = -EINVAL; goto err_put_fd; } fence2 = sync_file_fdget(data.fd2); if (!fence2) { err = -ENOENT; goto err_put_fd; } data.name[sizeof(data.name) - 1] = '\0'; fence3 = sync_file_merge(data.name, sync_file, fence2); if (!fence3) { err = -ENOMEM; goto err_put_fence2; } data.fence = fd; if (copy_to_user((void __user *)arg, &data, sizeof(data))) { err = -EFAULT; goto err_put_fence3; } fd_install(fd, fence3->file); fput(fence2->file); return 0; err_put_fence3: fput(fence3->file); err_put_fence2: fput(fence2->file); err_put_fd: put_unused_fd(fd); return err; } static int sync_fill_fence_info(struct dma_fence *fence, struct sync_fence_info *info) { strscpy(info->obj_name, fence->ops->get_timeline_name(fence), sizeof(info->obj_name)); strscpy(info->driver_name, fence->ops->get_driver_name(fence), sizeof(info->driver_name)); info->status = dma_fence_get_status(fence); info->timestamp_ns = dma_fence_is_signaled(fence) ? ktime_to_ns(dma_fence_timestamp(fence)) : ktime_set(0, 0); return info->status; } static long sync_file_ioctl_fence_info(struct sync_file *sync_file, unsigned long arg) { struct sync_fence_info *fence_info = NULL; struct dma_fence_unwrap iter; struct sync_file_info info; unsigned int num_fences; struct dma_fence *fence; int ret; __u32 size; if (copy_from_user(&info, (void __user *)arg, sizeof(info))) return -EFAULT; if (info.flags || info.pad) return -EINVAL; num_fences = 0; dma_fence_unwrap_for_each(fence, &iter, sync_file->fence) ++num_fences; /* * Passing num_fences = 0 means that userspace doesn't want to * retrieve any sync_fence_info. If num_fences = 0 we skip filling * sync_fence_info and return the actual number of fences on * info->num_fences. */ if (!info.num_fences) { info.status = dma_fence_get_status(sync_file->fence); goto no_fences; } else { info.status = 1; } if (info.num_fences < num_fences) return -EINVAL; size = num_fences * sizeof(*fence_info); fence_info = kzalloc(size, GFP_KERNEL); if (!fence_info) return -ENOMEM; num_fences = 0; dma_fence_unwrap_for_each(fence, &iter, sync_file->fence) { int status; status = sync_fill_fence_info(fence, &fence_info[num_fences++]); info.status = info.status <= 0 ? 
info.status : status; } if (copy_to_user(u64_to_user_ptr(info.sync_fence_info), fence_info, size)) { ret = -EFAULT; goto out; } no_fences: sync_file_get_name(sync_file, info.name, sizeof(info.name)); info.num_fences = num_fences; if (copy_to_user((void __user *)arg, &info, sizeof(info))) ret = -EFAULT; else ret = 0; out: kfree(fence_info); return ret; } static int sync_file_ioctl_set_deadline(struct sync_file *sync_file, unsigned long arg) { struct sync_set_deadline ts; if (copy_from_user(&ts, (void __user *)arg, sizeof(ts))) return -EFAULT; if (ts.pad) return -EINVAL; dma_fence_set_deadline(sync_file->fence, ns_to_ktime(ts.deadline_ns)); return 0; } static long sync_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct sync_file *sync_file = file->private_data; switch (cmd) { case SYNC_IOC_MERGE: return sync_file_ioctl_merge(sync_file, arg); case SYNC_IOC_FILE_INFO: return sync_file_ioctl_fence_info(sync_file, arg); case SYNC_IOC_SET_DEADLINE: return sync_file_ioctl_set_deadline(sync_file, arg); default: return -ENOTTY; } } static const struct file_operations sync_file_fops = { .release = sync_file_release, .poll = sync_file_poll, .unlocked_ioctl = sync_file_ioctl, .compat_ioctl = compat_ptr_ioctl, };
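/*
 * Illustrative userspace sketch (not part of this driver): merging two
 * sync_file fds with SYNC_IOC_MERGE, the path handled by
 * sync_file_ioctl_merge() above.  The struct and ioctl number come from
 * <linux/sync_file.h>; flags and pad must be zero or the kernel returns
 * -EINVAL, and the merged fd is handed back in data.fence.  The function
 * name is invented for the example.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/sync_file.h>

static int example_sync_merge(int fd1, int fd2)
{
	struct sync_merge_data data;

	memset(&data, 0, sizeof(data));
	strncpy(data.name, "example-merge", sizeof(data.name) - 1);
	data.fd2 = fd2;

	if (ioctl(fd1, SYNC_IOC_MERGE, &data) < 0)
		return -1;		/* errno describes the failure */

	return data.fence;		/* new fd owning the merged fence */
}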
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * 842 Software Compression
 *
 * Copyright (C) 2015 Dan Streetman, IBM Corp
 *
 * See 842.h for details of the 842 compressed format.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define MODULE_NAME "842_compress"

#include <linux/hashtable.h>

#include "842.h"
#include "842_debugfs.h"

#define SW842_HASHTABLE8_BITS	(10)
#define SW842_HASHTABLE4_BITS	(11)
#define SW842_HASHTABLE2_BITS	(10)

/* By default, we allow compressing input buffers of any length, but we must
 * use the non-standard "short data" template so the decompressor can correctly
 * reproduce the uncompressed data buffer at the right length.
However the * hardware 842 compressor will not recognize the "short data" template, and * will fail to decompress any compressed buffer containing it (I have no idea * why anyone would want to use software to compress and hardware to decompress * but that's beside the point). This parameter forces the compression * function to simply reject any input buffer that isn't a multiple of 8 bytes * long, instead of using the "short data" template, so that all compressed * buffers produced by this function will be decompressable by the 842 hardware * decompressor. Unless you have a specific need for that, leave this disabled * so that any length buffer can be compressed. */ static bool sw842_strict; module_param_named(strict, sw842_strict, bool, 0644); static u8 comp_ops[OPS_MAX][5] = { /* params size in bits */ { I8, N0, N0, N0, 0x19 }, /* 8 */ { I4, I4, N0, N0, 0x18 }, /* 18 */ { I4, I2, I2, N0, 0x17 }, /* 25 */ { I2, I2, I4, N0, 0x13 }, /* 25 */ { I2, I2, I2, I2, 0x12 }, /* 32 */ { I4, I2, D2, N0, 0x16 }, /* 33 */ { I4, D2, I2, N0, 0x15 }, /* 33 */ { I2, D2, I4, N0, 0x0e }, /* 33 */ { D2, I2, I4, N0, 0x09 }, /* 33 */ { I2, I2, I2, D2, 0x11 }, /* 40 */ { I2, I2, D2, I2, 0x10 }, /* 40 */ { I2, D2, I2, I2, 0x0d }, /* 40 */ { D2, I2, I2, I2, 0x08 }, /* 40 */ { I4, D4, N0, N0, 0x14 }, /* 41 */ { D4, I4, N0, N0, 0x04 }, /* 41 */ { I2, I2, D4, N0, 0x0f }, /* 48 */ { I2, D2, I2, D2, 0x0c }, /* 48 */ { I2, D4, I2, N0, 0x0b }, /* 48 */ { D2, I2, I2, D2, 0x07 }, /* 48 */ { D2, I2, D2, I2, 0x06 }, /* 48 */ { D4, I2, I2, N0, 0x03 }, /* 48 */ { I2, D2, D4, N0, 0x0a }, /* 56 */ { D2, I2, D4, N0, 0x05 }, /* 56 */ { D4, I2, D2, N0, 0x02 }, /* 56 */ { D4, D2, I2, N0, 0x01 }, /* 56 */ { D8, N0, N0, N0, 0x00 }, /* 64 */ }; struct sw842_hlist_node8 { struct hlist_node node; u64 data; u8 index; }; struct sw842_hlist_node4 { struct hlist_node node; u32 data; u16 index; }; struct sw842_hlist_node2 { struct hlist_node node; u16 data; u8 index; }; #define INDEX_NOT_FOUND (-1) #define INDEX_NOT_CHECKED (-2) struct sw842_param { u8 *in; u8 *instart; u64 ilen; u8 *out; u64 olen; u8 bit; u64 data8[1]; u32 data4[2]; u16 data2[4]; int index8[1]; int index4[2]; int index2[4]; DECLARE_HASHTABLE(htable8, SW842_HASHTABLE8_BITS); DECLARE_HASHTABLE(htable4, SW842_HASHTABLE4_BITS); DECLARE_HASHTABLE(htable2, SW842_HASHTABLE2_BITS); struct sw842_hlist_node8 node8[1 << I8_BITS]; struct sw842_hlist_node4 node4[1 << I4_BITS]; struct sw842_hlist_node2 node2[1 << I2_BITS]; }; #define get_input_data(p, o, b) \ be##b##_to_cpu(get_unaligned((__be##b *)((p)->in + (o)))) #define init_hashtable_nodes(p, b) do { \ int _i; \ hash_init((p)->htable##b); \ for (_i = 0; _i < ARRAY_SIZE((p)->node##b); _i++) { \ (p)->node##b[_i].index = _i; \ (p)->node##b[_i].data = 0; \ INIT_HLIST_NODE(&(p)->node##b[_i].node); \ } \ } while (0) #define find_index(p, b, n) ({ \ struct sw842_hlist_node##b *_n; \ p->index##b[n] = INDEX_NOT_FOUND; \ hash_for_each_possible(p->htable##b, _n, node, p->data##b[n]) { \ if (p->data##b[n] == _n->data) { \ p->index##b[n] = _n->index; \ break; \ } \ } \ p->index##b[n] >= 0; \ }) #define check_index(p, b, n) \ ((p)->index##b[n] == INDEX_NOT_CHECKED \ ? 
find_index(p, b, n) \ : (p)->index##b[n] >= 0) #define replace_hash(p, b, i, d) do { \ struct sw842_hlist_node##b *_n = &(p)->node##b[(i)+(d)]; \ hash_del(&_n->node); \ _n->data = (p)->data##b[d]; \ pr_debug("add hash index%x %x pos %x data %lx\n", b, \ (unsigned int)_n->index, \ (unsigned int)((p)->in - (p)->instart), \ (unsigned long)_n->data); \ hash_add((p)->htable##b, &_n->node, _n->data); \ } while (0) static u8 bmask[8] = { 0x00, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe }; static int add_bits(struct sw842_param *p, u64 d, u8 n); static int __split_add_bits(struct sw842_param *p, u64 d, u8 n, u8 s) { int ret; if (n <= s) return -EINVAL; ret = add_bits(p, d >> s, n - s); if (ret) return ret; return add_bits(p, d & GENMASK_ULL(s - 1, 0), s); } static int add_bits(struct sw842_param *p, u64 d, u8 n) { int b = p->bit, bits = b + n, s = round_up(bits, 8) - bits; u64 o; u8 *out = p->out; pr_debug("add %u bits %lx\n", (unsigned char)n, (unsigned long)d); if (n > 64) return -EINVAL; /* split this up if writing to > 8 bytes (i.e. n == 64 && p->bit > 0), * or if we're at the end of the output buffer and would write past end */ if (bits > 64) return __split_add_bits(p, d, n, 32); else if (p->olen < 8 && bits > 32 && bits <= 56) return __split_add_bits(p, d, n, 16); else if (p->olen < 4 && bits > 16 && bits <= 24) return __split_add_bits(p, d, n, 8); if (DIV_ROUND_UP(bits, 8) > p->olen) return -ENOSPC; o = *out & bmask[b]; d <<= s; if (bits <= 8) *out = o | d; else if (bits <= 16) put_unaligned(cpu_to_be16(o << 8 | d), (__be16 *)out); else if (bits <= 24) put_unaligned(cpu_to_be32(o << 24 | d << 8), (__be32 *)out); else if (bits <= 32) put_unaligned(cpu_to_be32(o << 24 | d), (__be32 *)out); else if (bits <= 40) put_unaligned(cpu_to_be64(o << 56 | d << 24), (__be64 *)out); else if (bits <= 48) put_unaligned(cpu_to_be64(o << 56 | d << 16), (__be64 *)out); else if (bits <= 56) put_unaligned(cpu_to_be64(o << 56 | d << 8), (__be64 *)out); else put_unaligned(cpu_to_be64(o << 56 | d), (__be64 *)out); p->bit += n; if (p->bit > 7) { p->out += p->bit / 8; p->olen -= p->bit / 8; p->bit %= 8; } return 0; } static int add_template(struct sw842_param *p, u8 c) { int ret, i, b = 0; u8 *t = comp_ops[c]; bool inv = false; if (c >= OPS_MAX) return -EINVAL; pr_debug("template %x\n", t[4]); ret = add_bits(p, t[4], OP_BITS); if (ret) return ret; for (i = 0; i < 4; i++) { pr_debug("op %x\n", t[i]); switch (t[i] & OP_AMOUNT) { case OP_AMOUNT_8: if (b) inv = true; else if (t[i] & OP_ACTION_INDEX) ret = add_bits(p, p->index8[0], I8_BITS); else if (t[i] & OP_ACTION_DATA) ret = add_bits(p, p->data8[0], 64); else inv = true; break; case OP_AMOUNT_4: if (b == 2 && t[i] & OP_ACTION_DATA) ret = add_bits(p, get_input_data(p, 2, 32), 32); else if (b != 0 && b != 4) inv = true; else if (t[i] & OP_ACTION_INDEX) ret = add_bits(p, p->index4[b >> 2], I4_BITS); else if (t[i] & OP_ACTION_DATA) ret = add_bits(p, p->data4[b >> 2], 32); else inv = true; break; case OP_AMOUNT_2: if (b != 0 && b != 2 && b != 4 && b != 6) inv = true; if (t[i] & OP_ACTION_INDEX) ret = add_bits(p, p->index2[b >> 1], I2_BITS); else if (t[i] & OP_ACTION_DATA) ret = add_bits(p, p->data2[b >> 1], 16); else inv = true; break; case OP_AMOUNT_0: inv = (b != 8) || !(t[i] & OP_ACTION_NOOP); break; default: inv = true; break; } if (ret) return ret; if (inv) { pr_err("Invalid templ %x op %d : %x %x %x %x\n", c, i, t[0], t[1], t[2], t[3]); return -EINVAL; } b += t[i] & OP_AMOUNT; } if (b != 8) { pr_err("Invalid template %x len %x : %x %x %x %x\n", c, b, t[0], t[1], t[2], 
t[3]); return -EINVAL; } if (sw842_template_counts) atomic_inc(&template_count[t[4]]); return 0; } static int add_repeat_template(struct sw842_param *p, u8 r) { int ret; /* repeat param is 0-based */ if (!r || --r > REPEAT_BITS_MAX) return -EINVAL; ret = add_bits(p, OP_REPEAT, OP_BITS); if (ret) return ret; ret = add_bits(p, r, REPEAT_BITS); if (ret) return ret; if (sw842_template_counts) atomic_inc(&template_repeat_count); return 0; } static int add_short_data_template(struct sw842_param *p, u8 b) { int ret, i; if (!b || b > SHORT_DATA_BITS_MAX) return -EINVAL; ret = add_bits(p, OP_SHORT_DATA, OP_BITS); if (ret) return ret; ret = add_bits(p, b, SHORT_DATA_BITS); if (ret) return ret; for (i = 0; i < b; i++) { ret = add_bits(p, p->in[i], 8); if (ret) return ret; } if (sw842_template_counts) atomic_inc(&template_short_data_count); return 0; } static int add_zeros_template(struct sw842_param *p) { int ret = add_bits(p, OP_ZEROS, OP_BITS); if (ret) return ret; if (sw842_template_counts) atomic_inc(&template_zeros_count); return 0; } static int add_end_template(struct sw842_param *p) { int ret = add_bits(p, OP_END, OP_BITS); if (ret) return ret; if (sw842_template_counts) atomic_inc(&template_end_count); return 0; } static bool check_template(struct sw842_param *p, u8 c) { u8 *t = comp_ops[c]; int i, match, b = 0; if (c >= OPS_MAX) return false; for (i = 0; i < 4; i++) { if (t[i] & OP_ACTION_INDEX) { if (t[i] & OP_AMOUNT_2) match = check_index(p, 2, b >> 1); else if (t[i] & OP_AMOUNT_4) match = check_index(p, 4, b >> 2); else if (t[i] & OP_AMOUNT_8) match = check_index(p, 8, 0); else return false; if (!match) return false; } b += t[i] & OP_AMOUNT; } return true; } static void get_next_data(struct sw842_param *p) { p->data8[0] = get_input_data(p, 0, 64); p->data4[0] = get_input_data(p, 0, 32); p->data4[1] = get_input_data(p, 4, 32); p->data2[0] = get_input_data(p, 0, 16); p->data2[1] = get_input_data(p, 2, 16); p->data2[2] = get_input_data(p, 4, 16); p->data2[3] = get_input_data(p, 6, 16); } /* update the hashtable entries. * only call this after finding/adding the current template * the dataN fields for the current 8 byte block must be already updated */ static void update_hashtables(struct sw842_param *p) { u64 pos = p->in - p->instart; u64 n8 = (pos >> 3) % (1 << I8_BITS); u64 n4 = (pos >> 2) % (1 << I4_BITS); u64 n2 = (pos >> 1) % (1 << I2_BITS); replace_hash(p, 8, n8, 0); replace_hash(p, 4, n4, 0); replace_hash(p, 4, n4, 1); replace_hash(p, 2, n2, 0); replace_hash(p, 2, n2, 1); replace_hash(p, 2, n2, 2); replace_hash(p, 2, n2, 3); } /* find the next template to use, and add it * the p->dataN fields must already be set for the current 8 byte block */ static int process_next(struct sw842_param *p) { int ret, i; p->index8[0] = INDEX_NOT_CHECKED; p->index4[0] = INDEX_NOT_CHECKED; p->index4[1] = INDEX_NOT_CHECKED; p->index2[0] = INDEX_NOT_CHECKED; p->index2[1] = INDEX_NOT_CHECKED; p->index2[2] = INDEX_NOT_CHECKED; p->index2[3] = INDEX_NOT_CHECKED; /* check up to OPS_MAX - 1; last op is our fallback */ for (i = 0; i < OPS_MAX - 1; i++) { if (check_template(p, i)) break; } ret = add_template(p, i); if (ret) return ret; return 0; } /** * sw842_compress * * Compress the uncompressed buffer of length @ilen at @in to the output buffer * @out, using no more than @olen bytes, using the 842 compression format. * * Returns: 0 on success, error on failure. The @olen parameter * will contain the number of output bytes written on success, or * 0 on error. 
*/ int sw842_compress(const u8 *in, unsigned int ilen, u8 *out, unsigned int *olen, void *wmem) { struct sw842_param *p = (struct sw842_param *)wmem; int ret; u64 last, next, pad, total; u8 repeat_count = 0; u32 crc; BUILD_BUG_ON(sizeof(*p) > SW842_MEM_COMPRESS); init_hashtable_nodes(p, 8); init_hashtable_nodes(p, 4); init_hashtable_nodes(p, 2); p->in = (u8 *)in; p->instart = p->in; p->ilen = ilen; p->out = out; p->olen = *olen; p->bit = 0; total = p->olen; *olen = 0; /* if using strict mode, we can only compress a multiple of 8 */ if (sw842_strict && (ilen % 8)) { pr_err("Using strict mode, can't compress len %d\n", ilen); return -EINVAL; } /* let's compress at least 8 bytes, mkay? */ if (unlikely(ilen < 8)) goto skip_comp; /* make initial 'last' different so we don't match the first time */ last = ~get_unaligned((u64 *)p->in); while (p->ilen > 7) { next = get_unaligned((u64 *)p->in); /* must get the next data, as we need to update the hashtable * entries with the new data every time */ get_next_data(p); /* we don't care about endianness in last or next; * we're just comparing 8 bytes to another 8 bytes, * they're both the same endianness */ if (next == last) { /* repeat count bits are 0-based, so we stop at +1 */ if (++repeat_count <= REPEAT_BITS_MAX) goto repeat; } if (repeat_count) { ret = add_repeat_template(p, repeat_count); if (ret) return ret; repeat_count = 0; if (next == last) /* reached max repeat bits */ goto repeat; } if (next == 0) ret = add_zeros_template(p); else ret = process_next(p); if (ret) return ret; repeat: last = next; update_hashtables(p); p->in += 8; p->ilen -= 8; } if (repeat_count) { ret = add_repeat_template(p, repeat_count); if (ret) return ret; } skip_comp: if (p->ilen > 0) { ret = add_short_data_template(p, p->ilen); if (ret) return ret; p->in += p->ilen; p->ilen = 0; } ret = add_end_template(p); if (ret) return ret; /* * crc(0:31) is appended to target data starting with the next * bit after End of stream template. * nx842 calculates CRC for data in big-endian format. So doing * same here so that sw842 decompression can be used for both * compressed data. */ crc = crc32_be(0, in, ilen); ret = add_bits(p, crc, CRC_BITS); if (ret) return ret; if (p->bit) { p->out++; p->olen--; p->bit = 0; } /* pad compressed length to multiple of 8 */ pad = (8 - ((total - p->olen) % 8)) % 8; if (pad) { if (pad > p->olen) /* we were so close! */ return -ENOSPC; memset(p->out, 0, pad); p->out += pad; p->olen -= pad; } if (unlikely((total - p->olen) > UINT_MAX)) return -ENOSPC; *olen = total - p->olen; return 0; } EXPORT_SYMBOL_GPL(sw842_compress); static int __init sw842_init(void) { if (sw842_template_counts) sw842_debugfs_create(); return 0; } module_init(sw842_init); static void __exit sw842_exit(void) { if (sw842_template_counts) sw842_debugfs_remove(); } module_exit(sw842_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Software 842 Compressor"); MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>");
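sw842_compress() above expects the caller to pass the output capacity in *olen and to supply a scratch buffer of at least SW842_MEM_COMPRESS bytes for the hash tables. The sketch below shows a minimal, hypothetical caller; only sw842_compress() and SW842_MEM_COMPRESS come from the code above (declared in the sw842 header), everything else is an illustrative assumption.

/*
 * Hypothetical caller of sw842_compress(); buffer and function names
 * are illustrative only.
 */
#include <linux/slab.h>
#include <linux/sw842.h>

static int example_842_compress(const u8 *src, unsigned int slen,
				u8 *dst, unsigned int *dlen)
{
	void *wmem;
	int ret;

	/* scratch memory for the compressor's hash tables */
	wmem = kmalloc(SW842_MEM_COMPRESS, GFP_KERNEL);
	if (!wmem)
		return -ENOMEM;

	/*
	 * On entry *dlen is the output capacity; on success it holds the
	 * number of compressed bytes actually written.
	 */
	ret = sw842_compress(src, slen, dst, dlen, wmem);

	kfree(wmem);
	return ret;
}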
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SWIOTLB_H
#define __LINUX_SWIOTLB_H

#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/limits.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct device;
struct page;
struct scatterlist;

#define SWIOTLB_VERBOSE	(1 << 0) /* verbose initialization */
#define SWIOTLB_FORCE	(1 << 1) /* force bounce buffering */
#define SWIOTLB_ANY	(1 << 2) /* allow any memory for the buffer */

/*
 * Maximum allowable number of contiguous slabs to map,
 * must be a power of 2. What is the appropriate value ?
 * The complexity of {map,unmap}_single is linearly dependent on this value.
 */
#define IO_TLB_SEGSIZE	128

/*
 * log of the size of each IO TLB slab. The number of slabs is command line
 * controllable.
 */
#define IO_TLB_SHIFT 11
#define IO_TLB_SIZE (1 << IO_TLB_SHIFT)

/* default to 64MB */
#define IO_TLB_DEFAULT_SIZE (64UL<<20)

unsigned long swiotlb_size_or_default(void);
void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
	int (*remap)(void *tlb, unsigned long nslabs));
int swiotlb_init_late(size_t size, gfp_t gfp_mask,
	int (*remap)(void *tlb, unsigned long nslabs));
extern void __init swiotlb_update_mem_attributes(void);

#ifdef CONFIG_SWIOTLB

/**
 * struct io_tlb_pool - IO TLB memory pool descriptor
 * @start:	The start address of the swiotlb memory pool. Used to do a quick
 *		range check to see if the memory was in fact allocated by this
 *		API.
 * @end:	The end address of the swiotlb memory pool. Used to do a quick
 *		range check to see if the memory was in fact allocated by this
 *		API.
 * @vaddr:	The vaddr of the swiotlb memory pool. The swiotlb memory pool
 *		may be remapped in the memory encrypted case and store virtual
 *		address for bounce buffer operation.
 * @nslabs:	The number of IO TLB slots between @start and @end. For the
 *		default swiotlb, this can be adjusted with a boot parameter,
 *		see setup_io_tlb_npages().
 * @late_alloc:	%true if allocated using the page allocator.
 * @nareas:	Number of areas in the pool.
 * @area_nslabs: Number of slots in each area.
 * @areas:	Array of memory area descriptors.
 * @slots:	Array of slot descriptors.
 * @node:	Member of the IO TLB memory pool list.
 * @rcu:	RCU head for swiotlb_dyn_free().
 * @transient:	%true if transient memory pool.
*/ struct io_tlb_pool { phys_addr_t start; phys_addr_t end; void *vaddr; unsigned long nslabs; bool late_alloc; unsigned int nareas; unsigned int area_nslabs; struct io_tlb_area *areas; struct io_tlb_slot *slots; #ifdef CONFIG_SWIOTLB_DYNAMIC struct list_head node; struct rcu_head rcu; bool transient; #endif }; /** * struct io_tlb_mem - Software IO TLB allocator * @defpool: Default (initial) IO TLB memory pool descriptor. * @pool: IO TLB memory pool descriptor (if not dynamic). * @nslabs: Total number of IO TLB slabs in all pools. * @debugfs: The dentry to debugfs. * @force_bounce: %true if swiotlb bouncing is forced * @for_alloc: %true if the pool is used for memory allocation * @can_grow: %true if more pools can be allocated dynamically. * @phys_limit: Maximum allowed physical address. * @lock: Lock to synchronize changes to the list. * @pools: List of IO TLB memory pool descriptors (if dynamic). * @dyn_alloc: Dynamic IO TLB pool allocation work. * @total_used: The total number of slots in the pool that are currently used * across all areas. Used only for calculating used_hiwater in * debugfs. * @used_hiwater: The high water mark for total_used. Used only for reporting * in debugfs. * @transient_nslabs: The total number of slots in all transient pools that * are currently used across all areas. */ struct io_tlb_mem { struct io_tlb_pool defpool; unsigned long nslabs; struct dentry *debugfs; bool force_bounce; bool for_alloc; #ifdef CONFIG_SWIOTLB_DYNAMIC bool can_grow; u64 phys_limit; spinlock_t lock; struct list_head pools; struct work_struct dyn_alloc; #endif #ifdef CONFIG_DEBUG_FS atomic_long_t total_used; atomic_long_t used_hiwater; atomic_long_t transient_nslabs; #endif }; struct io_tlb_pool *__swiotlb_find_pool(struct device *dev, phys_addr_t paddr); /** * swiotlb_find_pool() - find swiotlb pool to which a physical address belongs * @dev: Device which has mapped the buffer. * @paddr: Physical address within the DMA buffer. * * Find the swiotlb pool that @paddr points into. * * Return: * * pool address if @paddr points into a bounce buffer * * NULL if @paddr does not point into a bounce buffer. As such, this function * can be used to determine if @paddr denotes a swiotlb bounce buffer. */ static inline struct io_tlb_pool *swiotlb_find_pool(struct device *dev, phys_addr_t paddr) { struct io_tlb_mem *mem = dev->dma_io_tlb_mem; if (!mem) return NULL; #ifdef CONFIG_SWIOTLB_DYNAMIC /* * All SWIOTLB buffer addresses must have been returned by * swiotlb_tbl_map_single() and passed to a device driver. * If a SWIOTLB address is checked on another CPU, then it was * presumably loaded by the device driver from an unspecified private * data structure. Make sure that this load is ordered before reading * dev->dma_uses_io_tlb here and mem->pools in __swiotlb_find_pool(). * * This barrier pairs with smp_mb() in swiotlb_find_slots(). 
*/ smp_rmb(); if (READ_ONCE(dev->dma_uses_io_tlb)) return __swiotlb_find_pool(dev, paddr); #else if (paddr >= mem->defpool.start && paddr < mem->defpool.end) return &mem->defpool; #endif return NULL; } static inline bool is_swiotlb_force_bounce(struct device *dev) { struct io_tlb_mem *mem = dev->dma_io_tlb_mem; return mem && mem->force_bounce; } void swiotlb_init(bool addressing_limited, unsigned int flags); void __init swiotlb_exit(void); void swiotlb_dev_init(struct device *dev); size_t swiotlb_max_mapping_size(struct device *dev); bool is_swiotlb_allocated(void); bool is_swiotlb_active(struct device *dev); void __init swiotlb_adjust_size(unsigned long size); phys_addr_t default_swiotlb_base(void); phys_addr_t default_swiotlb_limit(void); #else static inline void swiotlb_init(bool addressing_limited, unsigned int flags) { } static inline void swiotlb_dev_init(struct device *dev) { } static inline struct io_tlb_pool *swiotlb_find_pool(struct device *dev, phys_addr_t paddr) { return NULL; } static inline bool is_swiotlb_force_bounce(struct device *dev) { return false; } static inline void swiotlb_exit(void) { } static inline size_t swiotlb_max_mapping_size(struct device *dev) { return SIZE_MAX; } static inline bool is_swiotlb_allocated(void) { return false; } static inline bool is_swiotlb_active(struct device *dev) { return false; } static inline void swiotlb_adjust_size(unsigned long size) { } static inline phys_addr_t default_swiotlb_base(void) { return 0; } static inline phys_addr_t default_swiotlb_limit(void) { return 0; } #endif /* CONFIG_SWIOTLB */ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t phys, size_t mapping_size, unsigned int alloc_aligned_mask, enum dma_data_direction dir, unsigned long attrs); dma_addr_t swiotlb_map(struct device *dev, phys_addr_t phys, size_t size, enum dma_data_direction dir, unsigned long attrs); void __swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr, size_t mapping_size, enum dma_data_direction dir, unsigned long attrs, struct io_tlb_pool *pool); static inline void swiotlb_tbl_unmap_single(struct device *dev, phys_addr_t addr, size_t size, enum dma_data_direction dir, unsigned long attrs) { struct io_tlb_pool *pool = swiotlb_find_pool(dev, addr); if (unlikely(pool)) __swiotlb_tbl_unmap_single(dev, addr, size, dir, attrs, pool); } void __swiotlb_sync_single_for_device(struct device *dev, phys_addr_t tlb_addr, size_t size, enum dma_data_direction dir, struct io_tlb_pool *pool); static inline void swiotlb_sync_single_for_device(struct device *dev, phys_addr_t addr, size_t size, enum dma_data_direction dir) { struct io_tlb_pool *pool = swiotlb_find_pool(dev, addr); if (unlikely(pool)) __swiotlb_sync_single_for_device(dev, addr, size, dir, pool); } void __swiotlb_sync_single_for_cpu(struct device *dev, phys_addr_t tlb_addr, size_t size, enum dma_data_direction dir, struct io_tlb_pool *pool); static inline void swiotlb_sync_single_for_cpu(struct device *dev, phys_addr_t addr, size_t size, enum dma_data_direction dir) { struct io_tlb_pool *pool = swiotlb_find_pool(dev, addr); if (unlikely(pool)) __swiotlb_sync_single_for_cpu(dev, addr, size, dir, pool); } extern void swiotlb_print_info(void); #ifdef CONFIG_DMA_RESTRICTED_POOL struct page *swiotlb_alloc(struct device *dev, size_t size); bool swiotlb_free(struct device *dev, struct page *page, size_t size); static inline bool is_swiotlb_for_alloc(struct device *dev) { return dev->dma_io_tlb_mem->for_alloc; } #else static inline struct page *swiotlb_alloc(struct device 
*dev, size_t size) { return NULL; } static inline bool swiotlb_free(struct device *dev, struct page *page, size_t size) { return false; } static inline bool is_swiotlb_for_alloc(struct device *dev) { return false; } #endif /* CONFIG_DMA_RESTRICTED_POOL */ #endif /* __LINUX_SWIOTLB_H */
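The inline wrappers above (swiotlb_tbl_unmap_single() and the two sync helpers) all hinge on the same test: swiotlb_find_pool() returns a pool only when the physical address lands inside a bounce buffer. Its kernel-doc notes that this makes it usable as a plain "is this swiotlb memory?" predicate; a minimal sketch of that use follows. The helper name is a hypothetical example, not part of this header, and needs nothing beyond what the header already includes.

/*
 * Sketch of using swiotlb_find_pool() as a bounce-buffer predicate, as
 * described in its kernel-doc above. The helper name is hypothetical.
 */
static inline bool example_is_swiotlb_buffer(struct device *dev,
					     phys_addr_t paddr)
{
	/* non-NULL means @paddr was allocated from a swiotlb pool */
	return swiotlb_find_pool(dev, paddr) != NULL;
}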
909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 // SPDX-License-Identifier: GPL-2.0-only /* net/atm/clip.c - RFC1577 Classical IP over ATM */ /* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */ #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__ #include <linux/string.h> #include <linux/errno.h> #include <linux/kernel.h> /* for UINT_MAX */ #include <linux/module.h> #include <linux/init.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <linux/wait.h> #include <linux/timer.h> #include <linux/if_arp.h> /* for some manifest constants */ #include <linux/notifier.h> #include <linux/atm.h> #include <linux/atmdev.h> #include <linux/atmclip.h> #include <linux/atmarp.h> #include <linux/capability.h> #include <linux/ip.h> /* for net/route.h */ #include <linux/in.h> /* for struct sockaddr_in */ #include <linux/if.h> /* for IFF_UP */ #include <linux/inetdevice.h> #include <linux/bitops.h> #include <linux/poison.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/rcupdate.h> #include <linux/jhash.h> #include <linux/slab.h> #include <net/route.h> /* for struct rtable and routing */ #include <net/icmp.h> /* icmp_send */ #include <net/arp.h> #include <linux/param.h> /* for HZ */ #include <linux/uaccess.h> #include <asm/byteorder.h> /* for htons etc. */ #include <linux/atomic.h> #include "common.h" #include "resources.h" #include <net/atmclip.h> static struct net_device *clip_devs; static struct atm_vcc *atmarpd; static struct timer_list idle_timer; static const struct neigh_ops clip_neigh_ops; static int to_atmarpd(enum atmarp_ctrl_type type, int itf, __be32 ip) { struct sock *sk; struct atmarp_ctrl *ctrl; struct sk_buff *skb; pr_debug("(%d)\n", type); if (!atmarpd) return -EUNATCH; skb = alloc_skb(sizeof(struct atmarp_ctrl), GFP_ATOMIC); if (!skb) return -ENOMEM; ctrl = skb_put(skb, sizeof(struct atmarp_ctrl)); ctrl->type = type; ctrl->itf_num = itf; ctrl->ip = ip; atm_force_charge(atmarpd, skb->truesize); sk = sk_atm(atmarpd); skb_queue_tail(&sk->sk_receive_queue, skb); sk->sk_data_ready(sk); return 0; } static void link_vcc(struct clip_vcc *clip_vcc, struct atmarp_entry *entry) { pr_debug("%p to entry %p (neigh %p)\n", clip_vcc, entry, entry->neigh); clip_vcc->entry = entry; clip_vcc->xoff = 0; /* @@@ may overrun buffer by one packet */ clip_vcc->next = entry->vccs; entry->vccs = clip_vcc; entry->neigh->used = jiffies; } static void unlink_clip_vcc(struct clip_vcc *clip_vcc) { struct atmarp_entry *entry = clip_vcc->entry; struct clip_vcc **walk; if (!entry) { pr_err("!clip_vcc->entry (clip_vcc %p)\n", clip_vcc); return; } netif_tx_lock_bh(entry->neigh->dev); /* block clip_start_xmit() */ entry->neigh->used = jiffies; for (walk = &entry->vccs; *walk; walk = &(*walk)->next) if (*walk == clip_vcc) { int error; *walk = clip_vcc->next; /* atomic */ clip_vcc->entry = NULL; if (clip_vcc->xoff) netif_wake_queue(entry->neigh->dev); if (entry->vccs) goto out; entry->expires = jiffies - 1; /* force resolution or expiration */ error = neigh_update(entry->neigh, NULL, NUD_NONE, NEIGH_UPDATE_F_ADMIN, 0); if (error) pr_err("neigh_update failed with %d\n", error); goto out; } pr_err("ATMARP: failed (entry %p, vcc 0x%p)\n", entry, clip_vcc); out: netif_tx_unlock_bh(entry->neigh->dev); } /* The neighbour entry n->lock is held. 
*/ static int neigh_check_cb(struct neighbour *n) { struct atmarp_entry *entry = neighbour_priv(n); struct clip_vcc *cv; if (n->ops != &clip_neigh_ops) return 0; for (cv = entry->vccs; cv; cv = cv->next) { unsigned long exp = cv->last_use + cv->idle_timeout; if (cv->idle_timeout && time_after(jiffies, exp)) { pr_debug("releasing vcc %p->%p of entry %p\n", cv, cv->vcc, entry); vcc_release_async(cv->vcc, -ETIMEDOUT); } } if (entry->vccs || time_before(jiffies, entry->expires)) return 0; if (refcount_read(&n->refcnt) > 1) { struct sk_buff *skb; pr_debug("destruction postponed with ref %d\n", refcount_read(&n->refcnt)); while ((skb = skb_dequeue(&n->arp_queue)) != NULL) dev_kfree_skb(skb); return 0; } pr_debug("expired neigh %p\n", n); return 1; } static void idle_timer_check(struct timer_list *unused) { write_lock(&arp_tbl.lock); __neigh_for_each_release(&arp_tbl, neigh_check_cb); mod_timer(&idle_timer, jiffies + CLIP_CHECK_INTERVAL * HZ); write_unlock(&arp_tbl.lock); } static int clip_arp_rcv(struct sk_buff *skb) { struct atm_vcc *vcc; pr_debug("\n"); vcc = ATM_SKB(skb)->vcc; if (!vcc || !atm_charge(vcc, skb->truesize)) { dev_kfree_skb_any(skb); return 0; } pr_debug("pushing to %p\n", vcc); pr_debug("using %p\n", CLIP_VCC(vcc)->old_push); CLIP_VCC(vcc)->old_push(vcc, skb); return 0; } static const unsigned char llc_oui[] = { 0xaa, /* DSAP: non-ISO */ 0xaa, /* SSAP: non-ISO */ 0x03, /* Ctrl: Unnumbered Information Command PDU */ 0x00, /* OUI: EtherType */ 0x00, 0x00 }; static void clip_push(struct atm_vcc *vcc, struct sk_buff *skb) { struct clip_vcc *clip_vcc = CLIP_VCC(vcc); pr_debug("\n"); if (!clip_devs) { atm_return(vcc, skb->truesize); kfree_skb(skb); return; } if (!skb) { pr_debug("removing VCC %p\n", clip_vcc); if (clip_vcc->entry) unlink_clip_vcc(clip_vcc); clip_vcc->old_push(vcc, NULL); /* pass on the bad news */ kfree(clip_vcc); return; } atm_return(vcc, skb->truesize); skb->dev = clip_vcc->entry ? clip_vcc->entry->neigh->dev : clip_devs; /* clip_vcc->entry == NULL if we don't have an IP address yet */ if (!skb->dev) { dev_kfree_skb_any(skb); return; } ATM_SKB(skb)->vcc = vcc; skb_reset_mac_header(skb); if (!clip_vcc->encap || skb->len < RFC1483LLC_LEN || memcmp(skb->data, llc_oui, sizeof(llc_oui))) skb->protocol = htons(ETH_P_IP); else { skb->protocol = ((__be16 *)skb->data)[3]; skb_pull(skb, RFC1483LLC_LEN); if (skb->protocol == htons(ETH_P_ARP)) { skb->dev->stats.rx_packets++; skb->dev->stats.rx_bytes += skb->len; clip_arp_rcv(skb); return; } } clip_vcc->last_use = jiffies; skb->dev->stats.rx_packets++; skb->dev->stats.rx_bytes += skb->len; memset(ATM_SKB(skb), 0, sizeof(struct atm_skb_data)); netif_rx(skb); } /* * Note: these spinlocks _must_not_ block on non-SMP. The only goal is that * clip_pop is atomic with respect to the critical section in clip_start_xmit. 
*/ static void clip_pop(struct atm_vcc *vcc, struct sk_buff *skb) { struct clip_vcc *clip_vcc = CLIP_VCC(vcc); struct net_device *dev = skb->dev; int old; unsigned long flags; pr_debug("(vcc %p)\n", vcc); clip_vcc->old_pop(vcc, skb); /* skb->dev == NULL in outbound ARP packets */ if (!dev) return; spin_lock_irqsave(&PRIV(dev)->xoff_lock, flags); if (atm_may_send(vcc, 0)) { old = xchg(&clip_vcc->xoff, 0); if (old) netif_wake_queue(dev); } spin_unlock_irqrestore(&PRIV(dev)->xoff_lock, flags); } static void clip_neigh_solicit(struct neighbour *neigh, struct sk_buff *skb) { __be32 *ip = (__be32 *) neigh->primary_key; pr_debug("(neigh %p, skb %p)\n", neigh, skb); to_atmarpd(act_need, PRIV(neigh->dev)->number, *ip); } static void clip_neigh_error(struct neighbour *neigh, struct sk_buff *skb) { #ifndef CONFIG_ATM_CLIP_NO_ICMP icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0); #endif kfree_skb(skb); } static const struct neigh_ops clip_neigh_ops = { .family = AF_INET, .solicit = clip_neigh_solicit, .error_report = clip_neigh_error, .output = neigh_direct_output, .connected_output = neigh_direct_output, }; static int clip_constructor(struct net_device *dev, struct neighbour *neigh) { struct atmarp_entry *entry = neighbour_priv(neigh); if (neigh->tbl->family != AF_INET) return -EINVAL; if (neigh->type != RTN_UNICAST) return -EINVAL; neigh->nud_state = NUD_NONE; neigh->ops = &clip_neigh_ops; neigh->output = neigh->ops->output; entry->neigh = neigh; entry->vccs = NULL; entry->expires = jiffies - 1; return 0; } /* @@@ copy bh locking from arp.c -- need to bh-enable atm code before */ /* * We play with the resolve flag: 0 and 1 have the usual meaning, but -1 means * to allocate the neighbour entry but not to ask atmarpd for resolution. Also, * don't increment the usage count. This is used to create entries in * clip_setentry. 
*/ static int clip_encap(struct atm_vcc *vcc, int mode) { if (!CLIP_VCC(vcc)) return -EBADFD; CLIP_VCC(vcc)->encap = mode; return 0; } static netdev_tx_t clip_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct clip_priv *clip_priv = PRIV(dev); struct dst_entry *dst = skb_dst(skb); struct atmarp_entry *entry; struct neighbour *n; struct atm_vcc *vcc; struct rtable *rt; __be32 *daddr; int old; unsigned long flags; pr_debug("(skb %p)\n", skb); if (!dst) { pr_err("skb_dst(skb) == NULL\n"); dev_kfree_skb(skb); dev->stats.tx_dropped++; return NETDEV_TX_OK; } rt = dst_rtable(dst); if (rt->rt_gw_family == AF_INET) daddr = &rt->rt_gw4; else daddr = &ip_hdr(skb)->daddr; n = dst_neigh_lookup(dst, daddr); if (!n) { pr_err("NO NEIGHBOUR !\n"); dev_kfree_skb(skb); dev->stats.tx_dropped++; return NETDEV_TX_OK; } entry = neighbour_priv(n); if (!entry->vccs) { if (time_after(jiffies, entry->expires)) { /* should be resolved */ entry->expires = jiffies + ATMARP_RETRY_DELAY * HZ; to_atmarpd(act_need, PRIV(dev)->number, *((__be32 *)n->primary_key)); } if (entry->neigh->arp_queue.qlen < ATMARP_MAX_UNRES_PACKETS) skb_queue_tail(&entry->neigh->arp_queue, skb); else { dev_kfree_skb(skb); dev->stats.tx_dropped++; } goto out_release_neigh; } pr_debug("neigh %p, vccs %p\n", entry, entry->vccs); ATM_SKB(skb)->vcc = vcc = entry->vccs->vcc; pr_debug("using neighbour %p, vcc %p\n", n, vcc); if (entry->vccs->encap) { void *here; here = skb_push(skb, RFC1483LLC_LEN); memcpy(here, llc_oui, sizeof(llc_oui)); ((__be16 *) here)[3] = skb->protocol; } atm_account_tx(vcc, skb); entry->vccs->last_use = jiffies; pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, vcc, vcc->dev); old = xchg(&entry->vccs->xoff, 1); /* assume XOFF ... */ if (old) { pr_warn("XOFF->XOFF transition\n"); goto out_release_neigh; } dev->stats.tx_packets++; dev->stats.tx_bytes += skb->len; vcc->send(vcc, skb); if (atm_may_send(vcc, 0)) { entry->vccs->xoff = 0; goto out_release_neigh; } spin_lock_irqsave(&clip_priv->xoff_lock, flags); netif_stop_queue(dev); /* XOFF -> throttle immediately */ barrier(); if (!entry->vccs->xoff) netif_start_queue(dev); /* Oh, we just raced with clip_pop. netif_start_queue should be good enough, because nothing should really be asleep because of the brief netif_stop_queue. If this isn't true or if it changes, use netif_wake_queue instead. 
*/ spin_unlock_irqrestore(&clip_priv->xoff_lock, flags); out_release_neigh: neigh_release(n); return NETDEV_TX_OK; } static int clip_mkip(struct atm_vcc *vcc, int timeout) { struct clip_vcc *clip_vcc; if (!vcc->push) return -EBADFD; clip_vcc = kmalloc(sizeof(struct clip_vcc), GFP_KERNEL); if (!clip_vcc) return -ENOMEM; pr_debug("%p vcc %p\n", clip_vcc, vcc); clip_vcc->vcc = vcc; vcc->user_back = clip_vcc; set_bit(ATM_VF_IS_CLIP, &vcc->flags); clip_vcc->entry = NULL; clip_vcc->xoff = 0; clip_vcc->encap = 1; clip_vcc->last_use = jiffies; clip_vcc->idle_timeout = timeout * HZ; clip_vcc->old_push = vcc->push; clip_vcc->old_pop = vcc->pop; vcc->push = clip_push; vcc->pop = clip_pop; /* re-process everything received between connection setup and MKIP */ vcc_process_recv_queue(vcc); return 0; } static int clip_setentry(struct atm_vcc *vcc, __be32 ip) { struct neighbour *neigh; struct atmarp_entry *entry; int error; struct clip_vcc *clip_vcc; struct rtable *rt; if (vcc->push != clip_push) { pr_warn("non-CLIP VCC\n"); return -EBADF; } clip_vcc = CLIP_VCC(vcc); if (!ip) { if (!clip_vcc->entry) { pr_err("hiding hidden ATMARP entry\n"); return 0; } pr_debug("remove\n"); unlink_clip_vcc(clip_vcc); return 0; } rt = ip_route_output(&init_net, ip, 0, 0, 0, RT_SCOPE_LINK); if (IS_ERR(rt)) return PTR_ERR(rt); neigh = __neigh_lookup(&arp_tbl, &ip, rt->dst.dev, 1); ip_rt_put(rt); if (!neigh) return -ENOMEM; entry = neighbour_priv(neigh); if (entry != clip_vcc->entry) { if (!clip_vcc->entry) pr_debug("add\n"); else { pr_debug("update\n"); unlink_clip_vcc(clip_vcc); } link_vcc(clip_vcc, entry); } error = neigh_update(neigh, llc_oui, NUD_PERMANENT, NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN, 0); neigh_release(neigh); return error; } static const struct net_device_ops clip_netdev_ops = { .ndo_start_xmit = clip_start_xmit, .ndo_neigh_construct = clip_constructor, }; static void clip_setup(struct net_device *dev) { dev->netdev_ops = &clip_netdev_ops; dev->type = ARPHRD_ATM; dev->neigh_priv_len = sizeof(struct atmarp_entry); dev->hard_header_len = RFC1483LLC_LEN; dev->mtu = RFC1626_MTU; dev->tx_queue_len = 100; /* "normal" queue (packets) */ /* When using a "real" qdisc, the qdisc determines the queue */ /* length. tx_queue_len is only used for the default case, */ /* without any more elaborate queuing. 100 is a reasonable */ /* compromise between decent burst-tolerance and protection */ /* against memory hogs. 
*/ netif_keep_dst(dev); } static int clip_create(int number) { struct net_device *dev; struct clip_priv *clip_priv; int error; if (number != -1) { for (dev = clip_devs; dev; dev = PRIV(dev)->next) if (PRIV(dev)->number == number) return -EEXIST; } else { number = 0; for (dev = clip_devs; dev; dev = PRIV(dev)->next) if (PRIV(dev)->number >= number) number = PRIV(dev)->number + 1; } dev = alloc_netdev(sizeof(struct clip_priv), "", NET_NAME_UNKNOWN, clip_setup); if (!dev) return -ENOMEM; clip_priv = PRIV(dev); sprintf(dev->name, "atm%d", number); spin_lock_init(&clip_priv->xoff_lock); clip_priv->number = number; error = register_netdev(dev); if (error) { free_netdev(dev); return error; } clip_priv->next = clip_devs; clip_devs = dev; pr_debug("registered (net:%s)\n", dev->name); return number; } static int clip_device_event(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); if (!net_eq(dev_net(dev), &init_net)) return NOTIFY_DONE; if (event == NETDEV_UNREGISTER) return NOTIFY_DONE; /* ignore non-CLIP devices */ if (dev->type != ARPHRD_ATM || dev->netdev_ops != &clip_netdev_ops) return NOTIFY_DONE; switch (event) { case NETDEV_UP: pr_debug("NETDEV_UP\n"); to_atmarpd(act_up, PRIV(dev)->number, 0); break; case NETDEV_GOING_DOWN: pr_debug("NETDEV_DOWN\n"); to_atmarpd(act_down, PRIV(dev)->number, 0); break; case NETDEV_CHANGE: case NETDEV_CHANGEMTU: pr_debug("NETDEV_CHANGE*\n"); to_atmarpd(act_change, PRIV(dev)->number, 0); break; } return NOTIFY_DONE; } static int clip_inet_event(struct notifier_block *this, unsigned long event, void *ifa) { struct in_device *in_dev; struct netdev_notifier_info info; in_dev = ((struct in_ifaddr *)ifa)->ifa_dev; /* * Transitions are of the down-change-up type, so it's sufficient to * handle the change on up. 
*/ if (event != NETDEV_UP) return NOTIFY_DONE; netdev_notifier_info_init(&info, in_dev->dev); return clip_device_event(this, NETDEV_CHANGE, &info); } static struct notifier_block clip_dev_notifier = { .notifier_call = clip_device_event, }; static struct notifier_block clip_inet_notifier = { .notifier_call = clip_inet_event, }; static void atmarpd_close(struct atm_vcc *vcc) { pr_debug("\n"); rtnl_lock(); atmarpd = NULL; skb_queue_purge(&sk_atm(vcc)->sk_receive_queue); rtnl_unlock(); pr_debug("(done)\n"); module_put(THIS_MODULE); } static const struct atmdev_ops atmarpd_dev_ops = { .close = atmarpd_close }; static struct atm_dev atmarpd_dev = { .ops = &atmarpd_dev_ops, .type = "arpd", .number = 999, .lock = __SPIN_LOCK_UNLOCKED(atmarpd_dev.lock) }; static int atm_init_atmarp(struct atm_vcc *vcc) { rtnl_lock(); if (atmarpd) { rtnl_unlock(); return -EADDRINUSE; } mod_timer(&idle_timer, jiffies + CLIP_CHECK_INTERVAL * HZ); atmarpd = vcc; set_bit(ATM_VF_META, &vcc->flags); set_bit(ATM_VF_READY, &vcc->flags); /* allow replies and avoid getting closed if signaling dies */ vcc->dev = &atmarpd_dev; vcc_insert_socket(sk_atm(vcc)); vcc->push = NULL; vcc->pop = NULL; /* crash */ vcc->push_oam = NULL; /* crash */ rtnl_unlock(); return 0; } static int clip_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { struct atm_vcc *vcc = ATM_SD(sock); int err = 0; switch (cmd) { case SIOCMKCLIP: case ATMARPD_CTRL: case ATMARP_MKIP: case ATMARP_SETENTRY: case ATMARP_ENCAP: if (!capable(CAP_NET_ADMIN)) return -EPERM; break; default: return -ENOIOCTLCMD; } switch (cmd) { case SIOCMKCLIP: err = clip_create(arg); break; case ATMARPD_CTRL: err = atm_init_atmarp(vcc); if (!err) { sock->state = SS_CONNECTED; __module_get(THIS_MODULE); } break; case ATMARP_MKIP: err = clip_mkip(vcc, arg); break; case ATMARP_SETENTRY: err = clip_setentry(vcc, (__force __be32)arg); break; case ATMARP_ENCAP: err = clip_encap(vcc, arg); break; } return err; } static struct atm_ioctl clip_ioctl_ops = { .owner = THIS_MODULE, .ioctl = clip_ioctl, }; #ifdef CONFIG_PROC_FS static void svc_addr(struct seq_file *seq, struct sockaddr_atmsvc *addr) { static int code[] = { 1, 2, 10, 6, 1, 0 }; static int e164[] = { 1, 8, 4, 6, 1, 0 }; if (*addr->sas_addr.pub) { seq_printf(seq, "%s", addr->sas_addr.pub); if (*addr->sas_addr.prv) seq_putc(seq, '+'); } else if (!*addr->sas_addr.prv) { seq_printf(seq, "%s", "(none)"); return; } if (*addr->sas_addr.prv) { unsigned char *prv = addr->sas_addr.prv; int *fields; int i, j; fields = *prv == ATM_AFI_E164 ? e164 : code; for (i = 0; fields[i]; i++) { for (j = fields[i]; j; j--) seq_printf(seq, "%02X", *prv++); if (fields[i + 1]) seq_putc(seq, '.'); } } } /* This means the neighbour entry has no attached VCC objects. */ #define SEQ_NO_VCC_TOKEN ((void *) 2) static void atmarp_info(struct seq_file *seq, struct neighbour *n, struct atmarp_entry *entry, struct clip_vcc *clip_vcc) { struct net_device *dev = n->dev; unsigned long exp; char buf[17]; int svc, llc, off; svc = ((clip_vcc == SEQ_NO_VCC_TOKEN) || (sk_atm(clip_vcc->vcc)->sk_family == AF_ATMSVC)); llc = ((clip_vcc == SEQ_NO_VCC_TOKEN) || clip_vcc->encap); if (clip_vcc == SEQ_NO_VCC_TOKEN) exp = entry->neigh->used; else exp = clip_vcc->last_use; exp = (jiffies - exp) / HZ; seq_printf(seq, "%-6s%-4s%-4s%5ld ", dev->name, svc ? "SVC" : "PVC", llc ? 
"LLC" : "NULL", exp); off = scnprintf(buf, sizeof(buf) - 1, "%pI4", n->primary_key); while (off < 16) buf[off++] = ' '; buf[off] = '\0'; seq_printf(seq, "%s", buf); if (clip_vcc == SEQ_NO_VCC_TOKEN) { if (time_before(jiffies, entry->expires)) seq_printf(seq, "(resolving)\n"); else seq_printf(seq, "(expired, ref %d)\n", refcount_read(&entry->neigh->refcnt)); } else if (!svc) { seq_printf(seq, "%d.%d.%d\n", clip_vcc->vcc->dev->number, clip_vcc->vcc->vpi, clip_vcc->vcc->vci); } else { svc_addr(seq, &clip_vcc->vcc->remote); seq_putc(seq, '\n'); } } struct clip_seq_state { /* This member must be first. */ struct neigh_seq_state ns; /* Local to clip specific iteration. */ struct clip_vcc *vcc; }; static struct clip_vcc *clip_seq_next_vcc(struct atmarp_entry *e, struct clip_vcc *curr) { if (!curr) { curr = e->vccs; if (!curr) return SEQ_NO_VCC_TOKEN; return curr; } if (curr == SEQ_NO_VCC_TOKEN) return NULL; curr = curr->next; return curr; } static void *clip_seq_vcc_walk(struct clip_seq_state *state, struct atmarp_entry *e, loff_t * pos) { struct clip_vcc *vcc = state->vcc; vcc = clip_seq_next_vcc(e, vcc); if (vcc && pos != NULL) { while (*pos) { vcc = clip_seq_next_vcc(e, vcc); if (!vcc) break; --(*pos); } } state->vcc = vcc; return vcc; } static void *clip_seq_sub_iter(struct neigh_seq_state *_state, struct neighbour *n, loff_t * pos) { struct clip_seq_state *state = (struct clip_seq_state *)_state; if (n->dev->type != ARPHRD_ATM) return NULL; return clip_seq_vcc_walk(state, neighbour_priv(n), pos); } static void *clip_seq_start(struct seq_file *seq, loff_t * pos) { struct clip_seq_state *state = seq->private; state->ns.neigh_sub_iter = clip_seq_sub_iter; return neigh_seq_start(seq, pos, &arp_tbl, NEIGH_SEQ_NEIGH_ONLY); } static int clip_seq_show(struct seq_file *seq, void *v) { static char atm_arp_banner[] = "IPitf TypeEncp Idle IP address ATM address\n"; if (v == SEQ_START_TOKEN) { seq_puts(seq, atm_arp_banner); } else { struct clip_seq_state *state = seq->private; struct clip_vcc *vcc = state->vcc; struct neighbour *n = v; atmarp_info(seq, n, neighbour_priv(n), vcc); } return 0; } static const struct seq_operations arp_seq_ops = { .start = clip_seq_start, .next = neigh_seq_next, .stop = neigh_seq_stop, .show = clip_seq_show, }; #endif static void atm_clip_exit_noproc(void); static int __init atm_clip_init(void) { register_atm_ioctl(&clip_ioctl_ops); register_netdevice_notifier(&clip_dev_notifier); register_inetaddr_notifier(&clip_inet_notifier); timer_setup(&idle_timer, idle_timer_check, 0); #ifdef CONFIG_PROC_FS { struct proc_dir_entry *p; p = proc_create_net("arp", 0444, atm_proc_root, &arp_seq_ops, sizeof(struct clip_seq_state)); if (!p) { pr_err("Unable to initialize /proc/net/atm/arp\n"); atm_clip_exit_noproc(); return -ENOMEM; } } #endif return 0; } static void atm_clip_exit_noproc(void) { struct net_device *dev, *next; unregister_inetaddr_notifier(&clip_inet_notifier); unregister_netdevice_notifier(&clip_dev_notifier); deregister_atm_ioctl(&clip_ioctl_ops); /* First, stop the idle timer, so it stops banging * on the table. */ timer_delete_sync(&idle_timer); dev = clip_devs; while (dev) { next = PRIV(dev)->next; unregister_netdev(dev); free_netdev(dev); dev = next; } } static void __exit atm_clip_exit(void) { remove_proc_entry("arp", atm_proc_root); atm_clip_exit_noproc(); } module_init(atm_clip_init); module_exit(atm_clip_exit); MODULE_AUTHOR("Werner Almesberger"); MODULE_DESCRIPTION("Classical/IP over ATM interface"); MODULE_LICENSE("GPL");
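For reference, the LLC/SNAP encapsulation that clip_push() strips and clip_start_xmit() prepends is simply the 6-byte llc_oui sequence followed by the 16-bit EtherType, RFC1483LLC_LEN bytes in total. The sketch below only spells out that layout into a caller-provided buffer; the helper and its 8-byte buffer are illustrative assumptions, not part of the driver, and rely only on headers clip.c already includes.

/*
 * Illustrative sketch of the RFC 1483 LLC/SNAP header used by the CLIP
 * code above: llc_oui followed by the EtherType in network byte order.
 */
static void example_build_llc_hdr(u8 hdr[8], __be16 ethertype)
{
	static const u8 oui[6] = {
		0xaa, 0xaa,		/* DSAP, SSAP: SNAP (non-ISO) */
		0x03,			/* Ctrl: Unnumbered Information */
		0x00, 0x00, 0x00	/* OUI 00-00-00: EtherType follows */
	};

	memcpy(hdr, oui, sizeof(oui));
	/* same slot clip_start_xmit() fills with ((__be16 *)here)[3] */
	memcpy(hdr + 6, &ethertype, sizeof(ethertype));
}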
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for the Conexant CX2584x Audio/Video decoder chip and related cores
 *
 * Integrated Consumer Infrared Controller
 *
 * Copyright (C) 2010 Andy Walls <awalls@md.metrocast.net>
 */

#include <linux/slab.h>
#include <linux/kfifo.h>
#include <linux/module.h>
#include <media/drv-intf/cx25840.h>
#include <media/rc-core.h>

#include "cx25840-core.h"

static unsigned int ir_debug;
module_param(ir_debug, int, 0644);
MODULE_PARM_DESC(ir_debug, "enable integrated IR debug messages");

#define CX25840_IR_REG_BASE	0x200

#define CX25840_IR_CNTRL_REG	0x200
#define CNTRL_WIN_3_3	0x00000000
#define CNTRL_WIN_4_3	0x00000001
#define CNTRL_WIN_3_4	0x00000002
#define CNTRL_WIN_4_4	0x00000003
#define CNTRL_WIN	0x00000003
#define CNTRL_EDG_NONE	0x00000000
#define CNTRL_EDG_FALL	0x00000004
#define CNTRL_EDG_RISE	0x00000008
#define CNTRL_EDG_BOTH	0x0000000C
#define CNTRL_EDG	0x0000000C
#define CNTRL_DMD	0x00000010
#define CNTRL_MOD	0x00000020
#define CNTRL_RFE	0x00000040
#define CNTRL_TFE	0x00000080
#define CNTRL_RXE	0x00000100
#define CNTRL_TXE	0x00000200
#define CNTRL_RIC	0x00000400
#define CNTRL_TIC	0x00000800
#define CNTRL_CPL	0x00001000
#define CNTRL_LBM	0x00002000
#define CNTRL_R		0x00004000

#define CX25840_IR_TXCLK_REG	0x204
#define TXCLK_TCD	0x0000FFFF

#define CX25840_IR_RXCLK_REG	0x208
#define RXCLK_RCD	0x0000FFFF

#define CX25840_IR_CDUTY_REG	0x20C
#define CDUTY_CDC	0x0000000F

#define CX25840_IR_STATS_REG	0x210
#define STATS_RTO	0x00000001
#define STATS_ROR	0x00000002
#define STATS_RBY	0x00000004
#define STATS_TBY	0x00000008
#define STATS_RSR	0x00000010
#define STATS_TSR	0x00000020

#define CX25840_IR_IRQEN_REG	0x214
#define IRQEN_RTE	0x00000001
#define IRQEN_ROE	0x00000002
#define IRQEN_RSE	0x00000010
#define IRQEN_TSE	0x00000020
#define IRQEN_MSK	0x00000033

#define CX25840_IR_FILTR_REG	0x218
#define FILTR_LPF	0x0000FFFF

#define CX25840_IR_FIFO_REG	0x23C
#define FIFO_RXTX	0x0000FFFF
#define FIFO_RXTX_LVL	0x00010000
#define
FIFO_RXTX_RTO 0x0001FFFF #define FIFO_RX_NDV 0x00020000 #define FIFO_RX_DEPTH 8 #define FIFO_TX_DEPTH 8 #define CX25840_VIDCLK_FREQ 108000000 /* 108 MHz, BT.656 */ #define CX25840_IR_REFCLK_FREQ (CX25840_VIDCLK_FREQ / 2) /* * We use this union internally for convenience, but callers to tx_write * and rx_read will be expecting records of type struct ir_raw_event. * Always ensure the size of this union is dictated by struct ir_raw_event. */ union cx25840_ir_fifo_rec { u32 hw_fifo_data; struct ir_raw_event ir_core_data; }; #define CX25840_IR_RX_KFIFO_SIZE (256 * sizeof(union cx25840_ir_fifo_rec)) #define CX25840_IR_TX_KFIFO_SIZE (256 * sizeof(union cx25840_ir_fifo_rec)) struct cx25840_ir_state { struct i2c_client *c; struct v4l2_subdev_ir_parameters rx_params; struct mutex rx_params_lock; /* protects Rx parameter settings cache */ atomic_t rxclk_divider; atomic_t rx_invert; struct kfifo rx_kfifo; spinlock_t rx_kfifo_lock; /* protect Rx data kfifo */ struct v4l2_subdev_ir_parameters tx_params; struct mutex tx_params_lock; /* protects Tx parameter settings cache */ atomic_t txclk_divider; }; static inline struct cx25840_ir_state *to_ir_state(struct v4l2_subdev *sd) { struct cx25840_state *state = to_state(sd); return state ? state->ir_state : NULL; } /* * Rx and Tx Clock Divider register computations * * Note the largest clock divider value of 0xffff corresponds to: * (0xffff + 1) * 1000 / 108/2 MHz = 1,213,629.629... ns * which fits in 21 bits, so we'll use unsigned int for time arguments. */ static inline u16 count_to_clock_divider(unsigned int d) { if (d > RXCLK_RCD + 1) d = RXCLK_RCD; else if (d < 2) d = 1; else d--; return (u16) d; } static inline u16 carrier_freq_to_clock_divider(unsigned int freq) { return count_to_clock_divider( DIV_ROUND_CLOSEST(CX25840_IR_REFCLK_FREQ, freq * 16)); } static inline unsigned int clock_divider_to_carrier_freq(unsigned int divider) { return DIV_ROUND_CLOSEST(CX25840_IR_REFCLK_FREQ, (divider + 1) * 16); } static inline unsigned int clock_divider_to_freq(unsigned int divider, unsigned int rollovers) { return DIV_ROUND_CLOSEST(CX25840_IR_REFCLK_FREQ, (divider + 1) * rollovers); } /* * Low Pass Filter register calculations * * Note the largest count value of 0xffff corresponds to: * 0xffff * 1000 / 108/2 MHz = 1,213,611.11... ns * which fits in 21 bits, so we'll use unsigned int for time arguments. */ static inline u16 count_to_lpf_count(unsigned int d) { if (d > FILTR_LPF) d = FILTR_LPF; else if (d < 4) d = 0; return (u16) d; } static inline u16 ns_to_lpf_count(unsigned int ns) { return count_to_lpf_count( DIV_ROUND_CLOSEST(CX25840_IR_REFCLK_FREQ / 1000000 * ns, 1000)); } static inline unsigned int lpf_count_to_ns(unsigned int count) { /* Duration of the Low Pass Filter rejection window in ns */ return DIV_ROUND_CLOSEST(count * 1000, CX25840_IR_REFCLK_FREQ / 1000000); } static inline unsigned int lpf_count_to_us(unsigned int count) { /* Duration of the Low Pass Filter rejection window in us */ return DIV_ROUND_CLOSEST(count, CX25840_IR_REFCLK_FREQ / 1000000); } /* * FIFO register pulse width count computations */ static u32 clock_divider_to_resolution(u16 divider) { /* * Resolution is the duration of 1 tick of the readable portion of * the pulse width counter as read from the FIFO. The two lsb's are * not readable, hence the << 2. This function returns ns. 
*/ return DIV_ROUND_CLOSEST((1 << 2) * ((u32) divider + 1) * 1000, CX25840_IR_REFCLK_FREQ / 1000000); } static u64 pulse_width_count_to_ns(u16 count, u16 divider) { u64 n; u32 rem; /* * The 2 lsb's of the pulse width timer count are not readable, hence * the (count << 2) | 0x3 */ n = (((u64) count << 2) | 0x3) * (divider + 1) * 1000; /* millicycles */ rem = do_div(n, CX25840_IR_REFCLK_FREQ / 1000000); /* / MHz => ns */ if (rem >= CX25840_IR_REFCLK_FREQ / 1000000 / 2) n++; return n; } #if 0 /* Keep as we will need this for Transmit functionality */ static u16 ns_to_pulse_width_count(u32 ns, u16 divider) { u64 n; u32 d; u32 rem; /* * The 2 lsb's of the pulse width timer count are not accessible, hence * the (1 << 2) */ n = ((u64) ns) * CX25840_IR_REFCLK_FREQ / 1000000; /* millicycles */ d = (1 << 2) * ((u32) divider + 1) * 1000; /* millicycles/count */ rem = do_div(n, d); if (rem >= d / 2) n++; if (n > FIFO_RXTX) n = FIFO_RXTX; else if (n == 0) n = 1; return (u16) n; } #endif static unsigned int pulse_width_count_to_us(u16 count, u16 divider) { u64 n; u32 rem; /* * The 2 lsb's of the pulse width timer count are not readable, hence * the (count << 2) | 0x3 */ n = (((u64) count << 2) | 0x3) * (divider + 1); /* cycles */ rem = do_div(n, CX25840_IR_REFCLK_FREQ / 1000000); /* / MHz => us */ if (rem >= CX25840_IR_REFCLK_FREQ / 1000000 / 2) n++; return (unsigned int) n; } /* * Pulse Clocks computations: Combined Pulse Width Count & Rx Clock Counts * * The total pulse clock count is an 18 bit pulse width timer count as the most * significant part and (up to) 16 bit clock divider count as a modulus. * When the Rx clock divider ticks down to 0, it increments the 18 bit pulse * width timer count's least significant bit. */ static u64 ns_to_pulse_clocks(u32 ns) { u64 clocks; u32 rem; clocks = CX25840_IR_REFCLK_FREQ / 1000000 * (u64) ns; /* millicycles */ rem = do_div(clocks, 1000); /* /1000 = cycles */ if (rem >= 1000 / 2) clocks++; return clocks; } static u16 pulse_clocks_to_clock_divider(u64 count) { do_div(count, (FIFO_RXTX << 2) | 0x3); /* net result needs to be rounded down and decremented by 1 */ if (count > RXCLK_RCD + 1) count = RXCLK_RCD; else if (count < 2) count = 1; else count--; return (u16) count; } /* * IR Control Register helpers */ enum tx_fifo_watermark { TX_FIFO_HALF_EMPTY = 0, TX_FIFO_EMPTY = CNTRL_TIC, }; enum rx_fifo_watermark { RX_FIFO_HALF_FULL = 0, RX_FIFO_NOT_EMPTY = CNTRL_RIC, }; static inline void control_tx_irq_watermark(struct i2c_client *c, enum tx_fifo_watermark level) { cx25840_and_or4(c, CX25840_IR_CNTRL_REG, ~CNTRL_TIC, level); } static inline void control_rx_irq_watermark(struct i2c_client *c, enum rx_fifo_watermark level) { cx25840_and_or4(c, CX25840_IR_CNTRL_REG, ~CNTRL_RIC, level); } static inline void control_tx_enable(struct i2c_client *c, bool enable) { cx25840_and_or4(c, CX25840_IR_CNTRL_REG, ~(CNTRL_TXE | CNTRL_TFE), enable ? (CNTRL_TXE | CNTRL_TFE) : 0); } static inline void control_rx_enable(struct i2c_client *c, bool enable) { cx25840_and_or4(c, CX25840_IR_CNTRL_REG, ~(CNTRL_RXE | CNTRL_RFE), enable ? (CNTRL_RXE | CNTRL_RFE) : 0); } static inline void control_tx_modulation_enable(struct i2c_client *c, bool enable) { cx25840_and_or4(c, CX25840_IR_CNTRL_REG, ~CNTRL_MOD, enable ? CNTRL_MOD : 0); } static inline void control_rx_demodulation_enable(struct i2c_client *c, bool enable) { cx25840_and_or4(c, CX25840_IR_CNTRL_REG, ~CNTRL_DMD, enable ? 
CNTRL_DMD : 0); } static inline void control_rx_s_edge_detection(struct i2c_client *c, u32 edge_types) { cx25840_and_or4(c, CX25840_IR_CNTRL_REG, ~CNTRL_EDG_BOTH, edge_types & CNTRL_EDG_BOTH); } static void control_rx_s_carrier_window(struct i2c_client *c, unsigned int carrier, unsigned int *carrier_range_low, unsigned int *carrier_range_high) { u32 v; unsigned int c16 = carrier * 16; if (*carrier_range_low < DIV_ROUND_CLOSEST(c16, 16 + 3)) { v = CNTRL_WIN_3_4; *carrier_range_low = DIV_ROUND_CLOSEST(c16, 16 + 4); } else { v = CNTRL_WIN_3_3; *carrier_range_low = DIV_ROUND_CLOSEST(c16, 16 + 3); } if (*carrier_range_high > DIV_ROUND_CLOSEST(c16, 16 - 3)) { v |= CNTRL_WIN_4_3; *carrier_range_high = DIV_ROUND_CLOSEST(c16, 16 - 4); } else { v |= CNTRL_WIN_3_3; *carrier_range_high = DIV_ROUND_CLOSEST(c16, 16 - 3); } cx25840_and_or4(c, CX25840_IR_CNTRL_REG, ~CNTRL_WIN, v); } static inline void control_tx_polarity_invert(struct i2c_client *c, bool invert) { cx25840_and_or4(c, CX25840_IR_CNTRL_REG, ~CNTRL_CPL, invert ? CNTRL_CPL : 0); } /* * IR Rx & Tx Clock Register helpers */ static unsigned int txclk_tx_s_carrier(struct i2c_client *c, unsigned int freq, u16 *divider) { *divider = carrier_freq_to_clock_divider(freq); cx25840_write4(c, CX25840_IR_TXCLK_REG, *divider); return clock_divider_to_carrier_freq(*divider); } static unsigned int rxclk_rx_s_carrier(struct i2c_client *c, unsigned int freq, u16 *divider) { *divider = carrier_freq_to_clock_divider(freq); cx25840_write4(c, CX25840_IR_RXCLK_REG, *divider); return clock_divider_to_carrier_freq(*divider); } static u32 txclk_tx_s_max_pulse_width(struct i2c_client *c, u32 ns, u16 *divider) { u64 pulse_clocks; if (ns > IR_MAX_DURATION) ns = IR_MAX_DURATION; pulse_clocks = ns_to_pulse_clocks(ns); *divider = pulse_clocks_to_clock_divider(pulse_clocks); cx25840_write4(c, CX25840_IR_TXCLK_REG, *divider); return (u32) pulse_width_count_to_ns(FIFO_RXTX, *divider); } static u32 rxclk_rx_s_max_pulse_width(struct i2c_client *c, u32 ns, u16 *divider) { u64 pulse_clocks; if (ns > IR_MAX_DURATION) ns = IR_MAX_DURATION; pulse_clocks = ns_to_pulse_clocks(ns); *divider = pulse_clocks_to_clock_divider(pulse_clocks); cx25840_write4(c, CX25840_IR_RXCLK_REG, *divider); return (u32) pulse_width_count_to_ns(FIFO_RXTX, *divider); } /* * IR Tx Carrier Duty Cycle register helpers */ static unsigned int cduty_tx_s_duty_cycle(struct i2c_client *c, unsigned int duty_cycle) { u32 n; n = DIV_ROUND_CLOSEST(duty_cycle * 100, 625); /* 16ths of 100% */ if (n != 0) n--; if (n > 15) n = 15; cx25840_write4(c, CX25840_IR_CDUTY_REG, n); return DIV_ROUND_CLOSEST((n + 1) * 100, 16); } /* * IR Filter Register helpers */ static u32 filter_rx_s_min_width(struct i2c_client *c, u32 min_width_ns) { u32 count = ns_to_lpf_count(min_width_ns); cx25840_write4(c, CX25840_IR_FILTR_REG, count); return lpf_count_to_ns(count); } /* * IR IRQ Enable Register helpers */ static inline void irqenable_rx(struct v4l2_subdev *sd, u32 mask) { struct cx25840_state *state = to_state(sd); if (is_cx23885(state) || is_cx23887(state)) mask ^= IRQEN_MSK; mask &= (IRQEN_RTE | IRQEN_ROE | IRQEN_RSE); cx25840_and_or4(state->c, CX25840_IR_IRQEN_REG, ~(IRQEN_RTE | IRQEN_ROE | IRQEN_RSE), mask); } static inline void irqenable_tx(struct v4l2_subdev *sd, u32 mask) { struct cx25840_state *state = to_state(sd); if (is_cx23885(state) || is_cx23887(state)) mask ^= IRQEN_MSK; mask &= IRQEN_TSE; cx25840_and_or4(state->c, CX25840_IR_IRQEN_REG, ~IRQEN_TSE, mask); } /* * V4L2 Subdevice IR Ops */ int cx25840_ir_irq_handler(struct 
v4l2_subdev *sd, u32 status, bool *handled) { struct cx25840_state *state = to_state(sd); struct cx25840_ir_state *ir_state = to_ir_state(sd); struct i2c_client *c = NULL; unsigned long flags; union cx25840_ir_fifo_rec rx_data[FIFO_RX_DEPTH]; unsigned int i, j, k; u32 events, v; int tsr, rsr, rto, ror, tse, rse, rte, roe, kror; u32 cntrl, irqen, stats; *handled = false; if (ir_state == NULL) return -ENODEV; c = ir_state->c; /* Only support the IR controller for the CX2388[57] AV Core for now */ if (!(is_cx23885(state) || is_cx23887(state))) return -ENODEV; cntrl = cx25840_read4(c, CX25840_IR_CNTRL_REG); irqen = cx25840_read4(c, CX25840_IR_IRQEN_REG); if (is_cx23885(state) || is_cx23887(state)) irqen ^= IRQEN_MSK; stats = cx25840_read4(c, CX25840_IR_STATS_REG); tsr = stats & STATS_TSR; /* Tx FIFO Service Request */ rsr = stats & STATS_RSR; /* Rx FIFO Service Request */ rto = stats & STATS_RTO; /* Rx Pulse Width Timer Time Out */ ror = stats & STATS_ROR; /* Rx FIFO Over Run */ tse = irqen & IRQEN_TSE; /* Tx FIFO Service Request IRQ Enable */ rse = irqen & IRQEN_RSE; /* Rx FIFO Service Request IRQ Enable */ rte = irqen & IRQEN_RTE; /* Rx Pulse Width Timer Time Out IRQ Enable */ roe = irqen & IRQEN_ROE; /* Rx FIFO Over Run IRQ Enable */ v4l2_dbg(2, ir_debug, sd, "IR IRQ Status: %s %s %s %s %s %s\n", tsr ? "tsr" : " ", rsr ? "rsr" : " ", rto ? "rto" : " ", ror ? "ror" : " ", stats & STATS_TBY ? "tby" : " ", stats & STATS_RBY ? "rby" : " "); v4l2_dbg(2, ir_debug, sd, "IR IRQ Enables: %s %s %s %s\n", tse ? "tse" : " ", rse ? "rse" : " ", rte ? "rte" : " ", roe ? "roe" : " "); /* * Transmitter interrupt service */ if (tse && tsr) { /* * TODO: * Check the watermark threshold setting * Pull FIFO_TX_DEPTH or FIFO_TX_DEPTH/2 entries from tx_kfifo * Push the data to the hardware FIFO. * If there was nothing more to send in the tx_kfifo, disable * the TSR IRQ and notify the v4l2_device. * If there was something in the tx_kfifo, check the tx_kfifo * level and notify the v4l2_device, if it is low. */ /* For now, inhibit TSR interrupt until Tx is implemented */ irqenable_tx(sd, 0); events = V4L2_SUBDEV_IR_TX_FIFO_SERVICE_REQ; v4l2_subdev_notify(sd, V4L2_SUBDEV_IR_TX_NOTIFY, &events); *handled = true; } /* * Receiver interrupt service */ kror = 0; if ((rse && rsr) || (rte && rto)) { /* * Receive data on RSR to clear the STATS_RSR. * Receive data on RTO, since we may not have yet hit the RSR * watermark when we receive the RTO. 
*/ for (i = 0, v = FIFO_RX_NDV; (v & FIFO_RX_NDV) && !kror; i = 0) { for (j = 0; (v & FIFO_RX_NDV) && j < FIFO_RX_DEPTH; j++) { v = cx25840_read4(c, CX25840_IR_FIFO_REG); rx_data[i].hw_fifo_data = v & ~FIFO_RX_NDV; i++; } if (i == 0) break; j = i * sizeof(union cx25840_ir_fifo_rec); k = kfifo_in_locked(&ir_state->rx_kfifo, (unsigned char *) rx_data, j, &ir_state->rx_kfifo_lock); if (k != j) kror++; /* rx_kfifo over run */ } *handled = true; } events = 0; v = 0; if (kror) { events |= V4L2_SUBDEV_IR_RX_SW_FIFO_OVERRUN; v4l2_err(sd, "IR receiver software FIFO overrun\n"); } if (roe && ror) { /* * The RX FIFO Enable (CNTRL_RFE) must be toggled to clear * the Rx FIFO Over Run status (STATS_ROR) */ v |= CNTRL_RFE; events |= V4L2_SUBDEV_IR_RX_HW_FIFO_OVERRUN; v4l2_err(sd, "IR receiver hardware FIFO overrun\n"); } if (rte && rto) { /* * The IR Receiver Enable (CNTRL_RXE) must be toggled to clear * the Rx Pulse Width Timer Time Out (STATS_RTO) */ v |= CNTRL_RXE; events |= V4L2_SUBDEV_IR_RX_END_OF_RX_DETECTED; } if (v) { /* Clear STATS_ROR & STATS_RTO as needed by resetting hardware */ cx25840_write4(c, CX25840_IR_CNTRL_REG, cntrl & ~v); cx25840_write4(c, CX25840_IR_CNTRL_REG, cntrl); *handled = true; } spin_lock_irqsave(&ir_state->rx_kfifo_lock, flags); if (kfifo_len(&ir_state->rx_kfifo) >= CX25840_IR_RX_KFIFO_SIZE / 2) events |= V4L2_SUBDEV_IR_RX_FIFO_SERVICE_REQ; spin_unlock_irqrestore(&ir_state->rx_kfifo_lock, flags); if (events) v4l2_subdev_notify(sd, V4L2_SUBDEV_IR_RX_NOTIFY, &events); return 0; } /* Receiver */ static int cx25840_ir_rx_read(struct v4l2_subdev *sd, u8 *buf, size_t count, ssize_t *num) { struct cx25840_ir_state *ir_state = to_ir_state(sd); bool invert; u16 divider; unsigned int i, n; union cx25840_ir_fifo_rec *p; unsigned u, v, w; if (ir_state == NULL) return -ENODEV; invert = (bool) atomic_read(&ir_state->rx_invert); divider = (u16) atomic_read(&ir_state->rxclk_divider); n = count / sizeof(union cx25840_ir_fifo_rec) * sizeof(union cx25840_ir_fifo_rec); if (n == 0) { *num = 0; return 0; } n = kfifo_out_locked(&ir_state->rx_kfifo, buf, n, &ir_state->rx_kfifo_lock); n /= sizeof(union cx25840_ir_fifo_rec); *num = n * sizeof(union cx25840_ir_fifo_rec); for (p = (union cx25840_ir_fifo_rec *) buf, i = 0; i < n; p++, i++) { if ((p->hw_fifo_data & FIFO_RXTX_RTO) == FIFO_RXTX_RTO) { /* Assume RTO was because of no IR light input */ u = 0; w = 1; } else { u = (p->hw_fifo_data & FIFO_RXTX_LVL) ? 1 : 0; if (invert) u = u ? 0 : 1; w = 0; } v = (unsigned) pulse_width_count_to_ns( (u16)(p->hw_fifo_data & FIFO_RXTX), divider) / 1000; if (v > IR_MAX_DURATION) v = IR_MAX_DURATION; p->ir_core_data = (struct ir_raw_event) { .pulse = u, .duration = v, .timeout = w }; v4l2_dbg(2, ir_debug, sd, "rx read: %10u ns %s %s\n", v, u ? "mark" : "space", w ? 
"(timed out)" : ""); if (w) v4l2_dbg(2, ir_debug, sd, "rx read: end of rx\n"); } return 0; } static int cx25840_ir_rx_g_parameters(struct v4l2_subdev *sd, struct v4l2_subdev_ir_parameters *p) { struct cx25840_ir_state *ir_state = to_ir_state(sd); if (ir_state == NULL) return -ENODEV; mutex_lock(&ir_state->rx_params_lock); memcpy(p, &ir_state->rx_params, sizeof(struct v4l2_subdev_ir_parameters)); mutex_unlock(&ir_state->rx_params_lock); return 0; } static int cx25840_ir_rx_shutdown(struct v4l2_subdev *sd) { struct cx25840_ir_state *ir_state = to_ir_state(sd); struct i2c_client *c; if (ir_state == NULL) return -ENODEV; c = ir_state->c; mutex_lock(&ir_state->rx_params_lock); /* Disable or slow down all IR Rx circuits and counters */ irqenable_rx(sd, 0); control_rx_enable(c, false); control_rx_demodulation_enable(c, false); control_rx_s_edge_detection(c, CNTRL_EDG_NONE); filter_rx_s_min_width(c, 0); cx25840_write4(c, CX25840_IR_RXCLK_REG, RXCLK_RCD); ir_state->rx_params.shutdown = true; mutex_unlock(&ir_state->rx_params_lock); return 0; } static int cx25840_ir_rx_s_parameters(struct v4l2_subdev *sd, struct v4l2_subdev_ir_parameters *p) { struct cx25840_ir_state *ir_state = to_ir_state(sd); struct i2c_client *c; struct v4l2_subdev_ir_parameters *o; u16 rxclk_divider; if (ir_state == NULL) return -ENODEV; if (p->shutdown) return cx25840_ir_rx_shutdown(sd); if (p->mode != V4L2_SUBDEV_IR_MODE_PULSE_WIDTH) return -ENOSYS; c = ir_state->c; o = &ir_state->rx_params; mutex_lock(&ir_state->rx_params_lock); o->shutdown = p->shutdown; p->mode = V4L2_SUBDEV_IR_MODE_PULSE_WIDTH; o->mode = p->mode; p->bytes_per_data_element = sizeof(union cx25840_ir_fifo_rec); o->bytes_per_data_element = p->bytes_per_data_element; /* Before we tweak the hardware, we have to disable the receiver */ irqenable_rx(sd, 0); control_rx_enable(c, false); control_rx_demodulation_enable(c, p->modulation); o->modulation = p->modulation; if (p->modulation) { p->carrier_freq = rxclk_rx_s_carrier(c, p->carrier_freq, &rxclk_divider); o->carrier_freq = p->carrier_freq; p->duty_cycle = 50; o->duty_cycle = p->duty_cycle; control_rx_s_carrier_window(c, p->carrier_freq, &p->carrier_range_lower, &p->carrier_range_upper); o->carrier_range_lower = p->carrier_range_lower; o->carrier_range_upper = p->carrier_range_upper; p->max_pulse_width = (u32) pulse_width_count_to_ns(FIFO_RXTX, rxclk_divider); } else { p->max_pulse_width = rxclk_rx_s_max_pulse_width(c, p->max_pulse_width, &rxclk_divider); } o->max_pulse_width = p->max_pulse_width; atomic_set(&ir_state->rxclk_divider, rxclk_divider); p->noise_filter_min_width = filter_rx_s_min_width(c, p->noise_filter_min_width); o->noise_filter_min_width = p->noise_filter_min_width; p->resolution = clock_divider_to_resolution(rxclk_divider); o->resolution = p->resolution; /* FIXME - make this dependent on resolution for better performance */ control_rx_irq_watermark(c, RX_FIFO_HALF_FULL); control_rx_s_edge_detection(c, CNTRL_EDG_BOTH); o->invert_level = p->invert_level; atomic_set(&ir_state->rx_invert, p->invert_level); o->interrupt_enable = p->interrupt_enable; o->enable = p->enable; if (p->enable) { unsigned long flags; spin_lock_irqsave(&ir_state->rx_kfifo_lock, flags); kfifo_reset(&ir_state->rx_kfifo); spin_unlock_irqrestore(&ir_state->rx_kfifo_lock, flags); if (p->interrupt_enable) irqenable_rx(sd, IRQEN_RSE | IRQEN_RTE | IRQEN_ROE); control_rx_enable(c, p->enable); } mutex_unlock(&ir_state->rx_params_lock); return 0; } /* Transmitter */ static int cx25840_ir_tx_write(struct v4l2_subdev *sd, u8 *buf, 
size_t count, ssize_t *num) { struct cx25840_ir_state *ir_state = to_ir_state(sd); if (ir_state == NULL) return -ENODEV; #if 0 /* * FIXME - the code below is an incomplete and untested sketch of what * may need to be done. The critical part is to get 4 (or 8) pulses * from the tx_kfifo, or converted from ns to the proper units from the * input, and push them off to the hardware Tx FIFO right away, if the * HW TX fifo needs service. The rest can be pushed to the tx_kfifo in * a less critical timeframe. Also watch out for overruning the * tx_kfifo - don't let it happen and let the caller know not all his * pulses were written. */ u32 *ns_pulse = (u32 *) buf; unsigned int n; u32 fifo_pulse[FIFO_TX_DEPTH]; u32 mark; /* Compute how much we can fit in the tx kfifo */ n = CX25840_IR_TX_KFIFO_SIZE - kfifo_len(ir_state->tx_kfifo); n = min(n, (unsigned int) count); n /= sizeof(u32); /* FIXME - turn on Tx Fifo service interrupt * check hardware fifo level, and other stuff */ for (i = 0; i < n; ) { for (j = 0; j < FIFO_TX_DEPTH / 2 && i < n; j++) { mark = ns_pulse[i] & LEVEL_MASK; fifo_pulse[j] = ns_to_pulse_width_count( ns_pulse[i] & ~LEVEL_MASK, ir_state->txclk_divider); if (mark) fifo_pulse[j] &= FIFO_RXTX_LVL; i++; } kfifo_put(ir_state->tx_kfifo, (u8 *) fifo_pulse, j * sizeof(u32)); } *num = n * sizeof(u32); #else /* For now enable the Tx FIFO Service interrupt & pretend we did work */ irqenable_tx(sd, IRQEN_TSE); *num = count; #endif return 0; } static int cx25840_ir_tx_g_parameters(struct v4l2_subdev *sd, struct v4l2_subdev_ir_parameters *p) { struct cx25840_ir_state *ir_state = to_ir_state(sd); if (ir_state == NULL) return -ENODEV; mutex_lock(&ir_state->tx_params_lock); memcpy(p, &ir_state->tx_params, sizeof(struct v4l2_subdev_ir_parameters)); mutex_unlock(&ir_state->tx_params_lock); return 0; } static int cx25840_ir_tx_shutdown(struct v4l2_subdev *sd) { struct cx25840_ir_state *ir_state = to_ir_state(sd); struct i2c_client *c; if (ir_state == NULL) return -ENODEV; c = ir_state->c; mutex_lock(&ir_state->tx_params_lock); /* Disable or slow down all IR Tx circuits and counters */ irqenable_tx(sd, 0); control_tx_enable(c, false); control_tx_modulation_enable(c, false); cx25840_write4(c, CX25840_IR_TXCLK_REG, TXCLK_TCD); ir_state->tx_params.shutdown = true; mutex_unlock(&ir_state->tx_params_lock); return 0; } static int cx25840_ir_tx_s_parameters(struct v4l2_subdev *sd, struct v4l2_subdev_ir_parameters *p) { struct cx25840_ir_state *ir_state = to_ir_state(sd); struct i2c_client *c; struct v4l2_subdev_ir_parameters *o; u16 txclk_divider; if (ir_state == NULL) return -ENODEV; if (p->shutdown) return cx25840_ir_tx_shutdown(sd); if (p->mode != V4L2_SUBDEV_IR_MODE_PULSE_WIDTH) return -ENOSYS; c = ir_state->c; o = &ir_state->tx_params; mutex_lock(&ir_state->tx_params_lock); o->shutdown = p->shutdown; p->mode = V4L2_SUBDEV_IR_MODE_PULSE_WIDTH; o->mode = p->mode; p->bytes_per_data_element = sizeof(union cx25840_ir_fifo_rec); o->bytes_per_data_element = p->bytes_per_data_element; /* Before we tweak the hardware, we have to disable the transmitter */ irqenable_tx(sd, 0); control_tx_enable(c, false); control_tx_modulation_enable(c, p->modulation); o->modulation = p->modulation; if (p->modulation) { p->carrier_freq = txclk_tx_s_carrier(c, p->carrier_freq, &txclk_divider); o->carrier_freq = p->carrier_freq; p->duty_cycle = cduty_tx_s_duty_cycle(c, p->duty_cycle); o->duty_cycle = p->duty_cycle; p->max_pulse_width = (u32) pulse_width_count_to_ns(FIFO_RXTX, txclk_divider); } else { p->max_pulse_width = 
txclk_tx_s_max_pulse_width(c, p->max_pulse_width, &txclk_divider); } o->max_pulse_width = p->max_pulse_width; atomic_set(&ir_state->txclk_divider, txclk_divider); p->resolution = clock_divider_to_resolution(txclk_divider); o->resolution = p->resolution; /* FIXME - make this dependent on resolution for better performance */ control_tx_irq_watermark(c, TX_FIFO_HALF_EMPTY); control_tx_polarity_invert(c, p->invert_carrier_sense); o->invert_carrier_sense = p->invert_carrier_sense; /* * FIXME: we don't have hardware help for IO pin level inversion * here like we have on the CX23888. * Act on this with some mix of logical inversion of data levels, * carrier polarity, and carrier duty cycle. */ o->invert_level = p->invert_level; o->interrupt_enable = p->interrupt_enable; o->enable = p->enable; if (p->enable) { /* reset tx_fifo here */ if (p->interrupt_enable) irqenable_tx(sd, IRQEN_TSE); control_tx_enable(c, p->enable); } mutex_unlock(&ir_state->tx_params_lock); return 0; } /* * V4L2 Subdevice Core Ops support */ int cx25840_ir_log_status(struct v4l2_subdev *sd) { struct cx25840_state *state = to_state(sd); struct i2c_client *c = state->c; char *s; int i, j; u32 cntrl, txclk, rxclk, cduty, stats, irqen, filtr; /* The CX23888 chip doesn't have an IR controller on the A/V core */ if (is_cx23888(state)) return 0; cntrl = cx25840_read4(c, CX25840_IR_CNTRL_REG); txclk = cx25840_read4(c, CX25840_IR_TXCLK_REG) & TXCLK_TCD; rxclk = cx25840_read4(c, CX25840_IR_RXCLK_REG) & RXCLK_RCD; cduty = cx25840_read4(c, CX25840_IR_CDUTY_REG) & CDUTY_CDC; stats = cx25840_read4(c, CX25840_IR_STATS_REG); irqen = cx25840_read4(c, CX25840_IR_IRQEN_REG); if (is_cx23885(state) || is_cx23887(state)) irqen ^= IRQEN_MSK; filtr = cx25840_read4(c, CX25840_IR_FILTR_REG) & FILTR_LPF; v4l2_info(sd, "IR Receiver:\n"); v4l2_info(sd, "\tEnabled: %s\n", cntrl & CNTRL_RXE ? "yes" : "no"); v4l2_info(sd, "\tDemodulation from a carrier: %s\n", cntrl & CNTRL_DMD ? "enabled" : "disabled"); v4l2_info(sd, "\tFIFO: %s\n", cntrl & CNTRL_RFE ? "enabled" : "disabled"); switch (cntrl & CNTRL_EDG) { case CNTRL_EDG_NONE: s = "disabled"; break; case CNTRL_EDG_FALL: s = "falling edge"; break; case CNTRL_EDG_RISE: s = "rising edge"; break; case CNTRL_EDG_BOTH: s = "rising & falling edges"; break; default: s = "??? edge"; break; } v4l2_info(sd, "\tPulse timers' start/stop trigger: %s\n", s); v4l2_info(sd, "\tFIFO data on pulse timer overflow: %s\n", cntrl & CNTRL_R ? "not loaded" : "overflow marker"); v4l2_info(sd, "\tFIFO interrupt watermark: %s\n", cntrl & CNTRL_RIC ? "not empty" : "half full or greater"); v4l2_info(sd, "\tLoopback mode: %s\n", cntrl & CNTRL_LBM ? "loopback active" : "normal receive"); if (cntrl & CNTRL_DMD) { v4l2_info(sd, "\tExpected carrier (16 clocks): %u Hz\n", clock_divider_to_carrier_freq(rxclk)); switch (cntrl & CNTRL_WIN) { case CNTRL_WIN_3_3: i = 3; j = 3; break; case CNTRL_WIN_4_3: i = 4; j = 3; break; case CNTRL_WIN_3_4: i = 3; j = 4; break; case CNTRL_WIN_4_4: i = 4; j = 4; break; default: i = 0; j = 0; break; } v4l2_info(sd, "\tNext carrier edge window: 16 clocks -%1d/+%1d, %u to %u Hz\n", i, j, clock_divider_to_freq(rxclk, 16 + j), clock_divider_to_freq(rxclk, 16 - i)); } v4l2_info(sd, "\tMax measurable pulse width: %u us, %llu ns\n", pulse_width_count_to_us(FIFO_RXTX, rxclk), pulse_width_count_to_ns(FIFO_RXTX, rxclk)); v4l2_info(sd, "\tLow pass filter: %s\n", filtr ? 
"enabled" : "disabled"); if (filtr) v4l2_info(sd, "\tMin acceptable pulse width (LPF): %u us, %u ns\n", lpf_count_to_us(filtr), lpf_count_to_ns(filtr)); v4l2_info(sd, "\tPulse width timer timed-out: %s\n", stats & STATS_RTO ? "yes" : "no"); v4l2_info(sd, "\tPulse width timer time-out intr: %s\n", irqen & IRQEN_RTE ? "enabled" : "disabled"); v4l2_info(sd, "\tFIFO overrun: %s\n", stats & STATS_ROR ? "yes" : "no"); v4l2_info(sd, "\tFIFO overrun interrupt: %s\n", irqen & IRQEN_ROE ? "enabled" : "disabled"); v4l2_info(sd, "\tBusy: %s\n", stats & STATS_RBY ? "yes" : "no"); v4l2_info(sd, "\tFIFO service requested: %s\n", stats & STATS_RSR ? "yes" : "no"); v4l2_info(sd, "\tFIFO service request interrupt: %s\n", irqen & IRQEN_RSE ? "enabled" : "disabled"); v4l2_info(sd, "IR Transmitter:\n"); v4l2_info(sd, "\tEnabled: %s\n", cntrl & CNTRL_TXE ? "yes" : "no"); v4l2_info(sd, "\tModulation onto a carrier: %s\n", cntrl & CNTRL_MOD ? "enabled" : "disabled"); v4l2_info(sd, "\tFIFO: %s\n", cntrl & CNTRL_TFE ? "enabled" : "disabled"); v4l2_info(sd, "\tFIFO interrupt watermark: %s\n", cntrl & CNTRL_TIC ? "not empty" : "half full or less"); v4l2_info(sd, "\tCarrier polarity: %s\n", cntrl & CNTRL_CPL ? "space:burst mark:noburst" : "space:noburst mark:burst"); if (cntrl & CNTRL_MOD) { v4l2_info(sd, "\tCarrier (16 clocks): %u Hz\n", clock_divider_to_carrier_freq(txclk)); v4l2_info(sd, "\tCarrier duty cycle: %2u/16\n", cduty + 1); } v4l2_info(sd, "\tMax pulse width: %u us, %llu ns\n", pulse_width_count_to_us(FIFO_RXTX, txclk), pulse_width_count_to_ns(FIFO_RXTX, txclk)); v4l2_info(sd, "\tBusy: %s\n", stats & STATS_TBY ? "yes" : "no"); v4l2_info(sd, "\tFIFO service requested: %s\n", stats & STATS_TSR ? "yes" : "no"); v4l2_info(sd, "\tFIFO service request interrupt: %s\n", irqen & IRQEN_TSE ? 
"enabled" : "disabled"); return 0; } const struct v4l2_subdev_ir_ops cx25840_ir_ops = { .rx_read = cx25840_ir_rx_read, .rx_g_parameters = cx25840_ir_rx_g_parameters, .rx_s_parameters = cx25840_ir_rx_s_parameters, .tx_write = cx25840_ir_tx_write, .tx_g_parameters = cx25840_ir_tx_g_parameters, .tx_s_parameters = cx25840_ir_tx_s_parameters, }; static const struct v4l2_subdev_ir_parameters default_rx_params = { .bytes_per_data_element = sizeof(union cx25840_ir_fifo_rec), .mode = V4L2_SUBDEV_IR_MODE_PULSE_WIDTH, .enable = false, .interrupt_enable = false, .shutdown = true, .modulation = true, .carrier_freq = 36000, /* 36 kHz - RC-5, and RC-6 carrier */ /* RC-5: 666,667 ns = 1/36 kHz * 32 cycles * 1 mark * 0.75 */ /* RC-6: 333,333 ns = 1/36 kHz * 16 cycles * 1 mark * 0.75 */ .noise_filter_min_width = 333333, /* ns */ .carrier_range_lower = 35000, .carrier_range_upper = 37000, .invert_level = false, }; static const struct v4l2_subdev_ir_parameters default_tx_params = { .bytes_per_data_element = sizeof(union cx25840_ir_fifo_rec), .mode = V4L2_SUBDEV_IR_MODE_PULSE_WIDTH, .enable = false, .interrupt_enable = false, .shutdown = true, .modulation = true, .carrier_freq = 36000, /* 36 kHz - RC-5 carrier */ .duty_cycle = 25, /* 25 % - RC-5 carrier */ .invert_level = false, .invert_carrier_sense = false, }; int cx25840_ir_probe(struct v4l2_subdev *sd) { struct cx25840_state *state = to_state(sd); struct cx25840_ir_state *ir_state; struct v4l2_subdev_ir_parameters default_params; /* Only init the IR controller for the CX2388[57] AV Core for now */ if (!(is_cx23885(state) || is_cx23887(state))) return 0; ir_state = devm_kzalloc(&state->c->dev, sizeof(*ir_state), GFP_KERNEL); if (ir_state == NULL) return -ENOMEM; spin_lock_init(&ir_state->rx_kfifo_lock); if (kfifo_alloc(&ir_state->rx_kfifo, CX25840_IR_RX_KFIFO_SIZE, GFP_KERNEL)) return -ENOMEM; ir_state->c = state->c; state->ir_state = ir_state; /* Ensure no interrupts arrive yet */ if (is_cx23885(state) || is_cx23887(state)) cx25840_write4(ir_state->c, CX25840_IR_IRQEN_REG, IRQEN_MSK); else cx25840_write4(ir_state->c, CX25840_IR_IRQEN_REG, 0); mutex_init(&ir_state->rx_params_lock); default_params = default_rx_params; v4l2_subdev_call(sd, ir, rx_s_parameters, &default_params); mutex_init(&ir_state->tx_params_lock); default_params = default_tx_params; v4l2_subdev_call(sd, ir, tx_s_parameters, &default_params); return 0; } int cx25840_ir_remove(struct v4l2_subdev *sd) { struct cx25840_state *state = to_state(sd); struct cx25840_ir_state *ir_state = to_ir_state(sd); if (ir_state == NULL) return -ENODEV; cx25840_ir_rx_shutdown(sd); cx25840_ir_tx_shutdown(sd); kfifo_free(&ir_state->rx_kfifo); state->ir_state = NULL; return 0; }
/* * net/tipc/addr.h: Include file for TIPC address utility routines * * Copyright (c) 2000-2006, 2018, Ericsson AB * Copyright (c) 2004-2005, Wind River Systems * Copyright (c) 2020-2021, Red Hat Inc * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the names of the copyright holders nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #ifndef _TIPC_ADDR_H #define _TIPC_ADDR_H #include <linux/types.h> #include <linux/tipc.h> #include <net/net_namespace.h> #include <net/netns/generic.h> #include "core.h" /* Struct tipc_uaddr: internal version of struct sockaddr_tipc. * Must be kept aligned both regarding field positions and size.
*/ struct tipc_uaddr { unsigned short family; unsigned char addrtype; signed char scope; union { struct { struct tipc_service_addr sa; u32 lookup_node; }; struct tipc_service_range sr; struct tipc_socket_addr sk; }; }; static inline void tipc_uaddr(struct tipc_uaddr *ua, u32 atype, u32 scope, u32 type, u32 lower, u32 upper) { ua->family = AF_TIPC; ua->addrtype = atype; ua->scope = scope; ua->sr.type = type; ua->sr.lower = lower; ua->sr.upper = upper; } static inline bool tipc_uaddr_valid(struct tipc_uaddr *ua, int len) { u32 atype; if (len < sizeof(struct sockaddr_tipc)) return false; atype = ua->addrtype; if (ua->family != AF_TIPC) return false; if (atype == TIPC_SERVICE_ADDR || atype == TIPC_SOCKET_ADDR) return true; if (atype == TIPC_SERVICE_RANGE) return ua->sr.upper >= ua->sr.lower; return false; } static inline u32 tipc_own_addr(struct net *net) { return tipc_net(net)->node_addr; } static inline u8 *tipc_own_id(struct net *net) { struct tipc_net *tn = tipc_net(net); if (!strlen(tn->node_id_string)) return NULL; return tn->node_id; } static inline char *tipc_own_id_string(struct net *net) { return tipc_net(net)->node_id_string; } static inline u32 tipc_cluster_mask(u32 addr) { return addr & TIPC_ZONE_CLUSTER_MASK; } static inline int tipc_node2scope(u32 node) { return node ? TIPC_NODE_SCOPE : TIPC_CLUSTER_SCOPE; } static inline int tipc_scope2node(struct net *net, int sc) { return sc != TIPC_NODE_SCOPE ? 0 : tipc_own_addr(net); } static inline int in_own_node(struct net *net, u32 addr) { return addr == tipc_own_addr(net) || !addr; } bool tipc_in_scope(bool legacy_format, u32 domain, u32 addr); void tipc_set_node_id(struct net *net, u8 *id); void tipc_set_node_addr(struct net *net, u32 addr); char *tipc_nodeid2string(char *str, u8 *id); #endif
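struct tipc_uaddr above is the in-kernel mirror of the UAPI struct sockaddr_tipc, and tipc_uaddr_valid() encodes the basic sanity checks applied to addresses coming from user space: family AF_TIPC, one of the three address types, and for TIPC_SERVICE_RANGE an upper bound that is not below the lower bound. A minimal userspace sketch (illustrative only; the service type 1000 and the range are arbitrary, and it assumes TIPC support is enabled) shows the corresponding sockaddr_tipc being built and bound:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/tipc.h>

int main(void)
{
	struct sockaddr_tipc addr;
	int sd = socket(AF_TIPC, SOCK_RDM, 0);

	if (sd < 0) {
		perror("socket(AF_TIPC)");	/* TIPC not built in or module not loaded */
		return 1;
	}

	memset(&addr, 0, sizeof(addr));
	addr.family = AF_TIPC;			/* checked by tipc_uaddr_valid() */
	addr.addrtype = TIPC_SERVICE_RANGE;	/* lands in ua->sr on the kernel side */
	addr.scope = TIPC_CLUSTER_SCOPE;
	addr.addr.nameseq.type = 1000;		/* arbitrary example service type */
	addr.addr.nameseq.lower = 1;
	addr.addr.nameseq.upper = 100;		/* must be >= lower to validate */

	if (bind(sd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		perror("bind");
	else
		printf("bound to service range {1000, 1..100}\n");

	close(sd);
	return 0;
}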
/* SPDX-License-Identifier: GPL-2.0 */ /* * Latched RB-trees * * Copyright (C) 2015 Intel Corp., Peter Zijlstra <peterz@infradead.org> * * Since RB-trees have non-atomic modifications they're not immediately suited * for RCU/lockless queries. Even though we made RB-tree lookups non-fatal for * lockless lookups; we cannot guarantee they return a correct result. * * The simplest solution is a seqlock + RB-tree, this will allow lockless * lookups; but has the constraint (inherent to the seqlock) that read sides * cannot nest in write sides. * * If we need to allow unconditional lookups (say as required for NMI context * usage) we need a more complex setup; this data structure provides this by * employing the latch technique -- see @write_seqcount_latch_begin -- to * implement a latched RB-tree which does allow for unconditional lookups by * virtue of always having (at least) one stable copy of the tree. * * However, while we have the guarantee that there is at all times one stable * copy, this does not guarantee an iteration will not observe modifications. * What might have been a stable copy at the start of the iteration, need not * remain so for the duration of the iteration. * * Therefore, this does require a lockless RB-tree iteration to be non-fatal; * see the comment in lib/rbtree.c. Note however that we only require the first * condition -- not seeing partial stores -- because the latch thing isolates * us from loops. If we were to interrupt a modification the lookup would be * pointed at the stable tree and complete while the modification was halted. */ #ifndef RB_TREE_LATCH_H #define RB_TREE_LATCH_H #include <linux/rbtree.h> #include <linux/seqlock.h> #include <linux/rcupdate.h> struct latch_tree_node { struct rb_node node[2]; }; struct latch_tree_root { seqcount_latch_t seq; struct rb_root tree[2]; }; /** * latch_tree_ops - operators to define the tree order * @less: used for insertion; provides the (partial) order between two elements. * @comp: used for lookups; provides the order between the search key and an element. * * The operators are related like: * * comp(a->key,b) < 0 := less(a,b) * comp(a->key,b) > 0 := less(b,a) * comp(a->key,b) == 0 := !less(a,b) && !less(b,a) * * If these operators define a partial order on the elements we make no * guarantee on which of the elements matching the key is found. See * latch_tree_find().
*/ struct latch_tree_ops { bool (*less)(struct latch_tree_node *a, struct latch_tree_node *b); int (*comp)(void *key, struct latch_tree_node *b); }; static __always_inline struct latch_tree_node * __lt_from_rb(struct rb_node *node, int idx) { return container_of(node, struct latch_tree_node, node[idx]); } static __always_inline void __lt_insert(struct latch_tree_node *ltn, struct latch_tree_root *ltr, int idx, bool (*less)(struct latch_tree_node *a, struct latch_tree_node *b)) { struct rb_root *root = &ltr->tree[idx]; struct rb_node **link = &root->rb_node; struct rb_node *node = &ltn->node[idx]; struct rb_node *parent = NULL; struct latch_tree_node *ltp; while (*link) { parent = *link; ltp = __lt_from_rb(parent, idx); if (less(ltn, ltp)) link = &parent->rb_left; else link = &parent->rb_right; } rb_link_node_rcu(node, parent, link); rb_insert_color(node, root); } static __always_inline void __lt_erase(struct latch_tree_node *ltn, struct latch_tree_root *ltr, int idx) { rb_erase(&ltn->node[idx], &ltr->tree[idx]); } static __always_inline struct latch_tree_node * __lt_find(void *key, struct latch_tree_root *ltr, int idx, int (*comp)(void *key, struct latch_tree_node *node)) { struct rb_node *node = rcu_dereference_raw(ltr->tree[idx].rb_node); struct latch_tree_node *ltn; int c; while (node) { ltn = __lt_from_rb(node, idx); c = comp(key, ltn); if (c < 0) node = rcu_dereference_raw(node->rb_left); else if (c > 0) node = rcu_dereference_raw(node->rb_right); else return ltn; } return NULL; } /** * latch_tree_insert() - insert @node into the trees @root * @node: nodes to insert * @root: trees to insert @node into * @ops: operators defining the node order * * It inserts @node into @root in an ordered fashion such that we can always * observe one complete tree. See the comment for write_seqcount_latch_begin(). * * The inserts use rcu_assign_pointer() to publish the element such that the * tree structure is stored before we can observe the new @node. * * All modifications (latch_tree_insert, latch_tree_remove) are assumed to be * serialized. */ static __always_inline void latch_tree_insert(struct latch_tree_node *node, struct latch_tree_root *root, const struct latch_tree_ops *ops) { write_seqcount_latch_begin(&root->seq); __lt_insert(node, root, 0, ops->less); write_seqcount_latch(&root->seq); __lt_insert(node, root, 1, ops->less); write_seqcount_latch_end(&root->seq); } /** * latch_tree_erase() - removes @node from the trees @root * @node: nodes to remote * @root: trees to remove @node from * @ops: operators defining the node order * * Removes @node from the trees @root in an ordered fashion such that we can * always observe one complete tree. See the comment for * write_seqcount_latch_begin(). * * It is assumed that @node will observe one RCU quiescent state before being * reused of freed. * * All modifications (latch_tree_insert, latch_tree_remove) are assumed to be * serialized. */ static __always_inline void latch_tree_erase(struct latch_tree_node *node, struct latch_tree_root *root, const struct latch_tree_ops *ops) { write_seqcount_latch_begin(&root->seq); __lt_erase(node, root, 0); write_seqcount_latch(&root->seq); __lt_erase(node, root, 1); write_seqcount_latch_end(&root->seq); } /** * latch_tree_find() - find the node matching @key in the trees @root * @key: search key * @root: trees to search for @key * @ops: operators defining the node order * * Does a lockless lookup in the trees @root for the node matching @key. 
* * It is assumed that this is called while holding the appropriate RCU read * side lock. * * If the operators define a partial order on the elements (there are multiple * elements which have the same key value) it is undefined which of these * elements will be found. Nor is it possible to iterate the tree to find * further elements with the same key value. * * Returns: a pointer to the node matching @key or NULL. */ static __always_inline struct latch_tree_node * latch_tree_find(void *key, struct latch_tree_root *root, const struct latch_tree_ops *ops) { struct latch_tree_node *node; unsigned int seq; do { seq = read_seqcount_latch(&root->seq); node = __lt_find(key, root, seq & 1, ops->comp); } while (read_seqcount_latch_retry(&root->seq, seq)); return node; } #endif /* RB_TREE_LATCH_H */
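The kernel-doc above pins down how the two operators must relate. The sketch below shows the usual wiring for a hypothetical user of this API; the names (range_node, range_ops, and so on) are invented for illustration, while the overall pattern (an object keyed by an address range, writers serialized by a lock, lockless lookups under RCU or from NMI) follows in-kernel users of the latch tree.

#include <linux/kernel.h>
#include <linux/rbtree_latch.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>

struct range_node {
	unsigned long start;
	unsigned long size;
	struct latch_tree_node lt;	/* two rb_nodes, one per tree copy */
};

static struct range_node *to_range(struct latch_tree_node *n)
{
	return container_of(n, struct range_node, lt);
}

/* less(a, b): insertion order by start address */
static bool range_less(struct latch_tree_node *a, struct latch_tree_node *b)
{
	return to_range(a)->start < to_range(b)->start;
}

/* comp(key, b): 0 while the address falls inside b's range, which is
 * consistent with range_less() as long as ranges do not overlap. */
static int range_comp(void *key, struct latch_tree_node *b)
{
	unsigned long addr = (unsigned long)key;
	struct range_node *rn = to_range(b);

	if (addr < rn->start)
		return -1;
	if (addr >= rn->start + rn->size)
		return 1;
	return 0;
}

static const struct latch_tree_ops range_ops = {
	.less = range_less,
	.comp = range_comp,
};

static struct latch_tree_root range_tree = {
	.seq = SEQCNT_LATCH_ZERO(range_tree.seq),
};
static DEFINE_SPINLOCK(range_lock);	/* serializes all inserts/erases */

static void range_add(struct range_node *rn)
{
	spin_lock(&range_lock);
	latch_tree_insert(&rn->lt, &range_tree, &range_ops);
	spin_unlock(&range_lock);
}

/* Lockless lookup; the caller must be in an RCU read-side critical
 * section (or a context such as NMI where that is implied). */
static struct range_node *range_lookup(unsigned long addr)
{
	struct latch_tree_node *n;

	n = latch_tree_find((void *)addr, &range_tree, &range_ops);
	return n ? to_range(n) : NULL;
}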
// SPDX-License-Identifier: GPL-2.0-only /* * fs/dax.c - Direct Access filesystem code * Copyright (c) 2013-2014 Intel Corporation * Author: Matthew Wilcox <matthew.r.wilcox@intel.com> * Author: Ross Zwisler <ross.zwisler@linux.intel.com> */ #include <linux/atomic.h> #include <linux/blkdev.h> #include <linux/buffer_head.h> #include
<linux/dax.h> #include <linux/fs.h> #include <linux/highmem.h> #include <linux/memcontrol.h> #include <linux/mm.h> #include <linux/mutex.h> #include <linux/pagevec.h> #include <linux/sched.h> #include <linux/sched/signal.h> #include <linux/uio.h> #include <linux/vmstat.h> #include <linux/pfn_t.h> #include <linux/sizes.h> #include <linux/mmu_notifier.h> #include <linux/iomap.h> #include <linux/rmap.h> #include <asm/pgalloc.h> #define CREATE_TRACE_POINTS #include <trace/events/fs_dax.h> /* We choose 4096 entries - same as per-zone page wait tables */ #define DAX_WAIT_TABLE_BITS 12 #define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS) /* The 'colour' (ie low bits) within a PMD of a page offset. */ #define PG_PMD_COLOUR ((PMD_SIZE >> PAGE_SHIFT) - 1) #define PG_PMD_NR (PMD_SIZE >> PAGE_SHIFT) static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES]; static int __init init_dax_wait_table(void) { int i; for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++) init_waitqueue_head(wait_table + i); return 0; } fs_initcall(init_dax_wait_table); /* * DAX pagecache entries use XArray value entries so they can't be mistaken * for pages. We use one bit for locking, one bit for the entry size (PMD) * and two more to tell us if the entry is a zero page or an empty entry that * is just used for locking. In total four special bits. * * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the ZERO_PAGE * and EMPTY bits aren't set the entry is a normal DAX entry with a filesystem * block allocation. */ #define DAX_SHIFT (4) #define DAX_LOCKED (1UL << 0) #define DAX_PMD (1UL << 1) #define DAX_ZERO_PAGE (1UL << 2) #define DAX_EMPTY (1UL << 3) static unsigned long dax_to_pfn(void *entry) { return xa_to_value(entry) >> DAX_SHIFT; } static struct folio *dax_to_folio(void *entry) { return page_folio(pfn_to_page(dax_to_pfn(entry))); } static void *dax_make_entry(pfn_t pfn, unsigned long flags) { return xa_mk_value(flags | (pfn_t_to_pfn(pfn) << DAX_SHIFT)); } static bool dax_is_locked(void *entry) { return xa_to_value(entry) & DAX_LOCKED; } static unsigned int dax_entry_order(void *entry) { if (xa_to_value(entry) & DAX_PMD) return PMD_ORDER; return 0; } static unsigned long dax_is_pmd_entry(void *entry) { return xa_to_value(entry) & DAX_PMD; } static bool dax_is_pte_entry(void *entry) { return !(xa_to_value(entry) & DAX_PMD); } static int dax_is_zero_entry(void *entry) { return xa_to_value(entry) & DAX_ZERO_PAGE; } static int dax_is_empty_entry(void *entry) { return xa_to_value(entry) & DAX_EMPTY; } /* * true if the entry that was found is of a smaller order than the entry * we were looking for */ static bool dax_is_conflict(void *entry) { return entry == XA_RETRY_ENTRY; } /* * DAX page cache entry locking */ struct exceptional_entry_key { struct xarray *xa; pgoff_t entry_start; }; struct wait_exceptional_entry_queue { wait_queue_entry_t wait; struct exceptional_entry_key key; }; /** * enum dax_wake_mode: waitqueue wakeup behaviour * @WAKE_ALL: wake all waiters in the waitqueue * @WAKE_NEXT: wake only the first waiter in the waitqueue */ enum dax_wake_mode { WAKE_ALL, WAKE_NEXT, }; static wait_queue_head_t *dax_entry_waitqueue(struct xa_state *xas, void *entry, struct exceptional_entry_key *key) { unsigned long hash; unsigned long index = xas->xa_index; /* * If 'entry' is a PMD, align the 'index' that we use for the wait * queue to the start of that PMD. This ensures that all offsets in * the range covered by the PMD map to the same bit lock. 
*/ if (dax_is_pmd_entry(entry)) index &= ~PG_PMD_COLOUR; key->xa = xas->xa; key->entry_start = index; hash = hash_long((unsigned long)xas->xa ^ index, DAX_WAIT_TABLE_BITS); return wait_table + hash; } static int wake_exceptional_entry_func(wait_queue_entry_t *wait, unsigned int mode, int sync, void *keyp) { struct exceptional_entry_key *key = keyp; struct wait_exceptional_entry_queue *ewait = container_of(wait, struct wait_exceptional_entry_queue, wait); if (key->xa != ewait->key.xa || key->entry_start != ewait->key.entry_start) return 0; return autoremove_wake_function(wait, mode, sync, NULL); } /* * @entry may no longer be the entry at the index in the mapping. * The important information it's conveying is whether the entry at * this index used to be a PMD entry. */ static void dax_wake_entry(struct xa_state *xas, void *entry, enum dax_wake_mode mode) { struct exceptional_entry_key key; wait_queue_head_t *wq; wq = dax_entry_waitqueue(xas, entry, &key); /* * Checking for locked entry and prepare_to_wait_exclusive() happens * under the i_pages lock, ditto for entry handling in our callers. * So at this point all tasks that could have seen our entry locked * must be in the waitqueue and the following check will see them. */ if (waitqueue_active(wq)) __wake_up(wq, TASK_NORMAL, mode == WAKE_ALL ? 0 : 1, &key); } /* * Look up entry in page cache, wait for it to become unlocked if it * is a DAX entry and return it. The caller must subsequently call * put_unlocked_entry() if it did not lock the entry or dax_unlock_entry() * if it did. The entry returned may have a larger order than @order. * If @order is larger than the order of the entry found in i_pages, this * function returns a dax_is_conflict entry. * * Must be called with the i_pages lock held. */ static void *get_next_unlocked_entry(struct xa_state *xas, unsigned int order) { void *entry; struct wait_exceptional_entry_queue ewait; wait_queue_head_t *wq; init_wait(&ewait.wait); ewait.wait.func = wake_exceptional_entry_func; for (;;) { entry = xas_find_conflict(xas); if (!entry || WARN_ON_ONCE(!xa_is_value(entry))) return entry; if (dax_entry_order(entry) < order) return XA_RETRY_ENTRY; if (!dax_is_locked(entry)) return entry; wq = dax_entry_waitqueue(xas, entry, &ewait.key); prepare_to_wait_exclusive(wq, &ewait.wait, TASK_UNINTERRUPTIBLE); xas_unlock_irq(xas); xas_reset(xas); schedule(); finish_wait(wq, &ewait.wait); xas_lock_irq(xas); } } /* * Wait for the given entry to become unlocked. Caller must hold the i_pages * lock and call either put_unlocked_entry() if it did not lock the entry or * dax_unlock_entry() if it did. Returns an unlocked entry if still present. */ static void *wait_entry_unlocked_exclusive(struct xa_state *xas, void *entry) { struct wait_exceptional_entry_queue ewait; wait_queue_head_t *wq; init_wait(&ewait.wait); ewait.wait.func = wake_exceptional_entry_func; while (unlikely(dax_is_locked(entry))) { wq = dax_entry_waitqueue(xas, entry, &ewait.key); prepare_to_wait_exclusive(wq, &ewait.wait, TASK_UNINTERRUPTIBLE); xas_reset(xas); xas_unlock_irq(xas); schedule(); finish_wait(wq, &ewait.wait); xas_lock_irq(xas); entry = xas_load(xas); } if (xa_is_internal(entry)) return NULL; return entry; } /* * The only thing keeping the address space around is the i_pages lock * (it's cycled in clear_inode() after removing the entries from i_pages) * After we call xas_unlock_irq(), we cannot touch xas->xa. 
*/ static void wait_entry_unlocked(struct xa_state *xas, void *entry) { struct wait_exceptional_entry_queue ewait; wait_queue_head_t *wq; init_wait(&ewait.wait); ewait.wait.func = wake_exceptional_entry_func; wq = dax_entry_waitqueue(xas, entry, &ewait.key); /* * Unlike get_next_unlocked_entry() there is no guarantee that this * path ever successfully retrieves an unlocked entry before an * inode dies. Perform a non-exclusive wait in case this path * never successfully performs its own wake up. */ prepare_to_wait(wq, &ewait.wait, TASK_UNINTERRUPTIBLE); xas_unlock_irq(xas); schedule(); finish_wait(wq, &ewait.wait); } static void put_unlocked_entry(struct xa_state *xas, void *entry, enum dax_wake_mode mode) { if (entry && !dax_is_conflict(entry)) dax_wake_entry(xas, entry, mode); } /* * We used the xa_state to get the entry, but then we locked the entry and * dropped the xa_lock, so we know the xa_state is stale and must be reset * before use. */ static void dax_unlock_entry(struct xa_state *xas, void *entry) { void *old; BUG_ON(dax_is_locked(entry)); xas_reset(xas); xas_lock_irq(xas); old = xas_store(xas, entry); xas_unlock_irq(xas); BUG_ON(!dax_is_locked(old)); dax_wake_entry(xas, entry, WAKE_NEXT); } /* * Return: The entry stored at this location before it was locked. */ static void *dax_lock_entry(struct xa_state *xas, void *entry) { unsigned long v = xa_to_value(entry); return xas_store(xas, xa_mk_value(v | DAX_LOCKED)); } static unsigned long dax_entry_size(void *entry) { if (dax_is_zero_entry(entry)) return 0; else if (dax_is_empty_entry(entry)) return 0; else if (dax_is_pmd_entry(entry)) return PMD_SIZE; else return PAGE_SIZE; } /* * A DAX folio is considered shared if it has no mapping set and ->share (which * shares the ->index field) is non-zero. Note this may return false even if the * page is shared between multiple files but has not yet actually been mapped * into multiple address spaces. */ static inline bool dax_folio_is_shared(struct folio *folio) { return !folio->mapping && folio->share; } /* * When it is called by dax_insert_entry(), the shared flag will indicate * whether this entry is shared by multiple files. If the page has not * previously been associated with any mappings the ->mapping and ->index * fields will be set. If it has already been associated with a mapping * the mapping will be cleared and the share count set. It's then up to * reverse map users like memory_failure() to call back into the filesystem to * recover ->mapping and ->index information. For example by implementing * dax_holder_operations. */ static void dax_folio_make_shared(struct folio *folio) { /* * folio is not currently shared so mark it as shared by clearing * folio->mapping. */ folio->mapping = NULL; /* * folio has previously been mapped into one address space so set the * share count. */ folio->share = 1; } static inline unsigned long dax_folio_put(struct folio *folio) { unsigned long ref; int order, i; if (!dax_folio_is_shared(folio)) ref = 0; else ref = --folio->share; if (ref) return ref; folio->mapping = NULL; order = folio_order(folio); if (!order) return 0; folio_reset_order(folio); for (i = 0; i < (1UL << order); i++) { struct dev_pagemap *pgmap = page_pgmap(&folio->page); struct page *page = folio_page(folio, i); struct folio *new_folio = (struct folio *)page; ClearPageHead(page); clear_compound_head(page); new_folio->mapping = NULL; /* * Reset pgmap which was over-written by * prep_compound_page(). 
*/ new_folio->pgmap = pgmap; new_folio->share = 0; WARN_ON_ONCE(folio_ref_count(new_folio)); } return ref; } static void dax_folio_init(void *entry) { struct folio *folio = dax_to_folio(entry); int order = dax_entry_order(entry); /* * Folio should have been split back to order-0 pages in * dax_folio_put() when they were removed from their * final mapping. */ WARN_ON_ONCE(folio_order(folio)); if (order > 0) { prep_compound_page(&folio->page, order); if (order > 1) INIT_LIST_HEAD(&folio->_deferred_list); WARN_ON_ONCE(folio_ref_count(folio)); } } static void dax_associate_entry(void *entry, struct address_space *mapping, struct vm_area_struct *vma, unsigned long address, bool shared) { unsigned long size = dax_entry_size(entry), index; struct folio *folio = dax_to_folio(entry); if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) return; if (IS_ENABLED(CONFIG_FS_DAX_LIMITED)) return; index = linear_page_index(vma, address & ~(size - 1)); if (shared && (folio->mapping || dax_folio_is_shared(folio))) { if (folio->mapping) dax_folio_make_shared(folio); WARN_ON_ONCE(!folio->share); WARN_ON_ONCE(dax_entry_order(entry) != folio_order(folio)); folio->share++; } else { WARN_ON_ONCE(folio->mapping); dax_folio_init(entry); folio = dax_to_folio(entry); folio->mapping = mapping; folio->index = index; } } static void dax_disassociate_entry(void *entry, struct address_space *mapping, bool trunc) { struct folio *folio = dax_to_folio(entry); if (IS_ENABLED(CONFIG_FS_DAX_LIMITED)) return; if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) return; dax_folio_put(folio); } static struct page *dax_busy_page(void *entry) { struct folio *folio = dax_to_folio(entry); if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) return NULL; if (folio_ref_count(folio) - folio_mapcount(folio)) return &folio->page; else return NULL; } /** * dax_lock_folio - Lock the DAX entry corresponding to a folio * @folio: The folio whose entry we want to lock * * Context: Process context. * Return: A cookie to pass to dax_unlock_folio() or 0 if the entry could * not be locked. */ dax_entry_t dax_lock_folio(struct folio *folio) { XA_STATE(xas, NULL, 0); void *entry; /* Ensure folio->mapping isn't freed while we look at it */ rcu_read_lock(); for (;;) { struct address_space *mapping = READ_ONCE(folio->mapping); entry = NULL; if (!mapping || !dax_mapping(mapping)) break; /* * In the device-dax case there's no need to lock, a * struct dev_pagemap pin is sufficient to keep the * inode alive, and we assume we have dev_pagemap pin * otherwise we would not have a valid pfn_to_page() * translation. 
*/ entry = (void *)~0UL; if (S_ISCHR(mapping->host->i_mode)) break; xas.xa = &mapping->i_pages; xas_lock_irq(&xas); if (mapping != folio->mapping) { xas_unlock_irq(&xas); continue; } xas_set(&xas, folio->index); entry = xas_load(&xas); if (dax_is_locked(entry)) { rcu_read_unlock(); wait_entry_unlocked(&xas, entry); rcu_read_lock(); continue; } dax_lock_entry(&xas, entry); xas_unlock_irq(&xas); break; } rcu_read_unlock(); return (dax_entry_t)entry; } void dax_unlock_folio(struct folio *folio, dax_entry_t cookie) { struct address_space *mapping = folio->mapping; XA_STATE(xas, &mapping->i_pages, folio->index); if (S_ISCHR(mapping->host->i_mode)) return; dax_unlock_entry(&xas, (void *)cookie); } /* * dax_lock_mapping_entry - Lock the DAX entry corresponding to a mapping * @mapping: the file's mapping whose entry we want to lock * @index: the offset within this file * @page: output the dax page corresponding to this dax entry * * Return: A cookie to pass to dax_unlock_mapping_entry() or 0 if the entry * could not be locked. */ dax_entry_t dax_lock_mapping_entry(struct address_space *mapping, pgoff_t index, struct page **page) { XA_STATE(xas, NULL, 0); void *entry; rcu_read_lock(); for (;;) { entry = NULL; if (!dax_mapping(mapping)) break; xas.xa = &mapping->i_pages; xas_lock_irq(&xas); xas_set(&xas, index); entry = xas_load(&xas); if (dax_is_locked(entry)) { rcu_read_unlock(); wait_entry_unlocked(&xas, entry); rcu_read_lock(); continue; } if (!entry || dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) { /* * Because we are looking for entry from file's mapping * and index, so the entry may not be inserted for now, * or even a zero/empty entry. We don't think this is * an error case. So, return a special value and do * not output @page. */ entry = (void *)~0UL; } else { *page = pfn_to_page(dax_to_pfn(entry)); dax_lock_entry(&xas, entry); } xas_unlock_irq(&xas); break; } rcu_read_unlock(); return (dax_entry_t)entry; } void dax_unlock_mapping_entry(struct address_space *mapping, pgoff_t index, dax_entry_t cookie) { XA_STATE(xas, &mapping->i_pages, index); if (cookie == ~0UL) return; dax_unlock_entry(&xas, (void *)cookie); } /* * Find page cache entry at given index. If it is a DAX entry, return it * with the entry locked. If the page cache doesn't contain an entry at * that index, add a locked empty entry. * * When requesting an entry with size DAX_PMD, grab_mapping_entry() will * either return that locked entry or will return VM_FAULT_FALLBACK. * This will happen if there are any PTE entries within the PMD range * that we are requesting. * * We always favor PTE entries over PMD entries. There isn't a flow where we * evict PTE entries in order to 'upgrade' them to a PMD entry. A PMD * insertion will fail if it finds any PTE entries already in the tree, and a * PTE insertion will cause an existing PMD entry to be unmapped and * downgraded to PTE entries. This happens for both PMD zero pages as * well as PMD empty entries. * * The exception to this downgrade path is for PMD entries that have * real storage backing them. We will leave these real PMD entries in * the tree, and PTE writes will simply dirty the entire PMD entry. * * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For * persistent memory the benefit is doubtful. We can add that later if we can * show it helps. * * On error, this function does not return an ERR_PTR. Instead it returns * a VM_FAULT code, encoded as an xarray internal entry. The ERR_PTR values * overlap with xarray value entries. 
*/ static void *grab_mapping_entry(struct xa_state *xas, struct address_space *mapping, unsigned int order) { unsigned long index = xas->xa_index; bool pmd_downgrade; /* splitting PMD entry into PTE entries? */ void *entry; retry: pmd_downgrade = false; xas_lock_irq(xas); entry = get_next_unlocked_entry(xas, order); if (entry) { if (dax_is_conflict(entry)) goto fallback; if (!xa_is_value(entry)) { xas_set_err(xas, -EIO); goto out_unlock; } if (order == 0) { if (dax_is_pmd_entry(entry) && (dax_is_zero_entry(entry) || dax_is_empty_entry(entry))) { pmd_downgrade = true; } } } if (pmd_downgrade) { /* * Make sure 'entry' remains valid while we drop * the i_pages lock. */ dax_lock_entry(xas, entry); /* * Besides huge zero pages the only other thing that gets * downgraded are empty entries which don't need to be * unmapped. */ if (dax_is_zero_entry(entry)) { xas_unlock_irq(xas); unmap_mapping_pages(mapping, xas->xa_index & ~PG_PMD_COLOUR, PG_PMD_NR, false); xas_reset(xas); xas_lock_irq(xas); } dax_disassociate_entry(entry, mapping, false); xas_store(xas, NULL); /* undo the PMD join */ dax_wake_entry(xas, entry, WAKE_ALL); mapping->nrpages -= PG_PMD_NR; entry = NULL; xas_set(xas, index); } if (entry) { dax_lock_entry(xas, entry); } else { unsigned long flags = DAX_EMPTY; if (order > 0) flags |= DAX_PMD; entry = dax_make_entry(pfn_to_pfn_t(0), flags); dax_lock_entry(xas, entry); if (xas_error(xas)) goto out_unlock; mapping->nrpages += 1UL << order; } out_unlock: xas_unlock_irq(xas); if (xas_nomem(xas, mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM)) goto retry; if (xas->xa_node == XA_ERROR(-ENOMEM)) return xa_mk_internal(VM_FAULT_OOM); if (xas_error(xas)) return xa_mk_internal(VM_FAULT_SIGBUS); return entry; fallback: xas_unlock_irq(xas); return xa_mk_internal(VM_FAULT_FALLBACK); } /** * dax_layout_busy_page_range - find first pinned page in @mapping * @mapping: address space to scan for a page with ref count > 1 * @start: Starting offset. Page containing 'start' is included. * @end: End offset. Page containing 'end' is included. If 'end' is LLONG_MAX, * pages from 'start' till the end of file are included. * * DAX requires ZONE_DEVICE mapped pages. These pages are never * 'onlined' to the page allocator so they are considered idle when * page->count == 1. A filesystem uses this interface to determine if * any page in the mapping is busy, i.e. for DMA, or other * get_user_pages() usages. * * It is expected that the filesystem is holding locks to block the * establishment of new mappings in this address_space. I.e. it expects * to be able to run unmap_mapping_range() and subsequently not race * mapping_mapped() becoming true. */ struct page *dax_layout_busy_page_range(struct address_space *mapping, loff_t start, loff_t end) { void *entry; unsigned int scanned = 0; struct page *page = NULL; pgoff_t start_idx = start >> PAGE_SHIFT; pgoff_t end_idx; XA_STATE(xas, &mapping->i_pages, start_idx); /* * In the 'limited' case get_user_pages() for dax is disabled. */ if (IS_ENABLED(CONFIG_FS_DAX_LIMITED)) return NULL; if (!dax_mapping(mapping)) return NULL; /* If end == LLONG_MAX, all pages from start to till end of file */ if (end == LLONG_MAX) end_idx = ULONG_MAX; else end_idx = end >> PAGE_SHIFT; /* * If we race get_user_pages_fast() here either we'll see the * elevated page count in the iteration and wait, or * get_user_pages_fast() will see that the page it took a reference * against is no longer mapped in the page tables and bail to the * get_user_pages() slow path. 
The slow path is protected by * pte_lock() and pmd_lock(). New references are not taken without * holding those locks, and unmap_mapping_pages() will not zero the * pte or pmd without holding the respective lock, so we are * guaranteed to either see new references or prevent new * references from being established. */ unmap_mapping_pages(mapping, start_idx, end_idx - start_idx + 1, 0); xas_lock_irq(&xas); xas_for_each(&xas, entry, end_idx) { if (WARN_ON_ONCE(!xa_is_value(entry))) continue; entry = wait_entry_unlocked_exclusive(&xas, entry); if (entry) page = dax_busy_page(entry); put_unlocked_entry(&xas, entry, WAKE_NEXT); if (page) break; if (++scanned % XA_CHECK_SCHED) continue; xas_pause(&xas); xas_unlock_irq(&xas); cond_resched(); xas_lock_irq(&xas); } xas_unlock_irq(&xas); return page; } EXPORT_SYMBOL_GPL(dax_layout_busy_page_range); struct page *dax_layout_busy_page(struct address_space *mapping) { return dax_layout_busy_page_range(mapping, 0, LLONG_MAX); } EXPORT_SYMBOL_GPL(dax_layout_busy_page); static int __dax_invalidate_entry(struct address_space *mapping, pgoff_t index, bool trunc) { XA_STATE(xas, &mapping->i_pages, index); int ret = 0; void *entry; xas_lock_irq(&xas); entry = get_next_unlocked_entry(&xas, 0); if (!entry || WARN_ON_ONCE(!xa_is_value(entry))) goto out; if (!trunc && (xas_get_mark(&xas, PAGECACHE_TAG_DIRTY) || xas_get_mark(&xas, PAGECACHE_TAG_TOWRITE))) goto out; dax_disassociate_entry(entry, mapping, trunc); xas_store(&xas, NULL); mapping->nrpages -= 1UL << dax_entry_order(entry); ret = 1; out: put_unlocked_entry(&xas, entry, WAKE_ALL); xas_unlock_irq(&xas); return ret; } static int __dax_clear_dirty_range(struct address_space *mapping, pgoff_t start, pgoff_t end) { XA_STATE(xas, &mapping->i_pages, start); unsigned int scanned = 0; void *entry; xas_lock_irq(&xas); xas_for_each(&xas, entry, end) { entry = wait_entry_unlocked_exclusive(&xas, entry); if (!entry) continue; xas_clear_mark(&xas, PAGECACHE_TAG_DIRTY); xas_clear_mark(&xas, PAGECACHE_TAG_TOWRITE); put_unlocked_entry(&xas, entry, WAKE_NEXT); if (++scanned % XA_CHECK_SCHED) continue; xas_pause(&xas); xas_unlock_irq(&xas); cond_resched(); xas_lock_irq(&xas); } xas_unlock_irq(&xas); return 0; } /* * Delete DAX entry at @index from @mapping. Wait for it * to be unlocked before deleting it. */ int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index) { int ret = __dax_invalidate_entry(mapping, index, true); /* * This gets called from truncate / punch_hole path. As such, the caller * must hold locks protecting against concurrent modifications of the * page cache (usually fs-private i_mmap_sem for writing). Since the * caller has seen a DAX entry for this index, we better find it * at that index as well... 
*/ WARN_ON_ONCE(!ret); return ret; } void dax_delete_mapping_range(struct address_space *mapping, loff_t start, loff_t end) { void *entry; pgoff_t start_idx = start >> PAGE_SHIFT; pgoff_t end_idx; XA_STATE(xas, &mapping->i_pages, start_idx); /* If end == LLONG_MAX, all pages from start to till end of file */ if (end == LLONG_MAX) end_idx = ULONG_MAX; else end_idx = end >> PAGE_SHIFT; xas_lock_irq(&xas); xas_for_each(&xas, entry, end_idx) { if (!xa_is_value(entry)) continue; entry = wait_entry_unlocked_exclusive(&xas, entry); if (!entry) continue; dax_disassociate_entry(entry, mapping, true); xas_store(&xas, NULL); mapping->nrpages -= 1UL << dax_entry_order(entry); put_unlocked_entry(&xas, entry, WAKE_ALL); } xas_unlock_irq(&xas); } EXPORT_SYMBOL_GPL(dax_delete_mapping_range); static int wait_page_idle(struct page *page, void (cb)(struct inode *), struct inode *inode) { return ___wait_var_event(page, dax_page_is_idle(page), TASK_INTERRUPTIBLE, 0, 0, cb(inode)); } static void wait_page_idle_uninterruptible(struct page *page, struct inode *inode) { ___wait_var_event(page, dax_page_is_idle(page), TASK_UNINTERRUPTIBLE, 0, 0, schedule()); } /* * Unmaps the inode and waits for any DMA to complete prior to deleting the * DAX mapping entries for the range. * * For NOWAIT behavior, pass @cb as NULL to early-exit on first found * busy page */ int dax_break_layout(struct inode *inode, loff_t start, loff_t end, void (cb)(struct inode *)) { struct page *page; int error = 0; if (!dax_mapping(inode->i_mapping)) return 0; do { page = dax_layout_busy_page_range(inode->i_mapping, start, end); if (!page) break; if (!cb) { error = -ERESTARTSYS; break; } error = wait_page_idle(page, cb, inode); } while (error == 0); if (!page) dax_delete_mapping_range(inode->i_mapping, start, end); return error; } EXPORT_SYMBOL_GPL(dax_break_layout); void dax_break_layout_final(struct inode *inode) { struct page *page; if (!dax_mapping(inode->i_mapping)) return; do { page = dax_layout_busy_page_range(inode->i_mapping, 0, LLONG_MAX); if (!page) break; wait_page_idle_uninterruptible(page, inode); } while (true); if (!page) dax_delete_mapping_range(inode->i_mapping, 0, LLONG_MAX); } EXPORT_SYMBOL_GPL(dax_break_layout_final); /* * Invalidate DAX entry if it is clean. */ int dax_invalidate_mapping_entry_sync(struct address_space *mapping, pgoff_t index) { return __dax_invalidate_entry(mapping, index, false); } static pgoff_t dax_iomap_pgoff(const struct iomap *iomap, loff_t pos) { return PHYS_PFN(iomap->addr + (pos & PAGE_MASK) - iomap->offset); } static int copy_cow_page_dax(struct vm_fault *vmf, const struct iomap_iter *iter) { pgoff_t pgoff = dax_iomap_pgoff(&iter->iomap, iter->pos); void *vto, *kaddr; long rc; int id; id = dax_read_lock(); rc = dax_direct_access(iter->iomap.dax_dev, pgoff, 1, DAX_ACCESS, &kaddr, NULL); if (rc < 0) { dax_read_unlock(id); return rc; } vto = kmap_atomic(vmf->cow_page); copy_user_page(vto, kaddr, vmf->address, vmf->cow_page); kunmap_atomic(vto); dax_read_unlock(id); return 0; } /* * MAP_SYNC on a dax mapping guarantees dirty metadata is * flushed on write-faults (non-cow), but not read-faults. */ static bool dax_fault_is_synchronous(const struct iomap_iter *iter, struct vm_area_struct *vma) { return (iter->flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC) && (iter->iomap.flags & IOMAP_F_DIRTY); } /* * By this point grab_mapping_entry() has ensured that we have a locked entry * of the appropriate size so we don't have to worry about downgrading PMDs to * PTEs. 
If we happen to be trying to insert a PTE and there is a PMD * already in the tree, we will skip the insertion and just dirty the PMD as * appropriate. */ static void *dax_insert_entry(struct xa_state *xas, struct vm_fault *vmf, const struct iomap_iter *iter, void *entry, pfn_t pfn, unsigned long flags) { struct address_space *mapping = vmf->vma->vm_file->f_mapping; void *new_entry = dax_make_entry(pfn, flags); bool write = iter->flags & IOMAP_WRITE; bool dirty = write && !dax_fault_is_synchronous(iter, vmf->vma); bool shared = iter->iomap.flags & IOMAP_F_SHARED; if (dirty) __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); if (shared || (dax_is_zero_entry(entry) && !(flags & DAX_ZERO_PAGE))) { unsigned long index = xas->xa_index; /* we are replacing a zero page with block mapping */ if (dax_is_pmd_entry(entry)) unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR, PG_PMD_NR, false); else /* pte entry */ unmap_mapping_pages(mapping, index, 1, false); } xas_reset(xas); xas_lock_irq(xas); if (shared || dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) { void *old; dax_disassociate_entry(entry, mapping, false); dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address, shared); /* * Only swap our new entry into the page cache if the current * entry is a zero page or an empty entry. If a normal PTE or * PMD entry is already in the cache, we leave it alone. This * means that if we are trying to insert a PTE and the * existing entry is a PMD, we will just leave the PMD in the * tree and dirty it if necessary. */ old = dax_lock_entry(xas, new_entry); WARN_ON_ONCE(old != xa_mk_value(xa_to_value(entry) | DAX_LOCKED)); entry = new_entry; } else { xas_load(xas); /* Walk the xa_state */ } if (dirty) xas_set_mark(xas, PAGECACHE_TAG_DIRTY); if (write && shared) xas_set_mark(xas, PAGECACHE_TAG_TOWRITE); xas_unlock_irq(xas); return entry; } static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev, struct address_space *mapping, void *entry) { unsigned long pfn, index, count, end; long ret = 0; struct vm_area_struct *vma; /* * A page got tagged dirty in DAX mapping? Something is seriously * wrong. */ if (WARN_ON(!xa_is_value(entry))) return -EIO; if (unlikely(dax_is_locked(entry))) { void *old_entry = entry; entry = get_next_unlocked_entry(xas, 0); /* Entry got punched out / reallocated? */ if (!entry || WARN_ON_ONCE(!xa_is_value(entry))) goto put_unlocked; /* * Entry got reallocated elsewhere? No need to writeback. * We have to compare pfns as we must not bail out due to * difference in lockbit or entry type. */ if (dax_to_pfn(old_entry) != dax_to_pfn(entry)) goto put_unlocked; if (WARN_ON_ONCE(dax_is_empty_entry(entry) || dax_is_zero_entry(entry))) { ret = -EIO; goto put_unlocked; } /* Another fsync thread may have already done this entry */ if (!xas_get_mark(xas, PAGECACHE_TAG_TOWRITE)) goto put_unlocked; } /* Lock the entry to serialize with page faults */ dax_lock_entry(xas, entry); /* * We can clear the tag now but we have to be careful so that concurrent * dax_writeback_one() calls for the same index cannot finish before we * actually flush the caches. This is achieved as the calls will look * at the entry only under the i_pages lock and once they do that * they will see the entry locked and wait for it to unlock. */ xas_clear_mark(xas, PAGECACHE_TAG_TOWRITE); xas_unlock_irq(xas); /* * If dax_writeback_mapping_range() was given a wbc->range_start * in the middle of a PMD, the 'index' we use needs to be * aligned to the start of the PMD. 
* This allows us to flush for PMD_SIZE and not have to worry about * partial PMD writebacks. */ pfn = dax_to_pfn(entry); count = 1UL << dax_entry_order(entry); index = xas->xa_index & ~(count - 1); end = index + count - 1; /* Walk all mappings of a given index of a file and writeprotect them */ i_mmap_lock_read(mapping); vma_interval_tree_foreach(vma, &mapping->i_mmap, index, end) { pfn_mkclean_range(pfn, count, index, vma); cond_resched(); } i_mmap_unlock_read(mapping); dax_flush(dax_dev, page_address(pfn_to_page(pfn)), count * PAGE_SIZE); /* * After we have flushed the cache, we can clear the dirty tag. There * cannot be new dirty data in the pfn after the flush has completed as * the pfn mappings are writeprotected and fault waits for mapping * entry lock. */ xas_reset(xas); xas_lock_irq(xas); xas_store(xas, entry); xas_clear_mark(xas, PAGECACHE_TAG_DIRTY); dax_wake_entry(xas, entry, WAKE_NEXT); trace_dax_writeback_one(mapping->host, index, count); return ret; put_unlocked: put_unlocked_entry(xas, entry, WAKE_NEXT); return ret; } /* * Flush the mapping to the persistent domain within the byte range of [start, * end]. This is required by data integrity operations to ensure file data is * on persistent storage prior to completion of the operation. */ int dax_writeback_mapping_range(struct address_space *mapping, struct dax_device *dax_dev, struct writeback_control *wbc) { XA_STATE(xas, &mapping->i_pages, wbc->range_start >> PAGE_SHIFT); struct inode *inode = mapping->host; pgoff_t end_index = wbc->range_end >> PAGE_SHIFT; void *entry; int ret = 0; unsigned int scanned = 0; if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT)) return -EIO; if (mapping_empty(mapping) || wbc->sync_mode != WB_SYNC_ALL) return 0; trace_dax_writeback_range(inode, xas.xa_index, end_index); tag_pages_for_writeback(mapping, xas.xa_index, end_index); xas_lock_irq(&xas); xas_for_each_marked(&xas, entry, end_index, PAGECACHE_TAG_TOWRITE) { ret = dax_writeback_one(&xas, dax_dev, mapping, entry); if (ret < 0) { mapping_set_error(mapping, ret); break; } if (++scanned % XA_CHECK_SCHED) continue; xas_pause(&xas); xas_unlock_irq(&xas); cond_resched(); xas_lock_irq(&xas); } xas_unlock_irq(&xas); trace_dax_writeback_range_done(inode, xas.xa_index, end_index); return ret; } EXPORT_SYMBOL_GPL(dax_writeback_mapping_range); static int dax_iomap_direct_access(const struct iomap *iomap, loff_t pos, size_t size, void **kaddr, pfn_t *pfnp) { pgoff_t pgoff = dax_iomap_pgoff(iomap, pos); int id, rc = 0; long length; id = dax_read_lock(); length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size), DAX_ACCESS, kaddr, pfnp); if (length < 0) { rc = length; goto out; } if (!pfnp) goto out_check_addr; rc = -EINVAL; if (PFN_PHYS(length) < size) goto out; if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1)) goto out; rc = 0; out_check_addr: if (!kaddr) goto out; if (!*kaddr) rc = -EFAULT; out: dax_read_unlock(id); return rc; } /** * dax_iomap_copy_around - Prepare for an unaligned write to a shared/cow page * by copying the data before and after the range to be written. * @pos: address to do copy from. * @length: size of copy operation. * @align_size: aligned w.r.t align_size (either PMD_SIZE or PAGE_SIZE) * @srcmap: iomap srcmap * @daddr: destination address to copy to. * * This can be called from two places. Either during DAX write fault (page * aligned), to copy the length size data to daddr. Or, while doing normal DAX * write operation, dax_iomap_iter() might call this to do the copy of either * start or end unaligned address. 
In the latter case the rest of the copy of * aligned ranges is taken care by dax_iomap_iter() itself. * If the srcmap contains invalid data, such as HOLE and UNWRITTEN, zero the * area to make sure no old data remains. */ static int dax_iomap_copy_around(loff_t pos, uint64_t length, size_t align_size, const struct iomap *srcmap, void *daddr) { loff_t head_off = pos & (align_size - 1); size_t size = ALIGN(head_off + length, align_size); loff_t end = pos + length; loff_t pg_end = round_up(end, align_size); /* copy_all is usually in page fault case */ bool copy_all = head_off == 0 && end == pg_end; /* zero the edges if srcmap is a HOLE or IOMAP_UNWRITTEN */ bool zero_edge = srcmap->flags & IOMAP_F_SHARED || srcmap->type == IOMAP_UNWRITTEN; void *saddr = NULL; int ret = 0; if (!zero_edge) { ret = dax_iomap_direct_access(srcmap, pos, size, &saddr, NULL); if (ret) return dax_mem2blk_err(ret); } if (copy_all) { if (zero_edge) memset(daddr, 0, size); else ret = copy_mc_to_kernel(daddr, saddr, length); goto out; } /* Copy the head part of the range */ if (head_off) { if (zero_edge) memset(daddr, 0, head_off); else { ret = copy_mc_to_kernel(daddr, saddr, head_off); if (ret) return -EIO; } } /* Copy the tail part of the range */ if (end < pg_end) { loff_t tail_off = head_off + length; loff_t tail_len = pg_end - end; if (zero_edge) memset(daddr + tail_off, 0, tail_len); else { ret = copy_mc_to_kernel(daddr + tail_off, saddr + tail_off, tail_len); if (ret) return -EIO; } } out: if (zero_edge) dax_flush(srcmap->dax_dev, daddr, size); return ret ? -EIO : 0; } /* * The user has performed a load from a hole in the file. Allocating a new * page in the file would cause excessive storage usage for workloads with * sparse files. Instead we insert a read-only mapping of the 4k zero page. * If this page is ever written to we will re-fault and change the mapping to * point to real DAX storage instead. 
*/ static vm_fault_t dax_load_hole(struct xa_state *xas, struct vm_fault *vmf, const struct iomap_iter *iter, void **entry) { struct inode *inode = iter->inode; unsigned long vaddr = vmf->address; pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vaddr)); vm_fault_t ret; *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, DAX_ZERO_PAGE); ret = vmf_insert_page_mkwrite(vmf, pfn_t_to_page(pfn), false); trace_dax_load_hole(inode, vmf, ret); return ret; } #ifdef CONFIG_FS_DAX_PMD static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf, const struct iomap_iter *iter, void **entry) { struct address_space *mapping = vmf->vma->vm_file->f_mapping; unsigned long pmd_addr = vmf->address & PMD_MASK; struct vm_area_struct *vma = vmf->vma; struct inode *inode = mapping->host; pgtable_t pgtable = NULL; struct folio *zero_folio; spinlock_t *ptl; pmd_t pmd_entry; pfn_t pfn; zero_folio = mm_get_huge_zero_folio(vmf->vma->vm_mm); if (unlikely(!zero_folio)) goto fallback; pfn = page_to_pfn_t(&zero_folio->page); *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, DAX_PMD | DAX_ZERO_PAGE); if (arch_needs_pgtable_deposit()) { pgtable = pte_alloc_one(vma->vm_mm); if (!pgtable) return VM_FAULT_OOM; } ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd); if (!pmd_none(*(vmf->pmd))) { spin_unlock(ptl); goto fallback; } if (pgtable) { pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable); mm_inc_nr_ptes(vma->vm_mm); } pmd_entry = folio_mk_pmd(zero_folio, vmf->vma->vm_page_prot); set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry); spin_unlock(ptl); trace_dax_pmd_load_hole(inode, vmf, zero_folio, *entry); return VM_FAULT_NOPAGE; fallback: if (pgtable) pte_free(vma->vm_mm, pgtable); trace_dax_pmd_load_hole_fallback(inode, vmf, zero_folio, *entry); return VM_FAULT_FALLBACK; } #else static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf, const struct iomap_iter *iter, void **entry) { return VM_FAULT_FALLBACK; } #endif /* CONFIG_FS_DAX_PMD */ static int dax_unshare_iter(struct iomap_iter *iter) { struct iomap *iomap = &iter->iomap; const struct iomap *srcmap = iomap_iter_srcmap(iter); loff_t copy_pos = iter->pos; u64 copy_len = iomap_length(iter); u32 mod; int id = 0; s64 ret; void *daddr = NULL, *saddr = NULL; if (!iomap_want_unshare_iter(iter)) return iomap_iter_advance_full(iter); /* * Extend the file range to be aligned to fsblock/pagesize, because * we need to copy entire blocks, not just the byte range specified. * Invalidate the mapping because we're about to CoW. 
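 * For example, with 4k pages an unshare of bytes 100-199 rounds copy_pos
 * down to 0 and copy_len up to PAGE_SIZE, so the whole page is copied
 * from the source extent.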
*/ mod = offset_in_page(copy_pos); if (mod) { copy_len += mod; copy_pos -= mod; } mod = offset_in_page(copy_pos + copy_len); if (mod) copy_len += PAGE_SIZE - mod; invalidate_inode_pages2_range(iter->inode->i_mapping, copy_pos >> PAGE_SHIFT, (copy_pos + copy_len - 1) >> PAGE_SHIFT); id = dax_read_lock(); ret = dax_iomap_direct_access(iomap, copy_pos, copy_len, &daddr, NULL); if (ret < 0) goto out_unlock; ret = dax_iomap_direct_access(srcmap, copy_pos, copy_len, &saddr, NULL); if (ret < 0) goto out_unlock; if (copy_mc_to_kernel(daddr, saddr, copy_len) != 0) ret = -EIO; out_unlock: dax_read_unlock(id); if (ret < 0) return dax_mem2blk_err(ret); return iomap_iter_advance_full(iter); } int dax_file_unshare(struct inode *inode, loff_t pos, loff_t len, const struct iomap_ops *ops) { struct iomap_iter iter = { .inode = inode, .pos = pos, .flags = IOMAP_WRITE | IOMAP_UNSHARE | IOMAP_DAX, }; loff_t size = i_size_read(inode); int ret; if (pos < 0 || pos >= size) return 0; iter.len = min(len, size - pos); while ((ret = iomap_iter(&iter, ops)) > 0) iter.status = dax_unshare_iter(&iter); return ret; } EXPORT_SYMBOL_GPL(dax_file_unshare); static int dax_memzero(struct iomap_iter *iter, loff_t pos, size_t size) { const struct iomap *iomap = &iter->iomap; const struct iomap *srcmap = iomap_iter_srcmap(iter); unsigned offset = offset_in_page(pos); pgoff_t pgoff = dax_iomap_pgoff(iomap, pos); void *kaddr; long ret; ret = dax_direct_access(iomap->dax_dev, pgoff, 1, DAX_ACCESS, &kaddr, NULL); if (ret < 0) return dax_mem2blk_err(ret); memset(kaddr + offset, 0, size); if (iomap->flags & IOMAP_F_SHARED) ret = dax_iomap_copy_around(pos, size, PAGE_SIZE, srcmap, kaddr); else dax_flush(iomap->dax_dev, kaddr + offset, size); return ret; } static int dax_zero_iter(struct iomap_iter *iter, bool *did_zero) { const struct iomap *iomap = &iter->iomap; const struct iomap *srcmap = iomap_iter_srcmap(iter); u64 length = iomap_length(iter); int ret; /* already zeroed? we're done. */ if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN) return iomap_iter_advance(iter, &length); /* * invalidate the pages whose sharing state is to be changed * because of CoW. */ if (iomap->flags & IOMAP_F_SHARED) invalidate_inode_pages2_range(iter->inode->i_mapping, iter->pos >> PAGE_SHIFT, (iter->pos + length - 1) >> PAGE_SHIFT); do { loff_t pos = iter->pos; unsigned offset = offset_in_page(pos); pgoff_t pgoff = dax_iomap_pgoff(iomap, pos); int id; length = min_t(u64, PAGE_SIZE - offset, length); id = dax_read_lock(); if (IS_ALIGNED(pos, PAGE_SIZE) && length == PAGE_SIZE) ret = dax_zero_page_range(iomap->dax_dev, pgoff, 1); else ret = dax_memzero(iter, pos, length); dax_read_unlock(id); if (ret < 0) return ret; ret = iomap_iter_advance(iter, &length); if (ret) return ret; } while (length > 0); if (did_zero) *did_zero = true; return ret; } int dax_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero, const struct iomap_ops *ops) { struct iomap_iter iter = { .inode = inode, .pos = pos, .len = len, .flags = IOMAP_DAX | IOMAP_ZERO, }; int ret; while ((ret = iomap_iter(&iter, ops)) > 0) iter.status = dax_zero_iter(&iter, did_zero); return ret; } EXPORT_SYMBOL_GPL(dax_zero_range); int dax_truncate_page(struct inode *inode, loff_t pos, bool *did_zero, const struct iomap_ops *ops) { unsigned int blocksize = i_blocksize(inode); unsigned int off = pos & (blocksize - 1); /* Block boundary? 
Nothing to do */ if (!off) return 0; return dax_zero_range(inode, pos, blocksize - off, did_zero, ops); } EXPORT_SYMBOL_GPL(dax_truncate_page); static int dax_iomap_iter(struct iomap_iter *iomi, struct iov_iter *iter) { const struct iomap *iomap = &iomi->iomap; const struct iomap *srcmap = iomap_iter_srcmap(iomi); loff_t length = iomap_length(iomi); loff_t pos = iomi->pos; struct dax_device *dax_dev = iomap->dax_dev; loff_t end = pos + length, done = 0; bool write = iov_iter_rw(iter) == WRITE; bool cow = write && iomap->flags & IOMAP_F_SHARED; ssize_t ret = 0; size_t xfer; int id; if (!write) { end = min(end, i_size_read(iomi->inode)); if (pos >= end) return 0; if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN) { done = iov_iter_zero(min(length, end - pos), iter); return iomap_iter_advance(iomi, &done); } } /* * In DAX mode, enforce either pure overwrites of written extents, or * writes to unwritten extents as part of a copy-on-write operation. */ if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED && !(iomap->flags & IOMAP_F_SHARED))) return -EIO; /* * Write can allocate block for an area which has a hole page mapped * into page tables. We have to tear down these mappings so that data * written by write(2) is visible in mmap. */ if (iomap->flags & IOMAP_F_NEW || cow) { /* * Filesystem allows CoW on non-shared extents. The src extents * may have been mmapped with dirty mark before. To be able to * invalidate its dax entries, we need to clear the dirty mark * in advance. */ if (cow) __dax_clear_dirty_range(iomi->inode->i_mapping, pos >> PAGE_SHIFT, (end - 1) >> PAGE_SHIFT); invalidate_inode_pages2_range(iomi->inode->i_mapping, pos >> PAGE_SHIFT, (end - 1) >> PAGE_SHIFT); } id = dax_read_lock(); while ((pos = iomi->pos) < end) { unsigned offset = pos & (PAGE_SIZE - 1); const size_t size = ALIGN(length + offset, PAGE_SIZE); pgoff_t pgoff = dax_iomap_pgoff(iomap, pos); ssize_t map_len; bool recovery = false; void *kaddr; if (fatal_signal_pending(current)) { ret = -EINTR; break; } map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), DAX_ACCESS, &kaddr, NULL); if (map_len == -EHWPOISON && iov_iter_rw(iter) == WRITE) { map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), DAX_RECOVERY_WRITE, &kaddr, NULL); if (map_len > 0) recovery = true; } if (map_len < 0) { ret = dax_mem2blk_err(map_len); break; } if (cow) { ret = dax_iomap_copy_around(pos, length, PAGE_SIZE, srcmap, kaddr); if (ret) break; } map_len = PFN_PHYS(map_len); kaddr += offset; map_len -= offset; if (map_len > end - pos) map_len = end - pos; if (recovery) xfer = dax_recovery_write(dax_dev, pgoff, kaddr, map_len, iter); else if (write) xfer = dax_copy_from_iter(dax_dev, pgoff, kaddr, map_len, iter); else xfer = dax_copy_to_iter(dax_dev, pgoff, kaddr, map_len, iter); length = xfer; ret = iomap_iter_advance(iomi, &length); if (!ret && xfer == 0) ret = -EFAULT; if (xfer < map_len) break; } dax_read_unlock(id); return ret; } /** * dax_iomap_rw - Perform I/O to a DAX file * @iocb: The control block for this I/O * @iter: The addresses to do I/O from or to * @ops: iomap ops passed from the file system * * This function performs read and write operations to directly mapped * persistent memory. The callers needs to take care of read/write exclusion * and evicting any page cache pages in the region under I/O. 
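 * Callers are expected to hold the inode's i_rwsem across the call (at
 * least shared for reads, exclusive for writes); the lockdep assertions
 * below check exactly that.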
*/ ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter, const struct iomap_ops *ops) { struct iomap_iter iomi = { .inode = iocb->ki_filp->f_mapping->host, .pos = iocb->ki_pos, .len = iov_iter_count(iter), .flags = IOMAP_DAX, }; loff_t done = 0; int ret; if (!iomi.len) return 0; if (iov_iter_rw(iter) == WRITE) { lockdep_assert_held_write(&iomi.inode->i_rwsem); iomi.flags |= IOMAP_WRITE; } else { lockdep_assert_held(&iomi.inode->i_rwsem); } if (iocb->ki_flags & IOCB_NOWAIT) iomi.flags |= IOMAP_NOWAIT; while ((ret = iomap_iter(&iomi, ops)) > 0) iomi.status = dax_iomap_iter(&iomi, iter); done = iomi.pos - iocb->ki_pos; iocb->ki_pos = iomi.pos; return done ? done : ret; } EXPORT_SYMBOL_GPL(dax_iomap_rw); static vm_fault_t dax_fault_return(int error) { if (error == 0) return VM_FAULT_NOPAGE; return vmf_error(error); } /* * When handling a synchronous page fault and the inode need a fsync, we can * insert the PTE/PMD into page tables only after that fsync happened. Skip * insertion for now and return the pfn so that caller can insert it after the * fsync is done. */ static vm_fault_t dax_fault_synchronous_pfnp(pfn_t *pfnp, pfn_t pfn) { if (WARN_ON_ONCE(!pfnp)) return VM_FAULT_SIGBUS; *pfnp = pfn; return VM_FAULT_NEEDDSYNC; } static vm_fault_t dax_fault_cow_page(struct vm_fault *vmf, const struct iomap_iter *iter) { vm_fault_t ret; int error = 0; switch (iter->iomap.type) { case IOMAP_HOLE: case IOMAP_UNWRITTEN: clear_user_highpage(vmf->cow_page, vmf->address); break; case IOMAP_MAPPED: error = copy_cow_page_dax(vmf, iter); break; default: WARN_ON_ONCE(1); error = -EIO; break; } if (error) return dax_fault_return(error); __SetPageUptodate(vmf->cow_page); ret = finish_fault(vmf); if (!ret) return VM_FAULT_DONE_COW; return ret; } /** * dax_fault_iter - Common actor to handle pfn insertion in PTE/PMD fault. * @vmf: vm fault instance * @iter: iomap iter * @pfnp: pfn to be returned * @xas: the dax mapping tree of a file * @entry: an unlocked dax entry to be inserted * @pmd: distinguish whether it is a pmd fault */ static vm_fault_t dax_fault_iter(struct vm_fault *vmf, const struct iomap_iter *iter, pfn_t *pfnp, struct xa_state *xas, void **entry, bool pmd) { const struct iomap *iomap = &iter->iomap; const struct iomap *srcmap = iomap_iter_srcmap(iter); size_t size = pmd ? PMD_SIZE : PAGE_SIZE; loff_t pos = (loff_t)xas->xa_index << PAGE_SHIFT; bool write = iter->flags & IOMAP_WRITE; unsigned long entry_flags = pmd ? DAX_PMD : 0; struct folio *folio; int ret, err = 0; pfn_t pfn; void *kaddr; if (!pmd && vmf->cow_page) return dax_fault_cow_page(vmf, iter); /* if we are reading UNWRITTEN and HOLE, return a hole. */ if (!write && (iomap->type == IOMAP_UNWRITTEN || iomap->type == IOMAP_HOLE)) { if (!pmd) return dax_load_hole(xas, vmf, iter, entry); return dax_pmd_load_hole(xas, vmf, iter, entry); } if (iomap->type != IOMAP_MAPPED && !(iomap->flags & IOMAP_F_SHARED)) { WARN_ON_ONCE(1); return pmd ? VM_FAULT_FALLBACK : VM_FAULT_SIGBUS; } err = dax_iomap_direct_access(iomap, pos, size, &kaddr, &pfn); if (err) return pmd ? 
VM_FAULT_FALLBACK : dax_fault_return(err); *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, entry_flags); if (write && iomap->flags & IOMAP_F_SHARED) { err = dax_iomap_copy_around(pos, size, size, srcmap, kaddr); if (err) return dax_fault_return(err); } folio = dax_to_folio(*entry); if (dax_fault_is_synchronous(iter, vmf->vma)) return dax_fault_synchronous_pfnp(pfnp, pfn); folio_ref_inc(folio); if (pmd) ret = vmf_insert_folio_pmd(vmf, pfn_folio(pfn_t_to_pfn(pfn)), write); else ret = vmf_insert_page_mkwrite(vmf, pfn_t_to_page(pfn), write); folio_put(folio); return ret; } static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp, int *iomap_errp, const struct iomap_ops *ops) { struct address_space *mapping = vmf->vma->vm_file->f_mapping; XA_STATE(xas, &mapping->i_pages, vmf->pgoff); struct iomap_iter iter = { .inode = mapping->host, .pos = (loff_t)vmf->pgoff << PAGE_SHIFT, .len = PAGE_SIZE, .flags = IOMAP_DAX | IOMAP_FAULT, }; vm_fault_t ret = 0; void *entry; int error; trace_dax_pte_fault(iter.inode, vmf, ret); /* * Check whether offset isn't beyond end of file now. Caller is supposed * to hold locks serializing us with truncate / punch hole so this is * a reliable test. */ if (iter.pos >= i_size_read(iter.inode)) { ret = VM_FAULT_SIGBUS; goto out; } if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page) iter.flags |= IOMAP_WRITE; entry = grab_mapping_entry(&xas, mapping, 0); if (xa_is_internal(entry)) { ret = xa_to_internal(entry); goto out; } /* * It is possible, particularly with mixed reads & writes to private * mappings, that we have raced with a PMD fault that overlaps with * the PTE we need to set up. If so just return and the fault will be * retried. */ if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) { ret = VM_FAULT_NOPAGE; goto unlock_entry; } while ((error = iomap_iter(&iter, ops)) > 0) { if (WARN_ON_ONCE(iomap_length(&iter) < PAGE_SIZE)) { iter.status = -EIO; /* fs corruption? */ continue; } ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, false); if (ret != VM_FAULT_SIGBUS && (iter.iomap.flags & IOMAP_F_NEW)) { count_vm_event(PGMAJFAULT); count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT); ret |= VM_FAULT_MAJOR; } if (!(ret & VM_FAULT_ERROR)) { u64 length = PAGE_SIZE; iter.status = iomap_iter_advance(&iter, &length); } } if (iomap_errp) *iomap_errp = error; if (!ret && error) ret = dax_fault_return(error); unlock_entry: dax_unlock_entry(&xas, entry); out: trace_dax_pte_fault_done(iter.inode, vmf, ret); return ret; } #ifdef CONFIG_FS_DAX_PMD static bool dax_fault_check_fallback(struct vm_fault *vmf, struct xa_state *xas, pgoff_t max_pgoff) { unsigned long pmd_addr = vmf->address & PMD_MASK; bool write = vmf->flags & FAULT_FLAG_WRITE; /* * Make sure that the faulting address's PMD offset (color) matches * the PMD offset from the start of the file. This is necessary so * that a PMD range in the page table overlaps exactly with a PMD * range in the page cache. 
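 * In other words, the low PMD_ORDER bits of vmf->pgoff must match the
 * low PMD_ORDER bits of the faulting virtual page number; if they
 * differ we have to fall back to PTEs.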
*/ if ((vmf->pgoff & PG_PMD_COLOUR) != ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR)) return true; /* Fall back to PTEs if we're going to COW */ if (write && !(vmf->vma->vm_flags & VM_SHARED)) return true; /* If the PMD would extend outside the VMA */ if (pmd_addr < vmf->vma->vm_start) return true; if ((pmd_addr + PMD_SIZE) > vmf->vma->vm_end) return true; /* If the PMD would extend beyond the file size */ if ((xas->xa_index | PG_PMD_COLOUR) >= max_pgoff) return true; return false; } static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp, const struct iomap_ops *ops) { struct address_space *mapping = vmf->vma->vm_file->f_mapping; XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER); struct iomap_iter iter = { .inode = mapping->host, .len = PMD_SIZE, .flags = IOMAP_DAX | IOMAP_FAULT, }; vm_fault_t ret = VM_FAULT_FALLBACK; pgoff_t max_pgoff; void *entry; if (vmf->flags & FAULT_FLAG_WRITE) iter.flags |= IOMAP_WRITE; /* * Check whether offset isn't beyond end of file now. Caller is * supposed to hold locks serializing us with truncate / punch hole so * this is a reliable test. */ max_pgoff = DIV_ROUND_UP(i_size_read(iter.inode), PAGE_SIZE); trace_dax_pmd_fault(iter.inode, vmf, max_pgoff, 0); if (xas.xa_index >= max_pgoff) { ret = VM_FAULT_SIGBUS; goto out; } if (dax_fault_check_fallback(vmf, &xas, max_pgoff)) goto fallback; /* * grab_mapping_entry() will make sure we get an empty PMD entry, * a zero PMD entry or a DAX PMD. If it can't (because a PTE * entry is already in the array, for instance), it will return * VM_FAULT_FALLBACK. */ entry = grab_mapping_entry(&xas, mapping, PMD_ORDER); if (xa_is_internal(entry)) { ret = xa_to_internal(entry); goto fallback; } /* * It is possible, particularly with mixed reads & writes to private * mappings, that we have raced with a PTE fault that overlaps with * the PMD we need to set up. If so just return and the fault will be * retried. */ if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) && !pmd_devmap(*vmf->pmd)) { ret = 0; goto unlock_entry; } iter.pos = (loff_t)xas.xa_index << PAGE_SHIFT; while (iomap_iter(&iter, ops) > 0) { if (iomap_length(&iter) < PMD_SIZE) continue; /* actually breaks out of the loop */ ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, true); if (ret != VM_FAULT_FALLBACK) { u64 length = PMD_SIZE; iter.status = iomap_iter_advance(&iter, &length); } } unlock_entry: dax_unlock_entry(&xas, entry); fallback: if (ret == VM_FAULT_FALLBACK) { split_huge_pmd(vmf->vma, vmf->pmd, vmf->address); count_vm_event(THP_FAULT_FALLBACK); } out: trace_dax_pmd_fault_done(iter.inode, vmf, max_pgoff, ret); return ret; } #else static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp, const struct iomap_ops *ops) { return VM_FAULT_FALLBACK; } #endif /* CONFIG_FS_DAX_PMD */ /** * dax_iomap_fault - handle a page fault on a DAX file * @vmf: The description of the fault * @order: Order of the page to fault in * @pfnp: PFN to insert for synchronous faults if fsync is required * @iomap_errp: Storage for detailed error code in case of error * @ops: Iomap ops passed from the file system * * When a page fault occurs, filesystems may call this helper in * their fault handler for DAX files. dax_iomap_fault() assumes the caller * has done all the necessary locking for page fault to proceed * successfully. 
*/ vm_fault_t dax_iomap_fault(struct vm_fault *vmf, unsigned int order, pfn_t *pfnp, int *iomap_errp, const struct iomap_ops *ops) { if (order == 0) return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops); else if (order == PMD_ORDER) return dax_iomap_pmd_fault(vmf, pfnp, ops); else return VM_FAULT_FALLBACK; } EXPORT_SYMBOL_GPL(dax_iomap_fault); /* * dax_insert_pfn_mkwrite - insert PTE or PMD entry into page tables * @vmf: The description of the fault * @pfn: PFN to insert * @order: Order of entry to insert. * * This function inserts a writeable PTE or PMD entry into the page tables * for an mmaped DAX file. It also marks the page cache entry as dirty. */ static vm_fault_t dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order) { struct address_space *mapping = vmf->vma->vm_file->f_mapping; XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order); struct folio *folio; void *entry; vm_fault_t ret; xas_lock_irq(&xas); entry = get_next_unlocked_entry(&xas, order); /* Did we race with someone splitting entry or so? */ if (!entry || dax_is_conflict(entry) || (order == 0 && !dax_is_pte_entry(entry))) { put_unlocked_entry(&xas, entry, WAKE_NEXT); xas_unlock_irq(&xas); trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf, VM_FAULT_NOPAGE); return VM_FAULT_NOPAGE; } xas_set_mark(&xas, PAGECACHE_TAG_DIRTY); dax_lock_entry(&xas, entry); xas_unlock_irq(&xas); folio = pfn_folio(pfn_t_to_pfn(pfn)); folio_ref_inc(folio); if (order == 0) ret = vmf_insert_page_mkwrite(vmf, &folio->page, true); #ifdef CONFIG_FS_DAX_PMD else if (order == PMD_ORDER) ret = vmf_insert_folio_pmd(vmf, folio, FAULT_FLAG_WRITE); #endif else ret = VM_FAULT_FALLBACK; folio_put(folio); dax_unlock_entry(&xas, entry); trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret); return ret; } /** * dax_finish_sync_fault - finish synchronous page fault * @vmf: The description of the fault * @order: Order of entry to be inserted * @pfn: PFN to insert * * This function ensures that the file range touched by the page fault is * stored persistently on the media and handles inserting of appropriate page * table entry. 
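 * Filesystems typically call this from their fault handler after
 * dax_iomap_fault() has returned VM_FAULT_NEEDDSYNC, passing back the
 * pfn that was reported through @pfnp.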
*/ vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf, unsigned int order, pfn_t pfn) { int err; loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT; size_t len = PAGE_SIZE << order; err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1); if (err) return VM_FAULT_SIGBUS; return dax_insert_pfn_mkwrite(vmf, pfn, order); } EXPORT_SYMBOL_GPL(dax_finish_sync_fault); static int dax_range_compare_iter(struct iomap_iter *it_src, struct iomap_iter *it_dest, u64 len, bool *same) { const struct iomap *smap = &it_src->iomap; const struct iomap *dmap = &it_dest->iomap; loff_t pos1 = it_src->pos, pos2 = it_dest->pos; u64 dest_len; void *saddr, *daddr; int id, ret; len = min(len, min(smap->length, dmap->length)); if (smap->type == IOMAP_HOLE && dmap->type == IOMAP_HOLE) { *same = true; goto advance; } if (smap->type == IOMAP_HOLE || dmap->type == IOMAP_HOLE) { *same = false; return 0; } id = dax_read_lock(); ret = dax_iomap_direct_access(smap, pos1, ALIGN(pos1 + len, PAGE_SIZE), &saddr, NULL); if (ret < 0) goto out_unlock; ret = dax_iomap_direct_access(dmap, pos2, ALIGN(pos2 + len, PAGE_SIZE), &daddr, NULL); if (ret < 0) goto out_unlock; *same = !memcmp(saddr, daddr, len); if (!*same) len = 0; dax_read_unlock(id); advance: dest_len = len; ret = iomap_iter_advance(it_src, &len); if (!ret) ret = iomap_iter_advance(it_dest, &dest_len); return ret; out_unlock: dax_read_unlock(id); return -EIO; } int dax_dedupe_file_range_compare(struct inode *src, loff_t srcoff, struct inode *dst, loff_t dstoff, loff_t len, bool *same, const struct iomap_ops *ops) { struct iomap_iter src_iter = { .inode = src, .pos = srcoff, .len = len, .flags = IOMAP_DAX, }; struct iomap_iter dst_iter = { .inode = dst, .pos = dstoff, .len = len, .flags = IOMAP_DAX, }; int ret, status; while ((ret = iomap_iter(&src_iter, ops)) > 0 && (ret = iomap_iter(&dst_iter, ops)) > 0) { status = dax_range_compare_iter(&src_iter, &dst_iter, min(src_iter.len, dst_iter.len), same); if (status < 0) return ret; src_iter.status = dst_iter.status = status; } return ret; } int dax_remap_file_range_prep(struct file *file_in, loff_t pos_in, struct file *file_out, loff_t pos_out, loff_t *len, unsigned int remap_flags, const struct iomap_ops *ops) { return __generic_remap_file_range_prep(file_in, pos_in, file_out, pos_out, len, remap_flags, ops); } EXPORT_SYMBOL_GPL(dax_remap_file_range_prep);
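/*
 * Illustrative sketch, not part of dax.c: one way a filesystem's fault
 * handler might combine dax_iomap_fault() with dax_finish_sync_fault()
 * for MAP_SYNC mappings. "my_fs_iomap_ops" and "my_fs_huge_fault" are
 * hypothetical names; filesystem-specific locking is omitted.
 */
#if 0	/* example only */
static vm_fault_t my_fs_huge_fault(struct vm_fault *vmf, unsigned int order)
{
	pfn_t pfn;
	vm_fault_t ret;

	/* Fault the range in; for synchronous faults the pfn is returned. */
	ret = dax_iomap_fault(vmf, order, &pfn, NULL, &my_fs_iomap_ops);

	/* Persist filesystem metadata, then install the writeable entry. */
	if (ret & VM_FAULT_NEEDDSYNC)
		ret = dax_finish_sync_fault(vmf, order, pfn);

	return ret;
}
#endif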
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PGTABLE_DEFS_H
#define _ASM_X86_PGTABLE_DEFS_H

#include <linux/const.h>
#include <linux/mem_encrypt.h>

#include <asm/page_types.h>

#define _PAGE_BIT_PRESENT	0	/* is present */
#define _PAGE_BIT_RW		1	/* writeable */
#define _PAGE_BIT_USER		2	/* userspace addressable */
#define _PAGE_BIT_PWT		3	/* page write through */
#define _PAGE_BIT_PCD		4	/* page cache disabled */
#define _PAGE_BIT_ACCESSED	5	/* was accessed (raised by CPU) */
#define _PAGE_BIT_DIRTY		6	/* was written to (raised by CPU) */
#define _PAGE_BIT_PSE		7	/* 4 MB (or 2MB) page */
#define _PAGE_BIT_PAT		7	/* on 4KB pages */
#define _PAGE_BIT_GLOBAL	8	/* Global TLB entry PPro+ */
#define _PAGE_BIT_SOFTW1	9	/* available for programmer */
#define _PAGE_BIT_SOFTW2	10	/* " */
#define _PAGE_BIT_SOFTW3	11	/* " */
#define _PAGE_BIT_PAT_LARGE	12	/* On 2MB or 1GB pages */
#define _PAGE_BIT_SOFTW4	57	/* available for programmer */
#define _PAGE_BIT_SOFTW5	58	/* available for programmer */
#define _PAGE_BIT_PKEY_BIT0	59	/* Protection Keys, bit 1/4 */
#define _PAGE_BIT_PKEY_BIT1	60	/* Protection Keys, bit 2/4 */
#define _PAGE_BIT_PKEY_BIT2	61	/* Protection Keys, bit 3/4 */
#define _PAGE_BIT_PKEY_BIT3	62	/* Protection Keys, bit 4/4 */
#define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */ #define _PAGE_BIT_SPECIAL _PAGE_BIT_SOFTW1 #define _PAGE_BIT_CPA_TEST _PAGE_BIT_SOFTW1 #define _PAGE_BIT_UFFD_WP _PAGE_BIT_SOFTW2 /* userfaultfd wrprotected */ #define _PAGE_BIT_SOFT_DIRTY _PAGE_BIT_SOFTW3 /* software dirty tracking */ #define _PAGE_BIT_KERNEL_4K _PAGE_BIT_SOFTW3 /* page must not be converted to large */ #define _PAGE_BIT_DEVMAP _PAGE_BIT_SOFTW4 #ifdef CONFIG_X86_64 #define _PAGE_BIT_SAVED_DIRTY _PAGE_BIT_SOFTW5 /* Saved Dirty bit (leaf) */ #define _PAGE_BIT_NOPTISHADOW _PAGE_BIT_SOFTW5 /* No PTI shadow (root PGD) */ #else /* Shared with _PAGE_BIT_UFFD_WP which is not supported on 32 bit */ #define _PAGE_BIT_SAVED_DIRTY _PAGE_BIT_SOFTW2 /* Saved Dirty bit (leaf) */ #define _PAGE_BIT_NOPTISHADOW _PAGE_BIT_SOFTW2 /* No PTI shadow (root PGD) */ #endif /* If _PAGE_BIT_PRESENT is clear, we use these: */ /* - if the user mapped it with PROT_NONE; pte_present gives true */ #define _PAGE_BIT_PROTNONE _PAGE_BIT_GLOBAL #define _PAGE_PRESENT (_AT(pteval_t, 1) << _PAGE_BIT_PRESENT) #define _PAGE_RW (_AT(pteval_t, 1) << _PAGE_BIT_RW) #define _PAGE_USER (_AT(pteval_t, 1) << _PAGE_BIT_USER) #define _PAGE_PWT (_AT(pteval_t, 1) << _PAGE_BIT_PWT) #define _PAGE_PCD (_AT(pteval_t, 1) << _PAGE_BIT_PCD) #define _PAGE_ACCESSED (_AT(pteval_t, 1) << _PAGE_BIT_ACCESSED) #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY) #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE) #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL) #define _PAGE_SOFTW1 (_AT(pteval_t, 1) << _PAGE_BIT_SOFTW1) #define _PAGE_SOFTW2 (_AT(pteval_t, 1) << _PAGE_BIT_SOFTW2) #define _PAGE_SOFTW3 (_AT(pteval_t, 1) << _PAGE_BIT_SOFTW3) #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT) #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE) #define _PAGE_SPECIAL (_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL) #define _PAGE_CPA_TEST (_AT(pteval_t, 1) << _PAGE_BIT_CPA_TEST) #define _PAGE_KERNEL_4K (_AT(pteval_t, 1) << _PAGE_BIT_KERNEL_4K) #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS #define _PAGE_PKEY_BIT0 (_AT(pteval_t, 1) << _PAGE_BIT_PKEY_BIT0) #define _PAGE_PKEY_BIT1 (_AT(pteval_t, 1) << _PAGE_BIT_PKEY_BIT1) #define _PAGE_PKEY_BIT2 (_AT(pteval_t, 1) << _PAGE_BIT_PKEY_BIT2) #define _PAGE_PKEY_BIT3 (_AT(pteval_t, 1) << _PAGE_BIT_PKEY_BIT3) #else #define _PAGE_PKEY_BIT0 (_AT(pteval_t, 0)) #define _PAGE_PKEY_BIT1 (_AT(pteval_t, 0)) #define _PAGE_PKEY_BIT2 (_AT(pteval_t, 0)) #define _PAGE_PKEY_BIT3 (_AT(pteval_t, 0)) #endif #define _PAGE_PKEY_MASK (_PAGE_PKEY_BIT0 | \ _PAGE_PKEY_BIT1 | \ _PAGE_PKEY_BIT2 | \ _PAGE_PKEY_BIT3) #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) #define _PAGE_KNL_ERRATUM_MASK (_PAGE_DIRTY | _PAGE_ACCESSED) #else #define _PAGE_KNL_ERRATUM_MASK 0 #endif #ifdef CONFIG_MEM_SOFT_DIRTY #define _PAGE_SOFT_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_SOFT_DIRTY) #else #define _PAGE_SOFT_DIRTY (_AT(pteval_t, 0)) #endif /* * Tracking soft dirty bit when a page goes to a swap is tricky. * We need a bit which can be stored in pte _and_ not conflict * with swap entry format. On x86 bits 1-4 are *not* involved * into swap entry computation, but bit 7 is used for thp migration, * so we borrow bit 1 for soft dirty tracking. * * Please note that this bit must be treated as swap dirty page * mark if and only if the PTE/PMD has present bit clear! 
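 * (Bit 1 is _PAGE_BIT_RW, which is why _PAGE_SWP_SOFT_DIRTY below is
 * defined as _PAGE_RW.)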
*/ #ifdef CONFIG_MEM_SOFT_DIRTY #define _PAGE_SWP_SOFT_DIRTY _PAGE_RW #else #define _PAGE_SWP_SOFT_DIRTY (_AT(pteval_t, 0)) #endif #ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP #define _PAGE_UFFD_WP (_AT(pteval_t, 1) << _PAGE_BIT_UFFD_WP) #define _PAGE_SWP_UFFD_WP _PAGE_USER #else #define _PAGE_UFFD_WP (_AT(pteval_t, 0)) #define _PAGE_SWP_UFFD_WP (_AT(pteval_t, 0)) #endif #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX) #define _PAGE_DEVMAP (_AT(u64, 1) << _PAGE_BIT_DEVMAP) #define _PAGE_SOFTW4 (_AT(pteval_t, 1) << _PAGE_BIT_SOFTW4) #else #define _PAGE_NX (_AT(pteval_t, 0)) #define _PAGE_DEVMAP (_AT(pteval_t, 0)) #define _PAGE_SOFTW4 (_AT(pteval_t, 0)) #endif /* * The hardware requires shadow stack to be Write=0,Dirty=1. However, * there are valid cases where the kernel might create read-only PTEs that * are dirty (e.g., fork(), mprotect(), uffd-wp(), soft-dirty tracking). In * this case, the _PAGE_SAVED_DIRTY bit is used instead of the HW-dirty bit, * to avoid creating a wrong "shadow stack" PTEs. Such PTEs have * (Write=0,SavedDirty=1,Dirty=0) set. */ #define _PAGE_SAVED_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_SAVED_DIRTY) #define _PAGE_DIRTY_BITS (_PAGE_DIRTY | _PAGE_SAVED_DIRTY) #define _PAGE_PROTNONE (_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE) #define _PAGE_NOPTISHADOW (_AT(pteval_t, 1) << _PAGE_BIT_NOPTISHADOW) /* * Set of bits not changed in pte_modify. The pte's * protection key is treated like _PAGE_RW, for * instance, and is *not* included in this mask since * pte_modify() does modify it. */ #define _COMMON_PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \ _PAGE_SPECIAL | _PAGE_ACCESSED | \ _PAGE_DIRTY_BITS | _PAGE_SOFT_DIRTY | \ _PAGE_DEVMAP | _PAGE_CC | _PAGE_UFFD_WP) #define _PAGE_CHG_MASK (_COMMON_PAGE_CHG_MASK | _PAGE_PAT) #define _HPAGE_CHG_MASK (_COMMON_PAGE_CHG_MASK | _PAGE_PSE | _PAGE_PAT_LARGE) /* * The cache modes defined here are used to translate between pure SW usage * and the HW defined cache mode bits and/or PAT entries. * * The resulting bits for PWT, PCD and PAT should be chosen in a way * to have the WB mode at index 0 (all bits clear). This is the default * right now and likely would break too much if changed. 
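 * The __pte2cm_idx() and __cm_idx2pte() helpers further down convert
 * between the (PAT, PCD, PWT) bit triple in a PTE and a plain 3-bit
 * index; the actual cache-mode translation goes through
 * cachemode2protval().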
*/ #ifndef __ASSEMBLER__ enum page_cache_mode { _PAGE_CACHE_MODE_WB = 0, _PAGE_CACHE_MODE_WC = 1, _PAGE_CACHE_MODE_UC_MINUS = 2, _PAGE_CACHE_MODE_UC = 3, _PAGE_CACHE_MODE_WT = 4, _PAGE_CACHE_MODE_WP = 5, _PAGE_CACHE_MODE_NUM = 8 }; #endif #define _PAGE_CC (_AT(pteval_t, cc_get_mask())) #define _PAGE_ENC (_AT(pteval_t, sme_me_mask)) #define _PAGE_CACHE_MASK (_PAGE_PWT | _PAGE_PCD | _PAGE_PAT) #define _PAGE_LARGE_CACHE_MASK (_PAGE_PWT | _PAGE_PCD | _PAGE_PAT_LARGE) #define _PAGE_NOCACHE (cachemode2protval(_PAGE_CACHE_MODE_UC)) #define _PAGE_CACHE_WP (cachemode2protval(_PAGE_CACHE_MODE_WP)) #define __PP _PAGE_PRESENT #define __RW _PAGE_RW #define _USR _PAGE_USER #define ___A _PAGE_ACCESSED #define ___D _PAGE_DIRTY #define ___G _PAGE_GLOBAL #define __NX _PAGE_NX #define _ENC _PAGE_ENC #define __WP _PAGE_CACHE_WP #define __NC _PAGE_NOCACHE #define _PSE _PAGE_PSE #define pgprot_val(x) ((x).pgprot) #define __pgprot(x) ((pgprot_t) { (x) } ) #define __pg(x) __pgprot(x) #define PAGE_NONE __pg( 0| 0| 0|___A| 0| 0| 0|___G) #define PAGE_SHARED __pg(__PP|__RW|_USR|___A|__NX| 0| 0| 0) #define PAGE_SHARED_EXEC __pg(__PP|__RW|_USR|___A| 0| 0| 0| 0) #define PAGE_COPY_NOEXEC __pg(__PP| 0|_USR|___A|__NX| 0| 0| 0) #define PAGE_COPY_EXEC __pg(__PP| 0|_USR|___A| 0| 0| 0| 0) #define PAGE_COPY __pg(__PP| 0|_USR|___A|__NX| 0| 0| 0) #define PAGE_READONLY __pg(__PP| 0|_USR|___A|__NX| 0| 0| 0) #define PAGE_READONLY_EXEC __pg(__PP| 0|_USR|___A| 0| 0| 0| 0) #define __PAGE_KERNEL (__PP|__RW| 0|___A|__NX|___D| 0|___G) #define __PAGE_KERNEL_EXEC (__PP|__RW| 0|___A| 0|___D| 0|___G) /* * Page tables needs to have Write=1 in order for any lower PTEs to be * writable. This includes shadow stack memory (Write=0, Dirty=1) */ #define _KERNPG_TABLE_NOENC (__PP|__RW| 0|___A| 0|___D| 0| 0) #define _KERNPG_TABLE (__PP|__RW| 0|___A| 0|___D| 0| 0| _ENC) #define _PAGE_TABLE_NOENC (__PP|__RW|_USR|___A| 0|___D| 0| 0) #define _PAGE_TABLE (__PP|__RW|_USR|___A| 0|___D| 0| 0| _ENC) #define __PAGE_KERNEL_RO (__PP| 0| 0|___A|__NX| 0| 0|___G) #define __PAGE_KERNEL_ROX (__PP| 0| 0|___A| 0| 0| 0|___G) #define __PAGE_KERNEL (__PP|__RW| 0|___A|__NX|___D| 0|___G) #define __PAGE_KERNEL_EXEC (__PP|__RW| 0|___A| 0|___D| 0|___G) #define __PAGE_KERNEL_NOCACHE (__PP|__RW| 0|___A|__NX|___D| 0|___G| __NC) #define __PAGE_KERNEL_VVAR (__PP| 0|_USR|___A|__NX| 0| 0|___G) #define __PAGE_KERNEL_LARGE (__PP|__RW| 0|___A|__NX|___D|_PSE|___G) #define __PAGE_KERNEL_LARGE_EXEC (__PP|__RW| 0|___A| 0|___D|_PSE|___G) #define __PAGE_KERNEL_WP (__PP|__RW| 0|___A|__NX|___D| 0|___G| __WP) #define __PAGE_KERNEL_IO __PAGE_KERNEL #define __PAGE_KERNEL_IO_NOCACHE __PAGE_KERNEL_NOCACHE #ifndef __ASSEMBLER__ #define __PAGE_KERNEL_ENC (__PAGE_KERNEL | _ENC) #define __PAGE_KERNEL_ENC_WP (__PAGE_KERNEL_WP | _ENC) #define __PAGE_KERNEL_NOENC (__PAGE_KERNEL | 0) #define __PAGE_KERNEL_NOENC_WP (__PAGE_KERNEL_WP | 0) #define __pgprot_mask(x) __pgprot((x) & __default_kernel_pte_mask) #define PAGE_KERNEL __pgprot_mask(__PAGE_KERNEL | _ENC) #define PAGE_KERNEL_NOENC __pgprot_mask(__PAGE_KERNEL | 0) #define PAGE_KERNEL_RO __pgprot_mask(__PAGE_KERNEL_RO | _ENC) #define PAGE_KERNEL_EXEC __pgprot_mask(__PAGE_KERNEL_EXEC | _ENC) #define PAGE_KERNEL_EXEC_NOENC __pgprot_mask(__PAGE_KERNEL_EXEC | 0) #define PAGE_KERNEL_ROX __pgprot_mask(__PAGE_KERNEL_ROX | _ENC) #define PAGE_KERNEL_NOCACHE __pgprot_mask(__PAGE_KERNEL_NOCACHE | _ENC) #define PAGE_KERNEL_LARGE __pgprot_mask(__PAGE_KERNEL_LARGE | _ENC) #define PAGE_KERNEL_LARGE_EXEC __pgprot_mask(__PAGE_KERNEL_LARGE_EXEC | _ENC) #define 
PAGE_KERNEL_VVAR __pgprot_mask(__PAGE_KERNEL_VVAR | _ENC) #define PAGE_KERNEL_IO __pgprot_mask(__PAGE_KERNEL_IO) #define PAGE_KERNEL_IO_NOCACHE __pgprot_mask(__PAGE_KERNEL_IO_NOCACHE) #endif /* __ASSEMBLER__ */ /* * early identity mapping pte attrib macros. */ #ifdef CONFIG_X86_64 #define __PAGE_KERNEL_IDENT_LARGE_EXEC __PAGE_KERNEL_LARGE_EXEC #else #define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */ #define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */ #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */ #endif #ifdef CONFIG_X86_32 # include <asm/pgtable_32_types.h> #else # include <asm/pgtable_64_types.h> #endif #ifndef __ASSEMBLER__ #include <linux/types.h> /* Extracts the PFN from a (pte|pmd|pud|pgd)val_t of a 4KB page */ #define PTE_PFN_MASK ((pteval_t)PHYSICAL_PAGE_MASK) /* * Extracts the flags from a (pte|pmd|pud|pgd)val_t * This includes the protection key value. */ #define PTE_FLAGS_MASK (~PTE_PFN_MASK) typedef struct pgprot { pgprotval_t pgprot; } pgprot_t; typedef struct { pgdval_t pgd; } pgd_t; static inline pgprot_t pgprot_nx(pgprot_t prot) { return __pgprot(pgprot_val(prot) | _PAGE_NX); } #define pgprot_nx pgprot_nx #ifdef CONFIG_X86_PAE /* * PHYSICAL_PAGE_MASK might be non-constant when SME is compiled in, so we can't * use it here. */ #define PGD_PAE_PAGE_MASK ((signed long)PAGE_MASK) #define PGD_PAE_PHYS_MASK (((1ULL << __PHYSICAL_MASK_SHIFT)-1) & PGD_PAE_PAGE_MASK) /* * PAE allows Base Address, P, PWT, PCD and AVL bits to be set in PGD entries. * All other bits are Reserved MBZ */ #define PGD_ALLOWED_BITS (PGD_PAE_PHYS_MASK | _PAGE_PRESENT | \ _PAGE_PWT | _PAGE_PCD | \ _PAGE_SOFTW1 | _PAGE_SOFTW2 | _PAGE_SOFTW3) #else /* No need to mask any bits for !PAE */ #define PGD_ALLOWED_BITS (~0ULL) #endif static inline pgd_t native_make_pgd(pgdval_t val) { return (pgd_t) { val & PGD_ALLOWED_BITS }; } static inline pgdval_t native_pgd_val(pgd_t pgd) { return pgd.pgd & PGD_ALLOWED_BITS; } static inline pgdval_t pgd_flags(pgd_t pgd) { return native_pgd_val(pgd) & PTE_FLAGS_MASK; } #if CONFIG_PGTABLE_LEVELS > 4 typedef struct { p4dval_t p4d; } p4d_t; static inline p4d_t native_make_p4d(pudval_t val) { return (p4d_t) { val }; } static inline p4dval_t native_p4d_val(p4d_t p4d) { return p4d.p4d; } #else #include <asm-generic/pgtable-nop4d.h> static inline p4d_t native_make_p4d(pudval_t val) { return (p4d_t) { .pgd = native_make_pgd((pgdval_t)val) }; } static inline p4dval_t native_p4d_val(p4d_t p4d) { return native_pgd_val(p4d.pgd); } #endif #if CONFIG_PGTABLE_LEVELS > 3 typedef struct { pudval_t pud; } pud_t; static inline pud_t native_make_pud(pmdval_t val) { return (pud_t) { val }; } static inline pudval_t native_pud_val(pud_t pud) { return pud.pud; } #else #include <asm-generic/pgtable-nopud.h> static inline pud_t native_make_pud(pudval_t val) { return (pud_t) { .p4d.pgd = native_make_pgd(val) }; } static inline pudval_t native_pud_val(pud_t pud) { return native_pgd_val(pud.p4d.pgd); } #endif #if CONFIG_PGTABLE_LEVELS > 2 static inline pmd_t native_make_pmd(pmdval_t val) { return (pmd_t) { .pmd = val }; } static inline pmdval_t native_pmd_val(pmd_t pmd) { return pmd.pmd; } #else #include <asm-generic/pgtable-nopmd.h> static inline pmd_t native_make_pmd(pmdval_t val) { return (pmd_t) { .pud.p4d.pgd = native_make_pgd(val) }; } static inline pmdval_t native_pmd_val(pmd_t pmd) { return native_pgd_val(pmd.pud.p4d.pgd); } #endif static inline p4dval_t p4d_pfn_mask(p4d_t p4d) { /* No 512 GiB huge pages yet */ return PTE_PFN_MASK; } static inline p4dval_t p4d_flags_mask(p4d_t 
p4d) { return ~p4d_pfn_mask(p4d); } static inline p4dval_t p4d_flags(p4d_t p4d) { return native_p4d_val(p4d) & p4d_flags_mask(p4d); } static inline pudval_t pud_pfn_mask(pud_t pud) { if (native_pud_val(pud) & _PAGE_PSE) return PHYSICAL_PUD_PAGE_MASK; else return PTE_PFN_MASK; } static inline pudval_t pud_flags_mask(pud_t pud) { return ~pud_pfn_mask(pud); } static inline pudval_t pud_flags(pud_t pud) { return native_pud_val(pud) & pud_flags_mask(pud); } static inline pmdval_t pmd_pfn_mask(pmd_t pmd) { if (native_pmd_val(pmd) & _PAGE_PSE) return PHYSICAL_PMD_PAGE_MASK; else return PTE_PFN_MASK; } static inline pmdval_t pmd_flags_mask(pmd_t pmd) { return ~pmd_pfn_mask(pmd); } static inline pmdval_t pmd_flags(pmd_t pmd) { return native_pmd_val(pmd) & pmd_flags_mask(pmd); } static inline pte_t native_make_pte(pteval_t val) { return (pte_t) { .pte = val }; } static inline pteval_t native_pte_val(pte_t pte) { return pte.pte; } static inline pteval_t pte_flags(pte_t pte) { return native_pte_val(pte) & PTE_FLAGS_MASK; } #define __pte2cm_idx(cb) \ ((((cb) >> (_PAGE_BIT_PAT - 2)) & 4) | \ (((cb) >> (_PAGE_BIT_PCD - 1)) & 2) | \ (((cb) >> _PAGE_BIT_PWT) & 1)) #define __cm_idx2pte(i) \ ((((i) & 4) << (_PAGE_BIT_PAT - 2)) | \ (((i) & 2) << (_PAGE_BIT_PCD - 1)) | \ (((i) & 1) << _PAGE_BIT_PWT)) unsigned long cachemode2protval(enum page_cache_mode pcm); static inline pgprotval_t protval_4k_2_large(pgprotval_t val) { return (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) | ((val & _PAGE_PAT) << (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT)); } static inline pgprot_t pgprot_4k_2_large(pgprot_t pgprot) { return __pgprot(protval_4k_2_large(pgprot_val(pgprot))); } static inline pgprotval_t protval_large_2_4k(pgprotval_t val) { return (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) | ((val & _PAGE_PAT_LARGE) >> (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT)); } static inline pgprot_t pgprot_large_2_4k(pgprot_t pgprot) { return __pgprot(protval_large_2_4k(pgprot_val(pgprot))); } typedef struct page *pgtable_t; extern pteval_t __supported_pte_mask; extern pteval_t __default_kernel_pte_mask; #define pgprot_writecombine pgprot_writecombine extern pgprot_t pgprot_writecombine(pgprot_t prot); #define pgprot_writethrough pgprot_writethrough extern pgprot_t pgprot_writethrough(pgprot_t prot); /* Indicate that x86 has its own track and untrack pfn vma functions */ #define __HAVE_PFNMAP_TRACKING #define __HAVE_PHYS_MEM_ACCESS_PROT struct file; pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, unsigned long size, pgprot_t vma_prot); /* Install a pte for a particular vaddr in kernel space. */ void set_pte_vaddr(unsigned long vaddr, pte_t pte); #ifdef CONFIG_X86_32 extern void native_pagetable_init(void); #else #define native_pagetable_init paging_init #endif enum pg_level { PG_LEVEL_NONE, PG_LEVEL_4K, PG_LEVEL_2M, PG_LEVEL_1G, PG_LEVEL_512G, PG_LEVEL_256T, PG_LEVEL_NUM }; #ifdef CONFIG_PROC_FS extern void update_page_count(int level, unsigned long pages); #else static inline void update_page_count(int level, unsigned long pages) { } #endif /* * Helper function that returns the kernel pagetable entry controlling * the virtual address 'address'. NULL means no pagetable entry present. * NOTE: the return type is pte_t but if the pmd is PSE then we return it * as a pte too. 
*/ extern pte_t *lookup_address(unsigned long address, unsigned int *level); extern pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address, unsigned int *level); pte_t *lookup_address_in_pgd_attr(pgd_t *pgd, unsigned long address, unsigned int *level, bool *nx, bool *rw); extern pmd_t *lookup_pmd_address(unsigned long address); extern phys_addr_t slow_virt_to_phys(void *__address); extern int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address, unsigned numpages, unsigned long page_flags); extern int __init kernel_unmap_pages_in_pgd(pgd_t *pgd, unsigned long address, unsigned long numpages); #endif /* !__ASSEMBLER__ */ #endif /* _ASM_X86_PGTABLE_DEFS_H */
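The PAT/PCD/PWT shuffling done by __pte2cm_idx() and __cm_idx2pte() above is easier to see in isolation. The following stand-alone sketch reuses the same two macros with bit positions assumed from the conventional x86 PTE layout (_PAGE_BIT_PWT = 3, _PAGE_BIT_PCD = 4, _PAGE_BIT_PAT = 7) and only demonstrates that the 3-bit cache-mode index round-trips; it is an illustration, not part of the header.

/* Minimal user-space sketch of the __pte2cm_idx()/__cm_idx2pte() bit
 * shuffling.  Bit positions are assumptions based on the conventional
 * x86 PTE layout; only the index round-trip is demonstrated.
 */
#include <assert.h>
#include <stdio.h>

#define _PAGE_BIT_PWT 3
#define _PAGE_BIT_PCD 4
#define _PAGE_BIT_PAT 7

#define __pte2cm_idx(cb) \
	((((cb) >> (_PAGE_BIT_PAT - 2)) & 4) | \
	 (((cb) >> (_PAGE_BIT_PCD - 1)) & 2) | \
	 (((cb) >> _PAGE_BIT_PWT) & 1))

#define __cm_idx2pte(i) \
	((((i) & 4) << (_PAGE_BIT_PAT - 2)) | \
	 (((i) & 2) << (_PAGE_BIT_PCD - 1)) | \
	 (((i) & 1) << _PAGE_BIT_PWT))

int main(void)
{
	unsigned long pwt = 1UL << _PAGE_BIT_PWT;
	unsigned long pcd = 1UL << _PAGE_BIT_PCD;

	/* PCD|PWT selects PAT-table entry 3 (UC with the default PAT setup). */
	assert(__pte2cm_idx(pcd | pwt) == 3);

	/* The conversion is reversible for every 3-bit index. */
	for (unsigned int i = 0; i < 8; i++)
		assert(__pte2cm_idx(__cm_idx2pte(i)) == i);

	printf("PAT/PCD/PWT <-> cache-mode index round-trip OK\n");
	return 0;
}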
// SPDX-License-Identifier: GPL-2.0 #include <linux/mutex.h> #include <linux/netdevice.h> #include <linux/xarray.h> #include <net/busy_poll.h> #include <net/net_debug.h> #include <net/netdev_rx_queue.h> #include <net/page_pool/helpers.h> #include <net/page_pool/types.h> #include <net/page_pool/memory_provider.h> #include <net/sock.h> #include "page_pool_priv.h" #include "netdev-genl-gen.h" static DEFINE_XARRAY_FLAGS(page_pools, XA_FLAGS_ALLOC1); /* Protects: page_pools, netdevice->page_pools, pool->p.napi, pool->slow.netdev, * pool->user. * Ordering: inside rtnl_lock */ DEFINE_MUTEX(page_pools_lock); /* Page pools are only reachable from user space (via netlink) if they are * linked to a netdev at creation time.
Following page pool "visibility" * states are possible: * - normal * - user.list: linked to real netdev, netdev: real netdev * - orphaned - real netdev has disappeared * - user.list: linked to lo, netdev: lo * - invisible - either (a) created without netdev linking, (b) unlisted due * to error, or (c) the entire namespace which owned this pool disappeared * - user.list: unhashed, netdev: unknown */ typedef int (*pp_nl_fill_cb)(struct sk_buff *rsp, const struct page_pool *pool, const struct genl_info *info); static int netdev_nl_page_pool_get_do(struct genl_info *info, u32 id, pp_nl_fill_cb fill) { struct page_pool *pool; struct sk_buff *rsp; int err; mutex_lock(&page_pools_lock); pool = xa_load(&page_pools, id); if (!pool || hlist_unhashed(&pool->user.list) || !net_eq(dev_net(pool->slow.netdev), genl_info_net(info))) { err = -ENOENT; goto err_unlock; } rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!rsp) { err = -ENOMEM; goto err_unlock; } err = fill(rsp, pool, info); if (err) goto err_free_msg; mutex_unlock(&page_pools_lock); return genlmsg_reply(rsp, info); err_free_msg: nlmsg_free(rsp); err_unlock: mutex_unlock(&page_pools_lock); return err; } struct page_pool_dump_cb { unsigned long ifindex; u32 pp_id; }; static int netdev_nl_page_pool_get_dump(struct sk_buff *skb, struct netlink_callback *cb, pp_nl_fill_cb fill) { struct page_pool_dump_cb *state = (void *)cb->ctx; const struct genl_info *info = genl_info_dump(cb); struct net *net = sock_net(skb->sk); struct net_device *netdev; struct page_pool *pool; int err = 0; rtnl_lock(); mutex_lock(&page_pools_lock); for_each_netdev_dump(net, netdev, state->ifindex) { hlist_for_each_entry(pool, &netdev->page_pools, user.list) { if (state->pp_id && state->pp_id < pool->user.id) continue; state->pp_id = pool->user.id; err = fill(skb, pool, info); if (err) goto out; } state->pp_id = 0; } out: mutex_unlock(&page_pools_lock); rtnl_unlock(); return err; } static int page_pool_nl_stats_fill(struct sk_buff *rsp, const struct page_pool *pool, const struct genl_info *info) { #ifdef CONFIG_PAGE_POOL_STATS struct page_pool_stats stats = {}; struct nlattr *nest; void *hdr; if (!page_pool_get_stats(pool, &stats)) return 0; hdr = genlmsg_iput(rsp, info); if (!hdr) return -EMSGSIZE; nest = nla_nest_start(rsp, NETDEV_A_PAGE_POOL_STATS_INFO); if (nla_put_uint(rsp, NETDEV_A_PAGE_POOL_ID, pool->user.id) || (pool->slow.netdev->ifindex != LOOPBACK_IFINDEX && nla_put_u32(rsp, NETDEV_A_PAGE_POOL_IFINDEX, pool->slow.netdev->ifindex))) goto err_cancel_nest; nla_nest_end(rsp, nest); if (nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_ALLOC_FAST, stats.alloc_stats.fast) || nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_ALLOC_SLOW, stats.alloc_stats.slow) || nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_ALLOC_SLOW_HIGH_ORDER, stats.alloc_stats.slow_high_order) || nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_ALLOC_EMPTY, stats.alloc_stats.empty) || nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_ALLOC_REFILL, stats.alloc_stats.refill) || nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_ALLOC_WAIVE, stats.alloc_stats.waive) || nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_RECYCLE_CACHED, stats.recycle_stats.cached) || nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_RECYCLE_CACHE_FULL, stats.recycle_stats.cache_full) || nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_RECYCLE_RING, stats.recycle_stats.ring) || nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_RECYCLE_RING_FULL, stats.recycle_stats.ring_full) || nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_RECYCLE_RELEASED_REFCNT, 
stats.recycle_stats.released_refcnt)) goto err_cancel_msg; genlmsg_end(rsp, hdr); return 0; err_cancel_nest: nla_nest_cancel(rsp, nest); err_cancel_msg: genlmsg_cancel(rsp, hdr); return -EMSGSIZE; #else GENL_SET_ERR_MSG(info, "kernel built without CONFIG_PAGE_POOL_STATS"); return -EOPNOTSUPP; #endif } int netdev_nl_page_pool_stats_get_doit(struct sk_buff *skb, struct genl_info *info) { struct nlattr *tb[ARRAY_SIZE(netdev_page_pool_info_nl_policy)]; struct nlattr *nest; int err; u32 id; if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_PAGE_POOL_STATS_INFO)) return -EINVAL; nest = info->attrs[NETDEV_A_PAGE_POOL_STATS_INFO]; err = nla_parse_nested(tb, ARRAY_SIZE(tb) - 1, nest, netdev_page_pool_info_nl_policy, info->extack); if (err) return err; if (NL_REQ_ATTR_CHECK(info->extack, nest, tb, NETDEV_A_PAGE_POOL_ID)) return -EINVAL; if (tb[NETDEV_A_PAGE_POOL_IFINDEX]) { NL_SET_ERR_MSG_ATTR(info->extack, tb[NETDEV_A_PAGE_POOL_IFINDEX], "selecting by ifindex not supported"); return -EINVAL; } id = nla_get_uint(tb[NETDEV_A_PAGE_POOL_ID]); return netdev_nl_page_pool_get_do(info, id, page_pool_nl_stats_fill); } int netdev_nl_page_pool_stats_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb) { return netdev_nl_page_pool_get_dump(skb, cb, page_pool_nl_stats_fill); } static int page_pool_nl_fill(struct sk_buff *rsp, const struct page_pool *pool, const struct genl_info *info) { size_t inflight, refsz; unsigned int napi_id; void *hdr; hdr = genlmsg_iput(rsp, info); if (!hdr) return -EMSGSIZE; if (nla_put_uint(rsp, NETDEV_A_PAGE_POOL_ID, pool->user.id)) goto err_cancel; if (pool->slow.netdev->ifindex != LOOPBACK_IFINDEX && nla_put_u32(rsp, NETDEV_A_PAGE_POOL_IFINDEX, pool->slow.netdev->ifindex)) goto err_cancel; napi_id = pool->p.napi ? READ_ONCE(pool->p.napi->napi_id) : 0; if (napi_id_valid(napi_id) && nla_put_uint(rsp, NETDEV_A_PAGE_POOL_NAPI_ID, napi_id)) goto err_cancel; inflight = page_pool_inflight(pool, false); refsz = PAGE_SIZE << pool->p.order; if (nla_put_uint(rsp, NETDEV_A_PAGE_POOL_INFLIGHT, inflight) || nla_put_uint(rsp, NETDEV_A_PAGE_POOL_INFLIGHT_MEM, inflight * refsz)) goto err_cancel; if (pool->user.detach_time && nla_put_uint(rsp, NETDEV_A_PAGE_POOL_DETACH_TIME, pool->user.detach_time)) goto err_cancel; if (pool->mp_ops && pool->mp_ops->nl_fill(pool->mp_priv, rsp, NULL)) goto err_cancel; genlmsg_end(rsp, hdr); return 0; err_cancel: genlmsg_cancel(rsp, hdr); return -EMSGSIZE; } static void netdev_nl_page_pool_event(const struct page_pool *pool, u32 cmd) { struct genl_info info; struct sk_buff *ntf; struct net *net; lockdep_assert_held(&page_pools_lock); /* 'invisible' page pools don't matter */ if (hlist_unhashed(&pool->user.list)) return; net = dev_net(pool->slow.netdev); if (!genl_has_listeners(&netdev_nl_family, net, NETDEV_NLGRP_PAGE_POOL)) return; genl_info_init_ntf(&info, &netdev_nl_family, cmd); ntf = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!ntf) return; if (page_pool_nl_fill(ntf, pool, &info)) { nlmsg_free(ntf); return; } genlmsg_multicast_netns(&netdev_nl_family, net, ntf, 0, NETDEV_NLGRP_PAGE_POOL, GFP_KERNEL); } int netdev_nl_page_pool_get_doit(struct sk_buff *skb, struct genl_info *info) { u32 id; if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_PAGE_POOL_ID)) return -EINVAL; id = nla_get_uint(info->attrs[NETDEV_A_PAGE_POOL_ID]); return netdev_nl_page_pool_get_do(info, id, page_pool_nl_fill); } int netdev_nl_page_pool_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb) { return netdev_nl_page_pool_get_dump(skb, cb, page_pool_nl_fill); } int page_pool_list(struct 
page_pool *pool) { static u32 id_alloc_next; int err; mutex_lock(&page_pools_lock); err = xa_alloc_cyclic(&page_pools, &pool->user.id, pool, xa_limit_32b, &id_alloc_next, GFP_KERNEL); if (err < 0) goto err_unlock; INIT_HLIST_NODE(&pool->user.list); if (pool->slow.netdev) { hlist_add_head(&pool->user.list, &pool->slow.netdev->page_pools); netdev_nl_page_pool_event(pool, NETDEV_CMD_PAGE_POOL_ADD_NTF); } mutex_unlock(&page_pools_lock); return 0; err_unlock: mutex_unlock(&page_pools_lock); return err; } void page_pool_detached(struct page_pool *pool) { mutex_lock(&page_pools_lock); pool->user.detach_time = ktime_get_boottime_seconds(); netdev_nl_page_pool_event(pool, NETDEV_CMD_PAGE_POOL_CHANGE_NTF); mutex_unlock(&page_pools_lock); } void page_pool_unlist(struct page_pool *pool) { mutex_lock(&page_pools_lock); netdev_nl_page_pool_event(pool, NETDEV_CMD_PAGE_POOL_DEL_NTF); xa_erase(&page_pools, pool->user.id); if (!hlist_unhashed(&pool->user.list)) hlist_del(&pool->user.list); mutex_unlock(&page_pools_lock); } int page_pool_check_memory_provider(struct net_device *dev, struct netdev_rx_queue *rxq) { void *binding = rxq->mp_params.mp_priv; struct page_pool *pool; struct hlist_node *n; if (!binding) return 0; mutex_lock(&page_pools_lock); hlist_for_each_entry_safe(pool, n, &dev->page_pools, user.list) { if (pool->mp_priv != binding) continue; if (pool->slow.queue_idx == get_netdev_rx_queue_index(rxq)) { mutex_unlock(&page_pools_lock); return 0; } } mutex_unlock(&page_pools_lock); return -ENODATA; } static void page_pool_unreg_netdev_wipe(struct net_device *netdev) { struct page_pool *pool; struct hlist_node *n; mutex_lock(&page_pools_lock); hlist_for_each_entry_safe(pool, n, &netdev->page_pools, user.list) { hlist_del_init(&pool->user.list); pool->slow.netdev = NET_PTR_POISON; } mutex_unlock(&page_pools_lock); } static void page_pool_unreg_netdev(struct net_device *netdev) { struct page_pool *pool, *last; struct net_device *lo; lo = dev_net(netdev)->loopback_dev; mutex_lock(&page_pools_lock); last = NULL; hlist_for_each_entry(pool, &netdev->page_pools, user.list) { pool->slow.netdev = lo; netdev_nl_page_pool_event(pool, NETDEV_CMD_PAGE_POOL_CHANGE_NTF); last = pool; } if (last) hlist_splice_init(&netdev->page_pools, &last->user.list, &lo->page_pools); mutex_unlock(&page_pools_lock); } static int page_pool_netdevice_event(struct notifier_block *nb, unsigned long event, void *ptr) { struct net_device *netdev = netdev_notifier_info_to_dev(ptr); if (event != NETDEV_UNREGISTER) return NOTIFY_DONE; if (hlist_empty(&netdev->page_pools)) return NOTIFY_OK; if (netdev->ifindex != LOOPBACK_IFINDEX) page_pool_unreg_netdev(netdev); else page_pool_unreg_netdev_wipe(netdev); return NOTIFY_OK; } static struct notifier_block page_pool_netdevice_nb = { .notifier_call = page_pool_netdevice_event, }; static int __init page_pool_user_init(void) { return register_netdevice_notifier(&page_pool_netdevice_nb); } subsys_initcall(page_pool_user_init);
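The file above ends by registering a netdevice notifier so that a device's page pools can be re-homed to loopback (or wiped) when the device unregisters. As a rough illustration of that pattern only, here is a minimal stand-alone module sketch that subscribes to the same NETDEV_UNREGISTER event; the module name and log message are invented for the example.

/* Minimal sketch of the notifier pattern used above: a module that
 * watches NETDEV_UNREGISTER, analogous to page_pool_netdevice_event().
 * Illustrative only; not part of the file above.
 */
#include <linux/module.h>
#include <linux/netdevice.h>

static int demo_netdev_event(struct notifier_block *nb,
			     unsigned long event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);

	if (event == NETDEV_UNREGISTER)
		pr_info("demo: %s is going away\n", netdev->name);

	return NOTIFY_DONE;
}

static struct notifier_block demo_netdev_nb = {
	.notifier_call = demo_netdev_event,
};

static int __init demo_init(void)
{
	return register_netdevice_notifier(&demo_netdev_nb);
}

static void __exit demo_exit(void)
{
	unregister_netdevice_notifier(&demo_netdev_nb);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");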
// SPDX-License-Identifier: GPL-2.0 /* * linux/mm/msync.c * * Copyright (C) 1994-1999 Linus Torvalds */ /* * The msync() system call. */ #include <linux/fs.h> #include <linux/mm.h> #include <linux/mman.h> #include <linux/file.h> #include <linux/syscalls.h> #include <linux/sched.h> /* * MS_SYNC syncs the entire file - including mappings. * * MS_ASYNC does not start I/O (it used to, up to 2.5.67). * Nor does it mark the relevant pages dirty (it used to up to 2.6.17). * Now it doesn't do anything, since dirty pages are properly tracked. * * The application may now run fsync() to * write out the dirty pages and wait on the writeout and check the result. * Or the application may run fadvise(FADV_DONTNEED) against the fd to start * async writeout immediately. * So by _not_ starting I/O in MS_ASYNC we provide complete flexibility to * applications. */ SYSCALL_DEFINE3(msync, unsigned long, start, size_t, len, int, flags) { unsigned long end; struct mm_struct *mm = current->mm; struct vm_area_struct *vma; int unmapped_error = 0; int error = -EINVAL; start = untagged_addr(start); if (flags & ~(MS_ASYNC | MS_INVALIDATE | MS_SYNC)) goto out; if (offset_in_page(start)) goto out; if ((flags & MS_ASYNC) && (flags & MS_SYNC)) goto out; error = -ENOMEM; len = (len + ~PAGE_MASK) & PAGE_MASK; end = start + len; if (end < start) goto out; error = 0; if (end == start) goto out; /* * If the interval [start,end) covers some unmapped address ranges, * just ignore them, but return -ENOMEM at the end. Besides, if the * flag is MS_ASYNC (w/o MS_INVALIDATE) the result would be -ENOMEM * anyway and there is nothing left to do, so return immediately. */ mmap_read_lock(mm); vma = find_vma(mm, start); for (;;) { struct file *file; loff_t fstart, fend; /* Still start < end. */ error = -ENOMEM; if (!vma) goto out_unlock; /* Here start < vma->vm_end. */ if (start < vma->vm_start) { if (flags == MS_ASYNC) goto out_unlock; start = vma->vm_start; if (start >= end) goto out_unlock; unmapped_error = -ENOMEM; } /* Here vma->vm_start <= start < vma->vm_end. */ if ((flags & MS_INVALIDATE) && (vma->vm_flags & VM_LOCKED)) { error = -EBUSY; goto out_unlock; } file = vma->vm_file; fstart = (start - vma->vm_start) + ((loff_t)vma->vm_pgoff << PAGE_SHIFT); fend = fstart + (min(end, vma->vm_end) - start) - 1; start = vma->vm_end; if ((flags & MS_SYNC) && file && (vma->vm_flags & VM_SHARED)) { get_file(file); mmap_read_unlock(mm); error = vfs_fsync_range(file, fstart, fend, 1); fput(file); if (error || start >= end) goto out; mmap_read_lock(mm); vma = find_vma(mm, start); } else { if (start >= end) { error = 0; goto out_unlock; } vma = find_vma(mm, vma->vm_end); } } out_unlock: mmap_read_unlock(mm); out: return error ? : unmapped_error; }
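From user space, the typical caller of this syscall is a process that writes through a MAP_SHARED file mapping and then wants the data on disk. A small illustrative sketch follows; the file name "data.bin" and the 4 KiB size are arbitrary choices for the example.

/* User-space sketch of the path served by the syscall above: write
 * through a MAP_SHARED mapping and flush it with MS_SYNC.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("data.bin", O_RDWR | O_CREAT, 0644);
	if (fd < 0 || ftruncate(fd, 4096) < 0)
		return 1;

	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return 1;

	memcpy(p, "hello", 5);

	/* MS_SYNC waits for writeback; MS_ASYNC would return immediately. */
	if (msync(p, 4096, MS_SYNC) < 0)
		perror("msync");

	munmap(p, 4096);
	close(fd);
	return 0;
}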
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __CEPH_DECODE_H #define __CEPH_DECODE_H #include <linux/err.h> #include <linux/bug.h> #include <linux/slab.h> #include <linux/time.h> #include <linux/unaligned.h> #include <linux/ceph/types.h> /* * in all cases, * void **p pointer to position pointer * void *end pointer to end of buffer (last byte + 1) */ static inline u64 ceph_decode_64(void **p) { u64 v = get_unaligned_le64(*p); *p += sizeof(u64); return v; } static inline u32 ceph_decode_32(void **p) { u32 v = get_unaligned_le32(*p); *p += sizeof(u32); return v; } static inline u16 ceph_decode_16(void **p) { u16 v = get_unaligned_le16(*p); *p += sizeof(u16); return v; } static inline u8 ceph_decode_8(void **p) { u8 v = *(u8 *)*p; (*p)++; return v; } static inline void ceph_decode_copy(void **p, void *pv, size_t n) { memcpy(pv, *p, n); *p += n; } /* * bounds check input. */ static inline bool ceph_has_room(void **p, void *end, size_t n) { return end >= *p && n <= end - *p; } #define ceph_decode_need(p, end, n, bad) \ do { \ if (!likely(ceph_has_room(p, end, n))) \ goto bad; \ } while (0) #define ceph_decode_64_safe(p, end, v, bad) \ do { \ ceph_decode_need(p, end, sizeof(u64), bad); \ v = ceph_decode_64(p); \ } while (0) #define ceph_decode_32_safe(p, end, v, bad) \ do { \ ceph_decode_need(p, end, sizeof(u32), bad); \ v = ceph_decode_32(p); \ } while (0) #define ceph_decode_16_safe(p, end, v, bad) \ do { \ ceph_decode_need(p, end, sizeof(u16), bad); \ v = ceph_decode_16(p); \ } while (0) #define ceph_decode_8_safe(p, end, v, bad) \ do { \ ceph_decode_need(p, end, sizeof(u8), bad); \ v = ceph_decode_8(p); \ } while (0) #define ceph_decode_copy_safe(p, end, pv, n, bad) \ do { \ ceph_decode_need(p, end, n, bad); \ ceph_decode_copy(p, pv, n); \ } while (0) /* * Allocate a buffer big enough to hold the wire-encoded string, and * decode the string into it. The resulting string will always be * terminated with '\0'. If successful, *p will be advanced * past the decoded data.
Also, if lenp is not a null pointer, the * length (not including the terminating '\0') will be recorded in * *lenp. Note that a zero-length string is a valid return value. * * Returns a pointer to the newly-allocated string buffer, or a * pointer-coded errno if an error occurs. Neither *p nor *lenp * will have been updated if an error is returned. * * There are two possible failures: * - converting the string would require accessing memory at or * beyond the "end" pointer provided (-ERANGE) * - memory could not be allocated for the result (-ENOMEM) */ static inline char *ceph_extract_encoded_string(void **p, void *end, size_t *lenp, gfp_t gfp) { u32 len; void *sp = *p; char *buf; ceph_decode_32_safe(&sp, end, len, bad); if (!ceph_has_room(&sp, end, len)) goto bad; buf = kmalloc(len + 1, gfp); if (!buf) return ERR_PTR(-ENOMEM); if (len) memcpy(buf, sp, len); buf[len] = '\0'; *p = (char *) *p + sizeof (u32) + len; if (lenp) *lenp = (size_t) len; return buf; bad: return ERR_PTR(-ERANGE); } /* * skip helpers */ #define ceph_decode_skip_n(p, end, n, bad) \ do { \ ceph_decode_need(p, end, n, bad); \ *p += n; \ } while (0) #define ceph_decode_skip_64(p, end, bad) \ ceph_decode_skip_n(p, end, sizeof(u64), bad) #define ceph_decode_skip_32(p, end, bad) \ ceph_decode_skip_n(p, end, sizeof(u32), bad) #define ceph_decode_skip_16(p, end, bad) \ ceph_decode_skip_n(p, end, sizeof(u16), bad) #define ceph_decode_skip_8(p, end, bad) \ ceph_decode_skip_n(p, end, sizeof(u8), bad) #define ceph_decode_skip_string(p, end, bad) \ do { \ u32 len; \ \ ceph_decode_32_safe(p, end, len, bad); \ ceph_decode_skip_n(p, end, len, bad); \ } while (0) #define ceph_decode_skip_set(p, end, type, bad) \ do { \ u32 len; \ \ ceph_decode_32_safe(p, end, len, bad); \ while (len--) \ ceph_decode_skip_##type(p, end, bad); \ } while (0) #define ceph_decode_skip_map(p, end, ktype, vtype, bad) \ do { \ u32 len; \ \ ceph_decode_32_safe(p, end, len, bad); \ while (len--) { \ ceph_decode_skip_##ktype(p, end, bad); \ ceph_decode_skip_##vtype(p, end, bad); \ } \ } while (0) #define ceph_decode_skip_map_of_map(p, end, ktype1, ktype2, vtype2, bad) \ do { \ u32 len; \ \ ceph_decode_32_safe(p, end, len, bad); \ while (len--) { \ ceph_decode_skip_##ktype1(p, end, bad); \ ceph_decode_skip_map(p, end, ktype2, vtype2, bad); \ } \ } while (0) /* * struct ceph_timespec <-> struct timespec64 */ static inline void ceph_decode_timespec64(struct timespec64 *ts, const struct ceph_timespec *tv) { /* * This will still overflow in year 2106. We could extend * the protocol to steal two more bits from tv_nsec to * add three more 136 year epochs after that the way ext4 * does if necessary. 
*/ ts->tv_sec = (time64_t)le32_to_cpu(tv->tv_sec); ts->tv_nsec = (long)le32_to_cpu(tv->tv_nsec); } static inline void ceph_encode_timespec64(struct ceph_timespec *tv, const struct timespec64 *ts) { tv->tv_sec = cpu_to_le32((u32)ts->tv_sec); tv->tv_nsec = cpu_to_le32((u32)ts->tv_nsec); } /* * sockaddr_storage <-> ceph_sockaddr */ #define CEPH_ENTITY_ADDR_TYPE_NONE 0 #define CEPH_ENTITY_ADDR_TYPE_LEGACY __cpu_to_le32(1) #define CEPH_ENTITY_ADDR_TYPE_MSGR2 __cpu_to_le32(2) #define CEPH_ENTITY_ADDR_TYPE_ANY __cpu_to_le32(3) static inline void ceph_encode_banner_addr(struct ceph_entity_addr *a) { __be16 ss_family = htons(a->in_addr.ss_family); a->in_addr.ss_family = *(__u16 *)&ss_family; /* Banner addresses require TYPE_NONE */ a->type = CEPH_ENTITY_ADDR_TYPE_NONE; } static inline void ceph_decode_banner_addr(struct ceph_entity_addr *a) { __be16 ss_family = *(__be16 *)&a->in_addr.ss_family; a->in_addr.ss_family = ntohs(ss_family); WARN_ON(a->in_addr.ss_family == 512); a->type = CEPH_ENTITY_ADDR_TYPE_LEGACY; } extern int ceph_decode_entity_addr(void **p, void *end, struct ceph_entity_addr *addr); int ceph_decode_entity_addrvec(void **p, void *end, bool msgr2, struct ceph_entity_addr *addr); int ceph_entity_addr_encoding_len(const struct ceph_entity_addr *addr); void ceph_encode_entity_addr(void **p, const struct ceph_entity_addr *addr); /* * encoders */ static inline void ceph_encode_64(void **p, u64 v) { put_unaligned_le64(v, (__le64 *)*p); *p += sizeof(u64); } static inline void ceph_encode_32(void **p, u32 v) { put_unaligned_le32(v, (__le32 *)*p); *p += sizeof(u32); } static inline void ceph_encode_16(void **p, u16 v) { put_unaligned_le16(v, (__le16 *)*p); *p += sizeof(u16); } static inline void ceph_encode_8(void **p, u8 v) { *(u8 *)*p = v; (*p)++; } static inline void ceph_encode_copy(void **p, const void *s, int len) { memcpy(*p, s, len); *p += len; } /* * filepath, string encoders */ static inline void ceph_encode_filepath(void **p, void *end, u64 ino, const char *path) { u32 len = path ? strlen(path) : 0; BUG_ON(*p + 1 + sizeof(ino) + sizeof(len) + len > end); ceph_encode_8(p, 1); ceph_encode_64(p, ino); ceph_encode_32(p, len); if (len) memcpy(*p, path, len); *p += len; } static inline void ceph_encode_string(void **p, void *end, const char *s, u32 len) { BUG_ON(*p + sizeof(len) + len > end); ceph_encode_32(p, len); if (len) memcpy(*p, s, len); *p += len; } /* * version and length starting block encoders/decoders */ /* current code version (u8) + compat code version (u8) + len of struct (u32) */ #define CEPH_ENCODING_START_BLK_LEN 6 /** * ceph_start_encoding - start encoding block * @struct_v: current (code) version of the encoding * @struct_compat: oldest code version that can decode it * @struct_len: length of struct encoding */ static inline void ceph_start_encoding(void **p, u8 struct_v, u8 struct_compat, u32 struct_len) { ceph_encode_8(p, struct_v); ceph_encode_8(p, struct_compat); ceph_encode_32(p, struct_len); } /** * ceph_start_decoding - start decoding block * @v: current version of the encoding that the code supports * @name: name of the struct (free-form) * @struct_v: out param for the encoding version * @struct_len: out param for the length of struct encoding * * Validates the length of struct encoding, so unsafe ceph_decode_* * variants can be used for decoding. 
*/ static inline int ceph_start_decoding(void **p, void *end, u8 v, const char *name, u8 *struct_v, u32 *struct_len) { u8 struct_compat; ceph_decode_need(p, end, CEPH_ENCODING_START_BLK_LEN, bad); *struct_v = ceph_decode_8(p); struct_compat = ceph_decode_8(p); if (v < struct_compat) { pr_warn("got struct_v %d struct_compat %d > %d of %s\n", *struct_v, struct_compat, v, name); return -EINVAL; } *struct_len = ceph_decode_32(p); ceph_decode_need(p, end, *struct_len, bad); return 0; bad: return -ERANGE; } #define ceph_encode_need(p, end, n, bad) \ do { \ if (!likely(ceph_has_room(p, end, n))) \ goto bad; \ } while (0) #define ceph_encode_64_safe(p, end, v, bad) \ do { \ ceph_encode_need(p, end, sizeof(u64), bad); \ ceph_encode_64(p, v); \ } while (0) #define ceph_encode_32_safe(p, end, v, bad) \ do { \ ceph_encode_need(p, end, sizeof(u32), bad); \ ceph_encode_32(p, v); \ } while (0) #define ceph_encode_16_safe(p, end, v, bad) \ do { \ ceph_encode_need(p, end, sizeof(u16), bad); \ ceph_encode_16(p, v); \ } while (0) #define ceph_encode_8_safe(p, end, v, bad) \ do { \ ceph_encode_need(p, end, sizeof(u8), bad); \ ceph_encode_8(p, v); \ } while (0) #define ceph_encode_copy_safe(p, end, pv, n, bad) \ do { \ ceph_encode_need(p, end, n, bad); \ ceph_encode_copy(p, pv, n); \ } while (0) #define ceph_encode_string_safe(p, end, s, n, bad) \ do { \ ceph_encode_need(p, end, n, bad); \ ceph_encode_string(p, end, s, n); \ } while (0) #endif
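As a usage sketch of the *_safe helpers defined in this header: the function below decodes a hypothetical wire blob consisting of a u32 id followed by a length-prefixed name. struct demo_item and demo_decode() are invented for illustration; only the ceph_decode_* calls come from the header above.

/* Decode { u32 id; u32 name_len; char name[name_len]; } without ever
 * reading past 'end'.  The struct and function are illustrative only.
 */
struct demo_item {
	u32 id;
	char name[32];
};

static int demo_decode(void **p, void *end, struct demo_item *item)
{
	u32 name_len;

	ceph_decode_32_safe(p, end, item->id, bad);
	ceph_decode_32_safe(p, end, name_len, bad);

	if (name_len >= sizeof(item->name))
		goto bad;

	/* Bounds-check once, then copy the raw name bytes. */
	ceph_decode_need(p, end, name_len, bad);
	ceph_decode_copy(p, item->name, name_len);
	item->name[name_len] = '\0';

	return 0;

bad:
	return -ERANGE;
}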
// SPDX-License-Identifier: GPL-2.0-or-later /* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * This file implements the various access functions for the * PROC file system. It is mainly used for debugging and * statistics. * * Authors: Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> * Gerald J. Heim, <heim@peanuts.informatik.uni-tuebingen.de> * Fred Baumgarten, <dc6iq@insu1.etec.uni-karlsruhe.de> * Erik Schoenfelder, <schoenfr@ibr.cs.tu-bs.de> * * Fixes: * Alan Cox : UDP sockets show the rxqueue/txqueue * using hint flag for the netinfo. * Pauline Middelink : identd support * Alan Cox : Make /proc safer. * Erik Schoenfelder : /proc/net/snmp * Alan Cox : Handle dead sockets properly. * Gerhard Koerting : Show both timers * Alan Cox : Allow inode to be NULL (kernel socket) * Andi Kleen : Add support for open_requests and * split functions for more readability. * Andi Kleen : Add support for /proc/net/netstat * Arnaldo C.
Melo : Convert to seq_file */ #include <linux/types.h> #include <net/net_namespace.h> #include <net/icmp.h> #include <net/protocol.h> #include <net/tcp.h> #include <net/mptcp.h> #include <net/proto_memory.h> #include <net/udp.h> #include <net/udplite.h> #include <linux/bottom_half.h> #include <linux/inetdevice.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/export.h> #include <net/sock.h> #include <net/raw.h> #define TCPUDP_MIB_MAX MAX_T(u32, UDP_MIB_MAX, TCP_MIB_MAX) /* * Report socket allocation statistics [mea@utu.fi] */ static int sockstat_seq_show(struct seq_file *seq, void *v) { struct net *net = seq->private; int orphans, sockets; orphans = tcp_orphan_count_sum(); sockets = proto_sockets_allocated_sum_positive(&tcp_prot); socket_seq_show(seq); seq_printf(seq, "TCP: inuse %d orphan %d tw %d alloc %d mem %ld\n", sock_prot_inuse_get(net, &tcp_prot), orphans, refcount_read(&net->ipv4.tcp_death_row.tw_refcount) - 1, sockets, proto_memory_allocated(&tcp_prot)); seq_printf(seq, "UDP: inuse %d mem %ld\n", sock_prot_inuse_get(net, &udp_prot), proto_memory_allocated(&udp_prot)); seq_printf(seq, "UDPLITE: inuse %d\n", sock_prot_inuse_get(net, &udplite_prot)); seq_printf(seq, "RAW: inuse %d\n", sock_prot_inuse_get(net, &raw_prot)); seq_printf(seq, "FRAG: inuse %u memory %lu\n", atomic_read(&net->ipv4.fqdir->rhashtable.nelems), frag_mem_limit(net->ipv4.fqdir)); return 0; } /* snmp items */ static const struct snmp_mib snmp4_ipstats_list[] = { SNMP_MIB_ITEM("InReceives", IPSTATS_MIB_INPKTS), SNMP_MIB_ITEM("InHdrErrors", IPSTATS_MIB_INHDRERRORS), SNMP_MIB_ITEM("InAddrErrors", IPSTATS_MIB_INADDRERRORS), SNMP_MIB_ITEM("ForwDatagrams", IPSTATS_MIB_OUTFORWDATAGRAMS), SNMP_MIB_ITEM("InUnknownProtos", IPSTATS_MIB_INUNKNOWNPROTOS), SNMP_MIB_ITEM("InDiscards", IPSTATS_MIB_INDISCARDS), SNMP_MIB_ITEM("InDelivers", IPSTATS_MIB_INDELIVERS), SNMP_MIB_ITEM("OutRequests", IPSTATS_MIB_OUTREQUESTS), SNMP_MIB_ITEM("OutDiscards", IPSTATS_MIB_OUTDISCARDS), SNMP_MIB_ITEM("OutNoRoutes", IPSTATS_MIB_OUTNOROUTES), SNMP_MIB_ITEM("ReasmTimeout", IPSTATS_MIB_REASMTIMEOUT), SNMP_MIB_ITEM("ReasmReqds", IPSTATS_MIB_REASMREQDS), SNMP_MIB_ITEM("ReasmOKs", IPSTATS_MIB_REASMOKS), SNMP_MIB_ITEM("ReasmFails", IPSTATS_MIB_REASMFAILS), SNMP_MIB_ITEM("FragOKs", IPSTATS_MIB_FRAGOKS), SNMP_MIB_ITEM("FragFails", IPSTATS_MIB_FRAGFAILS), SNMP_MIB_ITEM("FragCreates", IPSTATS_MIB_FRAGCREATES), SNMP_MIB_ITEM("OutTransmits", IPSTATS_MIB_OUTPKTS), SNMP_MIB_SENTINEL }; /* Following items are displayed in /proc/net/netstat */ static const struct snmp_mib snmp4_ipextstats_list[] = { SNMP_MIB_ITEM("InNoRoutes", IPSTATS_MIB_INNOROUTES), SNMP_MIB_ITEM("InTruncatedPkts", IPSTATS_MIB_INTRUNCATEDPKTS), SNMP_MIB_ITEM("InMcastPkts", IPSTATS_MIB_INMCASTPKTS), SNMP_MIB_ITEM("OutMcastPkts", IPSTATS_MIB_OUTMCASTPKTS), SNMP_MIB_ITEM("InBcastPkts", IPSTATS_MIB_INBCASTPKTS), SNMP_MIB_ITEM("OutBcastPkts", IPSTATS_MIB_OUTBCASTPKTS), SNMP_MIB_ITEM("InOctets", IPSTATS_MIB_INOCTETS), SNMP_MIB_ITEM("OutOctets", IPSTATS_MIB_OUTOCTETS), SNMP_MIB_ITEM("InMcastOctets", IPSTATS_MIB_INMCASTOCTETS), SNMP_MIB_ITEM("OutMcastOctets", IPSTATS_MIB_OUTMCASTOCTETS), SNMP_MIB_ITEM("InBcastOctets", IPSTATS_MIB_INBCASTOCTETS), SNMP_MIB_ITEM("OutBcastOctets", IPSTATS_MIB_OUTBCASTOCTETS), /* Non RFC4293 fields */ SNMP_MIB_ITEM("InCsumErrors", IPSTATS_MIB_CSUMERRORS), SNMP_MIB_ITEM("InNoECTPkts", IPSTATS_MIB_NOECTPKTS), SNMP_MIB_ITEM("InECT1Pkts", IPSTATS_MIB_ECT1PKTS), SNMP_MIB_ITEM("InECT0Pkts", IPSTATS_MIB_ECT0PKTS), SNMP_MIB_ITEM("InCEPkts", 
IPSTATS_MIB_CEPKTS), SNMP_MIB_ITEM("ReasmOverlaps", IPSTATS_MIB_REASM_OVERLAPS), SNMP_MIB_SENTINEL }; static const struct { const char *name; int index; } icmpmibmap[] = { { "DestUnreachs", ICMP_DEST_UNREACH }, { "TimeExcds", ICMP_TIME_EXCEEDED }, { "ParmProbs", ICMP_PARAMETERPROB }, { "SrcQuenchs", ICMP_SOURCE_QUENCH }, { "Redirects", ICMP_REDIRECT }, { "Echos", ICMP_ECHO }, { "EchoReps", ICMP_ECHOREPLY }, { "Timestamps", ICMP_TIMESTAMP }, { "TimestampReps", ICMP_TIMESTAMPREPLY }, { "AddrMasks", ICMP_ADDRESS }, { "AddrMaskReps", ICMP_ADDRESSREPLY }, { NULL, 0 } }; static const struct snmp_mib snmp4_tcp_list[] = { SNMP_MIB_ITEM("RtoAlgorithm", TCP_MIB_RTOALGORITHM), SNMP_MIB_ITEM("RtoMin", TCP_MIB_RTOMIN), SNMP_MIB_ITEM("RtoMax", TCP_MIB_RTOMAX), SNMP_MIB_ITEM("MaxConn", TCP_MIB_MAXCONN), SNMP_MIB_ITEM("ActiveOpens", TCP_MIB_ACTIVEOPENS), SNMP_MIB_ITEM("PassiveOpens", TCP_MIB_PASSIVEOPENS), SNMP_MIB_ITEM("AttemptFails", TCP_MIB_ATTEMPTFAILS), SNMP_MIB_ITEM("EstabResets", TCP_MIB_ESTABRESETS), SNMP_MIB_ITEM("CurrEstab", TCP_MIB_CURRESTAB), SNMP_MIB_ITEM("InSegs", TCP_MIB_INSEGS), SNMP_MIB_ITEM("OutSegs", TCP_MIB_OUTSEGS), SNMP_MIB_ITEM("RetransSegs", TCP_MIB_RETRANSSEGS), SNMP_MIB_ITEM("InErrs", TCP_MIB_INERRS), SNMP_MIB_ITEM("OutRsts", TCP_MIB_OUTRSTS), SNMP_MIB_ITEM("InCsumErrors", TCP_MIB_CSUMERRORS), SNMP_MIB_SENTINEL }; static const struct snmp_mib snmp4_udp_list[] = { SNMP_MIB_ITEM("InDatagrams", UDP_MIB_INDATAGRAMS), SNMP_MIB_ITEM("NoPorts", UDP_MIB_NOPORTS), SNMP_MIB_ITEM("InErrors", UDP_MIB_INERRORS), SNMP_MIB_ITEM("OutDatagrams", UDP_MIB_OUTDATAGRAMS), SNMP_MIB_ITEM("RcvbufErrors", UDP_MIB_RCVBUFERRORS), SNMP_MIB_ITEM("SndbufErrors", UDP_MIB_SNDBUFERRORS), SNMP_MIB_ITEM("InCsumErrors", UDP_MIB_CSUMERRORS), SNMP_MIB_ITEM("IgnoredMulti", UDP_MIB_IGNOREDMULTI), SNMP_MIB_ITEM("MemErrors", UDP_MIB_MEMERRORS), SNMP_MIB_SENTINEL }; static const struct snmp_mib snmp4_net_list[] = { SNMP_MIB_ITEM("SyncookiesSent", LINUX_MIB_SYNCOOKIESSENT), SNMP_MIB_ITEM("SyncookiesRecv", LINUX_MIB_SYNCOOKIESRECV), SNMP_MIB_ITEM("SyncookiesFailed", LINUX_MIB_SYNCOOKIESFAILED), SNMP_MIB_ITEM("EmbryonicRsts", LINUX_MIB_EMBRYONICRSTS), SNMP_MIB_ITEM("PruneCalled", LINUX_MIB_PRUNECALLED), SNMP_MIB_ITEM("RcvPruned", LINUX_MIB_RCVPRUNED), SNMP_MIB_ITEM("OfoPruned", LINUX_MIB_OFOPRUNED), SNMP_MIB_ITEM("OutOfWindowIcmps", LINUX_MIB_OUTOFWINDOWICMPS), SNMP_MIB_ITEM("LockDroppedIcmps", LINUX_MIB_LOCKDROPPEDICMPS), SNMP_MIB_ITEM("ArpFilter", LINUX_MIB_ARPFILTER), SNMP_MIB_ITEM("TW", LINUX_MIB_TIMEWAITED), SNMP_MIB_ITEM("TWRecycled", LINUX_MIB_TIMEWAITRECYCLED), SNMP_MIB_ITEM("TWKilled", LINUX_MIB_TIMEWAITKILLED), SNMP_MIB_ITEM("PAWSActive", LINUX_MIB_PAWSACTIVEREJECTED), SNMP_MIB_ITEM("PAWSEstab", LINUX_MIB_PAWSESTABREJECTED), SNMP_MIB_ITEM("TSEcrRejected", LINUX_MIB_TSECRREJECTED), SNMP_MIB_ITEM("PAWSOldAck", LINUX_MIB_PAWS_OLD_ACK), SNMP_MIB_ITEM("PAWSTimewait", LINUX_MIB_PAWS_TW_REJECTED), SNMP_MIB_ITEM("DelayedACKs", LINUX_MIB_DELAYEDACKS), SNMP_MIB_ITEM("DelayedACKLocked", LINUX_MIB_DELAYEDACKLOCKED), SNMP_MIB_ITEM("DelayedACKLost", LINUX_MIB_DELAYEDACKLOST), SNMP_MIB_ITEM("ListenOverflows", LINUX_MIB_LISTENOVERFLOWS), SNMP_MIB_ITEM("ListenDrops", LINUX_MIB_LISTENDROPS), SNMP_MIB_ITEM("TCPHPHits", LINUX_MIB_TCPHPHITS), SNMP_MIB_ITEM("TCPPureAcks", LINUX_MIB_TCPPUREACKS), SNMP_MIB_ITEM("TCPHPAcks", LINUX_MIB_TCPHPACKS), SNMP_MIB_ITEM("TCPRenoRecovery", LINUX_MIB_TCPRENORECOVERY), SNMP_MIB_ITEM("TCPSackRecovery", LINUX_MIB_TCPSACKRECOVERY), SNMP_MIB_ITEM("TCPSACKReneging", LINUX_MIB_TCPSACKRENEGING), 
SNMP_MIB_ITEM("TCPSACKReorder", LINUX_MIB_TCPSACKREORDER), SNMP_MIB_ITEM("TCPRenoReorder", LINUX_MIB_TCPRENOREORDER), SNMP_MIB_ITEM("TCPTSReorder", LINUX_MIB_TCPTSREORDER), SNMP_MIB_ITEM("TCPFullUndo", LINUX_MIB_TCPFULLUNDO), SNMP_MIB_ITEM("TCPPartialUndo", LINUX_MIB_TCPPARTIALUNDO), SNMP_MIB_ITEM("TCPDSACKUndo", LINUX_MIB_TCPDSACKUNDO), SNMP_MIB_ITEM("TCPLossUndo", LINUX_MIB_TCPLOSSUNDO), SNMP_MIB_ITEM("TCPLostRetransmit", LINUX_MIB_TCPLOSTRETRANSMIT), SNMP_MIB_ITEM("TCPRenoFailures", LINUX_MIB_TCPRENOFAILURES), SNMP_MIB_ITEM("TCPSackFailures", LINUX_MIB_TCPSACKFAILURES), SNMP_MIB_ITEM("TCPLossFailures", LINUX_MIB_TCPLOSSFAILURES), SNMP_MIB_ITEM("TCPFastRetrans", LINUX_MIB_TCPFASTRETRANS), SNMP_MIB_ITEM("TCPSlowStartRetrans", LINUX_MIB_TCPSLOWSTARTRETRANS), SNMP_MIB_ITEM("TCPTimeouts", LINUX_MIB_TCPTIMEOUTS), SNMP_MIB_ITEM("TCPLossProbes", LINUX_MIB_TCPLOSSPROBES), SNMP_MIB_ITEM("TCPLossProbeRecovery", LINUX_MIB_TCPLOSSPROBERECOVERY), SNMP_MIB_ITEM("TCPRenoRecoveryFail", LINUX_MIB_TCPRENORECOVERYFAIL), SNMP_MIB_ITEM("TCPSackRecoveryFail", LINUX_MIB_TCPSACKRECOVERYFAIL), SNMP_MIB_ITEM("TCPRcvCollapsed", LINUX_MIB_TCPRCVCOLLAPSED), SNMP_MIB_ITEM("TCPBacklogCoalesce", LINUX_MIB_TCPBACKLOGCOALESCE), SNMP_MIB_ITEM("TCPDSACKOldSent", LINUX_MIB_TCPDSACKOLDSENT), SNMP_MIB_ITEM("TCPDSACKOfoSent", LINUX_MIB_TCPDSACKOFOSENT), SNMP_MIB_ITEM("TCPDSACKRecv", LINUX_MIB_TCPDSACKRECV), SNMP_MIB_ITEM("TCPDSACKOfoRecv", LINUX_MIB_TCPDSACKOFORECV), SNMP_MIB_ITEM("TCPAbortOnData", LINUX_MIB_TCPABORTONDATA), SNMP_MIB_ITEM("TCPAbortOnClose", LINUX_MIB_TCPABORTONCLOSE), SNMP_MIB_ITEM("TCPAbortOnMemory", LINUX_MIB_TCPABORTONMEMORY), SNMP_MIB_ITEM("TCPAbortOnTimeout", LINUX_MIB_TCPABORTONTIMEOUT), SNMP_MIB_ITEM("TCPAbortOnLinger", LINUX_MIB_TCPABORTONLINGER), SNMP_MIB_ITEM("TCPAbortFailed", LINUX_MIB_TCPABORTFAILED), SNMP_MIB_ITEM("TCPMemoryPressures", LINUX_MIB_TCPMEMORYPRESSURES), SNMP_MIB_ITEM("TCPMemoryPressuresChrono", LINUX_MIB_TCPMEMORYPRESSURESCHRONO), SNMP_MIB_ITEM("TCPSACKDiscard", LINUX_MIB_TCPSACKDISCARD), SNMP_MIB_ITEM("TCPDSACKIgnoredOld", LINUX_MIB_TCPDSACKIGNOREDOLD), SNMP_MIB_ITEM("TCPDSACKIgnoredNoUndo", LINUX_MIB_TCPDSACKIGNOREDNOUNDO), SNMP_MIB_ITEM("TCPSpuriousRTOs", LINUX_MIB_TCPSPURIOUSRTOS), SNMP_MIB_ITEM("TCPMD5NotFound", LINUX_MIB_TCPMD5NOTFOUND), SNMP_MIB_ITEM("TCPMD5Unexpected", LINUX_MIB_TCPMD5UNEXPECTED), SNMP_MIB_ITEM("TCPMD5Failure", LINUX_MIB_TCPMD5FAILURE), SNMP_MIB_ITEM("TCPSackShifted", LINUX_MIB_SACKSHIFTED), SNMP_MIB_ITEM("TCPSackMerged", LINUX_MIB_SACKMERGED), SNMP_MIB_ITEM("TCPSackShiftFallback", LINUX_MIB_SACKSHIFTFALLBACK), SNMP_MIB_ITEM("TCPBacklogDrop", LINUX_MIB_TCPBACKLOGDROP), SNMP_MIB_ITEM("PFMemallocDrop", LINUX_MIB_PFMEMALLOCDROP), SNMP_MIB_ITEM("TCPMinTTLDrop", LINUX_MIB_TCPMINTTLDROP), SNMP_MIB_ITEM("TCPDeferAcceptDrop", LINUX_MIB_TCPDEFERACCEPTDROP), SNMP_MIB_ITEM("IPReversePathFilter", LINUX_MIB_IPRPFILTER), SNMP_MIB_ITEM("TCPTimeWaitOverflow", LINUX_MIB_TCPTIMEWAITOVERFLOW), SNMP_MIB_ITEM("TCPReqQFullDoCookies", LINUX_MIB_TCPREQQFULLDOCOOKIES), SNMP_MIB_ITEM("TCPReqQFullDrop", LINUX_MIB_TCPREQQFULLDROP), SNMP_MIB_ITEM("TCPRetransFail", LINUX_MIB_TCPRETRANSFAIL), SNMP_MIB_ITEM("TCPRcvCoalesce", LINUX_MIB_TCPRCVCOALESCE), SNMP_MIB_ITEM("TCPOFOQueue", LINUX_MIB_TCPOFOQUEUE), SNMP_MIB_ITEM("TCPOFODrop", LINUX_MIB_TCPOFODROP), SNMP_MIB_ITEM("TCPOFOMerge", LINUX_MIB_TCPOFOMERGE), SNMP_MIB_ITEM("TCPChallengeACK", LINUX_MIB_TCPCHALLENGEACK), SNMP_MIB_ITEM("TCPSYNChallenge", LINUX_MIB_TCPSYNCHALLENGE), SNMP_MIB_ITEM("TCPFastOpenActive", LINUX_MIB_TCPFASTOPENACTIVE), 
SNMP_MIB_ITEM("TCPFastOpenActiveFail", LINUX_MIB_TCPFASTOPENACTIVEFAIL), SNMP_MIB_ITEM("TCPFastOpenPassive", LINUX_MIB_TCPFASTOPENPASSIVE), SNMP_MIB_ITEM("TCPFastOpenPassiveFail", LINUX_MIB_TCPFASTOPENPASSIVEFAIL), SNMP_MIB_ITEM("TCPFastOpenListenOverflow", LINUX_MIB_TCPFASTOPENLISTENOVERFLOW), SNMP_MIB_ITEM("TCPFastOpenCookieReqd", LINUX_MIB_TCPFASTOPENCOOKIEREQD), SNMP_MIB_ITEM("TCPFastOpenBlackhole", LINUX_MIB_TCPFASTOPENBLACKHOLE), SNMP_MIB_ITEM("TCPSpuriousRtxHostQueues", LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES), SNMP_MIB_ITEM("BusyPollRxPackets", LINUX_MIB_BUSYPOLLRXPACKETS), SNMP_MIB_ITEM("TCPAutoCorking", LINUX_MIB_TCPAUTOCORKING), SNMP_MIB_ITEM("TCPFromZeroWindowAdv", LINUX_MIB_TCPFROMZEROWINDOWADV), SNMP_MIB_ITEM("TCPToZeroWindowAdv", LINUX_MIB_TCPTOZEROWINDOWADV), SNMP_MIB_ITEM("TCPWantZeroWindowAdv", LINUX_MIB_TCPWANTZEROWINDOWADV), SNMP_MIB_ITEM("TCPSynRetrans", LINUX_MIB_TCPSYNRETRANS), SNMP_MIB_ITEM("TCPOrigDataSent", LINUX_MIB_TCPORIGDATASENT), SNMP_MIB_ITEM("TCPHystartTrainDetect", LINUX_MIB_TCPHYSTARTTRAINDETECT), SNMP_MIB_ITEM("TCPHystartTrainCwnd", LINUX_MIB_TCPHYSTARTTRAINCWND), SNMP_MIB_ITEM("TCPHystartDelayDetect", LINUX_MIB_TCPHYSTARTDELAYDETECT), SNMP_MIB_ITEM("TCPHystartDelayCwnd", LINUX_MIB_TCPHYSTARTDELAYCWND), SNMP_MIB_ITEM("TCPACKSkippedSynRecv", LINUX_MIB_TCPACKSKIPPEDSYNRECV), SNMP_MIB_ITEM("TCPACKSkippedPAWS", LINUX_MIB_TCPACKSKIPPEDPAWS), SNMP_MIB_ITEM("TCPACKSkippedSeq", LINUX_MIB_TCPACKSKIPPEDSEQ), SNMP_MIB_ITEM("TCPACKSkippedFinWait2", LINUX_MIB_TCPACKSKIPPEDFINWAIT2), SNMP_MIB_ITEM("TCPACKSkippedTimeWait", LINUX_MIB_TCPACKSKIPPEDTIMEWAIT), SNMP_MIB_ITEM("TCPACKSkippedChallenge", LINUX_MIB_TCPACKSKIPPEDCHALLENGE), SNMP_MIB_ITEM("TCPWinProbe", LINUX_MIB_TCPWINPROBE), SNMP_MIB_ITEM("TCPKeepAlive", LINUX_MIB_TCPKEEPALIVE), SNMP_MIB_ITEM("TCPMTUPFail", LINUX_MIB_TCPMTUPFAIL), SNMP_MIB_ITEM("TCPMTUPSuccess", LINUX_MIB_TCPMTUPSUCCESS), SNMP_MIB_ITEM("TCPDelivered", LINUX_MIB_TCPDELIVERED), SNMP_MIB_ITEM("TCPDeliveredCE", LINUX_MIB_TCPDELIVEREDCE), SNMP_MIB_ITEM("TCPAckCompressed", LINUX_MIB_TCPACKCOMPRESSED), SNMP_MIB_ITEM("TCPZeroWindowDrop", LINUX_MIB_TCPZEROWINDOWDROP), SNMP_MIB_ITEM("TCPRcvQDrop", LINUX_MIB_TCPRCVQDROP), SNMP_MIB_ITEM("TCPWqueueTooBig", LINUX_MIB_TCPWQUEUETOOBIG), SNMP_MIB_ITEM("TCPFastOpenPassiveAltKey", LINUX_MIB_TCPFASTOPENPASSIVEALTKEY), SNMP_MIB_ITEM("TcpTimeoutRehash", LINUX_MIB_TCPTIMEOUTREHASH), SNMP_MIB_ITEM("TcpDuplicateDataRehash", LINUX_MIB_TCPDUPLICATEDATAREHASH), SNMP_MIB_ITEM("TCPDSACKRecvSegs", LINUX_MIB_TCPDSACKRECVSEGS), SNMP_MIB_ITEM("TCPDSACKIgnoredDubious", LINUX_MIB_TCPDSACKIGNOREDDUBIOUS), SNMP_MIB_ITEM("TCPMigrateReqSuccess", LINUX_MIB_TCPMIGRATEREQSUCCESS), SNMP_MIB_ITEM("TCPMigrateReqFailure", LINUX_MIB_TCPMIGRATEREQFAILURE), SNMP_MIB_ITEM("TCPPLBRehash", LINUX_MIB_TCPPLBREHASH), SNMP_MIB_ITEM("TCPAORequired", LINUX_MIB_TCPAOREQUIRED), SNMP_MIB_ITEM("TCPAOBad", LINUX_MIB_TCPAOBAD), SNMP_MIB_ITEM("TCPAOKeyNotFound", LINUX_MIB_TCPAOKEYNOTFOUND), SNMP_MIB_ITEM("TCPAOGood", LINUX_MIB_TCPAOGOOD), SNMP_MIB_ITEM("TCPAODroppedIcmps", LINUX_MIB_TCPAODROPPEDICMPS), SNMP_MIB_SENTINEL }; static void icmpmsg_put_line(struct seq_file *seq, unsigned long *vals, unsigned short *type, int count) { int j; if (count) { seq_puts(seq, "\nIcmpMsg:"); for (j = 0; j < count; ++j) seq_printf(seq, " %sType%u", type[j] & 0x100 ? 
"Out" : "In", type[j] & 0xff); seq_puts(seq, "\nIcmpMsg:"); for (j = 0; j < count; ++j) seq_printf(seq, " %lu", vals[j]); } } static void icmpmsg_put(struct seq_file *seq) { #define PERLINE 16 int i, count; unsigned short type[PERLINE]; unsigned long vals[PERLINE], val; struct net *net = seq->private; count = 0; for (i = 0; i < ICMPMSG_MIB_MAX; i++) { val = atomic_long_read(&net->mib.icmpmsg_statistics->mibs[i]); if (val) { type[count] = i; vals[count++] = val; } if (count == PERLINE) { icmpmsg_put_line(seq, vals, type, count); count = 0; } } icmpmsg_put_line(seq, vals, type, count); #undef PERLINE } static void icmp_put(struct seq_file *seq) { int i; struct net *net = seq->private; atomic_long_t *ptr = net->mib.icmpmsg_statistics->mibs; seq_puts(seq, "\nIcmp: InMsgs InErrors InCsumErrors"); for (i = 0; icmpmibmap[i].name; i++) seq_printf(seq, " In%s", icmpmibmap[i].name); seq_puts(seq, " OutMsgs OutErrors OutRateLimitGlobal OutRateLimitHost"); for (i = 0; icmpmibmap[i].name; i++) seq_printf(seq, " Out%s", icmpmibmap[i].name); seq_printf(seq, "\nIcmp: %lu %lu %lu", snmp_fold_field(net->mib.icmp_statistics, ICMP_MIB_INMSGS), snmp_fold_field(net->mib.icmp_statistics, ICMP_MIB_INERRORS), snmp_fold_field(net->mib.icmp_statistics, ICMP_MIB_CSUMERRORS)); for (i = 0; icmpmibmap[i].name; i++) seq_printf(seq, " %lu", atomic_long_read(ptr + icmpmibmap[i].index)); seq_printf(seq, " %lu %lu %lu %lu", snmp_fold_field(net->mib.icmp_statistics, ICMP_MIB_OUTMSGS), snmp_fold_field(net->mib.icmp_statistics, ICMP_MIB_OUTERRORS), snmp_fold_field(net->mib.icmp_statistics, ICMP_MIB_RATELIMITGLOBAL), snmp_fold_field(net->mib.icmp_statistics, ICMP_MIB_RATELIMITHOST)); for (i = 0; icmpmibmap[i].name; i++) seq_printf(seq, " %lu", atomic_long_read(ptr + (icmpmibmap[i].index | 0x100))); } /* * Called from the PROCfs module. This outputs /proc/net/snmp. */ static int snmp_seq_show_ipstats(struct seq_file *seq, void *v) { struct net *net = seq->private; u64 buff64[IPSTATS_MIB_MAX]; int i; memset(buff64, 0, IPSTATS_MIB_MAX * sizeof(u64)); seq_puts(seq, "Ip: Forwarding DefaultTTL"); for (i = 0; snmp4_ipstats_list[i].name; i++) seq_printf(seq, " %s", snmp4_ipstats_list[i].name); seq_printf(seq, "\nIp: %d %d", IPV4_DEVCONF_ALL_RO(net, FORWARDING) ? 
1 : 2, READ_ONCE(net->ipv4.sysctl_ip_default_ttl)); BUILD_BUG_ON(offsetof(struct ipstats_mib, mibs) != 0); snmp_get_cpu_field64_batch(buff64, snmp4_ipstats_list, net->mib.ip_statistics, offsetof(struct ipstats_mib, syncp)); for (i = 0; snmp4_ipstats_list[i].name; i++) seq_printf(seq, " %llu", buff64[i]); return 0; } static int snmp_seq_show_tcp_udp(struct seq_file *seq, void *v) { unsigned long buff[TCPUDP_MIB_MAX]; struct net *net = seq->private; int i; memset(buff, 0, TCPUDP_MIB_MAX * sizeof(unsigned long)); seq_puts(seq, "\nTcp:"); for (i = 0; snmp4_tcp_list[i].name; i++) seq_printf(seq, " %s", snmp4_tcp_list[i].name); seq_puts(seq, "\nTcp:"); snmp_get_cpu_field_batch(buff, snmp4_tcp_list, net->mib.tcp_statistics); for (i = 0; snmp4_tcp_list[i].name; i++) { /* MaxConn field is signed, RFC 2012 */ if (snmp4_tcp_list[i].entry == TCP_MIB_MAXCONN) seq_printf(seq, " %ld", buff[i]); else seq_printf(seq, " %lu", buff[i]); } memset(buff, 0, TCPUDP_MIB_MAX * sizeof(unsigned long)); snmp_get_cpu_field_batch(buff, snmp4_udp_list, net->mib.udp_statistics); seq_puts(seq, "\nUdp:"); for (i = 0; snmp4_udp_list[i].name; i++) seq_printf(seq, " %s", snmp4_udp_list[i].name); seq_puts(seq, "\nUdp:"); for (i = 0; snmp4_udp_list[i].name; i++) seq_printf(seq, " %lu", buff[i]); memset(buff, 0, TCPUDP_MIB_MAX * sizeof(unsigned long)); /* the UDP and UDP-Lite MIBs are the same */ seq_puts(seq, "\nUdpLite:"); snmp_get_cpu_field_batch(buff, snmp4_udp_list, net->mib.udplite_statistics); for (i = 0; snmp4_udp_list[i].name; i++) seq_printf(seq, " %s", snmp4_udp_list[i].name); seq_puts(seq, "\nUdpLite:"); for (i = 0; snmp4_udp_list[i].name; i++) seq_printf(seq, " %lu", buff[i]); seq_putc(seq, '\n'); return 0; } static int snmp_seq_show(struct seq_file *seq, void *v) { snmp_seq_show_ipstats(seq, v); icmp_put(seq); /* RFC 2011 compatibility */ icmpmsg_put(seq); snmp_seq_show_tcp_udp(seq, v); return 0; } /* * Output /proc/net/netstat */ static int netstat_seq_show(struct seq_file *seq, void *v) { const int ip_cnt = ARRAY_SIZE(snmp4_ipextstats_list) - 1; const int tcp_cnt = ARRAY_SIZE(snmp4_net_list) - 1; struct net *net = seq->private; unsigned long *buff; int i; seq_puts(seq, "TcpExt:"); for (i = 0; i < tcp_cnt; i++) seq_printf(seq, " %s", snmp4_net_list[i].name); seq_puts(seq, "\nTcpExt:"); buff = kzalloc(max(tcp_cnt * sizeof(long), ip_cnt * sizeof(u64)), GFP_KERNEL); if (buff) { snmp_get_cpu_field_batch(buff, snmp4_net_list, net->mib.net_statistics); for (i = 0; i < tcp_cnt; i++) seq_printf(seq, " %lu", buff[i]); } else { for (i = 0; i < tcp_cnt; i++) seq_printf(seq, " %lu", snmp_fold_field(net->mib.net_statistics, snmp4_net_list[i].entry)); } seq_puts(seq, "\nIpExt:"); for (i = 0; i < ip_cnt; i++) seq_printf(seq, " %s", snmp4_ipextstats_list[i].name); seq_puts(seq, "\nIpExt:"); if (buff) { u64 *buff64 = (u64 *)buff; memset(buff64, 0, ip_cnt * sizeof(u64)); snmp_get_cpu_field64_batch(buff64, snmp4_ipextstats_list, net->mib.ip_statistics, offsetof(struct ipstats_mib, syncp)); for (i = 0; i < ip_cnt; i++) seq_printf(seq, " %llu", buff64[i]); } else { for (i = 0; i < ip_cnt; i++) seq_printf(seq, " %llu", snmp_fold_field64(net->mib.ip_statistics, snmp4_ipextstats_list[i].entry, offsetof(struct ipstats_mib, syncp))); } kfree(buff); seq_putc(seq, '\n'); mptcp_seq_show(seq); return 0; } static __net_init int ip_proc_init_net(struct net *net) { if (!proc_create_net_single("sockstat", 0444, net->proc_net, sockstat_seq_show, NULL)) goto out_sockstat; if (!proc_create_net_single("netstat", 0444, net->proc_net, netstat_seq_show, 
NULL)) goto out_netstat; if (!proc_create_net_single("snmp", 0444, net->proc_net, snmp_seq_show, NULL)) goto out_snmp; return 0; out_snmp: remove_proc_entry("netstat", net->proc_net); out_netstat: remove_proc_entry("sockstat", net->proc_net); out_sockstat: return -ENOMEM; } static __net_exit void ip_proc_exit_net(struct net *net) { remove_proc_entry("snmp", net->proc_net); remove_proc_entry("netstat", net->proc_net); remove_proc_entry("sockstat", net->proc_net); } static __net_initdata struct pernet_operations ip_proc_ops = { .init = ip_proc_init_net, .exit = ip_proc_exit_net, }; int __init ip_misc_proc_init(void) { return register_pernet_subsys(&ip_proc_ops); }
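For reference, the entries registered above are ordinary read-only seq_files, so consuming them from user space is plain file I/O. A minimal sketch that dumps /proc/net/snmp follows; real monitoring tools additionally pair each header line ("Tcp: RtoAlgorithm ...") with the value line that comes after it.

/* Dump /proc/net/snmp as produced by snmp_seq_show() above. */
#include <stdio.h>

int main(void)
{
	char line[4096];
	FILE *f = fopen("/proc/net/snmp", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}

	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);

	fclose(f);
	return 0;
}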
// SPDX-License-Identifier: GPL-2.0-only /* * The NFC Controller Interface is the communication protocol between
an * NFC Controller (NFCC) and a Device Host (DH). * * Copyright (C) 2014 Marvell International Ltd. * Copyright (C) 2011 Texas Instruments, Inc. * * Written by Ilan Elias <ilane@ti.com> * * Acknowledgements: * This file is based on hci_event.c, which was written * by Maxim Krasnyansky. */ #define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__ #include <linux/types.h> #include <linux/interrupt.h> #include <linux/bitops.h> #include <linux/skbuff.h> #include "../nfc.h" #include <net/nfc/nci.h> #include <net/nfc/nci_core.h> #include <linux/nfc.h> /* Handle NCI Notification packets */ static void nci_core_reset_ntf_packet(struct nci_dev *ndev, const struct sk_buff *skb) { /* Handle NCI 2.x core reset notification */ const struct nci_core_reset_ntf *ntf = (void *)skb->data; ndev->nci_ver = ntf->nci_ver; pr_debug("nci_ver 0x%x, config_status 0x%x\n", ntf->nci_ver, ntf->config_status); ndev->manufact_id = ntf->manufact_id; ndev->manufact_specific_info = __le32_to_cpu(ntf->manufact_specific_info); nci_req_complete(ndev, NCI_STATUS_OK); } static void nci_core_conn_credits_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb) { struct nci_core_conn_credit_ntf *ntf = (void *) skb->data; struct nci_conn_info *conn_info; int i; pr_debug("num_entries %d\n", ntf->num_entries); if (ntf->num_entries > NCI_MAX_NUM_CONN) ntf->num_entries = NCI_MAX_NUM_CONN; /* update the credits */ for (i = 0; i < ntf->num_entries; i++) { ntf->conn_entries[i].conn_id = nci_conn_id(&ntf->conn_entries[i].conn_id); pr_debug("entry[%d]: conn_id %d, credits %d\n", i, ntf->conn_entries[i].conn_id, ntf->conn_entries[i].credits); conn_info = nci_get_conn_info_by_conn_id(ndev, ntf->conn_entries[i].conn_id); if (!conn_info) return; atomic_add(ntf->conn_entries[i].credits, &conn_info->credits_cnt); } /* trigger the next tx */ if (!skb_queue_empty(&ndev->tx_q)) queue_work(ndev->tx_wq, &ndev->tx_work); } static void nci_core_generic_error_ntf_packet(struct nci_dev *ndev, const struct sk_buff *skb) { __u8 status = skb->data[0]; pr_debug("status 0x%x\n", status); if (atomic_read(&ndev->state) == NCI_W4_HOST_SELECT) { /* Activation failed, so complete the request (the state remains the same) */ nci_req_complete(ndev, status); } } static void nci_core_conn_intf_error_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb) { struct nci_core_intf_error_ntf *ntf = (void *) skb->data; ntf->conn_id = nci_conn_id(&ntf->conn_id); pr_debug("status 0x%x, conn_id %d\n", ntf->status, ntf->conn_id); /* complete the data exchange transaction, if exists */ if (test_bit(NCI_DATA_EXCHANGE, &ndev->flags)) nci_data_exchange_complete(ndev, NULL, ntf->conn_id, -EIO); } static const __u8 * nci_extract_rf_params_nfca_passive_poll(struct nci_dev *ndev, struct rf_tech_specific_params_nfca_poll *nfca_poll, const __u8 *data) { nfca_poll->sens_res = __le16_to_cpu(*((__le16 *)data)); data += 2; nfca_poll->nfcid1_len = min_t(__u8, *data++, NFC_NFCID1_MAXSIZE); pr_debug("sens_res 0x%x, nfcid1_len %d\n", nfca_poll->sens_res, nfca_poll->nfcid1_len); memcpy(nfca_poll->nfcid1, data, nfca_poll->nfcid1_len); data += nfca_poll->nfcid1_len; nfca_poll->sel_res_len = *data++; if (nfca_poll->sel_res_len != 0) nfca_poll->sel_res = *data++; pr_debug("sel_res_len %d, sel_res 0x%x\n", nfca_poll->sel_res_len, nfca_poll->sel_res); return data; } static const __u8 * nci_extract_rf_params_nfcb_passive_poll(struct nci_dev *ndev, struct rf_tech_specific_params_nfcb_poll *nfcb_poll, const __u8 *data) { nfcb_poll->sensb_res_len = min_t(__u8, *data++, NFC_SENSB_RES_MAXSIZE); 
pr_debug("sensb_res_len %d\n", nfcb_poll->sensb_res_len); memcpy(nfcb_poll->sensb_res, data, nfcb_poll->sensb_res_len); data += nfcb_poll->sensb_res_len; return data; } static const __u8 * nci_extract_rf_params_nfcf_passive_poll(struct nci_dev *ndev, struct rf_tech_specific_params_nfcf_poll *nfcf_poll, const __u8 *data) { nfcf_poll->bit_rate = *data++; nfcf_poll->sensf_res_len = min_t(__u8, *data++, NFC_SENSF_RES_MAXSIZE); pr_debug("bit_rate %d, sensf_res_len %d\n", nfcf_poll->bit_rate, nfcf_poll->sensf_res_len); memcpy(nfcf_poll->sensf_res, data, nfcf_poll->sensf_res_len); data += nfcf_poll->sensf_res_len; return data; } static const __u8 * nci_extract_rf_params_nfcv_passive_poll(struct nci_dev *ndev, struct rf_tech_specific_params_nfcv_poll *nfcv_poll, const __u8 *data) { ++data; nfcv_poll->dsfid = *data++; memcpy(nfcv_poll->uid, data, NFC_ISO15693_UID_MAXSIZE); data += NFC_ISO15693_UID_MAXSIZE; return data; } static const __u8 * nci_extract_rf_params_nfcf_passive_listen(struct nci_dev *ndev, struct rf_tech_specific_params_nfcf_listen *nfcf_listen, const __u8 *data) { nfcf_listen->local_nfcid2_len = min_t(__u8, *data++, NFC_NFCID2_MAXSIZE); memcpy(nfcf_listen->local_nfcid2, data, nfcf_listen->local_nfcid2_len); data += nfcf_listen->local_nfcid2_len; return data; } static __u32 nci_get_prop_rf_protocol(struct nci_dev *ndev, __u8 rf_protocol) { if (ndev->ops->get_rfprotocol) return ndev->ops->get_rfprotocol(ndev, rf_protocol); return 0; } static int nci_add_new_protocol(struct nci_dev *ndev, struct nfc_target *target, __u8 rf_protocol, __u8 rf_tech_and_mode, const void *params) { const struct rf_tech_specific_params_nfca_poll *nfca_poll; const struct rf_tech_specific_params_nfcb_poll *nfcb_poll; const struct rf_tech_specific_params_nfcf_poll *nfcf_poll; const struct rf_tech_specific_params_nfcv_poll *nfcv_poll; __u32 protocol; if (rf_protocol == NCI_RF_PROTOCOL_T1T) protocol = NFC_PROTO_JEWEL_MASK; else if (rf_protocol == NCI_RF_PROTOCOL_T2T) protocol = NFC_PROTO_MIFARE_MASK; else if (rf_protocol == NCI_RF_PROTOCOL_ISO_DEP) if (rf_tech_and_mode == NCI_NFC_A_PASSIVE_POLL_MODE) protocol = NFC_PROTO_ISO14443_MASK; else protocol = NFC_PROTO_ISO14443_B_MASK; else if (rf_protocol == NCI_RF_PROTOCOL_T3T) protocol = NFC_PROTO_FELICA_MASK; else if (rf_protocol == NCI_RF_PROTOCOL_NFC_DEP) protocol = NFC_PROTO_NFC_DEP_MASK; else if (rf_protocol == NCI_RF_PROTOCOL_T5T) protocol = NFC_PROTO_ISO15693_MASK; else protocol = nci_get_prop_rf_protocol(ndev, rf_protocol); if (!(protocol & ndev->poll_prots)) { pr_err("the target found does not have the desired protocol\n"); return -EPROTO; } if (rf_tech_and_mode == NCI_NFC_A_PASSIVE_POLL_MODE) { nfca_poll = (struct rf_tech_specific_params_nfca_poll *)params; target->sens_res = nfca_poll->sens_res; target->sel_res = nfca_poll->sel_res; target->nfcid1_len = nfca_poll->nfcid1_len; if (target->nfcid1_len > ARRAY_SIZE(target->nfcid1)) return -EPROTO; if (target->nfcid1_len > 0) { memcpy(target->nfcid1, nfca_poll->nfcid1, target->nfcid1_len); } } else if (rf_tech_and_mode == NCI_NFC_B_PASSIVE_POLL_MODE) { nfcb_poll = (struct rf_tech_specific_params_nfcb_poll *)params; target->sensb_res_len = nfcb_poll->sensb_res_len; if (target->sensb_res_len > ARRAY_SIZE(target->sensb_res)) return -EPROTO; if (target->sensb_res_len > 0) { memcpy(target->sensb_res, nfcb_poll->sensb_res, target->sensb_res_len); } } else if (rf_tech_and_mode == NCI_NFC_F_PASSIVE_POLL_MODE) { nfcf_poll = (struct rf_tech_specific_params_nfcf_poll *)params; target->sensf_res_len = 
nfcf_poll->sensf_res_len; if (target->sensf_res_len > ARRAY_SIZE(target->sensf_res)) return -EPROTO; if (target->sensf_res_len > 0) { memcpy(target->sensf_res, nfcf_poll->sensf_res, target->sensf_res_len); } } else if (rf_tech_and_mode == NCI_NFC_V_PASSIVE_POLL_MODE) { nfcv_poll = (struct rf_tech_specific_params_nfcv_poll *)params; target->is_iso15693 = 1; target->iso15693_dsfid = nfcv_poll->dsfid; memcpy(target->iso15693_uid, nfcv_poll->uid, NFC_ISO15693_UID_MAXSIZE); } else { pr_err("unsupported rf_tech_and_mode 0x%x\n", rf_tech_and_mode); return -EPROTO; } target->supported_protocols |= protocol; pr_debug("protocol 0x%x\n", protocol); return 0; } static void nci_add_new_target(struct nci_dev *ndev, const struct nci_rf_discover_ntf *ntf) { struct nfc_target *target; int i, rc; for (i = 0; i < ndev->n_targets; i++) { target = &ndev->targets[i]; if (target->logical_idx == ntf->rf_discovery_id) { /* This target already exists, add the new protocol */ nci_add_new_protocol(ndev, target, ntf->rf_protocol, ntf->rf_tech_and_mode, &ntf->rf_tech_specific_params); return; } } /* This is a new target, check if we've enough room */ if (ndev->n_targets == NCI_MAX_DISCOVERED_TARGETS) { pr_debug("not enough room, ignoring new target...\n"); return; } target = &ndev->targets[ndev->n_targets]; rc = nci_add_new_protocol(ndev, target, ntf->rf_protocol, ntf->rf_tech_and_mode, &ntf->rf_tech_specific_params); if (!rc) { target->logical_idx = ntf->rf_discovery_id; ndev->n_targets++; pr_debug("logical idx %d, n_targets %d\n", target->logical_idx, ndev->n_targets); } } void nci_clear_target_list(struct nci_dev *ndev) { memset(ndev->targets, 0, (sizeof(struct nfc_target)*NCI_MAX_DISCOVERED_TARGETS)); ndev->n_targets = 0; } static void nci_rf_discover_ntf_packet(struct nci_dev *ndev, const struct sk_buff *skb) { struct nci_rf_discover_ntf ntf; const __u8 *data = skb->data; bool add_target = true; ntf.rf_discovery_id = *data++; ntf.rf_protocol = *data++; ntf.rf_tech_and_mode = *data++; ntf.rf_tech_specific_params_len = *data++; pr_debug("rf_discovery_id %d\n", ntf.rf_discovery_id); pr_debug("rf_protocol 0x%x\n", ntf.rf_protocol); pr_debug("rf_tech_and_mode 0x%x\n", ntf.rf_tech_and_mode); pr_debug("rf_tech_specific_params_len %d\n", ntf.rf_tech_specific_params_len); if (ntf.rf_tech_specific_params_len > 0) { switch (ntf.rf_tech_and_mode) { case NCI_NFC_A_PASSIVE_POLL_MODE: data = nci_extract_rf_params_nfca_passive_poll(ndev, &(ntf.rf_tech_specific_params.nfca_poll), data); break; case NCI_NFC_B_PASSIVE_POLL_MODE: data = nci_extract_rf_params_nfcb_passive_poll(ndev, &(ntf.rf_tech_specific_params.nfcb_poll), data); break; case NCI_NFC_F_PASSIVE_POLL_MODE: data = nci_extract_rf_params_nfcf_passive_poll(ndev, &(ntf.rf_tech_specific_params.nfcf_poll), data); break; case NCI_NFC_V_PASSIVE_POLL_MODE: data = nci_extract_rf_params_nfcv_passive_poll(ndev, &(ntf.rf_tech_specific_params.nfcv_poll), data); break; default: pr_err("unsupported rf_tech_and_mode 0x%x\n", ntf.rf_tech_and_mode); data += ntf.rf_tech_specific_params_len; add_target = false; } } ntf.ntf_type = *data++; pr_debug("ntf_type %d\n", ntf.ntf_type); if (add_target == true) nci_add_new_target(ndev, &ntf); if (ntf.ntf_type == NCI_DISCOVER_NTF_TYPE_MORE) { atomic_set(&ndev->state, NCI_W4_ALL_DISCOVERIES); } else { atomic_set(&ndev->state, NCI_W4_HOST_SELECT); nfc_targets_found(ndev->nfc_dev, ndev->targets, ndev->n_targets); } } static int nci_extract_activation_params_iso_dep(struct nci_dev *ndev, struct nci_rf_intf_activated_ntf *ntf, const __u8 *data) { struct 
activation_params_nfca_poll_iso_dep *nfca_poll; struct activation_params_nfcb_poll_iso_dep *nfcb_poll; switch (ntf->activation_rf_tech_and_mode) { case NCI_NFC_A_PASSIVE_POLL_MODE: nfca_poll = &ntf->activation_params.nfca_poll_iso_dep; nfca_poll->rats_res_len = min_t(__u8, *data++, NFC_ATS_MAXSIZE); pr_debug("rats_res_len %d\n", nfca_poll->rats_res_len); if (nfca_poll->rats_res_len > 0) { memcpy(nfca_poll->rats_res, data, nfca_poll->rats_res_len); } break; case NCI_NFC_B_PASSIVE_POLL_MODE: nfcb_poll = &ntf->activation_params.nfcb_poll_iso_dep; nfcb_poll->attrib_res_len = min_t(__u8, *data++, 50); pr_debug("attrib_res_len %d\n", nfcb_poll->attrib_res_len); if (nfcb_poll->attrib_res_len > 0) { memcpy(nfcb_poll->attrib_res, data, nfcb_poll->attrib_res_len); } break; default: pr_err("unsupported activation_rf_tech_and_mode 0x%x\n", ntf->activation_rf_tech_and_mode); return NCI_STATUS_RF_PROTOCOL_ERROR; } return NCI_STATUS_OK; } static int nci_extract_activation_params_nfc_dep(struct nci_dev *ndev, struct nci_rf_intf_activated_ntf *ntf, const __u8 *data) { struct activation_params_poll_nfc_dep *poll; struct activation_params_listen_nfc_dep *listen; switch (ntf->activation_rf_tech_and_mode) { case NCI_NFC_A_PASSIVE_POLL_MODE: case NCI_NFC_F_PASSIVE_POLL_MODE: poll = &ntf->activation_params.poll_nfc_dep; poll->atr_res_len = min_t(__u8, *data++, NFC_ATR_RES_MAXSIZE - 2); pr_debug("atr_res_len %d\n", poll->atr_res_len); if (poll->atr_res_len > 0) memcpy(poll->atr_res, data, poll->atr_res_len); break; case NCI_NFC_A_PASSIVE_LISTEN_MODE: case NCI_NFC_F_PASSIVE_LISTEN_MODE: listen = &ntf->activation_params.listen_nfc_dep; listen->atr_req_len = min_t(__u8, *data++, NFC_ATR_REQ_MAXSIZE - 2); pr_debug("atr_req_len %d\n", listen->atr_req_len); if (listen->atr_req_len > 0) memcpy(listen->atr_req, data, listen->atr_req_len); break; default: pr_err("unsupported activation_rf_tech_and_mode 0x%x\n", ntf->activation_rf_tech_and_mode); return NCI_STATUS_RF_PROTOCOL_ERROR; } return NCI_STATUS_OK; } static void nci_target_auto_activated(struct nci_dev *ndev, const struct nci_rf_intf_activated_ntf *ntf) { struct nfc_target *target; int rc; target = &ndev->targets[ndev->n_targets]; rc = nci_add_new_protocol(ndev, target, ntf->rf_protocol, ntf->activation_rf_tech_and_mode, &ntf->rf_tech_specific_params); if (rc) return; target->logical_idx = ntf->rf_discovery_id; ndev->n_targets++; pr_debug("logical idx %d, n_targets %d\n", target->logical_idx, ndev->n_targets); nfc_targets_found(ndev->nfc_dev, ndev->targets, ndev->n_targets); } static int nci_store_general_bytes_nfc_dep(struct nci_dev *ndev, const struct nci_rf_intf_activated_ntf *ntf) { ndev->remote_gb_len = 0; if (ntf->activation_params_len <= 0) return NCI_STATUS_OK; switch (ntf->activation_rf_tech_and_mode) { case NCI_NFC_A_PASSIVE_POLL_MODE: case NCI_NFC_F_PASSIVE_POLL_MODE: ndev->remote_gb_len = min_t(__u8, (ntf->activation_params.poll_nfc_dep.atr_res_len - NFC_ATR_RES_GT_OFFSET), NFC_ATR_RES_GB_MAXSIZE); memcpy(ndev->remote_gb, (ntf->activation_params.poll_nfc_dep.atr_res + NFC_ATR_RES_GT_OFFSET), ndev->remote_gb_len); break; case NCI_NFC_A_PASSIVE_LISTEN_MODE: case NCI_NFC_F_PASSIVE_LISTEN_MODE: ndev->remote_gb_len = min_t(__u8, (ntf->activation_params.listen_nfc_dep.atr_req_len - NFC_ATR_REQ_GT_OFFSET), NFC_ATR_REQ_GB_MAXSIZE); memcpy(ndev->remote_gb, (ntf->activation_params.listen_nfc_dep.atr_req + NFC_ATR_REQ_GT_OFFSET), ndev->remote_gb_len); break; default: pr_err("unsupported activation_rf_tech_and_mode 0x%x\n", ntf->activation_rf_tech_and_mode); return 
NCI_STATUS_RF_PROTOCOL_ERROR; } return NCI_STATUS_OK; } static int nci_store_ats_nfc_iso_dep(struct nci_dev *ndev, const struct nci_rf_intf_activated_ntf *ntf) { ndev->target_ats_len = 0; if (ntf->activation_params_len <= 0) return NCI_STATUS_OK; if (ntf->activation_params.nfca_poll_iso_dep.rats_res_len > NFC_ATS_MAXSIZE) { pr_debug("ATS too long\n"); return NCI_STATUS_RF_PROTOCOL_ERROR; } if (ntf->activation_params.nfca_poll_iso_dep.rats_res_len > 0) { ndev->target_ats_len = ntf->activation_params.nfca_poll_iso_dep.rats_res_len; memcpy(ndev->target_ats, ntf->activation_params.nfca_poll_iso_dep.rats_res, ndev->target_ats_len); } return NCI_STATUS_OK; } static void nci_rf_intf_activated_ntf_packet(struct nci_dev *ndev, const struct sk_buff *skb) { struct nci_conn_info *conn_info; struct nci_rf_intf_activated_ntf ntf; const __u8 *data = skb->data; int err = NCI_STATUS_OK; ntf.rf_discovery_id = *data++; ntf.rf_interface = *data++; ntf.rf_protocol = *data++; ntf.activation_rf_tech_and_mode = *data++; ntf.max_data_pkt_payload_size = *data++; ntf.initial_num_credits = *data++; ntf.rf_tech_specific_params_len = *data++; pr_debug("rf_discovery_id %d\n", ntf.rf_discovery_id); pr_debug("rf_interface 0x%x\n", ntf.rf_interface); pr_debug("rf_protocol 0x%x\n", ntf.rf_protocol); pr_debug("activation_rf_tech_and_mode 0x%x\n", ntf.activation_rf_tech_and_mode); pr_debug("max_data_pkt_payload_size 0x%x\n", ntf.max_data_pkt_payload_size); pr_debug("initial_num_credits 0x%x\n", ntf.initial_num_credits); pr_debug("rf_tech_specific_params_len %d\n", ntf.rf_tech_specific_params_len); /* If this contains a value of 0x00 (NFCEE Direct RF * Interface) then all following parameters SHALL contain a * value of 0 and SHALL be ignored. */ if (ntf.rf_interface == NCI_RF_INTERFACE_NFCEE_DIRECT) goto listen; if (ntf.rf_tech_specific_params_len > 0) { switch (ntf.activation_rf_tech_and_mode) { case NCI_NFC_A_PASSIVE_POLL_MODE: data = nci_extract_rf_params_nfca_passive_poll(ndev, &(ntf.rf_tech_specific_params.nfca_poll), data); break; case NCI_NFC_B_PASSIVE_POLL_MODE: data = nci_extract_rf_params_nfcb_passive_poll(ndev, &(ntf.rf_tech_specific_params.nfcb_poll), data); break; case NCI_NFC_F_PASSIVE_POLL_MODE: data = nci_extract_rf_params_nfcf_passive_poll(ndev, &(ntf.rf_tech_specific_params.nfcf_poll), data); break; case NCI_NFC_V_PASSIVE_POLL_MODE: data = nci_extract_rf_params_nfcv_passive_poll(ndev, &(ntf.rf_tech_specific_params.nfcv_poll), data); break; case NCI_NFC_A_PASSIVE_LISTEN_MODE: /* no RF technology specific parameters */ break; case NCI_NFC_F_PASSIVE_LISTEN_MODE: data = nci_extract_rf_params_nfcf_passive_listen(ndev, &(ntf.rf_tech_specific_params.nfcf_listen), data); break; default: pr_err("unsupported activation_rf_tech_and_mode 0x%x\n", ntf.activation_rf_tech_and_mode); err = NCI_STATUS_RF_PROTOCOL_ERROR; goto exit; } } ntf.data_exch_rf_tech_and_mode = *data++; ntf.data_exch_tx_bit_rate = *data++; ntf.data_exch_rx_bit_rate = *data++; ntf.activation_params_len = *data++; pr_debug("data_exch_rf_tech_and_mode 0x%x\n", ntf.data_exch_rf_tech_and_mode); pr_debug("data_exch_tx_bit_rate 0x%x\n", ntf.data_exch_tx_bit_rate); pr_debug("data_exch_rx_bit_rate 0x%x\n", ntf.data_exch_rx_bit_rate); pr_debug("activation_params_len %d\n", ntf.activation_params_len); if (ntf.activation_params_len > 0) { switch (ntf.rf_interface) { case NCI_RF_INTERFACE_ISO_DEP: err = nci_extract_activation_params_iso_dep(ndev, &ntf, data); break; case NCI_RF_INTERFACE_NFC_DEP: err = nci_extract_activation_params_nfc_dep(ndev, &ntf, data); break; 
case NCI_RF_INTERFACE_FRAME: /* no activation params */ break; default: pr_err("unsupported rf_interface 0x%x\n", ntf.rf_interface); err = NCI_STATUS_RF_PROTOCOL_ERROR; break; } } exit: if (err == NCI_STATUS_OK) { conn_info = ndev->rf_conn_info; if (!conn_info) return; conn_info->max_pkt_payload_len = ntf.max_data_pkt_payload_size; conn_info->initial_num_credits = ntf.initial_num_credits; /* set the available credits to initial value */ atomic_set(&conn_info->credits_cnt, conn_info->initial_num_credits); /* store general bytes to be reported later in dep_link_up */ if (ntf.rf_interface == NCI_RF_INTERFACE_NFC_DEP) { err = nci_store_general_bytes_nfc_dep(ndev, &ntf); if (err != NCI_STATUS_OK) pr_err("unable to store general bytes\n"); } /* store ATS to be reported later in nci_activate_target */ if (ntf.rf_interface == NCI_RF_INTERFACE_ISO_DEP && ntf.activation_rf_tech_and_mode == NCI_NFC_A_PASSIVE_POLL_MODE) { err = nci_store_ats_nfc_iso_dep(ndev, &ntf); if (err != NCI_STATUS_OK) pr_err("unable to store ATS\n"); } } if (!(ntf.activation_rf_tech_and_mode & NCI_RF_TECH_MODE_LISTEN_MASK)) { /* Poll mode */ if (atomic_read(&ndev->state) == NCI_DISCOVERY) { /* A single target was found and activated * automatically */ atomic_set(&ndev->state, NCI_POLL_ACTIVE); if (err == NCI_STATUS_OK) nci_target_auto_activated(ndev, &ntf); } else { /* ndev->state == NCI_W4_HOST_SELECT */ /* A selected target was activated, so complete the * request */ atomic_set(&ndev->state, NCI_POLL_ACTIVE); nci_req_complete(ndev, err); } } else { listen: /* Listen mode */ atomic_set(&ndev->state, NCI_LISTEN_ACTIVE); if (err == NCI_STATUS_OK && ntf.rf_protocol == NCI_RF_PROTOCOL_NFC_DEP) { err = nfc_tm_activated(ndev->nfc_dev, NFC_PROTO_NFC_DEP_MASK, NFC_COMM_PASSIVE, ndev->remote_gb, ndev->remote_gb_len); if (err != NCI_STATUS_OK) pr_err("error when signaling tm activation\n"); } } } static void nci_rf_deactivate_ntf_packet(struct nci_dev *ndev, const struct sk_buff *skb) { const struct nci_conn_info *conn_info; const struct nci_rf_deactivate_ntf *ntf = (void *)skb->data; pr_debug("entry, type 0x%x, reason 0x%x\n", ntf->type, ntf->reason); conn_info = ndev->rf_conn_info; if (!conn_info) return; /* drop tx data queue */ skb_queue_purge(&ndev->tx_q); /* drop partial rx data packet */ if (ndev->rx_data_reassembly) { kfree_skb(ndev->rx_data_reassembly); ndev->rx_data_reassembly = NULL; } /* complete the data exchange transaction, if exists */ if (test_bit(NCI_DATA_EXCHANGE, &ndev->flags)) nci_data_exchange_complete(ndev, NULL, NCI_STATIC_RF_CONN_ID, -EIO); switch (ntf->type) { case NCI_DEACTIVATE_TYPE_IDLE_MODE: nci_clear_target_list(ndev); atomic_set(&ndev->state, NCI_IDLE); break; case NCI_DEACTIVATE_TYPE_SLEEP_MODE: case NCI_DEACTIVATE_TYPE_SLEEP_AF_MODE: atomic_set(&ndev->state, NCI_W4_HOST_SELECT); break; case NCI_DEACTIVATE_TYPE_DISCOVERY: nci_clear_target_list(ndev); atomic_set(&ndev->state, NCI_DISCOVERY); break; } nci_req_complete(ndev, NCI_STATUS_OK); } static void nci_nfcee_discover_ntf_packet(struct nci_dev *ndev, const struct sk_buff *skb) { u8 status = NCI_STATUS_OK; const struct nci_nfcee_discover_ntf *nfcee_ntf = (struct nci_nfcee_discover_ntf *)skb->data; /* NFCForum NCI 9.2.1 HCI Network Specific Handling * If the NFCC supports the HCI Network, it SHALL return one, * and only one, NFCEE_DISCOVER_NTF with a Protocol type of * “HCI Access”, even if the HCI Network contains multiple NFCEEs. 
*/ ndev->hci_dev->nfcee_id = nfcee_ntf->nfcee_id; ndev->cur_params.id = nfcee_ntf->nfcee_id; nci_req_complete(ndev, status); } void nci_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb) { __u16 ntf_opcode = nci_opcode(skb->data); pr_debug("NCI RX: MT=ntf, PBF=%d, GID=0x%x, OID=0x%x, plen=%d\n", nci_pbf(skb->data), nci_opcode_gid(ntf_opcode), nci_opcode_oid(ntf_opcode), nci_plen(skb->data)); /* strip the nci control header */ skb_pull(skb, NCI_CTRL_HDR_SIZE); if (nci_opcode_gid(ntf_opcode) == NCI_GID_PROPRIETARY) { if (nci_prop_ntf_packet(ndev, ntf_opcode, skb) == -ENOTSUPP) { pr_err("unsupported ntf opcode 0x%x\n", ntf_opcode); } goto end; } switch (ntf_opcode) { case NCI_OP_CORE_RESET_NTF: nci_core_reset_ntf_packet(ndev, skb); break; case NCI_OP_CORE_CONN_CREDITS_NTF: nci_core_conn_credits_ntf_packet(ndev, skb); break; case NCI_OP_CORE_GENERIC_ERROR_NTF: nci_core_generic_error_ntf_packet(ndev, skb); break; case NCI_OP_CORE_INTF_ERROR_NTF: nci_core_conn_intf_error_ntf_packet(ndev, skb); break; case NCI_OP_RF_DISCOVER_NTF: nci_rf_discover_ntf_packet(ndev, skb); break; case NCI_OP_RF_INTF_ACTIVATED_NTF: nci_rf_intf_activated_ntf_packet(ndev, skb); break; case NCI_OP_RF_DEACTIVATE_NTF: nci_rf_deactivate_ntf_packet(ndev, skb); break; case NCI_OP_NFCEE_DISCOVER_NTF: nci_nfcee_discover_ntf_packet(ndev, skb); break; case NCI_OP_RF_NFCEE_ACTION_NTF: break; default: pr_err("unknown ntf opcode 0x%x\n", ntf_opcode); break; } nci_core_ntf_packet(ndev, ntf_opcode, skb); end: kfree_skb(skb); }
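/*
 * Illustrative sketch (not part of the file above): how an NCI transport
 * driver typically hands a received frame to the core, which in turn
 * dispatches notification packets to nci_ntf_packet() above.  The driver
 * name and function are hypothetical.
 */
#include <linux/skbuff.h>
#include <net/nfc/nci_core.h>

/* called from the driver's RX path with one complete NCI packet */
static void example_drv_frame_received(struct nci_dev *ndev,
				       struct sk_buff *skb)
{
	/* nci_recv_frame() queues the skb; the NCI rx work then routes it
	 * by message type (response, notification or data).
	 */
	if (nci_recv_frame(ndev, skb))
		pr_err("example_drv: failed to deliver NCI frame\n");
}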
10535 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 /* SPDX-License-Identifier: GPL-2.0 */ #undef TRACE_SYSTEM #define TRACE_SYSTEM ipi #if !defined(_TRACE_IPI_H) || defined(TRACE_HEADER_MULTI_READ) #define _TRACE_IPI_H #include <linux/tracepoint.h> /** * ipi_raise - called when a smp cross call is made * * @mask: mask of recipient CPUs for the IPI * @reason: string identifying the IPI purpose * * It is necessary for @reason to be a static string declared with * __tracepoint_string. */ TRACE_EVENT(ipi_raise, TP_PROTO(const struct cpumask *mask, const char *reason), TP_ARGS(mask, reason), TP_STRUCT__entry( __bitmask(target_cpus, nr_cpumask_bits) __field(const char *, reason) ), TP_fast_assign( __assign_bitmask(target_cpus, cpumask_bits(mask), nr_cpumask_bits); __entry->reason = reason; ), TP_printk("target_mask=%s (%s)", __get_bitmask(target_cpus), __entry->reason) ); TRACE_EVENT(ipi_send_cpu, TP_PROTO(const unsigned int cpu, unsigned long callsite, void *callback), TP_ARGS(cpu, callsite, callback), TP_STRUCT__entry( __field(unsigned int, cpu) __field(void *, callsite) __field(void *, callback) ), TP_fast_assign( __entry->cpu = cpu; __entry->callsite = (void *)callsite; __entry->callback = callback; ), TP_printk("cpu=%u callsite=%pS callback=%pS", __entry->cpu, __entry->callsite, __entry->callback) ); TRACE_EVENT(ipi_send_cpumask, TP_PROTO(const struct cpumask *cpumask, unsigned long callsite, void *callback), TP_ARGS(cpumask, callsite, callback), TP_STRUCT__entry( __cpumask(cpumask) __field(void *, callsite) __field(void *, callback) ), TP_fast_assign( __assign_cpumask(cpumask, cpumask_bits(cpumask)); __entry->callsite = (void *)callsite; __entry->callback = callback; ), TP_printk("cpumask=%s callsite=%pS callback=%pS", __get_cpumask(cpumask), __entry->callsite, __entry->callback) ); DECLARE_EVENT_CLASS(ipi_handler, TP_PROTO(const char *reason), TP_ARGS(reason), TP_STRUCT__entry( __field(const char *, reason) ), TP_fast_assign( __entry->reason = reason; ), TP_printk("(%s)", __entry->reason) ); /** * ipi_entry - called immediately before the IPI handler * * @reason: string identifying the IPI purpose * * It is necessary for @reason to be a static string declared with * __tracepoint_string, ideally the same as used with trace_ipi_raise * for that IPI. */ DEFINE_EVENT(ipi_handler, ipi_entry, TP_PROTO(const char *reason), TP_ARGS(reason) ); /** * ipi_exit - called immediately after the IPI handler returns * * @reason: string identifying the IPI purpose * * It is necessary for @reason to be a static string declared with * __tracepoint_string, ideally the same as used with trace_ipi_raise for * that IPI. */ DEFINE_EVENT(ipi_handler, ipi_exit, TP_PROTO(const char *reason), TP_ARGS(reason) ); #endif /* _TRACE_IPI_H */ /* This part must be outside protection */ #include <trace/define_trace.h>
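/*
 * Illustrative sketch (not part of the header above): how architecture SMP
 * code typically uses these events.  As the kernel-doc requires, @reason is
 * a static string tagged with __tracepoint_string; all names below are
 * hypothetical, and CREATE_TRACE_POINTS may only be defined in a single
 * compilation unit.
 */
#include <linux/cpumask.h>
#include <linux/tracepoint.h>

#define CREATE_TRACE_POINTS
#include <trace/events/ipi.h>

static const char *example_resched_reason __tracepoint_string =
	"Rescheduling interrupts";

static void example_send_resched_ipi(const struct cpumask *mask)
{
	trace_ipi_raise(mask, example_resched_reason);
	/* architecture-specific IPI send would follow here */
}

static void example_resched_ipi_handler(void)
{
	trace_ipi_entry(example_resched_reason);
	/* handle the IPI */
	trace_ipi_exit(example_resched_reason);
}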
1 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 // SPDX-License-Identifier: GPL-2.0-or-later /* * GeneSys GL620USB-A based links * Copyright (C) 2001 by Jiun-Jie Huang <huangjj@genesyslogic.com.tw> * Copyright (C) 2001 by Stanislav Brabec <utx@penguin.cz> */ // #define DEBUG // error path messages, extra info // #define VERBOSE // more; success messages #include <linux/module.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/workqueue.h> #include <linux/mii.h> #include <linux/usb.h> #include <linux/usb/usbnet.h> #include <linux/gfp.h> /* * GeneSys GL620USB-A (www.genesyslogic.com.tw) * * ... should partially interop with the Win32 driver for this hardware. * The GeneSys docs imply there's some NDIS issue motivating this framing. * * Some info from GeneSys: * - GL620USB-A is full duplex; GL620USB is only half duplex for bulk. * (Some cables, like the BAFO-100c, use the half duplex version.) * - For the full duplex model, the low bit of the version code says * which side is which ("left/right"). * - For the half duplex type, a control/interrupt handshake settles * the transfer direction. (That's disabled here, partially coded.) * A control URB would block until other side writes an interrupt. * * Original code from Jiun-Jie Huang <huangjj@genesyslogic.com.tw> * and merged into "usbnet" by Stanislav Brabec <utx@penguin.cz>. 
*/ // control msg write command #define GENELINK_CONNECT_WRITE 0xF0 // interrupt pipe index #define GENELINK_INTERRUPT_PIPE 0x03 // interrupt read buffer size #define INTERRUPT_BUFSIZE 0x08 // interrupt pipe interval value #define GENELINK_INTERRUPT_INTERVAL 0x10 // max transmit packet number per transmit #define GL_MAX_TRANSMIT_PACKETS 32 // max packet length #define GL_MAX_PACKET_LEN 1514 // max receive buffer size #define GL_RCV_BUF_SIZE \ (((GL_MAX_PACKET_LEN + 4) * GL_MAX_TRANSMIT_PACKETS) + 4) struct gl_packet { __le32 packet_length; char packet_data[]; }; struct gl_header { __le32 packet_count; struct gl_packet packets; }; static int genelink_rx_fixup(struct usbnet *dev, struct sk_buff *skb) { struct gl_header *header; struct gl_packet *packet; struct sk_buff *gl_skb; u32 size; u32 count; /* This check is no longer done by usbnet */ if (skb->len < dev->net->hard_header_len) return 0; header = (struct gl_header *) skb->data; // get the packet count of the received skb count = le32_to_cpu(header->packet_count); if (count > GL_MAX_TRANSMIT_PACKETS) { netdev_dbg(dev->net, "genelink: invalid received packet count %u\n", count); return 0; } // set the current packet pointer to the first packet packet = &header->packets; // decrement the length for the packet count size 4 bytes skb_pull(skb, 4); while (count > 1) { // get the packet length size = le32_to_cpu(packet->packet_length); // this may be a broken packet if (size > GL_MAX_PACKET_LEN) { netdev_dbg(dev->net, "genelink: invalid rx length %d\n", size); return 0; } // allocate the skb for the individual packet gl_skb = alloc_skb(size, GFP_ATOMIC); if (gl_skb) { // copy the packet data to the new skb skb_put_data(gl_skb, packet->packet_data, size); usbnet_skb_return(dev, gl_skb); } // advance to the next packet packet = (struct gl_packet *)&packet->packet_data[size]; count--; // shift the data pointer to the next gl_packet skb_pull(skb, size + 4); } // skip the packet length field 4 bytes skb_pull(skb, 4); if (skb->len > GL_MAX_PACKET_LEN) { netdev_dbg(dev->net, "genelink: invalid rx length %d\n", skb->len); return 0; } return 1; } static struct sk_buff * genelink_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags) { int padlen; int length = skb->len; int headroom = skb_headroom(skb); int tailroom = skb_tailroom(skb); __le32 *packet_count; __le32 *packet_len; // FIXME: magic numbers, bleech padlen = ((skb->len + (4 + 4*1)) % 64) ? 
0 : 1; if ((!skb_cloned(skb)) && ((headroom + tailroom) >= (padlen + (4 + 4*1)))) { if ((headroom < (4 + 4*1)) || (tailroom < padlen)) { skb->data = memmove(skb->head + (4 + 4*1), skb->data, skb->len); skb_set_tail_pointer(skb, skb->len); } } else { struct sk_buff *skb2; skb2 = skb_copy_expand(skb, (4 + 4*1) , padlen, flags); dev_kfree_skb_any(skb); skb = skb2; if (!skb) return NULL; } // attach the packet count to the header packet_count = skb_push(skb, (4 + 4 * 1)); packet_len = packet_count + 1; *packet_count = cpu_to_le32(1); *packet_len = cpu_to_le32(length); // add padding byte if ((skb->len % dev->maxpacket) == 0) skb_put(skb, 1); return skb; } static int genelink_bind(struct usbnet *dev, struct usb_interface *intf) { dev->hard_mtu = GL_RCV_BUF_SIZE; dev->net->hard_header_len += 4; return usbnet_get_endpoints(dev, intf); } static const struct driver_info genelink_info = { .description = "Genesys GeneLink", .flags = FLAG_POINTTOPOINT | FLAG_FRAMING_GL | FLAG_NO_SETINT, .bind = genelink_bind, .rx_fixup = genelink_rx_fixup, .tx_fixup = genelink_tx_fixup, .in = 1, .out = 2, #ifdef GENELINK_ACK .check_connect =genelink_check_connect, #endif }; static const struct usb_device_id products [] = { { USB_DEVICE(0x05e3, 0x0502), // GL620USB-A .driver_info = (unsigned long) &genelink_info, }, /* NOT: USB_DEVICE(0x05e3, 0x0501), // GL620USB * that's half duplex, not currently supported */ { }, // END }; MODULE_DEVICE_TABLE(usb, products); static struct usb_driver gl620a_driver = { .name = "gl620a", .id_table = products, .probe = usbnet_probe, .disconnect = usbnet_disconnect, .suspend = usbnet_suspend, .resume = usbnet_resume, .disable_hub_initiated_lpm = 1, }; module_usb_driver(gl620a_driver); MODULE_AUTHOR("Jiun-Jie Huang"); MODULE_DESCRIPTION("GL620-USB-A Host-to-Host Link cables"); MODULE_LICENSE("GPL");
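/*
 * Illustrative sketch (not part of the driver above): the GeneLink bulk
 * framing that genelink_tx_fixup() builds for a single Ethernet frame --
 * a little-endian packet count followed by one length-prefixed packet.
 * This helper is hypothetical and only documents the layout; the real
 * tx_fixup additionally appends one pad byte when the finished frame
 * would end exactly on a USB max-packet boundary.
 */
#include <linux/skbuff.h>
#include <linux/types.h>
#include <asm/byteorder.h>

static void example_build_gl_frame(struct sk_buff *skb, u32 eth_len)
{
	__le32 *hdr;

	/* reserve 4 bytes for the packet count and 4 for the packet length */
	hdr = skb_push(skb, 8);
	hdr[0] = cpu_to_le32(1);	/* packet_count */
	hdr[1] = cpu_to_le32(eth_len);	/* packet_length of the payload */
}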
913 914 915 916 917 918 // SPDX-License-Identifier: GPL-2.0-only /* * Common framework for low-level network console, dump, and debugger code * * Sep 8 2003 Matt Mackall <mpm@selenic.com> * * based on the netconsole code from: * * Copyright (C) 2001 Ingo Molnar <mingo@redhat.com> * Copyright (C) 2002 Red Hat, Inc. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/moduleparam.h> #include <linux/kernel.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/string.h> #include <linux/if_arp.h> #include <linux/inetdevice.h> #include <linux/inet.h> #include <linux/interrupt.h> #include <linux/netpoll.h> #include <linux/sched.h> #include <linux/delay.h> #include <linux/rcupdate.h> #include <linux/workqueue.h> #include <linux/slab.h> #include <linux/export.h> #include <linux/if_vlan.h> #include <net/tcp.h> #include <net/udp.h> #include <net/addrconf.h> #include <net/ndisc.h> #include <net/ip6_checksum.h> #include <linux/unaligned.h> #include <trace/events/napi.h> #include <linux/kconfig.h> /* * We maintain a small pool of fully-sized skbs, to make sure the * message gets out even in extreme OOM situations. */ #define MAX_UDP_CHUNK 1460 #define MAX_SKBS 32 #define USEC_PER_POLL 50 #define MAX_SKB_SIZE \ (sizeof(struct ethhdr) + \ sizeof(struct iphdr) + \ sizeof(struct udphdr) + \ MAX_UDP_CHUNK) static void zap_completion_queue(void); static unsigned int carrier_timeout = 4; module_param(carrier_timeout, uint, 0644); #define np_info(np, fmt, ...) \ pr_info("%s: " fmt, np->name, ##__VA_ARGS__) #define np_err(np, fmt, ...) \ pr_err("%s: " fmt, np->name, ##__VA_ARGS__) #define np_notice(np, fmt, ...) \ pr_notice("%s: " fmt, np->name, ##__VA_ARGS__) static netdev_tx_t netpoll_start_xmit(struct sk_buff *skb, struct net_device *dev, struct netdev_queue *txq) { netdev_tx_t status = NETDEV_TX_OK; netdev_features_t features; features = netif_skb_features(skb); if (skb_vlan_tag_present(skb) && !vlan_hw_offload_capable(features, skb->vlan_proto)) { skb = __vlan_hwaccel_push_inside(skb); if (unlikely(!skb)) { /* This is actually a packet drop, but we * don't want the code that calls this * function to try and operate on a NULL skb. 
*/ goto out; } } status = netdev_start_xmit(skb, dev, txq, false); out: return status; } static void queue_process(struct work_struct *work) { struct netpoll_info *npinfo = container_of(work, struct netpoll_info, tx_work.work); struct sk_buff *skb; unsigned long flags; while ((skb = skb_dequeue(&npinfo->txq))) { struct net_device *dev = skb->dev; struct netdev_queue *txq; unsigned int q_index; if (!netif_device_present(dev) || !netif_running(dev)) { kfree_skb(skb); continue; } local_irq_save(flags); /* check if skb->queue_mapping is still valid */ q_index = skb_get_queue_mapping(skb); if (unlikely(q_index >= dev->real_num_tx_queues)) { q_index = q_index % dev->real_num_tx_queues; skb_set_queue_mapping(skb, q_index); } txq = netdev_get_tx_queue(dev, q_index); HARD_TX_LOCK(dev, txq, smp_processor_id()); if (netif_xmit_frozen_or_stopped(txq) || !dev_xmit_complete(netpoll_start_xmit(skb, dev, txq))) { skb_queue_head(&npinfo->txq, skb); HARD_TX_UNLOCK(dev, txq); local_irq_restore(flags); schedule_delayed_work(&npinfo->tx_work, HZ/10); return; } HARD_TX_UNLOCK(dev, txq); local_irq_restore(flags); } } static int netif_local_xmit_active(struct net_device *dev) { int i; for (i = 0; i < dev->num_tx_queues; i++) { struct netdev_queue *txq = netdev_get_tx_queue(dev, i); if (READ_ONCE(txq->xmit_lock_owner) == smp_processor_id()) return 1; } return 0; } static void poll_one_napi(struct napi_struct *napi) { int work; /* If we set this bit but see that it has already been set, * that indicates that napi has been disabled and we need * to abort this operation */ if (test_and_set_bit(NAPI_STATE_NPSVC, &napi->state)) return; /* We explicitly pass the polling call a budget of 0 to * indicate that we are clearing the Tx path only. */ work = napi->poll(napi, 0); WARN_ONCE(work, "%pS exceeded budget in poll\n", napi->poll); trace_napi_poll(napi, work, 0); clear_bit(NAPI_STATE_NPSVC, &napi->state); } static void poll_napi(struct net_device *dev) { struct napi_struct *napi; int cpu = smp_processor_id(); list_for_each_entry_rcu(napi, &dev->napi_list, dev_list) { if (cmpxchg(&napi->poll_owner, -1, cpu) == -1) { poll_one_napi(napi); smp_store_release(&napi->poll_owner, -1); } } } void netpoll_poll_dev(struct net_device *dev) { struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo); const struct net_device_ops *ops; /* Don't do any rx activity if the dev_lock mutex is held * the dev_open/close paths use this to block netpoll activity * while changing device state */ if (!ni || down_trylock(&ni->dev_lock)) return; /* Some drivers will take the same locks in poll and xmit, * we can't poll if local CPU is already in xmit. 
*/ if (!netif_running(dev) || netif_local_xmit_active(dev)) { up(&ni->dev_lock); return; } ops = dev->netdev_ops; if (ops->ndo_poll_controller) ops->ndo_poll_controller(dev); poll_napi(dev); up(&ni->dev_lock); zap_completion_queue(); } EXPORT_SYMBOL(netpoll_poll_dev); void netpoll_poll_disable(struct net_device *dev) { struct netpoll_info *ni; might_sleep(); ni = rtnl_dereference(dev->npinfo); if (ni) down(&ni->dev_lock); } void netpoll_poll_enable(struct net_device *dev) { struct netpoll_info *ni; ni = rtnl_dereference(dev->npinfo); if (ni) up(&ni->dev_lock); } static void refill_skbs(struct netpoll *np) { struct sk_buff_head *skb_pool; struct sk_buff *skb; unsigned long flags; skb_pool = &np->skb_pool; spin_lock_irqsave(&skb_pool->lock, flags); while (skb_pool->qlen < MAX_SKBS) { skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC); if (!skb) break; __skb_queue_tail(skb_pool, skb); } spin_unlock_irqrestore(&skb_pool->lock, flags); } static void zap_completion_queue(void) { unsigned long flags; struct softnet_data *sd = &get_cpu_var(softnet_data); if (sd->completion_queue) { struct sk_buff *clist; local_irq_save(flags); clist = sd->completion_queue; sd->completion_queue = NULL; local_irq_restore(flags); while (clist != NULL) { struct sk_buff *skb = clist; clist = clist->next; if (!skb_irq_freeable(skb)) { refcount_set(&skb->users, 1); dev_kfree_skb_any(skb); /* put this one back */ } else { __kfree_skb(skb); } } } put_cpu_var(softnet_data); } static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve) { int count = 0; struct sk_buff *skb; zap_completion_queue(); repeat: skb = alloc_skb(len, GFP_ATOMIC); if (!skb) { skb = skb_dequeue(&np->skb_pool); schedule_work(&np->refill_wq); } if (!skb) { if (++count < 10) { netpoll_poll_dev(np->dev); goto repeat; } return NULL; } refcount_set(&skb->users, 1); skb_reserve(skb, reserve); return skb; } static int netpoll_owner_active(struct net_device *dev) { struct napi_struct *napi; list_for_each_entry_rcu(napi, &dev->napi_list, dev_list) { if (READ_ONCE(napi->poll_owner) == smp_processor_id()) return 1; } return 0; } /* call with IRQ disabled */ static netdev_tx_t __netpoll_send_skb(struct netpoll *np, struct sk_buff *skb) { netdev_tx_t status = NETDEV_TX_BUSY; netdev_tx_t ret = NET_XMIT_DROP; struct net_device *dev; unsigned long tries; /* It is up to the caller to keep npinfo alive. 
*/ struct netpoll_info *npinfo; lockdep_assert_irqs_disabled(); dev = np->dev; rcu_read_lock(); npinfo = rcu_dereference_bh(dev->npinfo); if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) { dev_kfree_skb_irq(skb); goto out; } /* don't get messages out of order, and no recursion */ if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) { struct netdev_queue *txq; txq = netdev_core_pick_tx(dev, skb, NULL); /* try until next clock tick */ for (tries = jiffies_to_usecs(1)/USEC_PER_POLL; tries > 0; --tries) { if (HARD_TX_TRYLOCK(dev, txq)) { if (!netif_xmit_stopped(txq)) status = netpoll_start_xmit(skb, dev, txq); HARD_TX_UNLOCK(dev, txq); if (dev_xmit_complete(status)) break; } /* tickle device maybe there is some cleanup */ netpoll_poll_dev(np->dev); udelay(USEC_PER_POLL); } WARN_ONCE(!irqs_disabled(), "netpoll_send_skb_on_dev(): %s enabled interrupts in poll (%pS)\n", dev->name, dev->netdev_ops->ndo_start_xmit); } if (!dev_xmit_complete(status)) { skb_queue_tail(&npinfo->txq, skb); schedule_delayed_work(&npinfo->tx_work,0); } ret = NETDEV_TX_OK; out: rcu_read_unlock(); return ret; } netdev_tx_t netpoll_send_skb(struct netpoll *np, struct sk_buff *skb) { unsigned long flags; netdev_tx_t ret; if (unlikely(!np)) { dev_kfree_skb_irq(skb); ret = NET_XMIT_DROP; } else { local_irq_save(flags); ret = __netpoll_send_skb(np, skb); local_irq_restore(flags); } return ret; } EXPORT_SYMBOL(netpoll_send_skb); int netpoll_send_udp(struct netpoll *np, const char *msg, int len) { int total_len, ip_len, udp_len; struct sk_buff *skb; struct udphdr *udph; struct iphdr *iph; struct ethhdr *eth; static atomic_t ip_ident; struct ipv6hdr *ip6h; if (!IS_ENABLED(CONFIG_PREEMPT_RT)) WARN_ON_ONCE(!irqs_disabled()); udp_len = len + sizeof(*udph); if (np->ipv6) ip_len = udp_len + sizeof(*ip6h); else ip_len = udp_len + sizeof(*iph); total_len = ip_len + LL_RESERVED_SPACE(np->dev); skb = find_skb(np, total_len + np->dev->needed_tailroom, total_len - len); if (!skb) return -ENOMEM; skb_copy_to_linear_data(skb, msg, len); skb_put(skb, len); skb_push(skb, sizeof(*udph)); skb_reset_transport_header(skb); udph = udp_hdr(skb); udph->source = htons(np->local_port); udph->dest = htons(np->remote_port); udph->len = htons(udp_len); if (np->ipv6) { udph->check = csum_ipv6_magic(&np->local_ip.in6, &np->remote_ip.in6, udp_len, IPPROTO_UDP, csum_partial(udph, udp_len, 0)); if (udph->check == 0) udph->check = CSUM_MANGLED_0; skb_push(skb, sizeof(*ip6h)); skb_reset_network_header(skb); ip6h = ipv6_hdr(skb); /* ip6h->version = 6; ip6h->priority = 0; */ *(unsigned char *)ip6h = 0x60; ip6h->flow_lbl[0] = 0; ip6h->flow_lbl[1] = 0; ip6h->flow_lbl[2] = 0; ip6h->payload_len = htons(sizeof(struct udphdr) + len); ip6h->nexthdr = IPPROTO_UDP; ip6h->hop_limit = 32; ip6h->saddr = np->local_ip.in6; ip6h->daddr = np->remote_ip.in6; eth = skb_push(skb, ETH_HLEN); skb_reset_mac_header(skb); skb->protocol = eth->h_proto = htons(ETH_P_IPV6); } else { udph->check = 0; udph->check = csum_tcpudp_magic(np->local_ip.ip, np->remote_ip.ip, udp_len, IPPROTO_UDP, csum_partial(udph, udp_len, 0)); if (udph->check == 0) udph->check = CSUM_MANGLED_0; skb_push(skb, sizeof(*iph)); skb_reset_network_header(skb); iph = ip_hdr(skb); /* iph->version = 4; iph->ihl = 5; */ *(unsigned char *)iph = 0x45; iph->tos = 0; put_unaligned(htons(ip_len), &(iph->tot_len)); iph->id = htons(atomic_inc_return(&ip_ident)); iph->frag_off = 0; iph->ttl = 64; iph->protocol = IPPROTO_UDP; iph->check = 0; put_unaligned(np->local_ip.ip, &(iph->saddr)); 
put_unaligned(np->remote_ip.ip, &(iph->daddr)); iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); eth = skb_push(skb, ETH_HLEN); skb_reset_mac_header(skb); skb->protocol = eth->h_proto = htons(ETH_P_IP); } ether_addr_copy(eth->h_source, np->dev->dev_addr); ether_addr_copy(eth->h_dest, np->remote_mac); skb->dev = np->dev; return (int)netpoll_send_skb(np, skb); } EXPORT_SYMBOL(netpoll_send_udp); void netpoll_print_options(struct netpoll *np) { np_info(np, "local port %d\n", np->local_port); if (np->ipv6) np_info(np, "local IPv6 address %pI6c\n", &np->local_ip.in6); else np_info(np, "local IPv4 address %pI4\n", &np->local_ip.ip); np_info(np, "interface name '%s'\n", np->dev_name); np_info(np, "local ethernet address '%pM'\n", np->dev_mac); np_info(np, "remote port %d\n", np->remote_port); if (np->ipv6) np_info(np, "remote IPv6 address %pI6c\n", &np->remote_ip.in6); else np_info(np, "remote IPv4 address %pI4\n", &np->remote_ip.ip); np_info(np, "remote ethernet address %pM\n", np->remote_mac); } EXPORT_SYMBOL(netpoll_print_options); static int netpoll_parse_ip_addr(const char *str, union inet_addr *addr) { const char *end; if (!strchr(str, ':') && in4_pton(str, -1, (void *)addr, -1, &end) > 0) { if (!*end) return 0; } if (in6_pton(str, -1, addr->in6.s6_addr, -1, &end) > 0) { #if IS_ENABLED(CONFIG_IPV6) if (!*end) return 1; #else return -1; #endif } return -1; } static void skb_pool_flush(struct netpoll *np) { struct sk_buff_head *skb_pool; cancel_work_sync(&np->refill_wq); skb_pool = &np->skb_pool; skb_queue_purge_reason(skb_pool, SKB_CONSUMED); } int netpoll_parse_options(struct netpoll *np, char *opt) { char *cur=opt, *delim; int ipv6; bool ipversion_set = false; if (*cur != '@') { if ((delim = strchr(cur, '@')) == NULL) goto parse_failed; *delim = 0; if (kstrtou16(cur, 10, &np->local_port)) goto parse_failed; cur = delim; } cur++; if (*cur != '/') { ipversion_set = true; if ((delim = strchr(cur, '/')) == NULL) goto parse_failed; *delim = 0; ipv6 = netpoll_parse_ip_addr(cur, &np->local_ip); if (ipv6 < 0) goto parse_failed; else np->ipv6 = (bool)ipv6; cur = delim; } cur++; if (*cur != ',') { /* parse out dev_name or dev_mac */ if ((delim = strchr(cur, ',')) == NULL) goto parse_failed; *delim = 0; np->dev_name[0] = '\0'; eth_broadcast_addr(np->dev_mac); if (!strchr(cur, ':')) strscpy(np->dev_name, cur, sizeof(np->dev_name)); else if (!mac_pton(cur, np->dev_mac)) goto parse_failed; cur = delim; } cur++; if (*cur != '@') { /* dst port */ if ((delim = strchr(cur, '@')) == NULL) goto parse_failed; *delim = 0; if (*cur == ' ' || *cur == '\t') np_info(np, "warning: whitespace is not allowed\n"); if (kstrtou16(cur, 10, &np->remote_port)) goto parse_failed; cur = delim; } cur++; /* dst ip */ if ((delim = strchr(cur, '/')) == NULL) goto parse_failed; *delim = 0; ipv6 = netpoll_parse_ip_addr(cur, &np->remote_ip); if (ipv6 < 0) goto parse_failed; else if (ipversion_set && np->ipv6 != (bool)ipv6) goto parse_failed; else np->ipv6 = (bool)ipv6; cur = delim + 1; if (*cur != 0) { /* MAC address */ if (!mac_pton(cur, np->remote_mac)) goto parse_failed; } netpoll_print_options(np); return 0; parse_failed: np_info(np, "couldn't parse config at '%s'!\n", cur); return -1; } EXPORT_SYMBOL(netpoll_parse_options); static void refill_skbs_work_handler(struct work_struct *work) { struct netpoll *np = container_of(work, struct netpoll, refill_wq); refill_skbs(np); } int __netpoll_setup(struct netpoll *np, struct net_device *ndev) { struct netpoll_info *npinfo; const struct net_device_ops *ops; int err; 
skb_queue_head_init(&np->skb_pool); if (ndev->priv_flags & IFF_DISABLE_NETPOLL) { np_err(np, "%s doesn't support polling, aborting\n", ndev->name); err = -ENOTSUPP; goto out; } npinfo = rtnl_dereference(ndev->npinfo); if (!npinfo) { npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL); if (!npinfo) { err = -ENOMEM; goto out; } sema_init(&npinfo->dev_lock, 1); skb_queue_head_init(&npinfo->txq); INIT_DELAYED_WORK(&npinfo->tx_work, queue_process); refcount_set(&npinfo->refcnt, 1); ops = ndev->netdev_ops; if (ops->ndo_netpoll_setup) { err = ops->ndo_netpoll_setup(ndev); if (err) goto free_npinfo; } } else { refcount_inc(&npinfo->refcnt); } np->dev = ndev; strscpy(np->dev_name, ndev->name, IFNAMSIZ); npinfo->netpoll = np; /* fill up the skb queue */ refill_skbs(np); INIT_WORK(&np->refill_wq, refill_skbs_work_handler); /* last thing to do is link it to the net device structure */ rcu_assign_pointer(ndev->npinfo, npinfo); return 0; free_npinfo: kfree(npinfo); out: return err; } EXPORT_SYMBOL_GPL(__netpoll_setup); /* * Returns a pointer to a string representation of the identifier used * to select the egress interface for the given netpoll instance. buf * must be a buffer of length at least MAC_ADDR_STR_LEN + 1. */ static char *egress_dev(struct netpoll *np, char *buf) { if (np->dev_name[0]) return np->dev_name; snprintf(buf, MAC_ADDR_STR_LEN, "%pM", np->dev_mac); return buf; } int netpoll_setup(struct netpoll *np) { struct net *net = current->nsproxy->net_ns; char buf[MAC_ADDR_STR_LEN + 1]; struct net_device *ndev = NULL; bool ip_overwritten = false; struct in_device *in_dev; int err; rtnl_lock(); if (np->dev_name[0]) ndev = __dev_get_by_name(net, np->dev_name); else if (is_valid_ether_addr(np->dev_mac)) ndev = dev_getbyhwaddr(net, ARPHRD_ETHER, np->dev_mac); if (!ndev) { np_err(np, "%s doesn't exist, aborting\n", egress_dev(np, buf)); err = -ENODEV; goto unlock; } netdev_hold(ndev, &np->dev_tracker, GFP_KERNEL); if (netdev_master_upper_dev_get(ndev)) { np_err(np, "%s is a slave device, aborting\n", egress_dev(np, buf)); err = -EBUSY; goto put; } if (!netif_running(ndev)) { unsigned long atmost; np_info(np, "device %s not up yet, forcing it\n", egress_dev(np, buf)); err = dev_open(ndev, NULL); if (err) { np_err(np, "failed to open %s\n", ndev->name); goto put; } rtnl_unlock(); atmost = jiffies + carrier_timeout * HZ; while (!netif_carrier_ok(ndev)) { if (time_after(jiffies, atmost)) { np_notice(np, "timeout waiting for carrier\n"); break; } msleep(1); } rtnl_lock(); } if (!np->local_ip.ip) { if (!np->ipv6) { const struct in_ifaddr *ifa; in_dev = __in_dev_get_rtnl(ndev); if (!in_dev) goto put_noaddr; ifa = rtnl_dereference(in_dev->ifa_list); if (!ifa) { put_noaddr: np_err(np, "no IP address for %s, aborting\n", egress_dev(np, buf)); err = -EDESTADDRREQ; goto put; } np->local_ip.ip = ifa->ifa_local; ip_overwritten = true; np_info(np, "local IP %pI4\n", &np->local_ip.ip); } else { #if IS_ENABLED(CONFIG_IPV6) struct inet6_dev *idev; err = -EDESTADDRREQ; idev = __in6_dev_get(ndev); if (idev) { struct inet6_ifaddr *ifp; read_lock_bh(&idev->lock); list_for_each_entry(ifp, &idev->addr_list, if_list) { if (!!(ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL) != !!(ipv6_addr_type(&np->remote_ip.in6) & IPV6_ADDR_LINKLOCAL)) continue; np->local_ip.in6 = ifp->addr; ip_overwritten = true; err = 0; break; } read_unlock_bh(&idev->lock); } if (err) { np_err(np, "no IPv6 address for %s, aborting\n", egress_dev(np, buf)); goto put; } else np_info(np, "local IPv6 %pI6c\n", &np->local_ip.in6); #else np_err(np, "IPv6 is not 
supported %s, aborting\n", egress_dev(np, buf)); err = -EINVAL; goto put; #endif } } err = __netpoll_setup(np, ndev); if (err) goto flush; rtnl_unlock(); return 0; flush: skb_pool_flush(np); put: DEBUG_NET_WARN_ON_ONCE(np->dev); if (ip_overwritten) memset(&np->local_ip, 0, sizeof(np->local_ip)); netdev_put(ndev, &np->dev_tracker); unlock: rtnl_unlock(); return err; } EXPORT_SYMBOL(netpoll_setup); static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head) { struct netpoll_info *npinfo = container_of(rcu_head, struct netpoll_info, rcu); skb_queue_purge(&npinfo->txq); /* we can't call cancel_delayed_work_sync here, as we are in softirq */ cancel_delayed_work(&npinfo->tx_work); /* clean after last, unfinished work */ __skb_queue_purge(&npinfo->txq); /* now cancel it again */ cancel_delayed_work(&npinfo->tx_work); kfree(npinfo); } void __netpoll_cleanup(struct netpoll *np) { struct netpoll_info *npinfo; npinfo = rtnl_dereference(np->dev->npinfo); if (!npinfo) return; if (refcount_dec_and_test(&npinfo->refcnt)) { const struct net_device_ops *ops; ops = np->dev->netdev_ops; if (ops->ndo_netpoll_cleanup) ops->ndo_netpoll_cleanup(np->dev); RCU_INIT_POINTER(np->dev->npinfo, NULL); call_rcu(&npinfo->rcu, rcu_cleanup_netpoll_info); } else RCU_INIT_POINTER(np->dev->npinfo, NULL); skb_pool_flush(np); } EXPORT_SYMBOL_GPL(__netpoll_cleanup); void __netpoll_free(struct netpoll *np) { ASSERT_RTNL(); /* Wait for transmitting packets to finish before freeing. */ synchronize_rcu(); __netpoll_cleanup(np); kfree(np); } EXPORT_SYMBOL_GPL(__netpoll_free); void do_netpoll_cleanup(struct netpoll *np) { __netpoll_cleanup(np); netdev_put(np->dev, &np->dev_tracker); np->dev = NULL; } EXPORT_SYMBOL(do_netpoll_cleanup); void netpoll_cleanup(struct netpoll *np) { rtnl_lock(); if (!np->dev) goto out; do_netpoll_cleanup(np); out: rtnl_unlock(); } EXPORT_SYMBOL(netpoll_cleanup);
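/*
 * Illustrative sketch (not part of the file above): a netconsole-style
 * client of this API.  The netpoll instance, configuration string and
 * helper names are made up; error handling is reduced to the minimum.
 */
#include <linux/netpoll.h>
#include <linux/string.h>
#include <linux/irqflags.h>

static struct netpoll example_np = {
	.name = "example",
};

/* @config must be writable, e.g. a copy of
 * "6665@10.0.0.1/eth0,6666@10.0.0.2/aa:bb:cc:dd:ee:ff"
 * (netpoll_parse_options() cuts it up in place).
 */
static int example_netpoll_start(char *config)
{
	int err;

	err = netpoll_parse_options(&example_np, config);
	if (err)
		return err;

	/* binds to the device, fills the skb pool, installs npinfo */
	return netpoll_setup(&example_np);
}

static void example_netpoll_log(const char *msg)
{
	unsigned long flags;

	/* netpoll_send_udp() expects hard IRQs to be disabled (non-RT) */
	local_irq_save(flags);
	netpoll_send_udp(&example_np, msg, strlen(msg));
	local_irq_restore(flags);
}

/* netpoll_cleanup(&example_np) releases the device and skb pool again */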
1 1 1 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 // SPDX-License-Identifier: GPL-2.0-only /* * Scalar fixed time AES core transform * * Copyright (C) 2017 Linaro Ltd <ard.biesheuvel@linaro.org> */ #include <crypto/aes.h> #include <crypto/algapi.h> #include <linux/module.h> static int aesti_set_key(struct crypto_tfm *tfm, const u8 *in_key, unsigned int key_len) { struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm); return aes_expandkey(ctx, in_key, key_len); } static void aesti_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) { const struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm); unsigned long flags; /* * Temporarily disable interrupts to avoid races where cachelines are * evicted when the CPU is interrupted to do something else. */ local_irq_save(flags); aes_encrypt(ctx, out, in); local_irq_restore(flags); } static void aesti_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) { const struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm); unsigned long flags; /* * Temporarily disable interrupts to avoid races where cachelines are * evicted when the CPU is interrupted to do something else. */ local_irq_save(flags); aes_decrypt(ctx, out, in); local_irq_restore(flags); } static struct crypto_alg aes_alg = { .cra_name = "aes", .cra_driver_name = "aes-fixed-time", .cra_priority = 100 + 1, .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct crypto_aes_ctx), .cra_module = THIS_MODULE, .cra_cipher.cia_min_keysize = AES_MIN_KEY_SIZE, .cra_cipher.cia_max_keysize = AES_MAX_KEY_SIZE, .cra_cipher.cia_setkey = aesti_set_key, .cra_cipher.cia_encrypt = aesti_encrypt, .cra_cipher.cia_decrypt = aesti_decrypt }; static int __init aes_init(void) { return crypto_register_alg(&aes_alg); } static void __exit aes_fini(void) { crypto_unregister_alg(&aes_alg); } module_init(aes_init); module_exit(aes_fini); MODULE_DESCRIPTION("Generic fixed time AES"); MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>"); MODULE_LICENSE("GPL v2");
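/*
 * Illustrative sketch (not part of the file above): selecting this
 * implementation by its driver name through the single-block cipher API.
 * Recent kernels treat crypto_cipher as an internal interface
 * (crypto/internal/cipher.h), so this only sketches the call sequence and
 * is not a recommended out-of-tree pattern; the helper name is hypothetical.
 */
#include <crypto/aes.h>
#include <crypto/internal/cipher.h>
#include <linux/err.h>

static int example_aes_one_block(const u8 key[AES_KEYSIZE_128],
				 const u8 in[AES_BLOCK_SIZE],
				 u8 out[AES_BLOCK_SIZE])
{
	struct crypto_cipher *tfm;
	int err;

	tfm = crypto_alloc_cipher("aes-fixed-time", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_cipher_setkey(tfm, key, AES_KEYSIZE_128);
	if (!err)
		crypto_cipher_encrypt_one(tfm, out, in);

	crypto_free_cipher(tfm);
	return err;
}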
1 1 1 8 1 8 3 2 1 1 8 4 1 4 1 1 4 3 1 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2014 Arturo Borrero Gonzalez <arturo@debian.org> */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <linux/netlink.h> #include <linux/netfilter.h> #include <linux/netfilter/nf_tables.h> #include <net/netfilter/nf_tables.h> #include <net/netfilter/nf_nat.h> #include <net/netfilter/nf_nat_masquerade.h> struct nft_masq { u32 flags; u8 sreg_proto_min; u8 sreg_proto_max; }; static const struct nla_policy nft_masq_policy[NFTA_MASQ_MAX + 1] = { [NFTA_MASQ_FLAGS] = NLA_POLICY_MASK(NLA_BE32, NF_NAT_RANGE_MASK), [NFTA_MASQ_REG_PROTO_MIN] = { .type = NLA_U32 }, [NFTA_MASQ_REG_PROTO_MAX] = { .type = NLA_U32 }, }; static int nft_masq_validate(const struct nft_ctx *ctx, const struct nft_expr *expr) { int err; err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT); if (err < 0) return err; return nft_chain_validate_hooks(ctx->chain, (1 << NF_INET_POST_ROUTING)); } static int nft_masq_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) { u32 plen = sizeof_field(struct nf_nat_range, min_proto.all); struct nft_masq *priv = nft_expr_priv(expr); int err; if (tb[NFTA_MASQ_FLAGS]) priv->flags = ntohl(nla_get_be32(tb[NFTA_MASQ_FLAGS])); if (tb[NFTA_MASQ_REG_PROTO_MIN]) { err = nft_parse_register_load(ctx, tb[NFTA_MASQ_REG_PROTO_MIN], &priv->sreg_proto_min, plen); if (err < 0) return err; if (tb[NFTA_MASQ_REG_PROTO_MAX]) { err = nft_parse_register_load(ctx, tb[NFTA_MASQ_REG_PROTO_MAX], &priv->sreg_proto_max, plen); if (err < 0) return err; } else { priv->sreg_proto_max = priv->sreg_proto_min; } } return nf_ct_netns_get(ctx->net, ctx->family); } static int nft_masq_dump(struct sk_buff *skb, const struct nft_expr *expr, bool reset) { const struct nft_masq *priv = nft_expr_priv(expr); if (priv->flags != 0 && nla_put_be32(skb, NFTA_MASQ_FLAGS, htonl(priv->flags))) goto nla_put_failure; if (priv->sreg_proto_min) { if (nft_dump_register(skb, NFTA_MASQ_REG_PROTO_MIN, priv->sreg_proto_min) || nft_dump_register(skb, NFTA_MASQ_REG_PROTO_MAX, priv->sreg_proto_max)) goto nla_put_failure; } return 0; nla_put_failure: return -1; } static void nft_masq_eval(const struct nft_expr *expr, struct nft_regs *regs, const struct nft_pktinfo *pkt) { const struct nft_masq *priv = nft_expr_priv(expr); struct nf_nat_range2 range; memset(&range, 0, sizeof(range)); range.flags = priv->flags; if (priv->sreg_proto_min) { 
range.min_proto.all = (__force __be16) nft_reg_load16(&regs->data[priv->sreg_proto_min]); range.max_proto.all = (__force __be16) nft_reg_load16(&regs->data[priv->sreg_proto_max]); } switch (nft_pf(pkt)) { case NFPROTO_IPV4: regs->verdict.code = nf_nat_masquerade_ipv4(pkt->skb, nft_hook(pkt), &range, nft_out(pkt)); break; #ifdef CONFIG_NF_TABLES_IPV6 case NFPROTO_IPV6: regs->verdict.code = nf_nat_masquerade_ipv6(pkt->skb, &range, nft_out(pkt)); break; #endif default: WARN_ON_ONCE(1); break; } } static void nft_masq_ipv4_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr) { nf_ct_netns_put(ctx->net, NFPROTO_IPV4); } static struct nft_expr_type nft_masq_ipv4_type; static const struct nft_expr_ops nft_masq_ipv4_ops = { .type = &nft_masq_ipv4_type, .size = NFT_EXPR_SIZE(sizeof(struct nft_masq)), .eval = nft_masq_eval, .init = nft_masq_init, .destroy = nft_masq_ipv4_destroy, .dump = nft_masq_dump, .validate = nft_masq_validate, .reduce = NFT_REDUCE_READONLY, }; static struct nft_expr_type nft_masq_ipv4_type __read_mostly = { .family = NFPROTO_IPV4, .name = "masq", .ops = &nft_masq_ipv4_ops, .policy = nft_masq_policy, .maxattr = NFTA_MASQ_MAX, .owner = THIS_MODULE, }; #ifdef CONFIG_NF_TABLES_IPV6 static void nft_masq_ipv6_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr) { nf_ct_netns_put(ctx->net, NFPROTO_IPV6); } static struct nft_expr_type nft_masq_ipv6_type; static const struct nft_expr_ops nft_masq_ipv6_ops = { .type = &nft_masq_ipv6_type, .size = NFT_EXPR_SIZE(sizeof(struct nft_masq)), .eval = nft_masq_eval, .init = nft_masq_init, .destroy = nft_masq_ipv6_destroy, .dump = nft_masq_dump, .validate = nft_masq_validate, .reduce = NFT_REDUCE_READONLY, }; static struct nft_expr_type nft_masq_ipv6_type __read_mostly = { .family = NFPROTO_IPV6, .name = "masq", .ops = &nft_masq_ipv6_ops, .policy = nft_masq_policy, .maxattr = NFTA_MASQ_MAX, .owner = THIS_MODULE, }; static int __init nft_masq_module_init_ipv6(void) { return nft_register_expr(&nft_masq_ipv6_type); } static void nft_masq_module_exit_ipv6(void) { nft_unregister_expr(&nft_masq_ipv6_type); } #else static inline int nft_masq_module_init_ipv6(void) { return 0; } static inline void nft_masq_module_exit_ipv6(void) {} #endif #ifdef CONFIG_NF_TABLES_INET static void nft_masq_inet_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr) { nf_ct_netns_put(ctx->net, NFPROTO_INET); } static struct nft_expr_type nft_masq_inet_type; static const struct nft_expr_ops nft_masq_inet_ops = { .type = &nft_masq_inet_type, .size = NFT_EXPR_SIZE(sizeof(struct nft_masq)), .eval = nft_masq_eval, .init = nft_masq_init, .destroy = nft_masq_inet_destroy, .dump = nft_masq_dump, .validate = nft_masq_validate, .reduce = NFT_REDUCE_READONLY, }; static struct nft_expr_type nft_masq_inet_type __read_mostly = { .family = NFPROTO_INET, .name = "masq", .ops = &nft_masq_inet_ops, .policy = nft_masq_policy, .maxattr = NFTA_MASQ_MAX, .owner = THIS_MODULE, }; static int __init nft_masq_module_init_inet(void) { return nft_register_expr(&nft_masq_inet_type); } static void nft_masq_module_exit_inet(void) { nft_unregister_expr(&nft_masq_inet_type); } #else static inline int nft_masq_module_init_inet(void) { return 0; } static inline void nft_masq_module_exit_inet(void) {} #endif static int __init nft_masq_module_init(void) { int ret; ret = nft_masq_module_init_ipv6(); if (ret < 0) return ret; ret = nft_masq_module_init_inet(); if (ret < 0) { nft_masq_module_exit_ipv6(); return ret; } ret = nft_register_expr(&nft_masq_ipv4_type); if (ret < 0) { 
nft_masq_module_exit_inet(); nft_masq_module_exit_ipv6(); return ret; } ret = nf_nat_masquerade_inet_register_notifiers(); if (ret < 0) { nft_masq_module_exit_ipv6(); nft_masq_module_exit_inet(); nft_unregister_expr(&nft_masq_ipv4_type); return ret; } return ret; } static void __exit nft_masq_module_exit(void) { nft_masq_module_exit_ipv6(); nft_masq_module_exit_inet(); nft_unregister_expr(&nft_masq_ipv4_type); nf_nat_masquerade_inet_unregister_notifiers(); } module_init(nft_masq_module_init); module_exit(nft_masq_module_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo@debian.org>"); MODULE_ALIAS_NFT_EXPR("masq"); MODULE_DESCRIPTION("Netfilter nftables masquerade expression support");
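/*
 * Example (not part of the original file): the same registration sequence as
 * nft_masq_module_init(), sketched with the conventional goto-based unwind
 * ladder so each failure path releases exactly what was set up before it, in
 * reverse order. Purely illustrative and not wired up via module_init(); the
 * in-tree function performs the explicit cleanup calls on each error branch
 * instead. The _alt suffix is an assumption for this sketch.
 */
static int __init nft_masq_module_init_alt(void)
{
	int ret;

	ret = nft_masq_module_init_ipv6();
	if (ret < 0)
		return ret;

	ret = nft_masq_module_init_inet();
	if (ret < 0)
		goto err_ipv6;

	ret = nft_register_expr(&nft_masq_ipv4_type);
	if (ret < 0)
		goto err_inet;

	ret = nf_nat_masquerade_inet_register_notifiers();
	if (ret < 0)
		goto err_ipv4;

	return 0;

err_ipv4:
	nft_unregister_expr(&nft_masq_ipv4_type);
err_inet:
	nft_masq_module_exit_inet();
err_ipv6:
	nft_masq_module_exit_ipv6();
	return ret;
}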
31 132 390 2114 3 3 284 2110 2093 1081 1026 92 45 2100 2102 2111 1325 1328 1449 1330 1023 1519 384 45 45 28 17 45 45 45 88 87 88 87 88 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_NETFILTER_H #define __LINUX_NETFILTER_H #include <linux/init.h> #include <linux/skbuff.h> #include <linux/net.h> #include <linux/if.h> #include <linux/in.h> #include <linux/in6.h> #include <linux/wait.h> #include <linux/list.h> #include <linux/static_key.h> #include <linux/module.h> #include <linux/netfilter_defs.h> #include <linux/netdevice.h> #include <linux/sockptr.h> #include <net/net_namespace.h> static inline int NF_DROP_GETERR(int verdict) { return -(verdict >> NF_VERDICT_QBITS); } static __always_inline int NF_DROP_REASON(struct sk_buff *skb, enum skb_drop_reason reason, u32 err) { BUILD_BUG_ON(err > 0xffff); kfree_skb_reason(skb, reason); return ((err << 16) | NF_STOLEN); } static inline int nf_inet_addr_cmp(const union nf_inet_addr *a1, const union nf_inet_addr *a2) { #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64 const unsigned long *ul1 = (const unsigned long *)a1; const unsigned long *ul2 = (const unsigned long *)a2; return ((ul1[0] ^ ul2[0]) | (ul1[1] ^ ul2[1])) == 0UL; #else return a1->all[0] == a2->all[0] && a1->all[1] == a2->all[1] && a1->all[2] == a2->all[2] && a1->all[3] == a2->all[3]; #endif } static inline void nf_inet_addr_mask(const union nf_inet_addr *a1, union nf_inet_addr *result, const union nf_inet_addr *mask) { #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64 const unsigned long *ua = (const unsigned long *)a1; unsigned long *ur = 
(unsigned long *)result; const unsigned long *um = (const unsigned long *)mask; ur[0] = ua[0] & um[0]; ur[1] = ua[1] & um[1]; #else result->all[0] = a1->all[0] & mask->all[0]; result->all[1] = a1->all[1] & mask->all[1]; result->all[2] = a1->all[2] & mask->all[2]; result->all[3] = a1->all[3] & mask->all[3]; #endif } int netfilter_init(void); struct sk_buff; struct nf_hook_ops; struct sock; struct nf_hook_state { u8 hook; u8 pf; struct net_device *in; struct net_device *out; struct sock *sk; struct net *net; int (*okfn)(struct net *, struct sock *, struct sk_buff *); }; typedef unsigned int nf_hookfn(void *priv, struct sk_buff *skb, const struct nf_hook_state *state); enum nf_hook_ops_type { NF_HOOK_OP_UNDEFINED, NF_HOOK_OP_NF_TABLES, NF_HOOK_OP_BPF, }; struct nf_hook_ops { struct list_head list; struct rcu_head rcu; /* User fills in from here down. */ nf_hookfn *hook; struct net_device *dev; void *priv; u8 pf; enum nf_hook_ops_type hook_ops_type:8; unsigned int hooknum; /* Hooks are ordered in ascending priority. */ int priority; }; struct nf_hook_entry { nf_hookfn *hook; void *priv; }; struct nf_hook_entries_rcu_head { struct rcu_head head; void *allocation; }; struct nf_hook_entries { u16 num_hook_entries; /* padding */ struct nf_hook_entry hooks[]; /* trailer: pointers to original orig_ops of each hook, * followed by rcu_head and scratch space used for freeing * the structure via call_rcu. * * This is not part of struct nf_hook_entry since its only * needed in slow path (hook register/unregister): * const struct nf_hook_ops *orig_ops[] * * For the same reason, we store this at end -- its * only needed when a hook is deleted, not during * packet path processing: * struct nf_hook_entries_rcu_head head */ }; #ifdef CONFIG_NETFILTER static inline struct nf_hook_ops **nf_hook_entries_get_hook_ops(const struct nf_hook_entries *e) { unsigned int n = e->num_hook_entries; const void *hook_end; hook_end = &e->hooks[n]; /* this is *past* ->hooks[]! */ return (struct nf_hook_ops **)hook_end; } static inline int nf_hook_entry_hookfn(const struct nf_hook_entry *entry, struct sk_buff *skb, struct nf_hook_state *state) { return entry->hook(entry->priv, skb, state); } static inline void nf_hook_state_init(struct nf_hook_state *p, unsigned int hook, u_int8_t pf, struct net_device *indev, struct net_device *outdev, struct sock *sk, struct net *net, int (*okfn)(struct net *, struct sock *, struct sk_buff *)) { p->hook = hook; p->pf = pf; p->in = indev; p->out = outdev; p->sk = sk; p->net = net; p->okfn = okfn; } struct nf_sockopt_ops { struct list_head list; u_int8_t pf; /* Non-inclusive ranges: use 0/0/NULL to never get called. */ int set_optmin; int set_optmax; int (*set)(struct sock *sk, int optval, sockptr_t arg, unsigned int len); int get_optmin; int get_optmax; int (*get)(struct sock *sk, int optval, void __user *user, int *len); /* Use the module struct to lock set/get code in place */ struct module *owner; }; /* Function to register/unregister hook points. */ int nf_register_net_hook(struct net *net, const struct nf_hook_ops *ops); void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *ops); int nf_register_net_hooks(struct net *net, const struct nf_hook_ops *reg, unsigned int n); void nf_unregister_net_hooks(struct net *net, const struct nf_hook_ops *reg, unsigned int n); /* Functions to register get/setsockopt ranges (non-inclusive). You need to check permissions yourself! 
*/ int nf_register_sockopt(struct nf_sockopt_ops *reg); void nf_unregister_sockopt(struct nf_sockopt_ops *reg); #ifdef CONFIG_JUMP_LABEL extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS]; #endif int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state, const struct nf_hook_entries *e, unsigned int i); void nf_hook_slow_list(struct list_head *head, struct nf_hook_state *state, const struct nf_hook_entries *e); /** * nf_hook - call a netfilter hook * * Returns 1 if the hook has allowed the packet to pass. The function * okfn must be invoked by the caller in this case. Any other return * value indicates the packet has been consumed by the hook. */ static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net, struct sock *sk, struct sk_buff *skb, struct net_device *indev, struct net_device *outdev, int (*okfn)(struct net *, struct sock *, struct sk_buff *)) { struct nf_hook_entries *hook_head = NULL; int ret = 1; #ifdef CONFIG_JUMP_LABEL if (__builtin_constant_p(pf) && __builtin_constant_p(hook) && !static_key_false(&nf_hooks_needed[pf][hook])) return 1; #endif rcu_read_lock(); switch (pf) { case NFPROTO_IPV4: hook_head = rcu_dereference(net->nf.hooks_ipv4[hook]); break; case NFPROTO_IPV6: hook_head = rcu_dereference(net->nf.hooks_ipv6[hook]); break; case NFPROTO_ARP: #ifdef CONFIG_NETFILTER_FAMILY_ARP if (WARN_ON_ONCE(hook >= ARRAY_SIZE(net->nf.hooks_arp))) break; hook_head = rcu_dereference(net->nf.hooks_arp[hook]); #endif break; case NFPROTO_BRIDGE: #ifdef CONFIG_NETFILTER_FAMILY_BRIDGE hook_head = rcu_dereference(net->nf.hooks_bridge[hook]); #endif break; default: WARN_ON_ONCE(1); break; } if (hook_head) { struct nf_hook_state state; nf_hook_state_init(&state, hook, pf, indev, outdev, sk, net, okfn); ret = nf_hook_slow(skb, &state, hook_head, 0); } rcu_read_unlock(); return ret; } /* Activate hook; either okfn or kfree_skb called, unless a hook returns NF_STOLEN (in which case, it's up to the hook to deal with the consequences). Returns -ERRNO if packet dropped. Zero means queued, stolen or accepted. */ /* RR: > I don't want nf_hook to return anything because people might forget > about async and trust the return value to mean "packet was ok". 
AK: Just document it clearly, then you can expect some sense from kernel coders :) */ static inline int NF_HOOK_COND(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, struct sk_buff *skb, struct net_device *in, struct net_device *out, int (*okfn)(struct net *, struct sock *, struct sk_buff *), bool cond) { int ret; if (!cond || ((ret = nf_hook(pf, hook, net, sk, skb, in, out, okfn)) == 1)) ret = okfn(net, sk, skb); return ret; } static inline int NF_HOOK(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, struct sk_buff *skb, struct net_device *in, struct net_device *out, int (*okfn)(struct net *, struct sock *, struct sk_buff *)) { int ret = nf_hook(pf, hook, net, sk, skb, in, out, okfn); if (ret == 1) ret = okfn(net, sk, skb); return ret; } static inline void NF_HOOK_LIST(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, struct list_head *head, struct net_device *in, struct net_device *out, int (*okfn)(struct net *, struct sock *, struct sk_buff *)) { struct nf_hook_entries *hook_head = NULL; #ifdef CONFIG_JUMP_LABEL if (__builtin_constant_p(pf) && __builtin_constant_p(hook) && !static_key_false(&nf_hooks_needed[pf][hook])) return; #endif rcu_read_lock(); switch (pf) { case NFPROTO_IPV4: hook_head = rcu_dereference(net->nf.hooks_ipv4[hook]); break; case NFPROTO_IPV6: hook_head = rcu_dereference(net->nf.hooks_ipv6[hook]); break; default: WARN_ON_ONCE(1); break; } if (hook_head) { struct nf_hook_state state; nf_hook_state_init(&state, hook, pf, in, out, sk, net, okfn); nf_hook_slow_list(head, &state, hook_head); } rcu_read_unlock(); } /* Call setsockopt() */ int nf_setsockopt(struct sock *sk, u_int8_t pf, int optval, sockptr_t opt, unsigned int len); int nf_getsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt, int *len); struct flowi; struct nf_queue_entry; __sum16 nf_checksum(struct sk_buff *skb, unsigned int hook, unsigned int dataoff, u_int8_t protocol, unsigned short family); __sum16 nf_checksum_partial(struct sk_buff *skb, unsigned int hook, unsigned int dataoff, unsigned int len, u_int8_t protocol, unsigned short family); int nf_route(struct net *net, struct dst_entry **dst, struct flowi *fl, bool strict, unsigned short family); #include <net/flow.h> struct nf_conn; enum nf_nat_manip_type; struct nlattr; struct nf_nat_hook { int (*parse_nat_setup)(struct nf_conn *ct, enum nf_nat_manip_type manip, const struct nlattr *attr); void (*decode_session)(struct sk_buff *skb, struct flowi *fl); void (*remove_nat_bysrc)(struct nf_conn *ct); }; extern const struct nf_nat_hook __rcu *nf_nat_hook; static inline void nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family) { #if IS_ENABLED(CONFIG_NF_NAT) const struct nf_nat_hook *nat_hook; rcu_read_lock(); nat_hook = rcu_dereference(nf_nat_hook); if (nat_hook && nat_hook->decode_session) nat_hook->decode_session(skb, fl); rcu_read_unlock(); #endif } #else /* !CONFIG_NETFILTER */ static inline int NF_HOOK_COND(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, struct sk_buff *skb, struct net_device *in, struct net_device *out, int (*okfn)(struct net *, struct sock *, struct sk_buff *), bool cond) { return okfn(net, sk, skb); } static inline int NF_HOOK(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, struct sk_buff *skb, struct net_device *in, struct net_device *out, int (*okfn)(struct net *, struct sock *, struct sk_buff *)) { return okfn(net, sk, skb); } static inline void NF_HOOK_LIST(uint8_t pf, unsigned int hook, struct net *net, 
struct sock *sk, struct list_head *head, struct net_device *in, struct net_device *out, int (*okfn)(struct net *, struct sock *, struct sk_buff *)) { /* nothing to do */ } static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net, struct sock *sk, struct sk_buff *skb, struct net_device *indev, struct net_device *outdev, int (*okfn)(struct net *, struct sock *, struct sk_buff *)) { return 1; } struct flowi; static inline void nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family) { } #endif /*CONFIG_NETFILTER*/ #if IS_ENABLED(CONFIG_NF_CONNTRACK) #include <linux/netfilter/nf_conntrack_zones_common.h> void nf_ct_attach(struct sk_buff *, const struct sk_buff *); void nf_ct_set_closing(struct nf_conntrack *nfct); struct nf_conntrack_tuple; bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple, const struct sk_buff *skb); #else static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {} static inline void nf_ct_set_closing(struct nf_conntrack *nfct) {} struct nf_conntrack_tuple; static inline bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple, const struct sk_buff *skb) { return false; } #endif struct nf_conn; enum ip_conntrack_info; struct nf_ct_hook { int (*update)(struct net *net, struct sk_buff *skb); void (*destroy)(struct nf_conntrack *); bool (*get_tuple_skb)(struct nf_conntrack_tuple *, const struct sk_buff *); void (*attach)(struct sk_buff *nskb, const struct sk_buff *skb); void (*set_closing)(struct nf_conntrack *nfct); int (*confirm)(struct sk_buff *skb); u32 (*get_id)(const struct nf_conntrack *nfct); }; extern const struct nf_ct_hook __rcu *nf_ct_hook; struct nlattr; struct nfnl_ct_hook { size_t (*build_size)(const struct nf_conn *ct); int (*build)(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, u_int16_t ct_attr, u_int16_t ct_info_attr); int (*parse)(const struct nlattr *attr, struct nf_conn *ct); int (*attach_expect)(const struct nlattr *attr, struct nf_conn *ct, u32 portid, u32 report); void (*seq_adjust)(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, s32 off); }; extern const struct nfnl_ct_hook __rcu *nfnl_ct_hook; struct nf_defrag_hook { struct module *owner; int (*enable)(struct net *net); void (*disable)(struct net *net); }; extern const struct nf_defrag_hook __rcu *nf_defrag_v4_hook; extern const struct nf_defrag_hook __rcu *nf_defrag_v6_hook; /* * Contains bitmask of ctnetlink event subscribers, if any. * Can't be pernet due to NETLINK_LISTEN_ALL_NSID setsockopt flag. */ extern u8 nf_ctnetlink_has_listener; #endif /*__LINUX_NETFILTER_H*/
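/*
 * Example (not part of the original header): a minimal sketch of the hook
 * registration API declared above -- an nf_hookfn that accepts every packet,
 * attached at IPv4 PRE_ROUTING in the initial network namespace. The
 * example_* names are illustrative assumptions; NF_ACCEPT, NF_INET_PRE_ROUTING
 * and NF_IP_PRI_FIRST come from the UAPI netfilter headers.
 */
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <net/net_namespace.h>

static unsigned int example_hookfn(void *priv, struct sk_buff *skb,
				   const struct nf_hook_state *state)
{
	/* Inspect skb here; returning NF_ACCEPT lets it continue. */
	return NF_ACCEPT;
}

static const struct nf_hook_ops example_ops = {
	.hook		= example_hookfn,
	.pf		= NFPROTO_IPV4,
	.hooknum	= NF_INET_PRE_ROUTING,
	.priority	= NF_IP_PRI_FIRST,
};

static int __init example_hook_init(void)
{
	/* Per-netns registration; pairs with nf_unregister_net_hook(). */
	return nf_register_net_hook(&init_net, &example_ops);
}

static void __exit example_hook_exit(void)
{
	nf_unregister_net_hook(&init_net, &example_ops);
}

module_init(example_hook_init);
module_exit(example_hook_exit);
MODULE_DESCRIPTION("netfilter hook registration sketch");
MODULE_LICENSE("GPL");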
4737 4738 4739 4740 4741 4742 4743 4744 4745 4746 4747 4748 4749 4750 4751 4752 4753 4754 4755 4756 4757 4758 4759 4760 4761 4762 4763 4764 4765 4766 4767 4768 4769 4770 4771 4772 4773 4774 4775 4776 4777 4778 4779 4780 4781 4782 4783 4784 4785 4786 4787 4788 4789 4790 4791 4792 4793 4794 4795 4796 4797 4798 4799 4800 4801 4802 4803 4804 4805 4806 4807 4808 4809 4810 4811 4812 4813 4814 4815 4816 4817 4818 4819 4820 4821 4822 4823 4824 4825 4826 4827 4828 4829 4830 4831 4832 4833 4834 4835 4836 4837 4838 4839 4840 4841 4842 4843 4844 4845 4846 4847 4848 4849 4850 4851 4852 4853 4854 4855 4856 4857 4858 4859 4860 4861 4862 4863 4864 4865 4866 4867 4868 4869 4870 4871 4872 4873 4874 4875 4876 4877 4878 4879 4880 4881 4882 4883 4884 4885 4886 4887 4888 4889 4890 4891 4892 4893 4894 4895 4896 4897 4898 4899 4900 4901 4902 4903 4904 4905 4906 4907 4908 4909 4910 4911 4912 4913 4914 4915 4916 4917 4918 4919 4920 4921 4922 4923 4924 4925 4926 4927 4928 4929 4930 4931 4932 4933 4934 4935 4936 4937 4938 4939 4940 4941 4942 4943 4944 4945 4946 4947 4948 4949 4950 4951 4952 4953 4954 4955 4956 4957 4958 4959 4960 4961 4962 4963 4964 4965 4966 4967 4968 4969 4970 4971 4972 4973 4974 4975 4976 4977 4978 4979 4980 4981 4982 4983 4984 4985 4986 4987 4988 4989 4990 4991 4992 4993 4994 4995 4996 4997 4998 4999 5000 5001 5002 5003 5004 5005 5006 5007 5008 5009 5010 5011 5012 5013 5014 5015 5016 5017 5018 5019 5020 5021 5022 5023 5024 5025 5026 5027 5028 5029 5030 5031 5032 5033 5034 5035 5036 5037 5038 5039 5040 5041 5042 5043 5044 5045 5046 5047 5048 5049 5050 5051 5052 5053 5054 5055 5056 5057 5058 5059 5060 5061 5062 5063 5064 5065 5066 5067 5068 5069 5070 5071 5072 5073 5074 5075 5076 5077 5078 5079 5080 5081 5082 5083 5084 5085 5086 5087 5088 5089 5090 5091 5092 5093 5094 5095 5096 5097 5098 5099 5100 5101 5102 5103 5104 5105 5106 5107 5108 5109 5110 5111 5112 5113 5114 5115 5116 5117 5118 5119 5120 5121 5122 5123 5124 5125 5126 5127 5128 5129 5130 5131 5132 5133 5134 5135 5136 5137 5138 5139 5140 5141 5142 5143 5144 5145 5146 5147 5148 5149 5150 5151 5152 5153 5154 5155 5156 5157 5158 5159 5160 5161 5162 5163 5164 5165 5166 5167 5168 5169 5170 5171 5172 5173 5174 5175 5176 5177 5178 5179 5180 5181 5182 5183 5184 5185 5186 5187 5188 5189 5190 5191 5192 5193 5194 5195 5196 5197 5198 5199 5200 5201 5202 5203 5204 5205 5206 5207 5208 5209 5210 5211 5212 5213 5214 5215 5216 5217 5218 5219 5220 5221 5222 5223 5224 5225 5226 5227 5228 5229 5230 5231 5232 5233 5234 5235 5236 5237 5238 5239 5240 5241 5242 5243 5244 5245 5246 5247 5248 5249 5250 5251 5252 5253 5254 5255 5256 5257 5258 5259 5260 5261 5262 5263 5264 5265 5266 5267 5268 5269 5270 5271 5272 5273 5274 5275 5276 5277 5278 5279 5280 5281 5282 5283 5284 5285 5286 5287 5288 // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 1993 Linus Torvalds * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 * SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000 * Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002 * Numa awareness, Christoph Lameter, SGI, June 2005 * Improving global KVA allocator, Uladzislau Rezki, Sony, May 2019 */ #include <linux/vmalloc.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/highmem.h> #include <linux/sched/signal.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/set_memory.h> #include <linux/debugobjects.h> #include <linux/kallsyms.h> #include 
<linux/list.h> #include <linux/notifier.h> #include <linux/rbtree.h> #include <linux/xarray.h> #include <linux/io.h> #include <linux/rcupdate.h> #include <linux/pfn.h> #include <linux/kmemleak.h> #include <linux/atomic.h> #include <linux/compiler.h> #include <linux/memcontrol.h> #include <linux/llist.h> #include <linux/uio.h> #include <linux/bitops.h> #include <linux/rbtree_augmented.h> #include <linux/overflow.h> #include <linux/pgtable.h> #include <linux/hugetlb.h> #include <linux/sched/mm.h> #include <asm/tlbflush.h> #include <asm/shmparam.h> #include <linux/page_owner.h> #define CREATE_TRACE_POINTS #include <trace/events/vmalloc.h> #include "internal.h" #include "pgalloc-track.h" #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP static unsigned int __ro_after_init ioremap_max_page_shift = BITS_PER_LONG - 1; static int __init set_nohugeiomap(char *str) { ioremap_max_page_shift = PAGE_SHIFT; return 0; } early_param("nohugeiomap", set_nohugeiomap); #else /* CONFIG_HAVE_ARCH_HUGE_VMAP */ static const unsigned int ioremap_max_page_shift = PAGE_SHIFT; #endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */ #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC static bool __ro_after_init vmap_allow_huge = true; static int __init set_nohugevmalloc(char *str) { vmap_allow_huge = false; return 0; } early_param("nohugevmalloc", set_nohugevmalloc); #else /* CONFIG_HAVE_ARCH_HUGE_VMALLOC */ static const bool vmap_allow_huge = false; #endif /* CONFIG_HAVE_ARCH_HUGE_VMALLOC */ bool is_vmalloc_addr(const void *x) { unsigned long addr = (unsigned long)kasan_reset_tag(x); return addr >= VMALLOC_START && addr < VMALLOC_END; } EXPORT_SYMBOL(is_vmalloc_addr); struct vfree_deferred { struct llist_head list; struct work_struct wq; }; static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred); /*** Page table manipulation functions ***/ static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, phys_addr_t phys_addr, pgprot_t prot, unsigned int max_page_shift, pgtbl_mod_mask *mask) { pte_t *pte; u64 pfn; struct page *page; unsigned long size = PAGE_SIZE; pfn = phys_addr >> PAGE_SHIFT; pte = pte_alloc_kernel_track(pmd, addr, mask); if (!pte) return -ENOMEM; arch_enter_lazy_mmu_mode(); do { if (unlikely(!pte_none(ptep_get(pte)))) { if (pfn_valid(pfn)) { page = pfn_to_page(pfn); dump_page(page, "remapping already mapped page"); } BUG(); } #ifdef CONFIG_HUGETLB_PAGE size = arch_vmap_pte_range_map_size(addr, end, pfn, max_page_shift); if (size != PAGE_SIZE) { pte_t entry = pfn_pte(pfn, prot); entry = arch_make_huge_pte(entry, ilog2(size), 0); set_huge_pte_at(&init_mm, addr, pte, entry, size); pfn += PFN_DOWN(size); continue; } #endif set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot)); pfn++; } while (pte += PFN_DOWN(size), addr += size, addr != end); arch_leave_lazy_mmu_mode(); *mask |= PGTBL_PTE_MODIFIED; return 0; } static int vmap_try_huge_pmd(pmd_t *pmd, unsigned long addr, unsigned long end, phys_addr_t phys_addr, pgprot_t prot, unsigned int max_page_shift) { if (max_page_shift < PMD_SHIFT) return 0; if (!arch_vmap_pmd_supported(prot)) return 0; if ((end - addr) != PMD_SIZE) return 0; if (!IS_ALIGNED(addr, PMD_SIZE)) return 0; if (!IS_ALIGNED(phys_addr, PMD_SIZE)) return 0; if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr)) return 0; return pmd_set_huge(pmd, phys_addr, prot); } static int vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end, phys_addr_t phys_addr, pgprot_t prot, unsigned int max_page_shift, pgtbl_mod_mask *mask) { pmd_t *pmd; unsigned long next; pmd = pmd_alloc_track(&init_mm, pud, addr, mask); if 
(!pmd) return -ENOMEM; do { next = pmd_addr_end(addr, end); if (vmap_try_huge_pmd(pmd, addr, next, phys_addr, prot, max_page_shift)) { *mask |= PGTBL_PMD_MODIFIED; continue; } if (vmap_pte_range(pmd, addr, next, phys_addr, prot, max_page_shift, mask)) return -ENOMEM; } while (pmd++, phys_addr += (next - addr), addr = next, addr != end); return 0; } static int vmap_try_huge_pud(pud_t *pud, unsigned long addr, unsigned long end, phys_addr_t phys_addr, pgprot_t prot, unsigned int max_page_shift) { if (max_page_shift < PUD_SHIFT) return 0; if (!arch_vmap_pud_supported(prot)) return 0; if ((end - addr) != PUD_SIZE) return 0; if (!IS_ALIGNED(addr, PUD_SIZE)) return 0; if (!IS_ALIGNED(phys_addr, PUD_SIZE)) return 0; if (pud_present(*pud) && !pud_free_pmd_page(pud, addr)) return 0; return pud_set_huge(pud, phys_addr, prot); } static int vmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end, phys_addr_t phys_addr, pgprot_t prot, unsigned int max_page_shift, pgtbl_mod_mask *mask) { pud_t *pud; unsigned long next; pud = pud_alloc_track(&init_mm, p4d, addr, mask); if (!pud) return -ENOMEM; do { next = pud_addr_end(addr, end); if (vmap_try_huge_pud(pud, addr, next, phys_addr, prot, max_page_shift)) { *mask |= PGTBL_PUD_MODIFIED; continue; } if (vmap_pmd_range(pud, addr, next, phys_addr, prot, max_page_shift, mask)) return -ENOMEM; } while (pud++, phys_addr += (next - addr), addr = next, addr != end); return 0; } static int vmap_try_huge_p4d(p4d_t *p4d, unsigned long addr, unsigned long end, phys_addr_t phys_addr, pgprot_t prot, unsigned int max_page_shift) { if (max_page_shift < P4D_SHIFT) return 0; if (!arch_vmap_p4d_supported(prot)) return 0; if ((end - addr) != P4D_SIZE) return 0; if (!IS_ALIGNED(addr, P4D_SIZE)) return 0; if (!IS_ALIGNED(phys_addr, P4D_SIZE)) return 0; if (p4d_present(*p4d) && !p4d_free_pud_page(p4d, addr)) return 0; return p4d_set_huge(p4d, phys_addr, prot); } static int vmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end, phys_addr_t phys_addr, pgprot_t prot, unsigned int max_page_shift, pgtbl_mod_mask *mask) { p4d_t *p4d; unsigned long next; p4d = p4d_alloc_track(&init_mm, pgd, addr, mask); if (!p4d) return -ENOMEM; do { next = p4d_addr_end(addr, end); if (vmap_try_huge_p4d(p4d, addr, next, phys_addr, prot, max_page_shift)) { *mask |= PGTBL_P4D_MODIFIED; continue; } if (vmap_pud_range(p4d, addr, next, phys_addr, prot, max_page_shift, mask)) return -ENOMEM; } while (p4d++, phys_addr += (next - addr), addr = next, addr != end); return 0; } static int vmap_range_noflush(unsigned long addr, unsigned long end, phys_addr_t phys_addr, pgprot_t prot, unsigned int max_page_shift) { pgd_t *pgd; unsigned long start; unsigned long next; int err; pgtbl_mod_mask mask = 0; might_sleep(); BUG_ON(addr >= end); start = addr; pgd = pgd_offset_k(addr); do { next = pgd_addr_end(addr, end); err = vmap_p4d_range(pgd, addr, next, phys_addr, prot, max_page_shift, &mask); if (err) break; } while (pgd++, phys_addr += (next - addr), addr = next, addr != end); if (mask & ARCH_PAGE_TABLE_SYNC_MASK) arch_sync_kernel_mappings(start, end); return err; } int vmap_page_range(unsigned long addr, unsigned long end, phys_addr_t phys_addr, pgprot_t prot) { int err; err = vmap_range_noflush(addr, end, phys_addr, pgprot_nx(prot), ioremap_max_page_shift); flush_cache_vmap(addr, end); if (!err) err = kmsan_ioremap_page_range(addr, end, phys_addr, prot, ioremap_max_page_shift); return err; } int ioremap_page_range(unsigned long addr, unsigned long end, phys_addr_t phys_addr, pgprot_t prot) { struct 
vm_struct *area; area = find_vm_area((void *)addr); if (!area || !(area->flags & VM_IOREMAP)) { WARN_ONCE(1, "vm_area at addr %lx is not marked as VM_IOREMAP\n", addr); return -EINVAL; } if (addr != (unsigned long)area->addr || (void *)end != area->addr + get_vm_area_size(area)) { WARN_ONCE(1, "ioremap request [%lx,%lx) doesn't match vm_area [%lx, %lx)\n", addr, end, (long)area->addr, (long)area->addr + get_vm_area_size(area)); return -ERANGE; } return vmap_page_range(addr, end, phys_addr, prot); } static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, pgtbl_mod_mask *mask) { pte_t *pte; pte_t ptent; unsigned long size = PAGE_SIZE; pte = pte_offset_kernel(pmd, addr); arch_enter_lazy_mmu_mode(); do { #ifdef CONFIG_HUGETLB_PAGE size = arch_vmap_pte_range_unmap_size(addr, pte); if (size != PAGE_SIZE) { if (WARN_ON(!IS_ALIGNED(addr, size))) { addr = ALIGN_DOWN(addr, size); pte = PTR_ALIGN_DOWN(pte, sizeof(*pte) * (size >> PAGE_SHIFT)); } ptent = huge_ptep_get_and_clear(&init_mm, addr, pte, size); if (WARN_ON(end - addr < size)) size = end - addr; } else #endif ptent = ptep_get_and_clear(&init_mm, addr, pte); WARN_ON(!pte_none(ptent) && !pte_present(ptent)); } while (pte += (size >> PAGE_SHIFT), addr += size, addr != end); arch_leave_lazy_mmu_mode(); *mask |= PGTBL_PTE_MODIFIED; } static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end, pgtbl_mod_mask *mask) { pmd_t *pmd; unsigned long next; int cleared; pmd = pmd_offset(pud, addr); do { next = pmd_addr_end(addr, end); cleared = pmd_clear_huge(pmd); if (cleared || pmd_bad(*pmd)) *mask |= PGTBL_PMD_MODIFIED; if (cleared) { WARN_ON(next - addr < PMD_SIZE); continue; } if (pmd_none_or_clear_bad(pmd)) continue; vunmap_pte_range(pmd, addr, next, mask); cond_resched(); } while (pmd++, addr = next, addr != end); } static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end, pgtbl_mod_mask *mask) { pud_t *pud; unsigned long next; int cleared; pud = pud_offset(p4d, addr); do { next = pud_addr_end(addr, end); cleared = pud_clear_huge(pud); if (cleared || pud_bad(*pud)) *mask |= PGTBL_PUD_MODIFIED; if (cleared) { WARN_ON(next - addr < PUD_SIZE); continue; } if (pud_none_or_clear_bad(pud)) continue; vunmap_pmd_range(pud, addr, next, mask); } while (pud++, addr = next, addr != end); } static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end, pgtbl_mod_mask *mask) { p4d_t *p4d; unsigned long next; p4d = p4d_offset(pgd, addr); do { next = p4d_addr_end(addr, end); p4d_clear_huge(p4d); if (p4d_bad(*p4d)) *mask |= PGTBL_P4D_MODIFIED; if (p4d_none_or_clear_bad(p4d)) continue; vunmap_pud_range(p4d, addr, next, mask); } while (p4d++, addr = next, addr != end); } /* * vunmap_range_noflush is similar to vunmap_range, but does not * flush caches or TLBs. * * The caller is responsible for calling flush_cache_vmap() before calling * this function, and flush_tlb_kernel_range after it has returned * successfully (and before the addresses are expected to cause a page fault * or be re-mapped for something else, if TLB flushes are being delayed or * coalesced). * * This is an internal function only. Do not use outside mm/. 
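/*
 * Illustrative sketch, not part of the original file: the caller contract
 * described above, spelled out. The function name is hypothetical;
 * vunmap_range() just below does exactly this, and open-coding the sequence
 * only makes sense inside mm/ when the TLB flush is batched or delayed.
 */
static void example_unmap_with_explicit_flushes(unsigned long start,
						unsigned long end)
{
	flush_cache_vunmap(start, end);		/* before the PTEs are cleared */
	vunmap_range_noflush(start, end);	/* clear mappings, no flushing */
	flush_tlb_kernel_range(start, end);	/* before the range is reused */
}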
*/ void __vunmap_range_noflush(unsigned long start, unsigned long end) { unsigned long next; pgd_t *pgd; unsigned long addr = start; pgtbl_mod_mask mask = 0; BUG_ON(addr >= end); pgd = pgd_offset_k(addr); do { next = pgd_addr_end(addr, end); if (pgd_bad(*pgd)) mask |= PGTBL_PGD_MODIFIED; if (pgd_none_or_clear_bad(pgd)) continue; vunmap_p4d_range(pgd, addr, next, &mask); } while (pgd++, addr = next, addr != end); if (mask & ARCH_PAGE_TABLE_SYNC_MASK) arch_sync_kernel_mappings(start, end); } void vunmap_range_noflush(unsigned long start, unsigned long end) { kmsan_vunmap_range_noflush(start, end); __vunmap_range_noflush(start, end); } /** * vunmap_range - unmap kernel virtual addresses * @addr: start of the VM area to unmap * @end: end of the VM area to unmap (non-inclusive) * * Clears any present PTEs in the virtual address range, flushes TLBs and * caches. Any subsequent access to the address before it has been re-mapped * is a kernel bug. */ void vunmap_range(unsigned long addr, unsigned long end) { flush_cache_vunmap(addr, end); vunmap_range_noflush(addr, end); flush_tlb_kernel_range(addr, end); } static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, pgprot_t prot, struct page **pages, int *nr, pgtbl_mod_mask *mask) { pte_t *pte; /* * nr is a running index into the array which helps higher level * callers keep track of where we're up to. */ pte = pte_alloc_kernel_track(pmd, addr, mask); if (!pte) return -ENOMEM; arch_enter_lazy_mmu_mode(); do { struct page *page = pages[*nr]; if (WARN_ON(!pte_none(ptep_get(pte)))) return -EBUSY; if (WARN_ON(!page)) return -ENOMEM; if (WARN_ON(!pfn_valid(page_to_pfn(page)))) return -EINVAL; set_pte_at(&init_mm, addr, pte, mk_pte(page, prot)); (*nr)++; } while (pte++, addr += PAGE_SIZE, addr != end); arch_leave_lazy_mmu_mode(); *mask |= PGTBL_PTE_MODIFIED; return 0; } static int vmap_pages_pmd_range(pud_t *pud, unsigned long addr, unsigned long end, pgprot_t prot, struct page **pages, int *nr, pgtbl_mod_mask *mask) { pmd_t *pmd; unsigned long next; pmd = pmd_alloc_track(&init_mm, pud, addr, mask); if (!pmd) return -ENOMEM; do { next = pmd_addr_end(addr, end); if (vmap_pages_pte_range(pmd, addr, next, prot, pages, nr, mask)) return -ENOMEM; } while (pmd++, addr = next, addr != end); return 0; } static int vmap_pages_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end, pgprot_t prot, struct page **pages, int *nr, pgtbl_mod_mask *mask) { pud_t *pud; unsigned long next; pud = pud_alloc_track(&init_mm, p4d, addr, mask); if (!pud) return -ENOMEM; do { next = pud_addr_end(addr, end); if (vmap_pages_pmd_range(pud, addr, next, prot, pages, nr, mask)) return -ENOMEM; } while (pud++, addr = next, addr != end); return 0; } static int vmap_pages_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end, pgprot_t prot, struct page **pages, int *nr, pgtbl_mod_mask *mask) { p4d_t *p4d; unsigned long next; p4d = p4d_alloc_track(&init_mm, pgd, addr, mask); if (!p4d) return -ENOMEM; do { next = p4d_addr_end(addr, end); if (vmap_pages_pud_range(p4d, addr, next, prot, pages, nr, mask)) return -ENOMEM; } while (p4d++, addr = next, addr != end); return 0; } static int vmap_small_pages_range_noflush(unsigned long addr, unsigned long end, pgprot_t prot, struct page **pages) { unsigned long start = addr; pgd_t *pgd; unsigned long next; int err = 0; int nr = 0; pgtbl_mod_mask mask = 0; BUG_ON(addr >= end); pgd = pgd_offset_k(addr); do { next = pgd_addr_end(addr, end); if (pgd_bad(*pgd)) mask |= PGTBL_PGD_MODIFIED; err = 
vmap_pages_p4d_range(pgd, addr, next, prot, pages, &nr, &mask); if (err) break; } while (pgd++, addr = next, addr != end); if (mask & ARCH_PAGE_TABLE_SYNC_MASK) arch_sync_kernel_mappings(start, end); return err; } /* * vmap_pages_range_noflush is similar to vmap_pages_range, but does not * flush caches. * * The caller is responsible for calling flush_cache_vmap() after this * function returns successfully and before the addresses are accessed. * * This is an internal function only. Do not use outside mm/. */ int __vmap_pages_range_noflush(unsigned long addr, unsigned long end, pgprot_t prot, struct page **pages, unsigned int page_shift) { unsigned int i, nr = (end - addr) >> PAGE_SHIFT; WARN_ON(page_shift < PAGE_SHIFT); if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMALLOC) || page_shift == PAGE_SHIFT) return vmap_small_pages_range_noflush(addr, end, prot, pages); for (i = 0; i < nr; i += 1U << (page_shift - PAGE_SHIFT)) { int err; err = vmap_range_noflush(addr, addr + (1UL << page_shift), page_to_phys(pages[i]), prot, page_shift); if (err) return err; addr += 1UL << page_shift; } return 0; } int vmap_pages_range_noflush(unsigned long addr, unsigned long end, pgprot_t prot, struct page **pages, unsigned int page_shift) { int ret = kmsan_vmap_pages_range_noflush(addr, end, prot, pages, page_shift); if (ret) return ret; return __vmap_pages_range_noflush(addr, end, prot, pages, page_shift); } /** * vmap_pages_range - map pages to a kernel virtual address * @addr: start of the VM area to map * @end: end of the VM area to map (non-inclusive) * @prot: page protection flags to use * @pages: pages to map (always PAGE_SIZE pages) * @page_shift: maximum shift that the pages may be mapped with, @pages must * be aligned and contiguous up to at least this shift. * * RETURNS: * 0 on success, -errno on failure. */ int vmap_pages_range(unsigned long addr, unsigned long end, pgprot_t prot, struct page **pages, unsigned int page_shift) { int err; err = vmap_pages_range_noflush(addr, end, prot, pages, page_shift); flush_cache_vmap(addr, end); return err; } static int check_sparse_vm_area(struct vm_struct *area, unsigned long start, unsigned long end) { might_sleep(); if (WARN_ON_ONCE(area->flags & VM_FLUSH_RESET_PERMS)) return -EINVAL; if (WARN_ON_ONCE(area->flags & VM_NO_GUARD)) return -EINVAL; if (WARN_ON_ONCE(!(area->flags & VM_SPARSE))) return -EINVAL; if ((end - start) >> PAGE_SHIFT > totalram_pages()) return -E2BIG; if (start < (unsigned long)area->addr || (void *)end > area->addr + get_vm_area_size(area)) return -ERANGE; return 0; } /** * vm_area_map_pages - map pages inside given sparse vm_area * @area: vm_area * @start: start address inside vm_area * @end: end address inside vm_area * @pages: pages to map (always PAGE_SIZE pages) */ int vm_area_map_pages(struct vm_struct *area, unsigned long start, unsigned long end, struct page **pages) { int err; err = check_sparse_vm_area(area, start, end); if (err) return err; return vmap_pages_range(start, end, PAGE_KERNEL, pages, PAGE_SHIFT); } /** * vm_area_unmap_pages - unmap pages inside given sparse vm_area * @area: vm_area * @start: start address inside vm_area * @end: end address inside vm_area */ void vm_area_unmap_pages(struct vm_struct *area, unsigned long start, unsigned long end) { if (check_sparse_vm_area(area, start, end)) return; vunmap_range(start, end); } int is_vmalloc_or_module_addr(const void *x) { /* * ARM, x86-64 and sparc64 put modules in a special place, * and fall back on vmalloc() if that fails. Others * just put it in the vmalloc space. 
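/*
 * Illustrative sketch, not part of the original file: a sparse-area user
 * reserves a large VM_SPARSE region once (for instance with get_vm_area())
 * and then maps page-sized chunks into it on demand with the helpers above.
 * The function and its caller are hypothetical; error handling is trimmed
 * and @offset is assumed to be page aligned and within the area.
 */
static void *example_map_one_page_sparse(struct vm_struct *sparse_area,
					 unsigned long offset,
					 struct page *page)
{
	unsigned long start = (unsigned long)sparse_area->addr + offset;
	struct page *pages[] = { page };

	if (vm_area_map_pages(sparse_area, start, start + PAGE_SIZE, pages))
		return NULL;

	/* Later: vm_area_unmap_pages(sparse_area, start, start + PAGE_SIZE); */
	return (void *)start;
}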
*/ #if defined(CONFIG_EXECMEM) && defined(MODULES_VADDR) unsigned long addr = (unsigned long)kasan_reset_tag(x); if (addr >= MODULES_VADDR && addr < MODULES_END) return 1; #endif return is_vmalloc_addr(x); } EXPORT_SYMBOL_GPL(is_vmalloc_or_module_addr); /* * Walk a vmap address to the struct page it maps. Huge vmap mappings will * return the tail page that corresponds to the base page address, which * matches small vmap mappings. */ struct page *vmalloc_to_page(const void *vmalloc_addr) { unsigned long addr = (unsigned long) vmalloc_addr; struct page *page = NULL; pgd_t *pgd = pgd_offset_k(addr); p4d_t *p4d; pud_t *pud; pmd_t *pmd; pte_t *ptep, pte; /* * XXX we might need to change this if we add VIRTUAL_BUG_ON for * architectures that do not vmalloc module space */ VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr)); if (pgd_none(*pgd)) return NULL; if (WARN_ON_ONCE(pgd_leaf(*pgd))) return NULL; /* XXX: no allowance for huge pgd */ if (WARN_ON_ONCE(pgd_bad(*pgd))) return NULL; p4d = p4d_offset(pgd, addr); if (p4d_none(*p4d)) return NULL; if (p4d_leaf(*p4d)) return p4d_page(*p4d) + ((addr & ~P4D_MASK) >> PAGE_SHIFT); if (WARN_ON_ONCE(p4d_bad(*p4d))) return NULL; pud = pud_offset(p4d, addr); if (pud_none(*pud)) return NULL; if (pud_leaf(*pud)) return pud_page(*pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT); if (WARN_ON_ONCE(pud_bad(*pud))) return NULL; pmd = pmd_offset(pud, addr); if (pmd_none(*pmd)) return NULL; if (pmd_leaf(*pmd)) return pmd_page(*pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT); if (WARN_ON_ONCE(pmd_bad(*pmd))) return NULL; ptep = pte_offset_kernel(pmd, addr); pte = ptep_get(ptep); if (pte_present(pte)) page = pte_page(pte); return page; } EXPORT_SYMBOL(vmalloc_to_page); /* * Map a vmalloc()-space virtual address to the physical page frame number. */ unsigned long vmalloc_to_pfn(const void *vmalloc_addr) { return page_to_pfn(vmalloc_to_page(vmalloc_addr)); } EXPORT_SYMBOL(vmalloc_to_pfn); /*** Global kva allocator ***/ #define DEBUG_AUGMENT_PROPAGATE_CHECK 0 #define DEBUG_AUGMENT_LOWEST_MATCH_CHECK 0 static DEFINE_SPINLOCK(free_vmap_area_lock); static bool vmap_initialized __read_mostly; /* * This kmem_cache is used for vmap_area objects. Instead of * allocating from slab we reuse an object from this cache to * make things faster. Especially in "no edge" splitting of * free block. */ static struct kmem_cache *vmap_area_cachep; /* * This linked list is used in pair with free_vmap_area_root. * It gives O(1) access to prev/next to perform fast coalescing. */ static LIST_HEAD(free_vmap_area_list); /* * This augment red-black tree represents the free vmap space. * All vmap_area objects in this tree are sorted by va->va_start * address. It is used for allocation and merging when a vmap * object is released. * * Each vmap_area node contains a maximum available free block * of its sub-tree, right or left. Therefore it is possible to * find a lowest match of free area. */ static struct rb_root free_vmap_area_root = RB_ROOT; /* * Preload a CPU with one object for "no edge" split case. The * aim is to get rid of allocations from the atomic context, thus * to use more permissive allocation masks. */ static DEFINE_PER_CPU(struct vmap_area *, ne_fit_preload_node); /* * This structure defines a single, solid model where a list and * rb-tree are part of one entity protected by the lock. Nodes are * sorted in ascending order, thus for O(1) access to left/right * neighbors a list is used as well as for sequential traversal. 
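/*
 * Illustrative sketch, not part of the original file: vmalloc memory is
 * virtually contiguous but physically scattered, so code that needs the
 * backing struct pages (to build a scatterlist, for example) walks the
 * buffer page by page with vmalloc_to_page(). The function name is
 * hypothetical; @buf is assumed to be a page-aligned vmalloc address and
 * @pages large enough for @nr_pages entries.
 */
static void example_collect_vmalloc_pages(void *buf, unsigned int nr_pages,
					  struct page **pages)
{
	unsigned int i;

	for (i = 0; i < nr_pages; i++)
		pages[i] = vmalloc_to_page(buf + i * PAGE_SIZE);
}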
*/ struct rb_list { struct rb_root root; struct list_head head; spinlock_t lock; }; /* * A fast size storage contains VAs up to 1M size. A pool consists * of linked between each other ready to go VAs of certain sizes. * An index in the pool-array corresponds to number of pages + 1. */ #define MAX_VA_SIZE_PAGES 256 struct vmap_pool { struct list_head head; unsigned long len; }; /* * An effective vmap-node logic. Users make use of nodes instead * of a global heap. It allows to balance an access and mitigate * contention. */ static struct vmap_node { /* Simple size segregated storage. */ struct vmap_pool pool[MAX_VA_SIZE_PAGES]; spinlock_t pool_lock; bool skip_populate; /* Bookkeeping data of this node. */ struct rb_list busy; struct rb_list lazy; /* * Ready-to-free areas. */ struct list_head purge_list; struct work_struct purge_work; unsigned long nr_purged; } single; /* * Initial setup consists of one single node, i.e. a balancing * is fully disabled. Later on, after vmap is initialized these * parameters are updated based on a system capacity. */ static struct vmap_node *vmap_nodes = &single; static __read_mostly unsigned int nr_vmap_nodes = 1; static __read_mostly unsigned int vmap_zone_size = 1; /* A simple iterator over all vmap-nodes. */ #define for_each_vmap_node(vn) \ for ((vn) = &vmap_nodes[0]; \ (vn) < &vmap_nodes[nr_vmap_nodes]; (vn)++) static inline unsigned int addr_to_node_id(unsigned long addr) { return (addr / vmap_zone_size) % nr_vmap_nodes; } static inline struct vmap_node * addr_to_node(unsigned long addr) { return &vmap_nodes[addr_to_node_id(addr)]; } static inline struct vmap_node * id_to_node(unsigned int id) { return &vmap_nodes[id % nr_vmap_nodes]; } static inline unsigned int node_to_id(struct vmap_node *node) { /* Pointer arithmetic. */ unsigned int id = node - vmap_nodes; if (likely(id < nr_vmap_nodes)) return id; WARN_ONCE(1, "An address 0x%p is out-of-bounds.\n", node); return 0; } /* * We use the value 0 to represent "no node", that is why * an encoded value will be the node-id incremented by 1. * It is always greater then 0. A valid node_id which can * be encoded is [0:nr_vmap_nodes - 1]. If a passed node_id * is not valid 0 is returned. */ static unsigned int encode_vn_id(unsigned int node_id) { /* Can store U8_MAX [0:254] nodes. */ if (node_id < nr_vmap_nodes) return (node_id + 1) << BITS_PER_BYTE; /* Warn and no node encoded. */ WARN_ONCE(1, "Encode wrong node id (%u)\n", node_id); return 0; } /* * Returns an encoded node-id, the valid range is within * [0:nr_vmap_nodes-1] values. Otherwise nr_vmap_nodes is * returned if extracted data is wrong. */ static unsigned int decode_vn_id(unsigned int val) { unsigned int node_id = (val >> BITS_PER_BYTE) - 1; /* Can store U8_MAX [0:254] nodes. */ if (node_id < nr_vmap_nodes) return node_id; /* If it was _not_ zero, warn. */ WARN_ONCE(node_id != UINT_MAX, "Decode wrong node id (%d)\n", node_id); return nr_vmap_nodes; } static bool is_vn_id_valid(unsigned int node_id) { if (node_id < nr_vmap_nodes) return true; return false; } static __always_inline unsigned long va_size(struct vmap_area *va) { return (va->va_end - va->va_start); } static __always_inline unsigned long get_subtree_max_size(struct rb_node *node) { struct vmap_area *va; va = rb_entry_safe(node, struct vmap_area, rb_node); return va ? 
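/*
 * A worked example of the node id encoding above (not from the original
 * source), assuming nr_vmap_nodes > 3: encode_vn_id(3) == (3 + 1) << 8 ==
 * 0x400, and decode_vn_id(0x400) == (0x400 >> 8) - 1 == 3. For a flags
 * value with no node encoded, decode_vn_id(0) underflows to UINT_MAX, which
 * fails the range check without triggering the warning, so nr_vmap_nodes
 * ("no node") is returned. Keeping the id in bits 8..15 leaves the low byte
 * of va->flags free for the VMAP_RAM/VMAP_BLOCK style bits that
 * alloc_vmap_area() ORs in later.
 */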
va->subtree_max_size : 0; } RB_DECLARE_CALLBACKS_MAX(static, free_vmap_area_rb_augment_cb, struct vmap_area, rb_node, unsigned long, subtree_max_size, va_size) static void reclaim_and_purge_vmap_areas(void); static BLOCKING_NOTIFIER_HEAD(vmap_notify_list); static void drain_vmap_area_work(struct work_struct *work); static DECLARE_WORK(drain_vmap_work, drain_vmap_area_work); static __cacheline_aligned_in_smp atomic_long_t nr_vmalloc_pages; static __cacheline_aligned_in_smp atomic_long_t vmap_lazy_nr; unsigned long vmalloc_nr_pages(void) { return atomic_long_read(&nr_vmalloc_pages); } static struct vmap_area *__find_vmap_area(unsigned long addr, struct rb_root *root) { struct rb_node *n = root->rb_node; addr = (unsigned long)kasan_reset_tag((void *)addr); while (n) { struct vmap_area *va; va = rb_entry(n, struct vmap_area, rb_node); if (addr < va->va_start) n = n->rb_left; else if (addr >= va->va_end) n = n->rb_right; else return va; } return NULL; } /* Look up the first VA which satisfies addr < va_end, NULL if none. */ static struct vmap_area * __find_vmap_area_exceed_addr(unsigned long addr, struct rb_root *root) { struct vmap_area *va = NULL; struct rb_node *n = root->rb_node; addr = (unsigned long)kasan_reset_tag((void *)addr); while (n) { struct vmap_area *tmp; tmp = rb_entry(n, struct vmap_area, rb_node); if (tmp->va_end > addr) { va = tmp; if (tmp->va_start <= addr) break; n = n->rb_left; } else n = n->rb_right; } return va; } /* * Returns a node where a first VA, that satisfies addr < va_end, resides. * If success, a node is locked. A user is responsible to unlock it when a * VA is no longer needed to be accessed. * * Returns NULL if nothing found. */ static struct vmap_node * find_vmap_area_exceed_addr_lock(unsigned long addr, struct vmap_area **va) { unsigned long va_start_lowest; struct vmap_node *vn; repeat: va_start_lowest = 0; for_each_vmap_node(vn) { spin_lock(&vn->busy.lock); *va = __find_vmap_area_exceed_addr(addr, &vn->busy.root); if (*va) if (!va_start_lowest || (*va)->va_start < va_start_lowest) va_start_lowest = (*va)->va_start; spin_unlock(&vn->busy.lock); } /* * Check if found VA exists, it might have gone away. In this case we * repeat the search because a VA has been removed concurrently and we * need to proceed to the next one, which is a rare case. */ if (va_start_lowest) { vn = addr_to_node(va_start_lowest); spin_lock(&vn->busy.lock); *va = __find_vmap_area(va_start_lowest, &vn->busy.root); if (*va) return vn; spin_unlock(&vn->busy.lock); goto repeat; } return NULL; } /* * This function returns back addresses of parent node * and its left or right link for further processing. * * Otherwise NULL is returned. In that case all further * steps regarding inserting of conflicting overlap range * have to be declined and actually considered as a bug. */ static __always_inline struct rb_node ** find_va_links(struct vmap_area *va, struct rb_root *root, struct rb_node *from, struct rb_node **parent) { struct vmap_area *tmp_va; struct rb_node **link; if (root) { link = &root->rb_node; if (unlikely(!*link)) { *parent = NULL; return link; } } else { link = &from; } /* * Go to the bottom of the tree. When we hit the last point * we end up with parent rb_node and correct direction, i name * it link, where the new va->rb_node will be attached to. */ do { tmp_va = rb_entry(*link, struct vmap_area, rb_node); /* * During the traversal we also do some sanity check. * Trigger the BUG() if there are sides(left/right) * or full overlaps. 
*/ if (va->va_end <= tmp_va->va_start) link = &(*link)->rb_left; else if (va->va_start >= tmp_va->va_end) link = &(*link)->rb_right; else { WARN(1, "vmalloc bug: 0x%lx-0x%lx overlaps with 0x%lx-0x%lx\n", va->va_start, va->va_end, tmp_va->va_start, tmp_va->va_end); return NULL; } } while (*link); *parent = &tmp_va->rb_node; return link; } static __always_inline struct list_head * get_va_next_sibling(struct rb_node *parent, struct rb_node **link) { struct list_head *list; if (unlikely(!parent)) /* * The red-black tree where we try to find VA neighbors * before merging or inserting is empty, i.e. it means * there is no free vmap space. Normally it does not * happen but we handle this case anyway. */ return NULL; list = &rb_entry(parent, struct vmap_area, rb_node)->list; return (&parent->rb_right == link ? list->next : list); } static __always_inline void __link_va(struct vmap_area *va, struct rb_root *root, struct rb_node *parent, struct rb_node **link, struct list_head *head, bool augment) { /* * VA is still not in the list, but we can * identify its future previous list_head node. */ if (likely(parent)) { head = &rb_entry(parent, struct vmap_area, rb_node)->list; if (&parent->rb_right != link) head = head->prev; } /* Insert to the rb-tree */ rb_link_node(&va->rb_node, parent, link); if (augment) { /* * Some explanation here. Just perform simple insertion * to the tree. We do not set va->subtree_max_size to * its current size before calling rb_insert_augmented(). * It is because we populate the tree from the bottom * to parent levels when the node _is_ in the tree. * * Therefore we set subtree_max_size to zero after insertion, * to let __augment_tree_propagate_from() puts everything to * the correct order later on. */ rb_insert_augmented(&va->rb_node, root, &free_vmap_area_rb_augment_cb); va->subtree_max_size = 0; } else { rb_insert_color(&va->rb_node, root); } /* Address-sort this list */ list_add(&va->list, head); } static __always_inline void link_va(struct vmap_area *va, struct rb_root *root, struct rb_node *parent, struct rb_node **link, struct list_head *head) { __link_va(va, root, parent, link, head, false); } static __always_inline void link_va_augment(struct vmap_area *va, struct rb_root *root, struct rb_node *parent, struct rb_node **link, struct list_head *head) { __link_va(va, root, parent, link, head, true); } static __always_inline void __unlink_va(struct vmap_area *va, struct rb_root *root, bool augment) { if (WARN_ON(RB_EMPTY_NODE(&va->rb_node))) return; if (augment) rb_erase_augmented(&va->rb_node, root, &free_vmap_area_rb_augment_cb); else rb_erase(&va->rb_node, root); list_del_init(&va->list); RB_CLEAR_NODE(&va->rb_node); } static __always_inline void unlink_va(struct vmap_area *va, struct rb_root *root) { __unlink_va(va, root, false); } static __always_inline void unlink_va_augment(struct vmap_area *va, struct rb_root *root) { __unlink_va(va, root, true); } #if DEBUG_AUGMENT_PROPAGATE_CHECK /* * Gets called when remove the node and rotate. 
*/ static __always_inline unsigned long compute_subtree_max_size(struct vmap_area *va) { return max3(va_size(va), get_subtree_max_size(va->rb_node.rb_left), get_subtree_max_size(va->rb_node.rb_right)); } static void augment_tree_propagate_check(void) { struct vmap_area *va; unsigned long computed_size; list_for_each_entry(va, &free_vmap_area_list, list) { computed_size = compute_subtree_max_size(va); if (computed_size != va->subtree_max_size) pr_emerg("tree is corrupted: %lu, %lu\n", va_size(va), va->subtree_max_size); } } #endif /* * This function populates subtree_max_size from bottom to upper * levels starting from VA point. The propagation must be done * when VA size is modified by changing its va_start/va_end. Or * in case of newly inserting of VA to the tree. * * It means that __augment_tree_propagate_from() must be called: * - After VA has been inserted to the tree(free path); * - After VA has been shrunk(allocation path); * - After VA has been increased(merging path). * * Please note that, it does not mean that upper parent nodes * and their subtree_max_size are recalculated all the time up * to the root node. * * 4--8 * /\ * / \ * / \ * 2--2 8--8 * * For example if we modify the node 4, shrinking it to 2, then * no any modification is required. If we shrink the node 2 to 1 * its subtree_max_size is updated only, and set to 1. If we shrink * the node 8 to 6, then its subtree_max_size is set to 6 and parent * node becomes 4--6. */ static __always_inline void augment_tree_propagate_from(struct vmap_area *va) { /* * Populate the tree from bottom towards the root until * the calculated maximum available size of checked node * is equal to its current one. */ free_vmap_area_rb_augment_cb_propagate(&va->rb_node, NULL); #if DEBUG_AUGMENT_PROPAGATE_CHECK augment_tree_propagate_check(); #endif } static void insert_vmap_area(struct vmap_area *va, struct rb_root *root, struct list_head *head) { struct rb_node **link; struct rb_node *parent; link = find_va_links(va, root, NULL, &parent); if (link) link_va(va, root, parent, link, head); } static void insert_vmap_area_augment(struct vmap_area *va, struct rb_node *from, struct rb_root *root, struct list_head *head) { struct rb_node **link; struct rb_node *parent; if (from) link = find_va_links(va, NULL, from, &parent); else link = find_va_links(va, root, NULL, &parent); if (link) { link_va_augment(va, root, parent, link, head); augment_tree_propagate_from(va); } } /* * Merge de-allocated chunk of VA memory with previous * and next free blocks. If coalesce is not done a new * free area is inserted. If VA has been merged, it is * freed. * * Please note, it can return NULL in case of overlap * ranges, followed by WARN() report. Despite it is a * buggy behaviour, a system can be alive and keep * ongoing. */ static __always_inline struct vmap_area * __merge_or_add_vmap_area(struct vmap_area *va, struct rb_root *root, struct list_head *head, bool augment) { struct vmap_area *sibling; struct list_head *next; struct rb_node **link; struct rb_node *parent; bool merged = false; /* * Find a place in the tree where VA potentially will be * inserted, unless it is merged with its sibling/siblings. */ link = find_va_links(va, root, NULL, &parent); if (!link) return NULL; /* * Get next node of VA to check if merging can be done. 
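 *
 * A worked example (not from the original source): if the free list holds
 * [0x1000, 0x3000) and [0x5000, 0x8000) and the area [0x3000, 0x5000) is
 * returned, the "next" neighbour is first grown down to 0x3000 and the freed
 * vmap_area object is handed back to vmap_area_cachep; the "previous"
 * neighbour then absorbs the result, growing to [0x1000, 0x8000), and the
 * already-merged "next" node is unlinked and freed as well, leaving a single
 * free area backed by one vmap_area object.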
*/ next = get_va_next_sibling(parent, link); if (unlikely(next == NULL)) goto insert; /* * start end * | | * |<------VA------>|<-----Next----->| * | | * start end */ if (next != head) { sibling = list_entry(next, struct vmap_area, list); if (sibling->va_start == va->va_end) { sibling->va_start = va->va_start; /* Free vmap_area object. */ kmem_cache_free(vmap_area_cachep, va); /* Point to the new merged area. */ va = sibling; merged = true; } } /* * start end * | | * |<-----Prev----->|<------VA------>| * | | * start end */ if (next->prev != head) { sibling = list_entry(next->prev, struct vmap_area, list); if (sibling->va_end == va->va_start) { /* * If both neighbors are coalesced, it is important * to unlink the "next" node first, followed by merging * with "previous" one. Otherwise the tree might not be * fully populated if a sibling's augmented value is * "normalized" because of rotation operations. */ if (merged) __unlink_va(va, root, augment); sibling->va_end = va->va_end; /* Free vmap_area object. */ kmem_cache_free(vmap_area_cachep, va); /* Point to the new merged area. */ va = sibling; merged = true; } } insert: if (!merged) __link_va(va, root, parent, link, head, augment); return va; } static __always_inline struct vmap_area * merge_or_add_vmap_area(struct vmap_area *va, struct rb_root *root, struct list_head *head) { return __merge_or_add_vmap_area(va, root, head, false); } static __always_inline struct vmap_area * merge_or_add_vmap_area_augment(struct vmap_area *va, struct rb_root *root, struct list_head *head) { va = __merge_or_add_vmap_area(va, root, head, true); if (va) augment_tree_propagate_from(va); return va; } static __always_inline bool is_within_this_va(struct vmap_area *va, unsigned long size, unsigned long align, unsigned long vstart) { unsigned long nva_start_addr; if (va->va_start > vstart) nva_start_addr = ALIGN(va->va_start, align); else nva_start_addr = ALIGN(vstart, align); /* Can be overflowed due to big size or alignment. */ if (nva_start_addr + size < nva_start_addr || nva_start_addr < vstart) return false; return (nva_start_addr + size <= va->va_end); } /* * Find the first free block(lowest start address) in the tree, * that will accomplish the request corresponding to passing * parameters. Please note, with an alignment bigger than PAGE_SIZE, * a search length is adjusted to account for worst case alignment * overhead. */ static __always_inline struct vmap_area * find_vmap_lowest_match(struct rb_root *root, unsigned long size, unsigned long align, unsigned long vstart, bool adjust_search_size) { struct vmap_area *va; struct rb_node *node; unsigned long length; /* Start from the root. */ node = root->rb_node; /* Adjust the search size for alignment overhead. */ length = adjust_search_size ? size + align - 1 : size; while (node) { va = rb_entry(node, struct vmap_area, rb_node); if (get_subtree_max_size(node->rb_left) >= length && vstart < va->va_start) { node = node->rb_left; } else { if (is_within_this_va(va, size, align, vstart)) return va; /* * Does not make sense to go deeper towards the right * sub-tree if it does not have a free block that is * equal or bigger to the requested search length. */ if (get_subtree_max_size(node->rb_right) >= length) { node = node->rb_right; continue; } /* * OK. We roll back and find the first right sub-tree, * that will satisfy the search criteria. It can happen * due to "vstart" restriction or an alignment overhead * that is bigger then PAGE_SIZE. 
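 *
 * A worked example of that adjustment (not from the original source),
 * assuming 4K pages: for size = 8K and align = 16K the tree is searched with
 * length = size + align - 1, i.e. just under 24K, because a free block whose
 * start is only page aligned may need up to align - PAGE_SIZE bytes of
 * padding before an aligned 8K chunk fits. is_within_this_va() then
 * re-checks the exact fit against the chosen block's real boundaries.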
*/ while ((node = rb_parent(node))) { va = rb_entry(node, struct vmap_area, rb_node); if (is_within_this_va(va, size, align, vstart)) return va; if (get_subtree_max_size(node->rb_right) >= length && vstart <= va->va_start) { /* * Shift the vstart forward. Please note, we update it with * parent's start address adding "1" because we do not want * to enter same sub-tree after it has already been checked * and no suitable free block found there. */ vstart = va->va_start + 1; node = node->rb_right; break; } } } } return NULL; } #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK #include <linux/random.h> static struct vmap_area * find_vmap_lowest_linear_match(struct list_head *head, unsigned long size, unsigned long align, unsigned long vstart) { struct vmap_area *va; list_for_each_entry(va, head, list) { if (!is_within_this_va(va, size, align, vstart)) continue; return va; } return NULL; } static void find_vmap_lowest_match_check(struct rb_root *root, struct list_head *head, unsigned long size, unsigned long align) { struct vmap_area *va_1, *va_2; unsigned long vstart; unsigned int rnd; get_random_bytes(&rnd, sizeof(rnd)); vstart = VMALLOC_START + rnd; va_1 = find_vmap_lowest_match(root, size, align, vstart, false); va_2 = find_vmap_lowest_linear_match(head, size, align, vstart); if (va_1 != va_2) pr_emerg("not lowest: t: 0x%p, l: 0x%p, v: 0x%lx\n", va_1, va_2, vstart); } #endif enum fit_type { NOTHING_FIT = 0, FL_FIT_TYPE = 1, /* full fit */ LE_FIT_TYPE = 2, /* left edge fit */ RE_FIT_TYPE = 3, /* right edge fit */ NE_FIT_TYPE = 4 /* no edge fit */ }; static __always_inline enum fit_type classify_va_fit_type(struct vmap_area *va, unsigned long nva_start_addr, unsigned long size) { enum fit_type type; /* Check if it is within VA. */ if (nva_start_addr < va->va_start || nva_start_addr + size > va->va_end) return NOTHING_FIT; /* Now classify. */ if (va->va_start == nva_start_addr) { if (va->va_end == nva_start_addr + size) type = FL_FIT_TYPE; else type = LE_FIT_TYPE; } else if (va->va_end == nva_start_addr + size) { type = RE_FIT_TYPE; } else { type = NE_FIT_TYPE; } return type; } static __always_inline int va_clip(struct rb_root *root, struct list_head *head, struct vmap_area *va, unsigned long nva_start_addr, unsigned long size) { struct vmap_area *lva = NULL; enum fit_type type = classify_va_fit_type(va, nva_start_addr, size); if (type == FL_FIT_TYPE) { /* * No need to split VA, it fully fits. * * | | * V NVA V * |---------------| */ unlink_va_augment(va, root); kmem_cache_free(vmap_area_cachep, va); } else if (type == LE_FIT_TYPE) { /* * Split left edge of fit VA. * * | | * V NVA V R * |-------|-------| */ va->va_start += size; } else if (type == RE_FIT_TYPE) { /* * Split right edge of fit VA. * * | | * L V NVA V * |-------|-------| */ va->va_end = nva_start_addr; } else if (type == NE_FIT_TYPE) { /* * Split no edge of fit VA. * * | | * L V NVA V R * |---|-------|---| */ lva = __this_cpu_xchg(ne_fit_preload_node, NULL); if (unlikely(!lva)) { /* * For percpu allocator we do not do any pre-allocation * and leave it as it is. The reason is it most likely * never ends up with NE_FIT_TYPE splitting. In case of * percpu allocations offsets and sizes are aligned to * fixed align request, i.e. RE_FIT_TYPE and FL_FIT_TYPE * are its main fitting cases. * * There are a few exceptions though, as an example it is * a first allocation (early boot up) when we have "one" * big free space that has to be split. * * Also we can hit this path in case of regular "vmap" * allocations, if "this" current CPU was not preloaded. 
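/*
 * A concrete illustration of the four fit types handled above (not from the
 * original source): take a free area [0x1000, 0x9000) and a request of size
 * 0x2000 placed at nva_start_addr:
 *   - at 0x1000 with size 0x8000: FL_FIT_TYPE, the area is consumed whole;
 *   - at 0x1000: LE_FIT_TYPE, va_start moves to 0x3000;
 *   - at 0x7000: RE_FIT_TYPE, va_end moves to 0x7000;
 *   - at 0x3000: NE_FIT_TYPE, the area splits into [0x1000, 0x3000) and
 *     [0x5000, 0x9000), which is the case that needs the spare vmap_area
 *     object (the preloaded one, or the GFP_NOWAIT fallback below).
 */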
* See the comment in alloc_vmap_area() why. If so, then * GFP_NOWAIT is used instead to get an extra object for * split purpose. That is rare and most time does not * occur. * * What happens if an allocation gets failed. Basically, * an "overflow" path is triggered to purge lazily freed * areas to free some memory, then, the "retry" path is * triggered to repeat one more time. See more details * in alloc_vmap_area() function. */ lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT); if (!lva) return -ENOMEM; } /* * Build the remainder. */ lva->va_start = va->va_start; lva->va_end = nva_start_addr; /* * Shrink this VA to remaining size. */ va->va_start = nva_start_addr + size; } else { return -EINVAL; } if (type != FL_FIT_TYPE) { augment_tree_propagate_from(va); if (lva) /* type == NE_FIT_TYPE */ insert_vmap_area_augment(lva, &va->rb_node, root, head); } return 0; } static unsigned long va_alloc(struct vmap_area *va, struct rb_root *root, struct list_head *head, unsigned long size, unsigned long align, unsigned long vstart, unsigned long vend) { unsigned long nva_start_addr; int ret; if (va->va_start > vstart) nva_start_addr = ALIGN(va->va_start, align); else nva_start_addr = ALIGN(vstart, align); /* Check the "vend" restriction. */ if (nva_start_addr + size > vend) return -ERANGE; /* Update the free vmap_area. */ ret = va_clip(root, head, va, nva_start_addr, size); if (WARN_ON_ONCE(ret)) return ret; return nva_start_addr; } /* * Returns a start address of the newly allocated area, if success. * Otherwise an error value is returned that indicates failure. */ static __always_inline unsigned long __alloc_vmap_area(struct rb_root *root, struct list_head *head, unsigned long size, unsigned long align, unsigned long vstart, unsigned long vend) { bool adjust_search_size = true; unsigned long nva_start_addr; struct vmap_area *va; /* * Do not adjust when: * a) align <= PAGE_SIZE, because it does not make any sense. * All blocks(their start addresses) are at least PAGE_SIZE * aligned anyway; * b) a short range where a requested size corresponds to exactly * specified [vstart:vend] interval and an alignment > PAGE_SIZE. * With adjusted search length an allocation would not succeed. */ if (align <= PAGE_SIZE || (align > PAGE_SIZE && (vend - vstart) == size)) adjust_search_size = false; va = find_vmap_lowest_match(root, size, align, vstart, adjust_search_size); if (unlikely(!va)) return -ENOENT; nva_start_addr = va_alloc(va, root, head, size, align, vstart, vend); #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK if (!IS_ERR_VALUE(nva_start_addr)) find_vmap_lowest_match_check(root, head, size, align); #endif return nva_start_addr; } /* * Free a region of KVA allocated by alloc_vmap_area */ static void free_vmap_area(struct vmap_area *va) { struct vmap_node *vn = addr_to_node(va->va_start); /* * Remove from the busy tree/list. */ spin_lock(&vn->busy.lock); unlink_va(va, &vn->busy.root); spin_unlock(&vn->busy.lock); /* * Insert/Merge it back to the free tree/list. */ spin_lock(&free_vmap_area_lock); merge_or_add_vmap_area_augment(va, &free_vmap_area_root, &free_vmap_area_list); spin_unlock(&free_vmap_area_lock); } static inline void preload_this_cpu_lock(spinlock_t *lock, gfp_t gfp_mask, int node) { struct vmap_area *va = NULL, *tmp; /* * Preload this CPU with one extra vmap_area object. It is used * when fit type of free area is NE_FIT_TYPE. It guarantees that * a CPU that does an allocation is preloaded. 
* * We do it in non-atomic context, thus it allows us to use more * permissive allocation masks to be more stable under low memory * condition and high memory pressure. */ if (!this_cpu_read(ne_fit_preload_node)) va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node); spin_lock(lock); tmp = NULL; if (va && !__this_cpu_try_cmpxchg(ne_fit_preload_node, &tmp, va)) kmem_cache_free(vmap_area_cachep, va); } static struct vmap_pool * size_to_va_pool(struct vmap_node *vn, unsigned long size) { unsigned int idx = (size - 1) / PAGE_SIZE; if (idx < MAX_VA_SIZE_PAGES) return &vn->pool[idx]; return NULL; } static bool node_pool_add_va(struct vmap_node *n, struct vmap_area *va) { struct vmap_pool *vp; vp = size_to_va_pool(n, va_size(va)); if (!vp) return false; spin_lock(&n->pool_lock); list_add(&va->list, &vp->head); WRITE_ONCE(vp->len, vp->len + 1); spin_unlock(&n->pool_lock); return true; } static struct vmap_area * node_pool_del_va(struct vmap_node *vn, unsigned long size, unsigned long align, unsigned long vstart, unsigned long vend) { struct vmap_area *va = NULL; struct vmap_pool *vp; int err = 0; vp = size_to_va_pool(vn, size); if (!vp || list_empty(&vp->head)) return NULL; spin_lock(&vn->pool_lock); if (!list_empty(&vp->head)) { va = list_first_entry(&vp->head, struct vmap_area, list); if (IS_ALIGNED(va->va_start, align)) { /* * Do some sanity check and emit a warning * if one of below checks detects an error. */ err |= (va_size(va) != size); err |= (va->va_start < vstart); err |= (va->va_end > vend); if (!WARN_ON_ONCE(err)) { list_del_init(&va->list); WRITE_ONCE(vp->len, vp->len - 1); } else { va = NULL; } } else { list_move_tail(&va->list, &vp->head); va = NULL; } } spin_unlock(&vn->pool_lock); return va; } static struct vmap_area * node_alloc(unsigned long size, unsigned long align, unsigned long vstart, unsigned long vend, unsigned long *addr, unsigned int *vn_id) { struct vmap_area *va; *vn_id = 0; *addr = -EINVAL; /* * Fallback to a global heap if not vmalloc or there * is only one node. */ if (vstart != VMALLOC_START || vend != VMALLOC_END || nr_vmap_nodes == 1) return NULL; *vn_id = raw_smp_processor_id() % nr_vmap_nodes; va = node_pool_del_va(id_to_node(*vn_id), size, align, vstart, vend); *vn_id = encode_vn_id(*vn_id); if (va) *addr = va->va_start; return va; } static inline void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va, unsigned long flags, const void *caller) { vm->flags = flags; vm->addr = (void *)va->va_start; vm->size = vm->requested_size = va_size(va); vm->caller = caller; va->vm = vm; } /* * Allocate a region of KVA of the specified size and alignment, within the * vstart and vend. If vm is passed in, the two will also be bound. */ static struct vmap_area *alloc_vmap_area(unsigned long size, unsigned long align, unsigned long vstart, unsigned long vend, int node, gfp_t gfp_mask, unsigned long va_flags, struct vm_struct *vm) { struct vmap_node *vn; struct vmap_area *va; unsigned long freed; unsigned long addr; unsigned int vn_id; int purged = 0; int ret; if (unlikely(!size || offset_in_page(size) || !is_power_of_2(align))) return ERR_PTR(-EINVAL); if (unlikely(!vmap_initialized)) return ERR_PTR(-EBUSY); might_sleep(); /* * If a VA is obtained from a global heap(if it fails here) * it is anyway marked with this "vn_id" so it is returned * to this pool's node later. Such way gives a possibility * to populate pools based on users demand. * * On success a ready to go VA is returned. 
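/*
 * A note with concrete numbers (not from the original source): the per-node
 * pools consulted by node_alloc() are size segregated. size_to_va_pool()
 * maps a request of exactly N pages to pool[N - 1], so with 4K pages an 8K
 * request is served from pool[1], while anything above MAX_VA_SIZE_PAGES
 * pages (256 pages, i.e. 1M) skips the pools and falls back to the global
 * free tree. node_alloc() itself is only tried for plain vmalloc-space
 * requests ([VMALLOC_START, VMALLOC_END]) when more than one node exists.
 */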
*/ va = node_alloc(size, align, vstart, vend, &addr, &vn_id); if (!va) { gfp_mask = gfp_mask & GFP_RECLAIM_MASK; va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node); if (unlikely(!va)) return ERR_PTR(-ENOMEM); /* * Only scan the relevant parts containing pointers to other objects * to avoid false negatives. */ kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask); } retry: if (IS_ERR_VALUE(addr)) { preload_this_cpu_lock(&free_vmap_area_lock, gfp_mask, node); addr = __alloc_vmap_area(&free_vmap_area_root, &free_vmap_area_list, size, align, vstart, vend); spin_unlock(&free_vmap_area_lock); } trace_alloc_vmap_area(addr, size, align, vstart, vend, IS_ERR_VALUE(addr)); /* * If an allocation fails, the error value is * returned. Therefore trigger the overflow path. */ if (IS_ERR_VALUE(addr)) goto overflow; va->va_start = addr; va->va_end = addr + size; va->vm = NULL; va->flags = (va_flags | vn_id); if (vm) { vm->addr = (void *)va->va_start; vm->size = va_size(va); va->vm = vm; } vn = addr_to_node(va->va_start); spin_lock(&vn->busy.lock); insert_vmap_area(va, &vn->busy.root, &vn->busy.head); spin_unlock(&vn->busy.lock); BUG_ON(!IS_ALIGNED(va->va_start, align)); BUG_ON(va->va_start < vstart); BUG_ON(va->va_end > vend); ret = kasan_populate_vmalloc(addr, size); if (ret) { free_vmap_area(va); return ERR_PTR(ret); } return va; overflow: if (!purged) { reclaim_and_purge_vmap_areas(); purged = 1; goto retry; } freed = 0; blocking_notifier_call_chain(&vmap_notify_list, 0, &freed); if (freed > 0) { purged = 0; goto retry; } if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) pr_warn("vmalloc_node_range for size %lu failed: Address range restricted to %#lx - %#lx\n", size, vstart, vend); kmem_cache_free(vmap_area_cachep, va); return ERR_PTR(-EBUSY); } int register_vmap_purge_notifier(struct notifier_block *nb) { return blocking_notifier_chain_register(&vmap_notify_list, nb); } EXPORT_SYMBOL_GPL(register_vmap_purge_notifier); int unregister_vmap_purge_notifier(struct notifier_block *nb) { return blocking_notifier_chain_unregister(&vmap_notify_list, nb); } EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier); /* * lazy_max_pages is the maximum amount of virtual address space we gather up * before attempting to purge with a TLB flush. * * There is a tradeoff here: a larger number will cover more kernel page tables * and take slightly longer to purge, but it will linearly reduce the number of * global TLB flushes that must be performed. It would seem natural to scale * this number up linearly with the number of CPUs (because vmapping activity * could also scale linearly with the number of CPUs), however it is likely * that in practice, workloads might be constrained in other ways that mean * vmap activity will not scale linearly with CPUs. Also, I want to be * conservative and not introduce a big latency on huge systems, so go with * a less aggressive log scale. It will still be an improvement over the old * code, and it will be simple to change the scale factor if we find that it * becomes a problem on bigger systems. */ static unsigned long lazy_max_pages(void) { unsigned int log; log = fls(num_online_cpus()); return log * (32UL * 1024 * 1024 / PAGE_SIZE); } /* * Serialize vmap purging. There is no actual critical section protected * by this lock, but we want to avoid concurrent calls for performance * reasons and to make the pcpu_get_vm_areas more deterministic. 
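/*
 * Worked numbers for lazy_max_pages() above (not from the original source),
 * assuming 4K pages: one online CPU gives fls(1) = 1, i.e. 8192 pages (32M)
 * of lazily freed space before a drain is scheduled; 16 CPUs give fls(16) =
 * 5, i.e. 160M; 64 CPUs give fls(64) = 7, i.e. 224M - the conservative log
 * scale described in the comment.
 */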
*/ static DEFINE_MUTEX(vmap_purge_lock); /* for per-CPU blocks */ static void purge_fragmented_blocks_allcpus(void); static void reclaim_list_global(struct list_head *head) { struct vmap_area *va, *n; if (list_empty(head)) return; spin_lock(&free_vmap_area_lock); list_for_each_entry_safe(va, n, head, list) merge_or_add_vmap_area_augment(va, &free_vmap_area_root, &free_vmap_area_list); spin_unlock(&free_vmap_area_lock); } static void decay_va_pool_node(struct vmap_node *vn, bool full_decay) { LIST_HEAD(decay_list); struct rb_root decay_root = RB_ROOT; struct vmap_area *va, *nva; unsigned long n_decay, pool_len; int i; for (i = 0; i < MAX_VA_SIZE_PAGES; i++) { LIST_HEAD(tmp_list); if (list_empty(&vn->pool[i].head)) continue; /* Detach the pool, so no-one can access it. */ spin_lock(&vn->pool_lock); list_replace_init(&vn->pool[i].head, &tmp_list); spin_unlock(&vn->pool_lock); pool_len = n_decay = vn->pool[i].len; WRITE_ONCE(vn->pool[i].len, 0); /* Decay a pool by ~25% out of left objects. */ if (!full_decay) n_decay >>= 2; pool_len -= n_decay; list_for_each_entry_safe(va, nva, &tmp_list, list) { if (!n_decay--) break; list_del_init(&va->list); merge_or_add_vmap_area(va, &decay_root, &decay_list); } /* * Attach the pool back if it has been partly decayed. * Please note, it is supposed that nobody(other contexts) * can populate the pool therefore a simple list replace * operation takes place here. */ if (!list_empty(&tmp_list)) { spin_lock(&vn->pool_lock); list_replace_init(&tmp_list, &vn->pool[i].head); WRITE_ONCE(vn->pool[i].len, pool_len); spin_unlock(&vn->pool_lock); } } reclaim_list_global(&decay_list); } static void kasan_release_vmalloc_node(struct vmap_node *vn) { struct vmap_area *va; unsigned long start, end; start = list_first_entry(&vn->purge_list, struct vmap_area, list)->va_start; end = list_last_entry(&vn->purge_list, struct vmap_area, list)->va_end; list_for_each_entry(va, &vn->purge_list, list) { if (is_vmalloc_or_module_addr((void *) va->va_start)) kasan_release_vmalloc(va->va_start, va->va_end, va->va_start, va->va_end, KASAN_VMALLOC_PAGE_RANGE); } kasan_release_vmalloc(start, end, start, end, KASAN_VMALLOC_TLB_FLUSH); } static void purge_vmap_node(struct work_struct *work) { struct vmap_node *vn = container_of(work, struct vmap_node, purge_work); unsigned long nr_purged_pages = 0; struct vmap_area *va, *n_va; LIST_HEAD(local_list); if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) kasan_release_vmalloc_node(vn); vn->nr_purged = 0; list_for_each_entry_safe(va, n_va, &vn->purge_list, list) { unsigned long nr = va_size(va) >> PAGE_SHIFT; unsigned int vn_id = decode_vn_id(va->flags); list_del_init(&va->list); nr_purged_pages += nr; vn->nr_purged++; if (is_vn_id_valid(vn_id) && !vn->skip_populate) if (node_pool_add_va(vn, va)) continue; /* Go back to global. */ list_add(&va->list, &local_list); } atomic_long_sub(nr_purged_pages, &vmap_lazy_nr); reclaim_list_global(&local_list); } /* * Purges all lazily-freed vmap areas. */ static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end, bool full_pool_decay) { unsigned long nr_purged_areas = 0; unsigned int nr_purge_helpers; static cpumask_t purge_nodes; unsigned int nr_purge_nodes; struct vmap_node *vn; int i; lockdep_assert_held(&vmap_purge_lock); /* * Use cpumask to mark which node has to be processed. 
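/*
 * A worked example of the pool decay above (not from the original source):
 * a pool holding 8 ready-to-go areas with full_decay == false detaches the
 * list, moves n_decay = 8 >> 2 = 2 areas into the decay list (they are later
 * merged back into the global free tree by reclaim_list_global()), and
 * re-attaches the remaining 6 with the pool length updated accordingly.
 * With full_decay == true all 8 are drained and the pool is left empty.
 */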
*/ purge_nodes = CPU_MASK_NONE; for_each_vmap_node(vn) { INIT_LIST_HEAD(&vn->purge_list); vn->skip_populate = full_pool_decay; decay_va_pool_node(vn, full_pool_decay); if (RB_EMPTY_ROOT(&vn->lazy.root)) continue; spin_lock(&vn->lazy.lock); WRITE_ONCE(vn->lazy.root.rb_node, NULL); list_replace_init(&vn->lazy.head, &vn->purge_list); spin_unlock(&vn->lazy.lock); start = min(start, list_first_entry(&vn->purge_list, struct vmap_area, list)->va_start); end = max(end, list_last_entry(&vn->purge_list, struct vmap_area, list)->va_end); cpumask_set_cpu(node_to_id(vn), &purge_nodes); } nr_purge_nodes = cpumask_weight(&purge_nodes); if (nr_purge_nodes > 0) { flush_tlb_kernel_range(start, end); /* One extra worker is per a lazy_max_pages() full set minus one. */ nr_purge_helpers = atomic_long_read(&vmap_lazy_nr) / lazy_max_pages(); nr_purge_helpers = clamp(nr_purge_helpers, 1U, nr_purge_nodes) - 1; for_each_cpu(i, &purge_nodes) { vn = &vmap_nodes[i]; if (nr_purge_helpers > 0) { INIT_WORK(&vn->purge_work, purge_vmap_node); if (cpumask_test_cpu(i, cpu_online_mask)) schedule_work_on(i, &vn->purge_work); else schedule_work(&vn->purge_work); nr_purge_helpers--; } else { vn->purge_work.func = NULL; purge_vmap_node(&vn->purge_work); nr_purged_areas += vn->nr_purged; } } for_each_cpu(i, &purge_nodes) { vn = &vmap_nodes[i]; if (vn->purge_work.func) { flush_work(&vn->purge_work); nr_purged_areas += vn->nr_purged; } } } trace_purge_vmap_area_lazy(start, end, nr_purged_areas); return nr_purged_areas > 0; } /* * Reclaim vmap areas by purging fragmented blocks and purge_vmap_area_list. */ static void reclaim_and_purge_vmap_areas(void) { mutex_lock(&vmap_purge_lock); purge_fragmented_blocks_allcpus(); __purge_vmap_area_lazy(ULONG_MAX, 0, true); mutex_unlock(&vmap_purge_lock); } static void drain_vmap_area_work(struct work_struct *work) { mutex_lock(&vmap_purge_lock); __purge_vmap_area_lazy(ULONG_MAX, 0, false); mutex_unlock(&vmap_purge_lock); } /* * Free a vmap area, caller ensuring that the area has been unmapped, * unlinked and flush_cache_vunmap had been called for the correct * range previously. */ static void free_vmap_area_noflush(struct vmap_area *va) { unsigned long nr_lazy_max = lazy_max_pages(); unsigned long va_start = va->va_start; unsigned int vn_id = decode_vn_id(va->flags); struct vmap_node *vn; unsigned long nr_lazy; if (WARN_ON_ONCE(!list_empty(&va->list))) return; nr_lazy = atomic_long_add_return_relaxed(va_size(va) >> PAGE_SHIFT, &vmap_lazy_nr); /* * If it was request by a certain node we would like to * return it to that node, i.e. its pool for later reuse. */ vn = is_vn_id_valid(vn_id) ? id_to_node(vn_id):addr_to_node(va->va_start); spin_lock(&vn->lazy.lock); insert_vmap_area(va, &vn->lazy.root, &vn->lazy.head); spin_unlock(&vn->lazy.lock); trace_free_vmap_area_noflush(va_start, nr_lazy, nr_lazy_max); /* After this point, we may free va at any time */ if (unlikely(nr_lazy > nr_lazy_max)) schedule_work(&drain_vmap_work); } /* * Free and unmap a vmap area */ static void free_unmap_vmap_area(struct vmap_area *va) { flush_cache_vunmap(va->va_start, va->va_end); vunmap_range_noflush(va->va_start, va->va_end); if (debug_pagealloc_enabled_static()) flush_tlb_kernel_range(va->va_start, va->va_end); free_vmap_area_noflush(va); } struct vmap_area *find_vmap_area(unsigned long addr) { struct vmap_node *vn; struct vmap_area *va; int i, j; if (unlikely(!vmap_initialized)) return NULL; /* * An addr_to_node_id(addr) converts an address to a node index * where a VA is located. 
If VA spans several zones and passed * addr is not the same as va->va_start, what is not common, we * may need to scan extra nodes. See an example: * * <----va----> * -|-----|-----|-----|-----|- * 1 2 0 1 * * VA resides in node 1 whereas it spans 1, 2 an 0. If passed * addr is within 2 or 0 nodes we should do extra work. */ i = j = addr_to_node_id(addr); do { vn = &vmap_nodes[i]; spin_lock(&vn->busy.lock); va = __find_vmap_area(addr, &vn->busy.root); spin_unlock(&vn->busy.lock); if (va) return va; } while ((i = (i + nr_vmap_nodes - 1) % nr_vmap_nodes) != j); return NULL; } static struct vmap_area *find_unlink_vmap_area(unsigned long addr) { struct vmap_node *vn; struct vmap_area *va; int i, j; /* * Check the comment in the find_vmap_area() about the loop. */ i = j = addr_to_node_id(addr); do { vn = &vmap_nodes[i]; spin_lock(&vn->busy.lock); va = __find_vmap_area(addr, &vn->busy.root); if (va) unlink_va(va, &vn->busy.root); spin_unlock(&vn->busy.lock); if (va) return va; } while ((i = (i + nr_vmap_nodes - 1) % nr_vmap_nodes) != j); return NULL; } /*** Per cpu kva allocator ***/ /* * vmap space is limited especially on 32 bit architectures. Ensure there is * room for at least 16 percpu vmap blocks per CPU. */ /* * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able * to #define VMALLOC_SPACE (VMALLOC_END-VMALLOC_START). Guess * instead (we just need a rough idea) */ #if BITS_PER_LONG == 32 #define VMALLOC_SPACE (128UL*1024*1024) #else #define VMALLOC_SPACE (128UL*1024*1024*1024) #endif #define VMALLOC_PAGES (VMALLOC_SPACE / PAGE_SIZE) #define VMAP_MAX_ALLOC BITS_PER_LONG /* 256K with 4K pages */ #define VMAP_BBMAP_BITS_MAX 1024 /* 4MB with 4K pages */ #define VMAP_BBMAP_BITS_MIN (VMAP_MAX_ALLOC*2) #define VMAP_MIN(x, y) ((x) < (y) ? (x) : (y)) /* can't use min() */ #define VMAP_MAX(x, y) ((x) > (y) ? (x) : (y)) /* can't use max() */ #define VMAP_BBMAP_BITS \ VMAP_MIN(VMAP_BBMAP_BITS_MAX, \ VMAP_MAX(VMAP_BBMAP_BITS_MIN, \ VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16)) #define VMAP_BLOCK_SIZE (VMAP_BBMAP_BITS * PAGE_SIZE) /* * Purge threshold to prevent overeager purging of fragmented blocks for * regular operations: Purge if vb->free is less than 1/4 of the capacity. */ #define VMAP_PURGE_THRESHOLD (VMAP_BBMAP_BITS / 4) #define VMAP_RAM 0x1 /* indicates vm_map_ram area*/ #define VMAP_BLOCK 0x2 /* mark out the vmap_block sub-type*/ #define VMAP_FLAGS_MASK 0x3 struct vmap_block_queue { spinlock_t lock; struct list_head free; /* * An xarray requires an extra memory dynamically to * be allocated. If it is an issue, we can use rb-tree * instead. */ struct xarray vmap_blocks; }; struct vmap_block { spinlock_t lock; struct vmap_area *va; unsigned long free, dirty; DECLARE_BITMAP(used_map, VMAP_BBMAP_BITS); unsigned long dirty_min, dirty_max; /*< dirty range */ struct list_head free_list; struct rcu_head rcu_head; struct list_head purge; unsigned int cpu; }; /* Queue of free and dirty vmap blocks, for allocation and flushing purposes */ static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue); /* * In order to fast access to any "vmap_block" associated with a * specific address, we use a hash. * * A per-cpu vmap_block_queue is used in both ways, to serialize * an access to free block chains among CPUs(alloc path) and it * also acts as a vmap_block hash(alloc/free paths). It means we * overload it, since we already have the per-cpu array which is * used as a hash table. When used as a hash a 'cpu' passed to * per_cpu() is not actually a CPU but rather a hash index. 
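/*
 * Worked numbers for the sizing macros further above (not from the original
 * source), for a 64-bit build with 4K pages and NR_CPUS = 64: VMAP_MAX_ALLOC
 * is BITS_PER_LONG = 64 pages (256K), VMALLOC_PAGES is 128G / 4K = 32M, so
 * the clamp yields VMAP_BBMAP_BITS = 1024 and VMAP_BLOCK_SIZE = 4M, i.e.
 * each per-cpu vmap block covers 1024 pages and VMAP_PURGE_THRESHOLD is 256
 * of those.
 */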
* * A hash function is addr_to_vb_xa() which hashes any address * to a specific index(in a hash) it belongs to. This then uses a * per_cpu() macro to access an array with generated index. * * An example: * * CPU_1 CPU_2 CPU_0 * | | | * V V V * 0 10 20 30 40 50 60 * |------|------|------|------|------|------|...<vmap address space> * CPU0 CPU1 CPU2 CPU0 CPU1 CPU2 * * - CPU_1 invokes vm_unmap_ram(6), 6 belongs to CPU0 zone, thus * it access: CPU0/INDEX0 -> vmap_blocks -> xa_lock; * * - CPU_2 invokes vm_unmap_ram(11), 11 belongs to CPU1 zone, thus * it access: CPU1/INDEX1 -> vmap_blocks -> xa_lock; * * - CPU_0 invokes vm_unmap_ram(20), 20 belongs to CPU2 zone, thus * it access: CPU2/INDEX2 -> vmap_blocks -> xa_lock. * * This technique almost always avoids lock contention on insert/remove, * however xarray spinlocks protect against any contention that remains. */ static struct xarray * addr_to_vb_xa(unsigned long addr) { int index = (addr / VMAP_BLOCK_SIZE) % nr_cpu_ids; /* * Please note, nr_cpu_ids points on a highest set * possible bit, i.e. we never invoke cpumask_next() * if an index points on it which is nr_cpu_ids - 1. */ if (!cpu_possible(index)) index = cpumask_next(index, cpu_possible_mask); return &per_cpu(vmap_block_queue, index).vmap_blocks; } /* * We should probably have a fallback mechanism to allocate virtual memory * out of partially filled vmap blocks. However vmap block sizing should be * fairly reasonable according to the vmalloc size, so it shouldn't be a * big problem. */ static unsigned long addr_to_vb_idx(unsigned long addr) { addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1); addr /= VMAP_BLOCK_SIZE; return addr; } static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off) { unsigned long addr; addr = va_start + (pages_off << PAGE_SHIFT); BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start)); return (void *)addr; } /** * new_vmap_block - allocates new vmap_block and occupies 2^order pages in this * block. 
Of course pages number can't exceed VMAP_BBMAP_BITS * @order: how many 2^order pages should be occupied in newly allocated block * @gfp_mask: flags for the page level allocator * * Return: virtual address in a newly allocated block or ERR_PTR(-errno) */ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask) { struct vmap_block_queue *vbq; struct vmap_block *vb; struct vmap_area *va; struct xarray *xa; unsigned long vb_idx; int node, err; void *vaddr; node = numa_node_id(); vb = kmalloc_node(sizeof(struct vmap_block), gfp_mask & GFP_RECLAIM_MASK, node); if (unlikely(!vb)) return ERR_PTR(-ENOMEM); va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE, VMALLOC_START, VMALLOC_END, node, gfp_mask, VMAP_RAM|VMAP_BLOCK, NULL); if (IS_ERR(va)) { kfree(vb); return ERR_CAST(va); } vaddr = vmap_block_vaddr(va->va_start, 0); spin_lock_init(&vb->lock); vb->va = va; /* At least something should be left free */ BUG_ON(VMAP_BBMAP_BITS <= (1UL << order)); bitmap_zero(vb->used_map, VMAP_BBMAP_BITS); vb->free = VMAP_BBMAP_BITS - (1UL << order); vb->dirty = 0; vb->dirty_min = VMAP_BBMAP_BITS; vb->dirty_max = 0; bitmap_set(vb->used_map, 0, (1UL << order)); INIT_LIST_HEAD(&vb->free_list); vb->cpu = raw_smp_processor_id(); xa = addr_to_vb_xa(va->va_start); vb_idx = addr_to_vb_idx(va->va_start); err = xa_insert(xa, vb_idx, vb, gfp_mask); if (err) { kfree(vb); free_vmap_area(va); return ERR_PTR(err); } /* * list_add_tail_rcu could happened in another core * rather than vb->cpu due to task migration, which * is safe as list_add_tail_rcu will ensure the list's * integrity together with list_for_each_rcu from read * side. */ vbq = per_cpu_ptr(&vmap_block_queue, vb->cpu); spin_lock(&vbq->lock); list_add_tail_rcu(&vb->free_list, &vbq->free); spin_unlock(&vbq->lock); return vaddr; } static void free_vmap_block(struct vmap_block *vb) { struct vmap_node *vn; struct vmap_block *tmp; struct xarray *xa; xa = addr_to_vb_xa(vb->va->va_start); tmp = xa_erase(xa, addr_to_vb_idx(vb->va->va_start)); BUG_ON(tmp != vb); vn = addr_to_node(vb->va->va_start); spin_lock(&vn->busy.lock); unlink_va(vb->va, &vn->busy.root); spin_unlock(&vn->busy.lock); free_vmap_area_noflush(vb->va); kfree_rcu(vb, rcu_head); } static bool purge_fragmented_block(struct vmap_block *vb, struct list_head *purge_list, bool force_purge) { struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, vb->cpu); if (vb->free + vb->dirty != VMAP_BBMAP_BITS || vb->dirty == VMAP_BBMAP_BITS) return false; /* Don't overeagerly purge usable blocks unless requested */ if (!(force_purge || vb->free < VMAP_PURGE_THRESHOLD)) return false; /* prevent further allocs after releasing lock */ WRITE_ONCE(vb->free, 0); /* prevent purging it again */ WRITE_ONCE(vb->dirty, VMAP_BBMAP_BITS); vb->dirty_min = 0; vb->dirty_max = VMAP_BBMAP_BITS; spin_lock(&vbq->lock); list_del_rcu(&vb->free_list); spin_unlock(&vbq->lock); list_add_tail(&vb->purge, purge_list); return true; } static void free_purged_blocks(struct list_head *purge_list) { struct vmap_block *vb, *n_vb; list_for_each_entry_safe(vb, n_vb, purge_list, purge) { list_del(&vb->purge); free_vmap_block(vb); } } static void purge_fragmented_blocks(int cpu) { LIST_HEAD(purge); struct vmap_block *vb; struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu); rcu_read_lock(); list_for_each_entry_rcu(vb, &vbq->free, free_list) { unsigned long free = READ_ONCE(vb->free); unsigned long dirty = READ_ONCE(vb->dirty); if (free + dirty != VMAP_BBMAP_BITS || dirty == VMAP_BBMAP_BITS) continue; spin_lock(&vb->lock); 
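/*
 * Assuming the common VMAP_BBMAP_BITS = 1024 configuration (not from the
 * original source): purge_fragmented_block() above only considers a block
 * whose bits are all either free or dirty (no live allocations left) and
 * which is not already fully dirty. On the regular, non-forced path it
 * additionally requires vb->free < VMAP_PURGE_THRESHOLD = 256, i.e. the
 * block must be more than 75% dirty before it is torn down; the allcpus
 * purge path seen here passes force_purge and skips that threshold.
 */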
purge_fragmented_block(vb, &purge, true); spin_unlock(&vb->lock); } rcu_read_unlock(); free_purged_blocks(&purge); } static void purge_fragmented_blocks_allcpus(void) { int cpu; for_each_possible_cpu(cpu) purge_fragmented_blocks(cpu); } static void *vb_alloc(unsigned long size, gfp_t gfp_mask) { struct vmap_block_queue *vbq; struct vmap_block *vb; void *vaddr = NULL; unsigned int order; BUG_ON(offset_in_page(size)); BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC); if (WARN_ON(size == 0)) { /* * Allocating 0 bytes isn't what caller wants since * get_order(0) returns funny result. Just warn and terminate * early. */ return ERR_PTR(-EINVAL); } order = get_order(size); rcu_read_lock(); vbq = raw_cpu_ptr(&vmap_block_queue); list_for_each_entry_rcu(vb, &vbq->free, free_list) { unsigned long pages_off; if (READ_ONCE(vb->free) < (1UL << order)) continue; spin_lock(&vb->lock); if (vb->free < (1UL << order)) { spin_unlock(&vb->lock); continue; } pages_off = VMAP_BBMAP_BITS - vb->free; vaddr = vmap_block_vaddr(vb->va->va_start, pages_off); WRITE_ONCE(vb->free, vb->free - (1UL << order)); bitmap_set(vb->used_map, pages_off, (1UL << order)); if (vb->free == 0) { spin_lock(&vbq->lock); list_del_rcu(&vb->free_list); spin_unlock(&vbq->lock); } spin_unlock(&vb->lock); break; } rcu_read_unlock(); /* Allocate new block if nothing was found */ if (!vaddr) vaddr = new_vmap_block(order, gfp_mask); return vaddr; } static void vb_free(unsigned long addr, unsigned long size) { unsigned long offset; unsigned int order; struct vmap_block *vb; struct xarray *xa; BUG_ON(offset_in_page(size)); BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC); flush_cache_vunmap(addr, addr + size); order = get_order(size); offset = (addr & (VMAP_BLOCK_SIZE - 1)) >> PAGE_SHIFT; xa = addr_to_vb_xa(addr); vb = xa_load(xa, addr_to_vb_idx(addr)); spin_lock(&vb->lock); bitmap_clear(vb->used_map, offset, (1UL << order)); spin_unlock(&vb->lock); vunmap_range_noflush(addr, addr + size); if (debug_pagealloc_enabled_static()) flush_tlb_kernel_range(addr, addr + size); spin_lock(&vb->lock); /* Expand the not yet TLB flushed dirty range */ vb->dirty_min = min(vb->dirty_min, offset); vb->dirty_max = max(vb->dirty_max, offset + (1UL << order)); WRITE_ONCE(vb->dirty, vb->dirty + (1UL << order)); if (vb->dirty == VMAP_BBMAP_BITS) { BUG_ON(vb->free); spin_unlock(&vb->lock); free_vmap_block(vb); } else spin_unlock(&vb->lock); } static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush) { LIST_HEAD(purge_list); int cpu; if (unlikely(!vmap_initialized)) return; mutex_lock(&vmap_purge_lock); for_each_possible_cpu(cpu) { struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu); struct vmap_block *vb; unsigned long idx; rcu_read_lock(); xa_for_each(&vbq->vmap_blocks, idx, vb) { spin_lock(&vb->lock); /* * Try to purge a fragmented block first. If it's * not purgeable, check whether there is dirty * space to be flushed. 
*/ if (!purge_fragmented_block(vb, &purge_list, false) && vb->dirty_max && vb->dirty != VMAP_BBMAP_BITS) { unsigned long va_start = vb->va->va_start; unsigned long s, e; s = va_start + (vb->dirty_min << PAGE_SHIFT); e = va_start + (vb->dirty_max << PAGE_SHIFT); start = min(s, start); end = max(e, end); /* Prevent that this is flushed again */ vb->dirty_min = VMAP_BBMAP_BITS; vb->dirty_max = 0; flush = 1; } spin_unlock(&vb->lock); } rcu_read_unlock(); } free_purged_blocks(&purge_list); if (!__purge_vmap_area_lazy(start, end, false) && flush) flush_tlb_kernel_range(start, end); mutex_unlock(&vmap_purge_lock); } /** * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer * * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily * to amortize TLB flushing overheads. What this means is that any page you * have now, may, in a former life, have been mapped into kernel virtual * address by the vmap layer and so there might be some CPUs with TLB entries * still referencing that page (additional to the regular 1:1 kernel mapping). * * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can * be sure that none of the pages we have control over will have any aliases * from the vmap layer. */ void vm_unmap_aliases(void) { _vm_unmap_aliases(ULONG_MAX, 0, 0); } EXPORT_SYMBOL_GPL(vm_unmap_aliases); /** * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram * @mem: the pointer returned by vm_map_ram * @count: the count passed to that vm_map_ram call (cannot unmap partial) */ void vm_unmap_ram(const void *mem, unsigned int count) { unsigned long size = (unsigned long)count << PAGE_SHIFT; unsigned long addr = (unsigned long)kasan_reset_tag(mem); struct vmap_area *va; might_sleep(); BUG_ON(!addr); BUG_ON(addr < VMALLOC_START); BUG_ON(addr > VMALLOC_END); BUG_ON(!PAGE_ALIGNED(addr)); kasan_poison_vmalloc(mem, size); if (likely(count <= VMAP_MAX_ALLOC)) { debug_check_no_locks_freed(mem, size); vb_free(addr, size); return; } va = find_unlink_vmap_area(addr); if (WARN_ON_ONCE(!va)) return; debug_check_no_locks_freed((void *)va->va_start, va_size(va)); free_unmap_vmap_area(va); } EXPORT_SYMBOL(vm_unmap_ram); /** * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space) * @pages: an array of pointers to the pages to be mapped * @count: number of pages * @node: prefer to allocate data structures on this node * * If you use this function for less than VMAP_MAX_ALLOC pages, it could be * faster than vmap so it's good. But if you mix long-life and short-life * objects with vm_map_ram(), it could consume lots of address space through * fragmentation (especially on a 32bit machine). You could see failures in * the end. Please use this function for short-lived objects. * * Returns: a pointer to the address that has been mapped, or %NULL on failure */ void *vm_map_ram(struct page **pages, unsigned int count, int node) { unsigned long size = (unsigned long)count << PAGE_SHIFT; unsigned long addr; void *mem; if (likely(count <= VMAP_MAX_ALLOC)) { mem = vb_alloc(size, GFP_KERNEL); if (IS_ERR(mem)) return NULL; addr = (unsigned long)mem; } else { struct vmap_area *va; va = alloc_vmap_area(size, PAGE_SIZE, VMALLOC_START, VMALLOC_END, node, GFP_KERNEL, VMAP_RAM, NULL); if (IS_ERR(va)) return NULL; addr = va->va_start; mem = (void *)addr; } if (vmap_pages_range(addr, addr + size, PAGE_KERNEL, pages, PAGE_SHIFT) < 0) { vm_unmap_ram(mem, count); return NULL; } /* * Mark the pages as accessible, now that they are mapped. 
* With hardware tag-based KASAN, marking is skipped for * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc(). */ mem = kasan_unpoison_vmalloc(mem, size, KASAN_VMALLOC_PROT_NORMAL); return mem; } EXPORT_SYMBOL(vm_map_ram); static struct vm_struct *vmlist __initdata; static inline unsigned int vm_area_page_order(struct vm_struct *vm) { #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC return vm->page_order; #else return 0; #endif } unsigned int get_vm_area_page_order(struct vm_struct *vm) { return vm_area_page_order(vm); } static inline void set_vm_area_page_order(struct vm_struct *vm, unsigned int order) { #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC vm->page_order = order; #else BUG_ON(order != 0); #endif } /** * vm_area_add_early - add vmap area early during boot * @vm: vm_struct to add * * This function is used to add fixed kernel vm area to vmlist before * vmalloc_init() is called. @vm->addr, @vm->size, and @vm->flags * should contain proper values and the other fields should be zero. * * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING. */ void __init vm_area_add_early(struct vm_struct *vm) { struct vm_struct *tmp, **p; BUG_ON(vmap_initialized); for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) { if (tmp->addr >= vm->addr) { BUG_ON(tmp->addr < vm->addr + vm->size); break; } else BUG_ON(tmp->addr + tmp->size > vm->addr); } vm->next = *p; *p = vm; } /** * vm_area_register_early - register vmap area early during boot * @vm: vm_struct to register * @align: requested alignment * * This function is used to register kernel vm area before * vmalloc_init() is called. @vm->size and @vm->flags should contain * proper values on entry and other fields should be zero. On return, * vm->addr contains the allocated address. * * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING. */ void __init vm_area_register_early(struct vm_struct *vm, size_t align) { unsigned long addr = ALIGN(VMALLOC_START, align); struct vm_struct *cur, **p; BUG_ON(vmap_initialized); for (p = &vmlist; (cur = *p) != NULL; p = &cur->next) { if ((unsigned long)cur->addr - addr >= vm->size) break; addr = ALIGN((unsigned long)cur->addr + cur->size, align); } BUG_ON(addr > VMALLOC_END - vm->size); vm->addr = (void *)addr; vm->next = *p; *p = vm; kasan_populate_early_vm_area_shadow(vm->addr, vm->size); } static void clear_vm_uninitialized_flag(struct vm_struct *vm) { /* * Before removing VM_UNINITIALIZED, * we should make sure that vm has proper values. * Pair with smp_rmb() in vread_iter() and vmalloc_info_show(). */ smp_wmb(); vm->flags &= ~VM_UNINITIALIZED; } struct vm_struct *__get_vm_area_node(unsigned long size, unsigned long align, unsigned long shift, unsigned long flags, unsigned long start, unsigned long end, int node, gfp_t gfp_mask, const void *caller) { struct vmap_area *va; struct vm_struct *area; unsigned long requested_size = size; BUG_ON(in_interrupt()); size = ALIGN(size, 1ul << shift); if (unlikely(!size)) return NULL; if (flags & VM_IOREMAP) align = 1ul << clamp_t(int, get_count_order_long(size), PAGE_SHIFT, IOREMAP_MAX_ORDER); area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node); if (unlikely(!area)) return NULL; if (!(flags & VM_NO_GUARD)) size += PAGE_SIZE; area->flags = flags; area->caller = caller; area->requested_size = requested_size; va = alloc_vmap_area(size, align, start, end, node, gfp_mask, 0, area); if (IS_ERR(va)) { kfree(area); return NULL; } /* * Mark pages for non-VM_ALLOC mappings as accessible. Do it now as a * best-effort approach, as they can be mapped outside of vmalloc code. 
 * For VM_ALLOC mappings, the pages are marked as accessible after
 * getting mapped in __vmalloc_node_range().
 * With hardware tag-based KASAN, marking is skipped for
 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
 */
	if (!(flags & VM_ALLOC))
		area->addr = kasan_unpoison_vmalloc(area->addr, requested_size,
						    KASAN_VMALLOC_PROT_NORMAL);

	return area;
}

struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
				       unsigned long start, unsigned long end,
				       const void *caller)
{
	return __get_vm_area_node(size, 1, PAGE_SHIFT, flags, start, end,
				  NUMA_NO_NODE, GFP_KERNEL, caller);
}

/**
 * get_vm_area - reserve a contiguous kernel virtual area
 * @size:	size of the area
 * @flags:	%VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 * Search an area of @size in the kernel virtual mapping area,
 * and reserve it for our purposes. Returns the area descriptor
 * on success or %NULL on failure.
 *
 * Return: the area descriptor on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
	return __get_vm_area_node(size, 1, PAGE_SHIFT, flags,
				  VMALLOC_START, VMALLOC_END,
				  NUMA_NO_NODE, GFP_KERNEL,
				  __builtin_return_address(0));
}

struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
				const void *caller)
{
	return __get_vm_area_node(size, 1, PAGE_SHIFT, flags,
				  VMALLOC_START, VMALLOC_END,
				  NUMA_NO_NODE, GFP_KERNEL, caller);
}

/**
 * find_vm_area - find a contiguous kernel virtual area
 * @addr:	base address
 *
 * Search for the kernel VM area starting at @addr, and return it.
 * It is up to the caller to do all required locking to keep the returned
 * pointer valid.
 *
 * Return: the area descriptor on success or %NULL on failure.
 */
struct vm_struct *find_vm_area(const void *addr)
{
	struct vmap_area *va;

	va = find_vmap_area((unsigned long)addr);
	if (!va)
		return NULL;

	return va->vm;
}

/**
 * remove_vm_area - find and remove a contiguous kernel virtual area
 * @addr:	base address
 *
 * Search for the kernel VM area starting at @addr, and remove it.
 * This function returns the found VM area, but using it is NOT safe
 * on SMP machines, except for its size or flags.
 *
 * Return: the area descriptor on success or %NULL on failure.
 */
struct vm_struct *remove_vm_area(const void *addr)
{
	struct vmap_area *va;
	struct vm_struct *vm;

	might_sleep();

	if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n",
			addr))
		return NULL;

	va = find_unlink_vmap_area((unsigned long)addr);
	if (!va || !va->vm)
		return NULL;
	vm = va->vm;

	debug_check_no_locks_freed(vm->addr, get_vm_area_size(vm));
	debug_check_no_obj_freed(vm->addr, get_vm_area_size(vm));
	kasan_free_module_shadow(vm);
	kasan_poison_vmalloc(vm->addr, get_vm_area_size(vm));

	free_unmap_vmap_area(va);
	return vm;
}

static inline void set_area_direct_map(const struct vm_struct *area,
				       int (*set_direct_map)(struct page *page))
{
	int i;

	/* HUGE_VMALLOC passes small pages to set_direct_map */
	for (i = 0; i < area->nr_pages; i++)
		if (page_address(area->pages[i]))
			set_direct_map(area->pages[i]);
}

/*
 * Flush the vm mapping and reset the direct map.
 */
static void vm_reset_perms(struct vm_struct *area)
{
	unsigned long start = ULONG_MAX, end = 0;
	unsigned int page_order = vm_area_page_order(area);
	int flush_dmap = 0;
	int i;

	/*
	 * Find the start and end range of the direct mappings to make sure that
	 * the vm_unmap_aliases() flush includes the direct map.
*/ for (i = 0; i < area->nr_pages; i += 1U << page_order) { unsigned long addr = (unsigned long)page_address(area->pages[i]); if (addr) { unsigned long page_size; page_size = PAGE_SIZE << page_order; start = min(addr, start); end = max(addr + page_size, end); flush_dmap = 1; } } /* * Set direct map to something invalid so that it won't be cached if * there are any accesses after the TLB flush, then flush the TLB and * reset the direct map permissions to the default. */ set_area_direct_map(area, set_direct_map_invalid_noflush); _vm_unmap_aliases(start, end, flush_dmap); set_area_direct_map(area, set_direct_map_default_noflush); } static void delayed_vfree_work(struct work_struct *w) { struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq); struct llist_node *t, *llnode; llist_for_each_safe(llnode, t, llist_del_all(&p->list)) vfree(llnode); } /** * vfree_atomic - release memory allocated by vmalloc() * @addr: memory base address * * This one is just like vfree() but can be called in any atomic context * except NMIs. */ void vfree_atomic(const void *addr) { struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred); BUG_ON(in_nmi()); kmemleak_free(addr); /* * Use raw_cpu_ptr() because this can be called from preemptible * context. Preemption is absolutely fine here, because the llist_add() * implementation is lockless, so it works even if we are adding to * another cpu's list. schedule_work() should be fine with this too. */ if (addr && llist_add((struct llist_node *)addr, &p->list)) schedule_work(&p->wq); } /** * vfree - Release memory allocated by vmalloc() * @addr: Memory base address * * Free the virtually continuous memory area starting at @addr, as obtained * from one of the vmalloc() family of APIs. This will usually also free the * physical memory underlying the virtual allocation, but that memory is * reference counted, so it will not be freed until the last user goes away. * * If @addr is NULL, no operation is performed. * * Context: * May sleep if called *not* from interrupt context. * Must not be called in NMI context (strictly speaking, it could be * if we have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling * conventions for vfree() arch-dependent would be a really bad idea). */ void vfree(const void *addr) { struct vm_struct *vm; int i; if (unlikely(in_interrupt())) { vfree_atomic(addr); return; } BUG_ON(in_nmi()); kmemleak_free(addr); might_sleep(); if (!addr) return; vm = remove_vm_area(addr); if (unlikely(!vm)) { WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n", addr); return; } if (unlikely(vm->flags & VM_FLUSH_RESET_PERMS)) vm_reset_perms(vm); /* All pages of vm should be charged to same memcg, so use first one. */ if (vm->nr_pages && !(vm->flags & VM_MAP_PUT_PAGES)) mod_memcg_page_state(vm->pages[0], MEMCG_VMALLOC, -vm->nr_pages); for (i = 0; i < vm->nr_pages; i++) { struct page *page = vm->pages[i]; BUG_ON(!page); /* * High-order allocs for huge vmallocs are split, so * can be freed as an array of order-0 allocations */ __free_page(page); cond_resched(); } if (!(vm->flags & VM_MAP_PUT_PAGES)) atomic_long_sub(vm->nr_pages, &nr_vmalloc_pages); kvfree(vm->pages); kfree(vm); } EXPORT_SYMBOL(vfree); /** * vunmap - release virtual mapping obtained by vmap() * @addr: memory base address * * Free the virtually contiguous memory area starting at @addr, * which was created from the page array passed to vmap(). * * Must not be called in interrupt context. 
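 *
 * A minimal caller-side sketch of the vmap()/vunmap() pairing; the
 * "pages" array and "nr_pages" count are illustrative only:
 *
 *	void *va = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
 *
 *	if (va) {
 *		memset(va, 0, (size_t)nr_pages * PAGE_SIZE);
 *		vunmap(va);
 *	}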
*/ void vunmap(const void *addr) { struct vm_struct *vm; BUG_ON(in_interrupt()); might_sleep(); if (!addr) return; vm = remove_vm_area(addr); if (unlikely(!vm)) { WARN(1, KERN_ERR "Trying to vunmap() nonexistent vm area (%p)\n", addr); return; } kfree(vm); } EXPORT_SYMBOL(vunmap); /** * vmap - map an array of pages into virtually contiguous space * @pages: array of page pointers * @count: number of pages to map * @flags: vm_area->flags * @prot: page protection for the mapping * * Maps @count pages from @pages into contiguous kernel virtual space. * If @flags contains %VM_MAP_PUT_PAGES the ownership of the pages array itself * (which must be kmalloc or vmalloc memory) and one reference per pages in it * are transferred from the caller to vmap(), and will be freed / dropped when * vfree() is called on the return value. * * Return: the address of the area or %NULL on failure */ void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot) { struct vm_struct *area; unsigned long addr; unsigned long size; /* In bytes */ might_sleep(); if (WARN_ON_ONCE(flags & VM_FLUSH_RESET_PERMS)) return NULL; /* * Your top guard is someone else's bottom guard. Not having a top * guard compromises someone else's mappings too. */ if (WARN_ON_ONCE(flags & VM_NO_GUARD)) flags &= ~VM_NO_GUARD; if (count > totalram_pages()) return NULL; size = (unsigned long)count << PAGE_SHIFT; area = get_vm_area_caller(size, flags, __builtin_return_address(0)); if (!area) return NULL; addr = (unsigned long)area->addr; if (vmap_pages_range(addr, addr + size, pgprot_nx(prot), pages, PAGE_SHIFT) < 0) { vunmap(area->addr); return NULL; } if (flags & VM_MAP_PUT_PAGES) { area->pages = pages; area->nr_pages = count; } return area->addr; } EXPORT_SYMBOL(vmap); #ifdef CONFIG_VMAP_PFN struct vmap_pfn_data { unsigned long *pfns; pgprot_t prot; unsigned int idx; }; static int vmap_pfn_apply(pte_t *pte, unsigned long addr, void *private) { struct vmap_pfn_data *data = private; unsigned long pfn = data->pfns[data->idx]; pte_t ptent; if (WARN_ON_ONCE(pfn_valid(pfn))) return -EINVAL; ptent = pte_mkspecial(pfn_pte(pfn, data->prot)); set_pte_at(&init_mm, addr, pte, ptent); data->idx++; return 0; } /** * vmap_pfn - map an array of PFNs into virtually contiguous space * @pfns: array of PFNs * @count: number of pages to map * @prot: page protection for the mapping * * Maps @count PFNs from @pfns into contiguous kernel virtual space and returns * the start address of the mapping. */ void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot) { struct vmap_pfn_data data = { .pfns = pfns, .prot = pgprot_nx(prot) }; struct vm_struct *area; area = get_vm_area_caller(count * PAGE_SIZE, VM_IOREMAP, __builtin_return_address(0)); if (!area) return NULL; if (apply_to_page_range(&init_mm, (unsigned long)area->addr, count * PAGE_SIZE, vmap_pfn_apply, &data)) { free_vm_area(area); return NULL; } flush_cache_vmap((unsigned long)area->addr, (unsigned long)area->addr + count * PAGE_SIZE); return area->addr; } EXPORT_SYMBOL_GPL(vmap_pfn); #endif /* CONFIG_VMAP_PFN */ static inline unsigned int vm_area_alloc_pages(gfp_t gfp, int nid, unsigned int order, unsigned int nr_pages, struct page **pages) { unsigned int nr_allocated = 0; struct page *page; int i; /* * For order-0 pages we make use of bulk allocator, if * the page array is partly or not at all populated due * to fails, fallback to a single page allocator that is * more permissive. 
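	 *
	 * Either way, only the number of pages actually obtained is
	 * returned; the caller is responsible for detecting and handling
	 * a short allocation.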
*/ if (!order) { while (nr_allocated < nr_pages) { unsigned int nr, nr_pages_request; /* * A maximum allowed request is hard-coded and is 100 * pages per call. That is done in order to prevent a * long preemption off scenario in the bulk-allocator * so the range is [1:100]. */ nr_pages_request = min(100U, nr_pages - nr_allocated); /* memory allocation should consider mempolicy, we can't * wrongly use nearest node when nid == NUMA_NO_NODE, * otherwise memory may be allocated in only one node, * but mempolicy wants to alloc memory by interleaving. */ if (IS_ENABLED(CONFIG_NUMA) && nid == NUMA_NO_NODE) nr = alloc_pages_bulk_mempolicy_noprof(gfp, nr_pages_request, pages + nr_allocated); else nr = alloc_pages_bulk_node_noprof(gfp, nid, nr_pages_request, pages + nr_allocated); nr_allocated += nr; cond_resched(); /* * If zero or pages were obtained partly, * fallback to a single page allocator. */ if (nr != nr_pages_request) break; } } /* High-order pages or fallback path if "bulk" fails. */ while (nr_allocated < nr_pages) { if (!(gfp & __GFP_NOFAIL) && fatal_signal_pending(current)) break; if (nid == NUMA_NO_NODE) page = alloc_pages_noprof(gfp, order); else page = alloc_pages_node_noprof(nid, gfp, order); if (unlikely(!page)) break; /* * High-order allocations must be able to be treated as * independent small pages by callers (as they can with * small-page vmallocs). Some drivers do their own refcounting * on vmalloc_to_page() pages, some use page->mapping, * page->lru, etc. */ if (order) split_page(page, order); /* * Careful, we allocate and map page-order pages, but * tracking is done per PAGE_SIZE page so as to keep the * vm_struct APIs independent of the physical/mapped size. */ for (i = 0; i < (1U << order); i++) pages[nr_allocated + i] = page + i; cond_resched(); nr_allocated += 1U << order; } return nr_allocated; } static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot, unsigned int page_shift, int node) { const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO; bool nofail = gfp_mask & __GFP_NOFAIL; unsigned long addr = (unsigned long)area->addr; unsigned long size = get_vm_area_size(area); unsigned long array_size; unsigned int nr_small_pages = size >> PAGE_SHIFT; unsigned int page_order; unsigned int flags; int ret; array_size = (unsigned long)nr_small_pages * sizeof(struct page *); if (!(gfp_mask & (GFP_DMA | GFP_DMA32))) gfp_mask |= __GFP_HIGHMEM; /* Please note that the recursion is strictly bounded. */ if (array_size > PAGE_SIZE) { area->pages = __vmalloc_node_noprof(array_size, 1, nested_gfp, node, area->caller); } else { area->pages = kmalloc_node_noprof(array_size, nested_gfp, node); } if (!area->pages) { warn_alloc(gfp_mask, NULL, "vmalloc error: size %lu, failed to allocated page array size %lu", nr_small_pages * PAGE_SIZE, array_size); free_vm_area(area); return NULL; } set_vm_area_page_order(area, page_shift - PAGE_SHIFT); page_order = vm_area_page_order(area); /* * High-order nofail allocations are really expensive and * potentially dangerous (pre-mature OOM, disruptive reclaim * and compaction etc. * * Please note, the __vmalloc_node_range_noprof() falls-back * to order-0 pages if high-order attempt is unsuccessful. */ area->nr_pages = vm_area_alloc_pages((page_order ? gfp_mask & ~__GFP_NOFAIL : gfp_mask) | __GFP_NOWARN, node, page_order, nr_small_pages, area->pages); atomic_long_add(area->nr_pages, &nr_vmalloc_pages); /* All pages of vm should be charged to same memcg, so use first one. 
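	 * The matching uncharge in vfree() follows the same convention.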
*/ if (gfp_mask & __GFP_ACCOUNT && area->nr_pages) mod_memcg_page_state(area->pages[0], MEMCG_VMALLOC, area->nr_pages); /* * If not enough pages were obtained to accomplish an * allocation request, free them via vfree() if any. */ if (area->nr_pages != nr_small_pages) { /* * vm_area_alloc_pages() can fail due to insufficient memory but * also:- * * - a pending fatal signal * - insufficient huge page-order pages * * Since we always retry allocations at order-0 in the huge page * case a warning for either is spurious. */ if (!fatal_signal_pending(current) && page_order == 0) warn_alloc(gfp_mask, NULL, "vmalloc error: size %lu, failed to allocate pages", area->nr_pages * PAGE_SIZE); goto fail; } /* * page tables allocations ignore external gfp mask, enforce it * by the scope API */ if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO) flags = memalloc_nofs_save(); else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0) flags = memalloc_noio_save(); do { ret = vmap_pages_range(addr, addr + size, prot, area->pages, page_shift); if (nofail && (ret < 0)) schedule_timeout_uninterruptible(1); } while (nofail && (ret < 0)); if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO) memalloc_nofs_restore(flags); else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0) memalloc_noio_restore(flags); if (ret < 0) { warn_alloc(gfp_mask, NULL, "vmalloc error: size %lu, failed to map pages", area->nr_pages * PAGE_SIZE); goto fail; } return area->addr; fail: vfree(area->addr); return NULL; } /** * __vmalloc_node_range - allocate virtually contiguous memory * @size: allocation size * @align: desired alignment * @start: vm area range start * @end: vm area range end * @gfp_mask: flags for the page level allocator * @prot: protection mask for the allocated pages * @vm_flags: additional vm area flags (e.g. %VM_NO_GUARD) * @node: node to use for allocation or NUMA_NO_NODE * @caller: caller's return address * * Allocate enough pages to cover @size from the page level * allocator with @gfp_mask flags. Please note that the full set of gfp * flags are not supported. GFP_KERNEL, GFP_NOFS and GFP_NOIO are all * supported. * Zone modifiers are not supported. From the reclaim modifiers * __GFP_DIRECT_RECLAIM is required (aka GFP_NOWAIT is not supported) * and only __GFP_NOFAIL is supported (i.e. __GFP_NORETRY and * __GFP_RETRY_MAYFAIL are not supported). * * __GFP_NOWARN can be used to suppress failures messages. * * Map them into contiguous kernel virtual space, using a pagetable * protection of @prot. * * Return: the address of the area or %NULL on failure */ void *__vmalloc_node_range_noprof(unsigned long size, unsigned long align, unsigned long start, unsigned long end, gfp_t gfp_mask, pgprot_t prot, unsigned long vm_flags, int node, const void *caller) { struct vm_struct *area; void *ret; kasan_vmalloc_flags_t kasan_flags = KASAN_VMALLOC_NONE; unsigned long original_align = align; unsigned int shift = PAGE_SHIFT; if (WARN_ON_ONCE(!size)) return NULL; if ((size >> PAGE_SHIFT) > totalram_pages()) { warn_alloc(gfp_mask, NULL, "vmalloc error: size %lu, exceeds total pages", size); return NULL; } if (vmap_allow_huge && (vm_flags & VM_ALLOW_HUGE_VMAP)) { /* * Try huge pages. Only try for PAGE_KERNEL allocations, * others like modules don't yet expect huge pages in * their allocations due to apply_to_page_range not * supporting them. 
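		 * If the huge mapping attempt fails later on, the "fail:"
		 * path below retries the whole allocation with
		 * shift == PAGE_SHIFT.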
*/ if (arch_vmap_pmd_supported(prot) && size >= PMD_SIZE) shift = PMD_SHIFT; else shift = arch_vmap_pte_supported_shift(size); align = max(original_align, 1UL << shift); } again: area = __get_vm_area_node(size, align, shift, VM_ALLOC | VM_UNINITIALIZED | vm_flags, start, end, node, gfp_mask, caller); if (!area) { bool nofail = gfp_mask & __GFP_NOFAIL; warn_alloc(gfp_mask, NULL, "vmalloc error: size %lu, vm_struct allocation failed%s", size, (nofail) ? ". Retrying." : ""); if (nofail) { schedule_timeout_uninterruptible(1); goto again; } goto fail; } /* * Prepare arguments for __vmalloc_area_node() and * kasan_unpoison_vmalloc(). */ if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL)) { if (kasan_hw_tags_enabled()) { /* * Modify protection bits to allow tagging. * This must be done before mapping. */ prot = arch_vmap_pgprot_tagged(prot); /* * Skip page_alloc poisoning and zeroing for physical * pages backing VM_ALLOC mapping. Memory is instead * poisoned and zeroed by kasan_unpoison_vmalloc(). */ gfp_mask |= __GFP_SKIP_KASAN | __GFP_SKIP_ZERO; } /* Take note that the mapping is PAGE_KERNEL. */ kasan_flags |= KASAN_VMALLOC_PROT_NORMAL; } /* Allocate physical pages and map them into vmalloc space. */ ret = __vmalloc_area_node(area, gfp_mask, prot, shift, node); if (!ret) goto fail; /* * Mark the pages as accessible, now that they are mapped. * The condition for setting KASAN_VMALLOC_INIT should complement the * one in post_alloc_hook() with regards to the __GFP_SKIP_ZERO check * to make sure that memory is initialized under the same conditions. * Tag-based KASAN modes only assign tags to normal non-executable * allocations, see __kasan_unpoison_vmalloc(). */ kasan_flags |= KASAN_VMALLOC_VM_ALLOC; if (!want_init_on_free() && want_init_on_alloc(gfp_mask) && (gfp_mask & __GFP_SKIP_ZERO)) kasan_flags |= KASAN_VMALLOC_INIT; /* KASAN_VMALLOC_PROT_NORMAL already set if required. */ area->addr = kasan_unpoison_vmalloc(area->addr, size, kasan_flags); /* * In this function, newly allocated vm_struct has VM_UNINITIALIZED * flag. It means that vm_struct is not fully initialized. * Now, it is fully initialized, so remove this flag here. */ clear_vm_uninitialized_flag(area); if (!(vm_flags & VM_DEFER_KMEMLEAK)) kmemleak_vmalloc(area, PAGE_ALIGN(size), gfp_mask); return area->addr; fail: if (shift > PAGE_SHIFT) { shift = PAGE_SHIFT; align = original_align; goto again; } return NULL; } /** * __vmalloc_node - allocate virtually contiguous memory * @size: allocation size * @align: desired alignment * @gfp_mask: flags for the page level allocator * @node: node to use for allocation or NUMA_NO_NODE * @caller: caller's return address * * Allocate enough pages to cover @size from the page level allocator with * @gfp_mask flags. Map them into contiguous kernel virtual space. * * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL * and __GFP_NOFAIL are not supported * * Any use of gfp flags outside of GFP_KERNEL should be consulted * with mm people. * * Return: pointer to the allocated memory or %NULL on error */ void *__vmalloc_node_noprof(unsigned long size, unsigned long align, gfp_t gfp_mask, int node, const void *caller) { return __vmalloc_node_range_noprof(size, align, VMALLOC_START, VMALLOC_END, gfp_mask, PAGE_KERNEL, 0, node, caller); } /* * This is only for performance analysis of vmalloc and stress purpose. * It is required by vmalloc test module, therefore do not use it other * than that. 
*/ #ifdef CONFIG_TEST_VMALLOC_MODULE EXPORT_SYMBOL_GPL(__vmalloc_node_noprof); #endif void *__vmalloc_noprof(unsigned long size, gfp_t gfp_mask) { return __vmalloc_node_noprof(size, 1, gfp_mask, NUMA_NO_NODE, __builtin_return_address(0)); } EXPORT_SYMBOL(__vmalloc_noprof); /** * vmalloc - allocate virtually contiguous memory * @size: allocation size * * Allocate enough pages to cover @size from the page level * allocator and map them into contiguous kernel virtual space. * * For tight control over page level allocator and protection flags * use __vmalloc() instead. * * Return: pointer to the allocated memory or %NULL on error */ void *vmalloc_noprof(unsigned long size) { return __vmalloc_node_noprof(size, 1, GFP_KERNEL, NUMA_NO_NODE, __builtin_return_address(0)); } EXPORT_SYMBOL(vmalloc_noprof); /** * vmalloc_huge_node - allocate virtually contiguous memory, allow huge pages * @size: allocation size * @gfp_mask: flags for the page level allocator * @node: node to use for allocation or NUMA_NO_NODE * * Allocate enough pages to cover @size from the page level * allocator and map them into contiguous kernel virtual space. * If @size is greater than or equal to PMD_SIZE, allow using * huge pages for the memory * * Return: pointer to the allocated memory or %NULL on error */ void *vmalloc_huge_node_noprof(unsigned long size, gfp_t gfp_mask, int node) { return __vmalloc_node_range_noprof(size, 1, VMALLOC_START, VMALLOC_END, gfp_mask, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP, node, __builtin_return_address(0)); } EXPORT_SYMBOL_GPL(vmalloc_huge_node_noprof); /** * vzalloc - allocate virtually contiguous memory with zero fill * @size: allocation size * * Allocate enough pages to cover @size from the page level * allocator and map them into contiguous kernel virtual space. * The memory allocated is set to zero. * * For tight control over page level allocator and protection flags * use __vmalloc() instead. * * Return: pointer to the allocated memory or %NULL on error */ void *vzalloc_noprof(unsigned long size) { return __vmalloc_node_noprof(size, 1, GFP_KERNEL | __GFP_ZERO, NUMA_NO_NODE, __builtin_return_address(0)); } EXPORT_SYMBOL(vzalloc_noprof); /** * vmalloc_user - allocate zeroed virtually contiguous memory for userspace * @size: allocation size * * The resulting memory area is zeroed so it can be mapped to userspace * without leaking data. * * Return: pointer to the allocated memory or %NULL on error */ void *vmalloc_user_noprof(unsigned long size) { return __vmalloc_node_range_noprof(size, SHMLBA, VMALLOC_START, VMALLOC_END, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL, VM_USERMAP, NUMA_NO_NODE, __builtin_return_address(0)); } EXPORT_SYMBOL(vmalloc_user_noprof); /** * vmalloc_node - allocate memory on a specific node * @size: allocation size * @node: numa node * * Allocate enough pages to cover @size from the page level * allocator and map them into contiguous kernel virtual space. * * For tight control over page level allocator and protection flags * use __vmalloc() instead. * * Return: pointer to the allocated memory or %NULL on error */ void *vmalloc_node_noprof(unsigned long size, int node) { return __vmalloc_node_noprof(size, 1, GFP_KERNEL, node, __builtin_return_address(0)); } EXPORT_SYMBOL(vmalloc_node_noprof); /** * vzalloc_node - allocate memory on a specific node with zero fill * @size: allocation size * @node: numa node * * Allocate enough pages to cover @size from the page level * allocator and map them into contiguous kernel virtual space. * The memory allocated is set to zero. 
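 *
 * A minimal illustrative call; the struct, variable names and node id
 * are hypothetical and not defined in this file:
 *
 *	struct item *tbl = vzalloc_node(array_size(nr, sizeof(*tbl)), nid);
 *	if (!tbl)
 *		return -ENOMEM;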
* * Return: pointer to the allocated memory or %NULL on error */ void *vzalloc_node_noprof(unsigned long size, int node) { return __vmalloc_node_noprof(size, 1, GFP_KERNEL | __GFP_ZERO, node, __builtin_return_address(0)); } EXPORT_SYMBOL(vzalloc_node_noprof); /** * vrealloc - reallocate virtually contiguous memory; contents remain unchanged * @p: object to reallocate memory for * @size: the size to reallocate * @flags: the flags for the page level allocator * * If @p is %NULL, vrealloc() behaves exactly like vmalloc(). If @size is 0 and * @p is not a %NULL pointer, the object pointed to is freed. * * If __GFP_ZERO logic is requested, callers must ensure that, starting with the * initial memory allocation, every subsequent call to this API for the same * memory allocation is flagged with __GFP_ZERO. Otherwise, it is possible that * __GFP_ZERO is not fully honored by this API. * * In any case, the contents of the object pointed to are preserved up to the * lesser of the new and old sizes. * * This function must not be called concurrently with itself or vfree() for the * same memory allocation. * * Return: pointer to the allocated memory; %NULL if @size is zero or in case of * failure */ void *vrealloc_noprof(const void *p, size_t size, gfp_t flags) { struct vm_struct *vm = NULL; size_t alloced_size = 0; size_t old_size = 0; void *n; if (!size) { vfree(p); return NULL; } if (p) { vm = find_vm_area(p); if (unlikely(!vm)) { WARN(1, "Trying to vrealloc() nonexistent vm area (%p)\n", p); return NULL; } alloced_size = get_vm_area_size(vm); old_size = vm->requested_size; if (WARN(alloced_size < old_size, "vrealloc() has mismatched area vs requested sizes (%p)\n", p)) return NULL; } /* * TODO: Shrink the vm_area, i.e. unmap and free unused pages. What * would be a good heuristic for when to shrink the vm_area? */ if (size <= old_size) { /* Zero out "freed" memory, potentially for future realloc. */ if (want_init_on_free() || want_init_on_alloc(flags)) memset((void *)p + size, 0, old_size - size); vm->requested_size = size; kasan_poison_vmalloc(p + size, old_size - size); return (void *)p; } /* * We already have the bytes available in the allocation; use them. */ if (size <= alloced_size) { kasan_unpoison_vmalloc(p + old_size, size - old_size, KASAN_VMALLOC_PROT_NORMAL); /* * No need to zero memory here, as unused memory will have * already been zeroed at initial allocation time or during * realloc shrink time. */ vm->requested_size = size; return (void *)p; } /* TODO: Grow the vm_area, i.e. allocate and map additional pages. */ n = __vmalloc_noprof(size, flags); if (!n) return NULL; if (p) { memcpy(n, p, old_size); vfree(p); } return n; } #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32) #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL) #elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA) #define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL) #else /* * 64b systems should always have either DMA or DMA32 zones. For others * GFP_DMA32 should do the right thing and use the normal zone. */ #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL) #endif /** * vmalloc_32 - allocate virtually contiguous memory (32bit addressable) * @size: allocation size * * Allocate enough 32bit PA addressable pages to cover @size from the * page level allocator and map them into contiguous kernel virtual space. 
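 *
 * An illustrative call; the buffer size constant is hypothetical:
 *
 *	void *buf = vmalloc_32(MY_BUF_SIZE);
 *	if (!buf)
 *		return -ENOMEM;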
* * Return: pointer to the allocated memory or %NULL on error */ void *vmalloc_32_noprof(unsigned long size) { return __vmalloc_node_noprof(size, 1, GFP_VMALLOC32, NUMA_NO_NODE, __builtin_return_address(0)); } EXPORT_SYMBOL(vmalloc_32_noprof); /** * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory * @size: allocation size * * The resulting memory area is 32bit addressable and zeroed so it can be * mapped to userspace without leaking data. * * Return: pointer to the allocated memory or %NULL on error */ void *vmalloc_32_user_noprof(unsigned long size) { return __vmalloc_node_range_noprof(size, SHMLBA, VMALLOC_START, VMALLOC_END, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL, VM_USERMAP, NUMA_NO_NODE, __builtin_return_address(0)); } EXPORT_SYMBOL(vmalloc_32_user_noprof); /* * Atomically zero bytes in the iterator. * * Returns the number of zeroed bytes. */ static size_t zero_iter(struct iov_iter *iter, size_t count) { size_t remains = count; while (remains > 0) { size_t num, copied; num = min_t(size_t, remains, PAGE_SIZE); copied = copy_page_to_iter_nofault(ZERO_PAGE(0), 0, num, iter); remains -= copied; if (copied < num) break; } return count - remains; } /* * small helper routine, copy contents to iter from addr. * If the page is not present, fill zero. * * Returns the number of copied bytes. */ static size_t aligned_vread_iter(struct iov_iter *iter, const char *addr, size_t count) { size_t remains = count; struct page *page; while (remains > 0) { unsigned long offset, length; size_t copied = 0; offset = offset_in_page(addr); length = PAGE_SIZE - offset; if (length > remains) length = remains; page = vmalloc_to_page(addr); /* * To do safe access to this _mapped_ area, we need lock. But * adding lock here means that we need to add overhead of * vmalloc()/vfree() calls for this _debug_ interface, rarely * used. Instead of that, we'll use an local mapping via * copy_page_to_iter_nofault() and accept a small overhead in * this access function. */ if (page) copied = copy_page_to_iter_nofault(page, offset, length, iter); else copied = zero_iter(iter, length); addr += copied; remains -= copied; if (copied != length) break; } return count - remains; } /* * Read from a vm_map_ram region of memory. * * Returns the number of copied bytes. */ static size_t vmap_ram_vread_iter(struct iov_iter *iter, const char *addr, size_t count, unsigned long flags) { char *start; struct vmap_block *vb; struct xarray *xa; unsigned long offset; unsigned int rs, re; size_t remains, n; /* * If it's area created by vm_map_ram() interface directly, but * not further subdividing and delegating management to vmap_block, * handle it here. */ if (!(flags & VMAP_BLOCK)) return aligned_vread_iter(iter, addr, count); remains = count; /* * Area is split into regions and tracked with vmap_block, read out * each region and zero fill the hole between regions. 
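	 * Used regions are found by walking the vb->used_map bitmap under
	 * vb->lock; the gaps between them are zero-filled.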
 */
	xa = addr_to_vb_xa((unsigned long) addr);
	vb = xa_load(xa, addr_to_vb_idx((unsigned long)addr));
	if (!vb)
		goto finished_zero;

	spin_lock(&vb->lock);
	if (bitmap_empty(vb->used_map, VMAP_BBMAP_BITS)) {
		spin_unlock(&vb->lock);
		goto finished_zero;
	}

	for_each_set_bitrange(rs, re, vb->used_map, VMAP_BBMAP_BITS) {
		size_t copied;

		if (remains == 0)
			goto finished;

		start = vmap_block_vaddr(vb->va->va_start, rs);

		if (addr < start) {
			size_t to_zero = min_t(size_t, start - addr, remains);
			size_t zeroed = zero_iter(iter, to_zero);

			addr += zeroed;
			remains -= zeroed;

			if (remains == 0 || zeroed != to_zero)
				goto finished;
		}

		/* Reading may start from the middle of a used region. */
		offset = offset_in_page(addr);
		n = ((re - rs + 1) << PAGE_SHIFT) - offset;
		if (n > remains)
			n = remains;

		copied = aligned_vread_iter(iter, start + offset, n);

		addr += copied;
		remains -= copied;

		if (copied != n)
			goto finished;
	}

	spin_unlock(&vb->lock);

finished_zero:
	/* Zero-fill the remaining dirty or free regions. */
	return count - remains + zero_iter(iter, remains);
finished:
	/* We couldn't copy/zero everything. */
	spin_unlock(&vb->lock);
	return count - remains;
}

/**
 * vread_iter() - read vmalloc area in a safe way to an iterator.
 * @iter:         the iterator to which data should be written.
 * @addr:         vm address.
 * @count:        number of bytes to be read.
 *
 * This function checks that @addr is a valid vmalloc'ed area and copies
 * data from that area to @iter. If the given memory range of
 * [addr...addr+count) includes some valid address, data is copied to the
 * proper part of @iter. If there are memory holes, they'll be zero-filled.
 * IOREMAP areas are treated as memory holes and no copy is done.
 *
 * If [addr...addr+count) doesn't include any intersection with a live
 * vm_struct area, 0 is returned.
 *
 * Note: In usual operations, vread_iter() is never necessary because the
 * caller should know that the vmalloc() area is valid and can use memcpy().
 * This is for routines which have to access the vmalloc area without any
 * prior information, such as /proc/kcore.
 *
 * Return: number of bytes for which @addr and @iter should be advanced
 * (same number as @count) or %0 if [addr...addr+count) doesn't
 * include any intersection with a valid vmalloc area
 */
long vread_iter(struct iov_iter *iter, const char *addr, size_t count)
{
	struct vmap_node *vn;
	struct vmap_area *va;
	struct vm_struct *vm;
	char *vaddr;
	size_t n, size, flags, remains;
	unsigned long next;

	addr = kasan_reset_tag(addr);

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	remains = count;

	vn = find_vmap_area_exceed_addr_lock((unsigned long) addr, &va);
	if (!vn)
		goto finished_zero;

	/* No intersection with a live vmap_area. */
	if ((unsigned long)addr + remains <= va->va_start)
		goto finished_zero;

	do {
		size_t copied;

		if (remains == 0)
			goto finished;

		vm = va->vm;
		flags = va->flags & VMAP_FLAGS_MASK;
		/*
		 * VMAP_BLOCK indicates a sub-type of vm_map_ram area and
		 * needs to be set together with VMAP_RAM.
		 */
		WARN_ON(flags == VMAP_BLOCK);

		if (!vm && !flags)
			goto next_va;

		if (vm && (vm->flags & VM_UNINITIALIZED))
			goto next_va;

		/* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
		smp_rmb();

		vaddr = (char *) va->va_start;
		size = vm ?
get_vm_area_size(vm) : va_size(va); if (addr >= vaddr + size) goto next_va; if (addr < vaddr) { size_t to_zero = min_t(size_t, vaddr - addr, remains); size_t zeroed = zero_iter(iter, to_zero); addr += zeroed; remains -= zeroed; if (remains == 0 || zeroed != to_zero) goto finished; } n = vaddr + size - addr; if (n > remains) n = remains; if (flags & VMAP_RAM) copied = vmap_ram_vread_iter(iter, addr, n, flags); else if (!(vm && (vm->flags & (VM_IOREMAP | VM_SPARSE)))) copied = aligned_vread_iter(iter, addr, n); else /* IOREMAP | SPARSE area is treated as memory hole */ copied = zero_iter(iter, n); addr += copied; remains -= copied; if (copied != n) goto finished; next_va: next = va->va_end; spin_unlock(&vn->busy.lock); } while ((vn = find_vmap_area_exceed_addr_lock(next, &va))); finished_zero: if (vn) spin_unlock(&vn->busy.lock); /* zero-fill memory holes */ return count - remains + zero_iter(iter, remains); finished: /* Nothing remains, or We couldn't copy/zero everything. */ if (vn) spin_unlock(&vn->busy.lock); return count - remains; } /** * remap_vmalloc_range_partial - map vmalloc pages to userspace * @vma: vma to cover * @uaddr: target user address to start at * @kaddr: virtual address of vmalloc kernel memory * @pgoff: offset from @kaddr to start at * @size: size of map area * * Returns: 0 for success, -Exxx on failure * * This function checks that @kaddr is a valid vmalloc'ed area, * and that it is big enough to cover the range starting at * @uaddr in @vma. Will return failure if that criteria isn't * met. * * Similar to remap_pfn_range() (see mm/memory.c) */ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr, void *kaddr, unsigned long pgoff, unsigned long size) { struct vm_struct *area; unsigned long off; unsigned long end_index; if (check_shl_overflow(pgoff, PAGE_SHIFT, &off)) return -EINVAL; size = PAGE_ALIGN(size); if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr)) return -EINVAL; area = find_vm_area(kaddr); if (!area) return -EINVAL; if (!(area->flags & (VM_USERMAP | VM_DMA_COHERENT))) return -EINVAL; if (check_add_overflow(size, off, &end_index) || end_index > get_vm_area_size(area)) return -EINVAL; kaddr += off; do { struct page *page = vmalloc_to_page(kaddr); int ret; ret = vm_insert_page(vma, uaddr, page); if (ret) return ret; uaddr += PAGE_SIZE; kaddr += PAGE_SIZE; size -= PAGE_SIZE; } while (size > 0); vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP); return 0; } /** * remap_vmalloc_range - map vmalloc pages to userspace * @vma: vma to cover (map full range of vma) * @addr: vmalloc memory * @pgoff: number of pages into addr before first page to map * * Returns: 0 for success, -Exxx on failure * * This function checks that addr is a valid vmalloc'ed area, and * that it is big enough to cover the vma. Will return failure if * that criteria isn't met. * * Similar to remap_pfn_range() (see mm/memory.c) */ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, unsigned long pgoff) { return remap_vmalloc_range_partial(vma, vma->vm_start, addr, pgoff, vma->vm_end - vma->vm_start); } EXPORT_SYMBOL(remap_vmalloc_range); void free_vm_area(struct vm_struct *area) { struct vm_struct *ret; ret = remove_vm_area(area->addr); BUG_ON(ret != area); kfree(area); } EXPORT_SYMBOL_GPL(free_vm_area); #ifdef CONFIG_SMP static struct vmap_area *node_to_va(struct rb_node *n) { return rb_entry_safe(n, struct vmap_area, rb_node); } /** * pvm_find_va_enclose_addr - find the vmap_area @addr belongs to * @addr: target address * * Returns: vmap_area if it is found. 
If there is no such area * the first highest(reverse order) vmap_area is returned * i.e. va->va_start < addr && va->va_end < addr or NULL * if there are no any areas before @addr. */ static struct vmap_area * pvm_find_va_enclose_addr(unsigned long addr) { struct vmap_area *va, *tmp; struct rb_node *n; n = free_vmap_area_root.rb_node; va = NULL; while (n) { tmp = rb_entry(n, struct vmap_area, rb_node); if (tmp->va_start <= addr) { va = tmp; if (tmp->va_end >= addr) break; n = n->rb_right; } else { n = n->rb_left; } } return va; } /** * pvm_determine_end_from_reverse - find the highest aligned address * of free block below VMALLOC_END * @va: * in - the VA we start the search(reverse order); * out - the VA with the highest aligned end address. * @align: alignment for required highest address * * Returns: determined end address within vmap_area */ static unsigned long pvm_determine_end_from_reverse(struct vmap_area **va, unsigned long align) { unsigned long vmalloc_end = VMALLOC_END & ~(align - 1); unsigned long addr; if (likely(*va)) { list_for_each_entry_from_reverse((*va), &free_vmap_area_list, list) { addr = min((*va)->va_end & ~(align - 1), vmalloc_end); if ((*va)->va_start < addr) return addr; } } return 0; } /** * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator * @offsets: array containing offset of each area * @sizes: array containing size of each area * @nr_vms: the number of areas to allocate * @align: alignment, all entries in @offsets and @sizes must be aligned to this * * Returns: kmalloc'd vm_struct pointer array pointing to allocated * vm_structs on success, %NULL on failure * * Percpu allocator wants to use congruent vm areas so that it can * maintain the offsets among percpu areas. This function allocates * congruent vmalloc areas for it with GFP_KERNEL. These areas tend to * be scattered pretty far, distance between two areas easily going up * to gigabytes. To avoid interacting with regular vmallocs, these * areas are allocated from top. * * Despite its complicated look, this allocator is rather simple. It * does everything top-down and scans free blocks from the end looking * for matching base. While scanning, if any of the areas do not fit the * base address is pulled down to fit the area. Scanning is repeated till * all the areas fit and then all necessary data structures are inserted * and the result is returned. */ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets, const size_t *sizes, int nr_vms, size_t align) { const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align); const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1); struct vmap_area **vas, *va; struct vm_struct **vms; int area, area2, last_area, term_area; unsigned long base, start, size, end, last_end, orig_start, orig_end; bool purged = false; /* verify parameters and allocate data structures */ BUG_ON(offset_in_page(align) || !is_power_of_2(align)); for (last_area = 0, area = 0; area < nr_vms; area++) { start = offsets[area]; end = start + sizes[area]; /* is everything aligned properly? 
*/ BUG_ON(!IS_ALIGNED(offsets[area], align)); BUG_ON(!IS_ALIGNED(sizes[area], align)); /* detect the area with the highest address */ if (start > offsets[last_area]) last_area = area; for (area2 = area + 1; area2 < nr_vms; area2++) { unsigned long start2 = offsets[area2]; unsigned long end2 = start2 + sizes[area2]; BUG_ON(start2 < end && start < end2); } } last_end = offsets[last_area] + sizes[last_area]; if (vmalloc_end - vmalloc_start < last_end) { WARN_ON(true); return NULL; } vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL); vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL); if (!vas || !vms) goto err_free2; for (area = 0; area < nr_vms; area++) { vas[area] = kmem_cache_zalloc(vmap_area_cachep, GFP_KERNEL); vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL); if (!vas[area] || !vms[area]) goto err_free; } retry: spin_lock(&free_vmap_area_lock); /* start scanning - we scan from the top, begin with the last area */ area = term_area = last_area; start = offsets[area]; end = start + sizes[area]; va = pvm_find_va_enclose_addr(vmalloc_end); base = pvm_determine_end_from_reverse(&va, align) - end; while (true) { /* * base might have underflowed, add last_end before * comparing. */ if (base + last_end < vmalloc_start + last_end) goto overflow; /* * Fitting base has not been found. */ if (va == NULL) goto overflow; /* * If required width exceeds current VA block, move * base downwards and then recheck. */ if (base + end > va->va_end) { base = pvm_determine_end_from_reverse(&va, align) - end; term_area = area; continue; } /* * If this VA does not fit, move base downwards and recheck. */ if (base + start < va->va_start) { va = node_to_va(rb_prev(&va->rb_node)); base = pvm_determine_end_from_reverse(&va, align) - end; term_area = area; continue; } /* * This area fits, move on to the previous one. If * the previous one is the terminal one, we're done. */ area = (area + nr_vms - 1) % nr_vms; if (area == term_area) break; start = offsets[area]; end = start + sizes[area]; va = pvm_find_va_enclose_addr(base + end); } /* we've found a fitting base, insert all va's */ for (area = 0; area < nr_vms; area++) { int ret; start = base + offsets[area]; size = sizes[area]; va = pvm_find_va_enclose_addr(start); if (WARN_ON_ONCE(va == NULL)) /* It is a BUG(), but trigger recovery instead. */ goto recovery; ret = va_clip(&free_vmap_area_root, &free_vmap_area_list, va, start, size); if (WARN_ON_ONCE(unlikely(ret))) /* It is a BUG(), but trigger recovery instead. */ goto recovery; /* Allocated area. */ va = vas[area]; va->va_start = start; va->va_end = start + size; } spin_unlock(&free_vmap_area_lock); /* populate the kasan shadow space */ for (area = 0; area < nr_vms; area++) { if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area])) goto err_free_shadow; } /* insert all vm's */ for (area = 0; area < nr_vms; area++) { struct vmap_node *vn = addr_to_node(vas[area]->va_start); spin_lock(&vn->busy.lock); insert_vmap_area(vas[area], &vn->busy.root, &vn->busy.head); setup_vmalloc_vm(vms[area], vas[area], VM_ALLOC, pcpu_get_vm_areas); spin_unlock(&vn->busy.lock); } /* * Mark allocated areas as accessible. Do it now as a best-effort * approach, as they can be mapped outside of vmalloc code. * With hardware tag-based KASAN, marking is skipped for * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc(). 
*/ for (area = 0; area < nr_vms; area++) vms[area]->addr = kasan_unpoison_vmalloc(vms[area]->addr, vms[area]->size, KASAN_VMALLOC_PROT_NORMAL); kfree(vas); return vms; recovery: /* * Remove previously allocated areas. There is no * need in removing these areas from the busy tree, * because they are inserted only on the final step * and when pcpu_get_vm_areas() is success. */ while (area--) { orig_start = vas[area]->va_start; orig_end = vas[area]->va_end; va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root, &free_vmap_area_list); if (va) kasan_release_vmalloc(orig_start, orig_end, va->va_start, va->va_end, KASAN_VMALLOC_PAGE_RANGE | KASAN_VMALLOC_TLB_FLUSH); vas[area] = NULL; } overflow: spin_unlock(&free_vmap_area_lock); if (!purged) { reclaim_and_purge_vmap_areas(); purged = true; /* Before "retry", check if we recover. */ for (area = 0; area < nr_vms; area++) { if (vas[area]) continue; vas[area] = kmem_cache_zalloc( vmap_area_cachep, GFP_KERNEL); if (!vas[area]) goto err_free; } goto retry; } err_free: for (area = 0; area < nr_vms; area++) { if (vas[area]) kmem_cache_free(vmap_area_cachep, vas[area]); kfree(vms[area]); } err_free2: kfree(vas); kfree(vms); return NULL; err_free_shadow: spin_lock(&free_vmap_area_lock); /* * We release all the vmalloc shadows, even the ones for regions that * hadn't been successfully added. This relies on kasan_release_vmalloc * being able to tolerate this case. */ for (area = 0; area < nr_vms; area++) { orig_start = vas[area]->va_start; orig_end = vas[area]->va_end; va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root, &free_vmap_area_list); if (va) kasan_release_vmalloc(orig_start, orig_end, va->va_start, va->va_end, KASAN_VMALLOC_PAGE_RANGE | KASAN_VMALLOC_TLB_FLUSH); vas[area] = NULL; kfree(vms[area]); } spin_unlock(&free_vmap_area_lock); kfree(vas); kfree(vms); return NULL; } /** * pcpu_free_vm_areas - free vmalloc areas for percpu allocator * @vms: vm_struct pointer array returned by pcpu_get_vm_areas() * @nr_vms: the number of allocated areas * * Free vm_structs and the array allocated by pcpu_get_vm_areas(). */ void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms) { int i; for (i = 0; i < nr_vms; i++) free_vm_area(vms[i]); kfree(vms); } #endif /* CONFIG_SMP */ #ifdef CONFIG_PRINTK bool vmalloc_dump_obj(void *object) { const void *caller; struct vm_struct *vm; struct vmap_area *va; struct vmap_node *vn; unsigned long addr; unsigned int nr_pages; addr = PAGE_ALIGN((unsigned long) object); vn = addr_to_node(addr); if (!spin_trylock(&vn->busy.lock)) return false; va = __find_vmap_area(addr, &vn->busy.root); if (!va || !va->vm) { spin_unlock(&vn->busy.lock); return false; } vm = va->vm; addr = (unsigned long) vm->addr; caller = vm->caller; nr_pages = vm->nr_pages; spin_unlock(&vn->busy.lock); pr_cont(" %u-page vmalloc region starting at %#lx allocated at %pS\n", nr_pages, addr, caller); return true; } #endif #ifdef CONFIG_PROC_FS /* * Print number of pages allocated on each memory node. * * This function can only be called if CONFIG_NUMA is enabled * and VM_UNINITIALIZED bit in v->flags is disabled. 
*/ static void show_numa_info(struct seq_file *m, struct vm_struct *v, unsigned int *counters) { unsigned int nr; unsigned int step = 1U << vm_area_page_order(v); if (!counters) return; memset(counters, 0, nr_node_ids * sizeof(unsigned int)); for (nr = 0; nr < v->nr_pages; nr += step) counters[page_to_nid(v->pages[nr])] += step; for_each_node_state(nr, N_HIGH_MEMORY) if (counters[nr]) seq_printf(m, " N%u=%u", nr, counters[nr]); } static void show_purge_info(struct seq_file *m) { struct vmap_node *vn; struct vmap_area *va; for_each_vmap_node(vn) { spin_lock(&vn->lazy.lock); list_for_each_entry(va, &vn->lazy.head, list) { seq_printf(m, "0x%pK-0x%pK %7ld unpurged vm_area\n", (void *)va->va_start, (void *)va->va_end, va_size(va)); } spin_unlock(&vn->lazy.lock); } } static int vmalloc_info_show(struct seq_file *m, void *p) { struct vmap_node *vn; struct vmap_area *va; struct vm_struct *v; unsigned int *counters; if (IS_ENABLED(CONFIG_NUMA)) counters = kmalloc(nr_node_ids * sizeof(unsigned int), GFP_KERNEL); for_each_vmap_node(vn) { spin_lock(&vn->busy.lock); list_for_each_entry(va, &vn->busy.head, list) { if (!va->vm) { if (va->flags & VMAP_RAM) seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n", (void *)va->va_start, (void *)va->va_end, va_size(va)); continue; } v = va->vm; if (v->flags & VM_UNINITIALIZED) continue; /* Pair with smp_wmb() in clear_vm_uninitialized_flag() */ smp_rmb(); seq_printf(m, "0x%pK-0x%pK %7ld", v->addr, v->addr + v->size, v->size); if (v->caller) seq_printf(m, " %pS", v->caller); if (v->nr_pages) seq_printf(m, " pages=%d", v->nr_pages); if (v->phys_addr) seq_printf(m, " phys=%pa", &v->phys_addr); if (v->flags & VM_IOREMAP) seq_puts(m, " ioremap"); if (v->flags & VM_SPARSE) seq_puts(m, " sparse"); if (v->flags & VM_ALLOC) seq_puts(m, " vmalloc"); if (v->flags & VM_MAP) seq_puts(m, " vmap"); if (v->flags & VM_USERMAP) seq_puts(m, " user"); if (v->flags & VM_DMA_COHERENT) seq_puts(m, " dma-coherent"); if (is_vmalloc_addr(v->pages)) seq_puts(m, " vpages"); if (IS_ENABLED(CONFIG_NUMA)) show_numa_info(m, v, counters); seq_putc(m, '\n'); } spin_unlock(&vn->busy.lock); } /* * As a final step, dump "unpurged" areas. 
*/ show_purge_info(m); if (IS_ENABLED(CONFIG_NUMA)) kfree(counters); return 0; } static int __init proc_vmalloc_init(void) { proc_create_single("vmallocinfo", 0400, NULL, vmalloc_info_show); return 0; } module_init(proc_vmalloc_init); #endif static void __init vmap_init_free_space(void) { unsigned long vmap_start = 1; const unsigned long vmap_end = ULONG_MAX; struct vmap_area *free; struct vm_struct *busy; /* * B F B B B F * -|-----|.....|-----|-----|-----|.....|- * | The KVA space | * |<--------------------------------->| */ for (busy = vmlist; busy; busy = busy->next) { if ((unsigned long) busy->addr - vmap_start > 0) { free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT); if (!WARN_ON_ONCE(!free)) { free->va_start = vmap_start; free->va_end = (unsigned long) busy->addr; insert_vmap_area_augment(free, NULL, &free_vmap_area_root, &free_vmap_area_list); } } vmap_start = (unsigned long) busy->addr + busy->size; } if (vmap_end - vmap_start > 0) { free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT); if (!WARN_ON_ONCE(!free)) { free->va_start = vmap_start; free->va_end = vmap_end; insert_vmap_area_augment(free, NULL, &free_vmap_area_root, &free_vmap_area_list); } } } static void vmap_init_nodes(void) { struct vmap_node *vn; int i; #if BITS_PER_LONG == 64 /* * A high threshold of max nodes is fixed and bound to 128, * thus a scale factor is 1 for systems where number of cores * are less or equal to specified threshold. * * As for NUMA-aware notes. For bigger systems, for example * NUMA with multi-sockets, where we can end-up with thousands * of cores in total, a "sub-numa-clustering" should be added. * * In this case a NUMA domain is considered as a single entity * with dedicated sub-nodes in it which describe one group or * set of cores. Therefore a per-domain purging is supposed to * be added as well as a per-domain balancing. */ int n = clamp_t(unsigned int, num_possible_cpus(), 1, 128); if (n > 1) { vn = kmalloc_array(n, sizeof(*vn), GFP_NOWAIT | __GFP_NOWARN); if (vn) { /* Node partition is 16 pages. */ vmap_zone_size = (1 << 4) * PAGE_SIZE; nr_vmap_nodes = n; vmap_nodes = vn; } else { pr_err("Failed to allocate an array. Disable a node layer\n"); } } #endif for_each_vmap_node(vn) { vn->busy.root = RB_ROOT; INIT_LIST_HEAD(&vn->busy.head); spin_lock_init(&vn->busy.lock); vn->lazy.root = RB_ROOT; INIT_LIST_HEAD(&vn->lazy.head); spin_lock_init(&vn->lazy.lock); for (i = 0; i < MAX_VA_SIZE_PAGES; i++) { INIT_LIST_HEAD(&vn->pool[i].head); WRITE_ONCE(vn->pool[i].len, 0); } spin_lock_init(&vn->pool_lock); } } static unsigned long vmap_node_shrink_count(struct shrinker *shrink, struct shrink_control *sc) { unsigned long count = 0; struct vmap_node *vn; int i; for_each_vmap_node(vn) { for (i = 0; i < MAX_VA_SIZE_PAGES; i++) count += READ_ONCE(vn->pool[i].len); } return count ? count : SHRINK_EMPTY; } static unsigned long vmap_node_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) { struct vmap_node *vn; for_each_vmap_node(vn) decay_va_pool_node(vn, true); return SHRINK_STOP; } void __init vmalloc_init(void) { struct shrinker *vmap_node_shrinker; struct vmap_area *va; struct vmap_node *vn; struct vm_struct *tmp; int i; /* * Create the cache for vmap_area objects. 
*/ vmap_area_cachep = KMEM_CACHE(vmap_area, SLAB_PANIC); for_each_possible_cpu(i) { struct vmap_block_queue *vbq; struct vfree_deferred *p; vbq = &per_cpu(vmap_block_queue, i); spin_lock_init(&vbq->lock); INIT_LIST_HEAD(&vbq->free); p = &per_cpu(vfree_deferred, i); init_llist_head(&p->list); INIT_WORK(&p->wq, delayed_vfree_work); xa_init(&vbq->vmap_blocks); } /* * Setup nodes before importing vmlist. */ vmap_init_nodes(); /* Import existing vmlist entries. */ for (tmp = vmlist; tmp; tmp = tmp->next) { va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT); if (WARN_ON_ONCE(!va)) continue; va->va_start = (unsigned long)tmp->addr; va->va_end = va->va_start + tmp->size; va->vm = tmp; vn = addr_to_node(va->va_start); insert_vmap_area(va, &vn->busy.root, &vn->busy.head); } /* * Now we can initialize a free vmap space. */ vmap_init_free_space(); vmap_initialized = true; vmap_node_shrinker = shrinker_alloc(0, "vmap-node"); if (!vmap_node_shrinker) { pr_err("Failed to allocate vmap-node shrinker!\n"); return; } vmap_node_shrinker->count_objects = vmap_node_shrink_count; vmap_node_shrinker->scan_objects = vmap_node_shrink_scan; shrinker_register(vmap_node_shrinker); }
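/*
 * Illustrative userspace sketch, not part of the kernel listing above: it
 * reads /proc/vmallocinfo (created by proc_vmalloc_init() and filled by
 * vmalloc_info_show()) and tallies regions by the flag keywords that function
 * prints.  Needs root, since the proc entry is registered with mode 0400.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/proc/vmallocinfo", "r");
	char line[512];
	unsigned long total = 0, nvmalloc = 0, nioremap = 0, nvmap = 0, nunpurged = 0;

	if (!f) {
		perror("fopen /proc/vmallocinfo");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		total++;
		if (strstr(line, " vmalloc"))	/* VM_ALLOC regions */
			nvmalloc++;
		if (strstr(line, " ioremap"))	/* VM_IOREMAP regions */
			nioremap++;
		if (strstr(line, " vmap"))	/* VM_MAP regions */
			nvmap++;
		if (strstr(line, "unpurged"))	/* lazily freed areas from show_purge_info() */
			nunpurged++;
	}
	fclose(f);
	printf("%lu regions: %lu vmalloc, %lu ioremap, %lu vmap, %lu unpurged\n",
	       total, nvmalloc, nioremap, nvmap, nunpurged);
	return 0;
}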
// SPDX-License-Identifier: GPL-2.0-or-later /* * Force feedback support for SmartJoy PLUS PS2->USB adapter * * Copyright (c) 2009 Jussi Kivilinna <jussi.kivilinna@mbnet.fi> * * Based on hid-pl.c and hid-gaff.c * Copyright (c) 2007, 2009 Anssi Hannula <anssi.hannula@gmail.com> * Copyright (c) 2008 Lukasz Lubojanski <lukasz@lubojanski.info> */ /* */ /* #define DEBUG */ #include <linux/input.h> #include <linux/slab.h> #include <linux/hid.h> #include <linux/module.h> #include "hid-ids.h" #ifdef CONFIG_SMARTJOYPLUS_FF struct sjoyff_device { struct hid_report *report; }; static int hid_sjoyff_play(struct input_dev *dev, void *data, struct ff_effect *effect) { struct hid_device *hid = input_get_drvdata(dev); struct sjoyff_device *sjoyff = data; u32 left, right; left = effect->u.rumble.strong_magnitude; right = effect->u.rumble.weak_magnitude; dev_dbg(&dev->dev, "called with 0x%08x 0x%08x\n", left, right); left = left * 0xff / 0xffff; right = (right != 0); /* on/off only */ sjoyff->report->field[0]->value[1] = right; sjoyff->report->field[0]->value[2] = left; dev_dbg(&dev->dev, "running with 0x%02x 0x%02x\n", left, right); hid_hw_request(hid, sjoyff->report, HID_REQ_SET_REPORT); return 0; } static int sjoyff_init(struct hid_device *hid) { struct sjoyff_device *sjoyff; struct hid_report *report; struct hid_input *hidinput; struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; struct list_head *report_ptr = report_list; struct input_dev *dev; int error; if (list_empty(report_list)) { hid_err(hid, "no output reports found\n"); return -ENODEV; } list_for_each_entry(hidinput, &hid->inputs, list) { report_ptr = report_ptr->next; if (report_ptr == report_list) { hid_err(hid, "required output report is missing\n"); return -ENODEV; } report = list_entry(report_ptr, struct hid_report, list); if (report->maxfield < 1) { hid_err(hid, "no fields in the report\n"); return -ENODEV; } if (report->field[0]->report_count < 3) { hid_err(hid, "not enough values in the field\n"); return -ENODEV; } sjoyff = kzalloc(sizeof(struct sjoyff_device), GFP_KERNEL); if (!sjoyff) return -ENOMEM; dev = hidinput->input; set_bit(FF_RUMBLE, dev->ffbit); error = input_ff_create_memless(dev, sjoyff, hid_sjoyff_play); if (error) { kfree(sjoyff); return error; } sjoyff->report = report; sjoyff->report->field[0]->value[0] = 0x01; sjoyff->report->field[0]->value[1] = 0x00; sjoyff->report->field[0]->value[2] = 0x00; hid_hw_request(hid, sjoyff->report, HID_REQ_SET_REPORT); } hid_info(hid, "Force feedback for SmartJoy PLUS PS2/USB adapter\n"); return 0; } #else static inline int sjoyff_init(struct hid_device *hid) { return 0; } #endif static int sjoy_probe(struct hid_device *hdev, const struct hid_device_id *id) { int ret; hdev->quirks |= id->driver_data; ret = hid_parse(hdev); if (ret) { hid_err(hdev, "parse failed\n"); goto err; } ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_FF); if (ret) {
hid_err(hdev, "hw start failed\n"); goto err; } sjoyff_init(hdev); return 0; err: return ret; } static const struct hid_device_id sjoy_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_3_PRO), .driver_data = HID_QUIRK_NOGET }, { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_DUAL_BOX_PRO), .driver_data = HID_QUIRK_MULTI_INPUT | HID_QUIRK_NOGET | HID_QUIRK_SKIP_OUTPUT_REPORTS }, { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_5_PRO), .driver_data = HID_QUIRK_MULTI_INPUT | HID_QUIRK_NOGET | HID_QUIRK_SKIP_OUTPUT_REPORTS }, { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) }, { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SUPER_JOY_BOX_3) }, { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD), .driver_data = HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS }, { HID_USB_DEVICE(USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII), .driver_data = HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS }, { } }; MODULE_DEVICE_TABLE(hid, sjoy_devices); static struct hid_driver sjoy_driver = { .name = "smartjoyplus", .id_table = sjoy_devices, .probe = sjoy_probe, }; module_hid_driver(sjoy_driver); MODULE_DESCRIPTION("Force feedback support for SmartJoy PLUS PS2->USB adapter"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Jussi Kivilinna");
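/*
 * Userspace sketch (the event-device path is only an assumed example) showing
 * the rumble parameters that hid_sjoyff_play() above eventually receives:
 * strong_magnitude becomes the scaled motor value, weak_magnitude is treated
 * as on/off.  It uses the standard force-feedback interface from
 * <linux/input.h>.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/input.h>

int main(void)
{
	/* /dev/input/event7 is just a placeholder path for the adapter. */
	int fd = open("/dev/input/event7", O_RDWR);
	struct ff_effect effect;
	struct input_event play;

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&effect, 0, sizeof(effect));
	effect.type = FF_RUMBLE;
	effect.id = -1;				/* let the kernel assign an id */
	effect.u.rumble.strong_magnitude = 0x8000;
	effect.u.rumble.weak_magnitude = 0xffff;
	effect.replay.length = 1000;		/* milliseconds */

	if (ioctl(fd, EVIOCSFF, &effect) < 0) {	/* upload the effect */
		perror("EVIOCSFF");
		return 1;
	}

	memset(&play, 0, sizeof(play));
	play.type = EV_FF;
	play.code = effect.id;
	play.value = 1;				/* start playback */
	if (write(fd, &play, sizeof(play)) != sizeof(play))
		perror("write");

	sleep(2);
	close(fd);
	return 0;
}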
// SPDX-License-Identifier: GPL-2.0-or-later /* * Advanced Linux Sound Architecture * Copyright (c) by Jaroslav Kysela <perex@perex.cz> */ #include <linux/init.h> #include <linux/export.h> #include <linux/slab.h> #include <linux/time.h> #include <sound/core.h> #include <sound/minors.h> #include <sound/info.h> #include <linux/sound.h> #include <linux/mutex.h> #define SNDRV_OSS_MINORS 256 static struct snd_minor *snd_oss_minors[SNDRV_OSS_MINORS]; static DEFINE_MUTEX(sound_oss_mutex); /* NOTE: This function increments the refcount of the associated card like * snd_lookup_minor_data(); the caller must call snd_card_unref() appropriately */ void *snd_lookup_oss_minor_data(unsigned int minor, int type) { struct snd_minor *mreg; void *private_data; if (minor >= ARRAY_SIZE(snd_oss_minors)) return NULL; guard(mutex)(&sound_oss_mutex); mreg = snd_oss_minors[minor]; if (mreg && mreg->type == type) { private_data = mreg->private_data; if (private_data && mreg->card_ptr) get_device(&mreg->card_ptr->card_dev); } else private_data = NULL; return private_data; } EXPORT_SYMBOL(snd_lookup_oss_minor_data); static int snd_oss_kernel_minor(int type, struct snd_card *card, int dev) { int minor; switch (type) { case SNDRV_OSS_DEVICE_TYPE_MIXER: if (snd_BUG_ON(!card || dev < 0 || dev > 1)) return -EINVAL; minor = SNDRV_MINOR_OSS(card->number, (dev ? SNDRV_MINOR_OSS_MIXER1 : SNDRV_MINOR_OSS_MIXER)); break; case SNDRV_OSS_DEVICE_TYPE_SEQUENCER: minor = SNDRV_MINOR_OSS_SEQUENCER; break; case SNDRV_OSS_DEVICE_TYPE_MUSIC: minor = SNDRV_MINOR_OSS_MUSIC; break; case SNDRV_OSS_DEVICE_TYPE_PCM: if (snd_BUG_ON(!card || dev < 0 || dev > 1)) return -EINVAL; minor = SNDRV_MINOR_OSS(card->number, (dev ? SNDRV_MINOR_OSS_PCM1 : SNDRV_MINOR_OSS_PCM)); break; case SNDRV_OSS_DEVICE_TYPE_MIDI: if (snd_BUG_ON(!card || dev < 0 || dev > 1)) return -EINVAL; minor = SNDRV_MINOR_OSS(card->number, (dev ?
SNDRV_MINOR_OSS_MIDI1 : SNDRV_MINOR_OSS_MIDI)); break; case SNDRV_OSS_DEVICE_TYPE_DMFM: minor = SNDRV_MINOR_OSS(card->number, SNDRV_MINOR_OSS_DMFM); break; case SNDRV_OSS_DEVICE_TYPE_SNDSTAT: minor = SNDRV_MINOR_OSS_SNDSTAT; break; default: return -EINVAL; } if (minor < 0 || minor >= SNDRV_OSS_MINORS) return -EINVAL; return minor; } int snd_register_oss_device(int type, struct snd_card *card, int dev, const struct file_operations *f_ops, void *private_data) { int minor = snd_oss_kernel_minor(type, card, dev); int minor_unit; struct snd_minor *preg; int cidx = SNDRV_MINOR_OSS_CARD(minor); int track2 = -1; int register1 = -1, register2 = -1; struct device *carddev = snd_card_get_device_link(card); if (card && card->number >= SNDRV_MINOR_OSS_DEVICES) return 0; /* ignore silently */ if (minor < 0) return minor; preg = kmalloc(sizeof(struct snd_minor), GFP_KERNEL); if (preg == NULL) return -ENOMEM; preg->type = type; preg->card = card ? card->number : -1; preg->device = dev; preg->f_ops = f_ops; preg->private_data = private_data; preg->card_ptr = card; guard(mutex)(&sound_oss_mutex); snd_oss_minors[minor] = preg; minor_unit = SNDRV_MINOR_OSS_DEVICE(minor); switch (minor_unit) { case SNDRV_MINOR_OSS_PCM: track2 = SNDRV_MINOR_OSS(cidx, SNDRV_MINOR_OSS_AUDIO); break; case SNDRV_MINOR_OSS_MIDI: track2 = SNDRV_MINOR_OSS(cidx, SNDRV_MINOR_OSS_DMMIDI); break; case SNDRV_MINOR_OSS_MIDI1: track2 = SNDRV_MINOR_OSS(cidx, SNDRV_MINOR_OSS_DMMIDI1); break; } register1 = register_sound_special_device(f_ops, minor, carddev); if (register1 != minor) goto __end; if (track2 >= 0) { register2 = register_sound_special_device(f_ops, track2, carddev); if (register2 != track2) goto __end; snd_oss_minors[track2] = preg; } return 0; __end: if (register2 >= 0) unregister_sound_special(register2); if (register1 >= 0) unregister_sound_special(register1); snd_oss_minors[minor] = NULL; kfree(preg); return -EBUSY; } EXPORT_SYMBOL(snd_register_oss_device); int snd_unregister_oss_device(int type, struct snd_card *card, int dev) { int minor = snd_oss_kernel_minor(type, card, dev); int cidx = SNDRV_MINOR_OSS_CARD(minor); int track2 = -1; struct snd_minor *mptr; if (card && card->number >= SNDRV_MINOR_OSS_DEVICES) return 0; if (minor < 0) return minor; guard(mutex)(&sound_oss_mutex); mptr = snd_oss_minors[minor]; if (mptr == NULL) return -ENOENT; switch (SNDRV_MINOR_OSS_DEVICE(minor)) { case SNDRV_MINOR_OSS_PCM: track2 = SNDRV_MINOR_OSS(cidx, SNDRV_MINOR_OSS_AUDIO); break; case SNDRV_MINOR_OSS_MIDI: track2 = SNDRV_MINOR_OSS(cidx, SNDRV_MINOR_OSS_DMMIDI); break; case SNDRV_MINOR_OSS_MIDI1: track2 = SNDRV_MINOR_OSS(cidx, SNDRV_MINOR_OSS_DMMIDI1); break; } if (track2 >= 0) snd_oss_minors[track2] = NULL; snd_oss_minors[minor] = NULL; /* call unregister_sound_special() outside sound_oss_mutex; * otherwise may deadlock, as it can trigger the release of a card */ unregister_sound_special(minor); if (track2 >= 0) unregister_sound_special(track2); kfree(mptr); return 0; } EXPORT_SYMBOL(snd_unregister_oss_device); /* * INFO PART */ #ifdef CONFIG_SND_PROC_FS static const char *snd_oss_device_type_name(int type) { switch (type) { case SNDRV_OSS_DEVICE_TYPE_MIXER: return "mixer"; case SNDRV_OSS_DEVICE_TYPE_SEQUENCER: case SNDRV_OSS_DEVICE_TYPE_MUSIC: return "sequencer"; case SNDRV_OSS_DEVICE_TYPE_PCM: return "digital audio"; case SNDRV_OSS_DEVICE_TYPE_MIDI: return "raw midi"; case SNDRV_OSS_DEVICE_TYPE_DMFM: return "hardware dependent"; default: return "?"; } } static void snd_minor_info_oss_read(struct snd_info_entry *entry, struct 
snd_info_buffer *buffer) { int minor; struct snd_minor *mptr; guard(mutex)(&sound_oss_mutex); for (minor = 0; minor < SNDRV_OSS_MINORS; ++minor) { mptr = snd_oss_minors[minor]; if (!mptr) continue; if (mptr->card >= 0) snd_iprintf(buffer, "%3i: [%i-%2i]: %s\n", minor, mptr->card, mptr->device, snd_oss_device_type_name(mptr->type)); else snd_iprintf(buffer, "%3i: : %s\n", minor, snd_oss_device_type_name(mptr->type)); } } int __init snd_minor_info_oss_init(void) { struct snd_info_entry *entry; entry = snd_info_create_module_entry(THIS_MODULE, "devices", snd_oss_root); if (!entry) return -ENOMEM; entry->c.text.read = snd_minor_info_oss_read; return snd_info_register(entry); /* freed in error path */ } #endif /* CONFIG_SND_PROC_FS */
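/*
 * Userspace sketch that parses the listing produced by
 * snd_minor_info_oss_read() above.  The proc path is an assumption: entries
 * under snd_oss_root normally show up as /proc/asound/oss/devices, with one
 * line per registered OSS minor in either "%3i: [%i-%2i]: %s" or
 * "%3i: : %s" form.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/asound/oss/devices", "r");
	char line[256];

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		int minor, card, dev;
		char type[64];

		/* Entry bound to a card, e.g. "  3: [ 0- 0]: mixer" */
		if (sscanf(line, "%d: [%d-%d]: %63[^\n]", &minor, &card, &dev, type) == 4)
			printf("minor %d -> card %d device %d (%s)\n",
			       minor, card, dev, type);
		/* Card-less entry, e.g. "  1: : sequencer" */
		else if (sscanf(line, "%d: : %63[^\n]", &minor, type) == 2)
			printf("minor %d -> no card (%s)\n", minor, type);
	}
	fclose(f);
	return 0;
}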
// SPDX-License-Identifier: GPL-2.0-or-later /**************************************************************** * * kaweth.c - driver for KL5KUSB101 based USB->Ethernet * * (c) 2000 Interlan Communications * (c) 2000 Stephane Alnet * (C) 2001 Brad Hards * (C) 2002 Oliver Neukum * * Original author: The Zapman <zapman@interlan.net> * Inspired by, and much credit goes to Michael Rothwell * <rothwell@interlan.net> for the test equipment, help, and patience * Based off of (and with thanks to) Petko Manolov's pegasus.c driver. * Also many thanks to Joel Silverman and Ed Surprenant at Kawasaki * for providing the firmware and driver resources. * ****************************************************************/ /* TODO: * Develop test procedures for USB net interfaces * Run test procedures * Fix bugs from previous two steps * Snoop other OSs for any tricks we're not doing * Reduce arbitrary timeouts * Smart multicast support * Temporary MAC change support * Tunable SOFs parameter - ioctl()?
* Ethernet stats collection * Code formatting improvements */ #include <linux/module.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/usb.h> #include <linux/types.h> #include <linux/ethtool.h> #include <linux/dma-mapping.h> #include <linux/wait.h> #include <linux/firmware.h> #include <linux/uaccess.h> #include <asm/byteorder.h> #undef DEBUG #define KAWETH_MTU 1514 #define KAWETH_BUF_SIZE 1664 #define KAWETH_TX_TIMEOUT (5 * HZ) #define KAWETH_SCRATCH_SIZE 32 #define KAWETH_FIRMWARE_BUF_SIZE 4096 #define KAWETH_CONTROL_TIMEOUT (30000) #define KAWETH_STATUS_BROKEN 0x0000001 #define KAWETH_STATUS_CLOSING 0x0000002 #define KAWETH_STATUS_SUSPENDING 0x0000004 #define KAWETH_STATUS_BLOCKED (KAWETH_STATUS_CLOSING | KAWETH_STATUS_SUSPENDING) #define KAWETH_PACKET_FILTER_PROMISCUOUS 0x01 #define KAWETH_PACKET_FILTER_ALL_MULTICAST 0x02 #define KAWETH_PACKET_FILTER_DIRECTED 0x04 #define KAWETH_PACKET_FILTER_BROADCAST 0x08 #define KAWETH_PACKET_FILTER_MULTICAST 0x10 /* Table 7 */ #define KAWETH_COMMAND_GET_ETHERNET_DESC 0x00 #define KAWETH_COMMAND_MULTICAST_FILTERS 0x01 #define KAWETH_COMMAND_SET_PACKET_FILTER 0x02 #define KAWETH_COMMAND_STATISTICS 0x03 #define KAWETH_COMMAND_SET_TEMP_MAC 0x06 #define KAWETH_COMMAND_GET_TEMP_MAC 0x07 #define KAWETH_COMMAND_SET_URB_SIZE 0x08 #define KAWETH_COMMAND_SET_SOFS_WAIT 0x09 #define KAWETH_COMMAND_SCAN 0xFF #define KAWETH_SOFS_TO_WAIT 0x05 #define INTBUFFERSIZE 4 #define STATE_OFFSET 0 #define STATE_MASK 0x40 #define STATE_SHIFT 5 #define IS_BLOCKED(s) (s & KAWETH_STATUS_BLOCKED) MODULE_AUTHOR("Michael Zappe <zapman@interlan.net>, Stephane Alnet <stephane@u-picardie.fr>, Brad Hards <bhards@bigpond.net.au> and Oliver Neukum <oliver@neukum.org>"); MODULE_DESCRIPTION("KL5USB101 USB Ethernet driver"); MODULE_LICENSE("GPL"); MODULE_FIRMWARE("kaweth/new_code.bin"); MODULE_FIRMWARE("kaweth/new_code_fix.bin"); MODULE_FIRMWARE("kaweth/trigger_code.bin"); MODULE_FIRMWARE("kaweth/trigger_code_fix.bin"); static const char driver_name[] = "kaweth"; static int kaweth_probe( struct usb_interface *intf, const struct usb_device_id *id /* from id_table */ ); static void kaweth_disconnect(struct usb_interface *intf); static int kaweth_suspend(struct usb_interface *intf, pm_message_t message); static int kaweth_resume(struct usb_interface *intf); /**************************************************************** * usb_device_id ****************************************************************/ static const struct usb_device_id usb_klsi_table[] = { { USB_DEVICE(0x03e8, 0x0008) }, /* AOX Endpoints USB Ethernet */ { USB_DEVICE(0x04bb, 0x0901) }, /* I-O DATA USB-ET/T */ { USB_DEVICE(0x0506, 0x03e8) }, /* 3Com 3C19250 */ { USB_DEVICE(0x0506, 0x11f8) }, /* 3Com 3C460 */ { USB_DEVICE(0x0557, 0x2002) }, /* ATEN USB Ethernet */ { USB_DEVICE(0x0557, 0x4000) }, /* D-Link DSB-650C */ { USB_DEVICE(0x0565, 0x0002) }, /* Peracom Enet */ { USB_DEVICE(0x0565, 0x0003) }, /* Optus@Home UEP1045A */ { USB_DEVICE(0x0565, 0x0005) }, /* Peracom Enet2 */ { USB_DEVICE(0x05e9, 0x0008) }, /* KLSI KL5KUSB101B */ { USB_DEVICE(0x05e9, 0x0009) }, /* KLSI KL5KUSB101B (Board change) */ { USB_DEVICE(0x066b, 0x2202) }, /* Linksys USB10T */ { USB_DEVICE(0x06e1, 0x0008) }, /* ADS USB-10BT */ { USB_DEVICE(0x06e1, 0x0009) }, /* ADS USB-10BT */ { USB_DEVICE(0x0707, 0x0100) }, /* SMC 2202USB */ { USB_DEVICE(0x07aa, 0x0001) }, /* Correga K.K. 
*/ { USB_DEVICE(0x07b8, 0x4000) }, /* D-Link DU-E10 */ { USB_DEVICE(0x07c9, 0xb010) }, /* Allied Telesyn AT-USB10 USB Ethernet Adapter */ { USB_DEVICE(0x0846, 0x1001) }, /* NetGear EA-101 */ { USB_DEVICE(0x0846, 0x1002) }, /* NetGear EA-101 */ { USB_DEVICE(0x085a, 0x0008) }, /* PortGear Ethernet Adapter */ { USB_DEVICE(0x085a, 0x0009) }, /* PortGear Ethernet Adapter */ { USB_DEVICE(0x087d, 0x5704) }, /* Jaton USB Ethernet Device Adapter */ { USB_DEVICE(0x0951, 0x0008) }, /* Kingston Technology USB Ethernet Adapter */ { USB_DEVICE(0x095a, 0x3003) }, /* Portsmith Express Ethernet Adapter */ { USB_DEVICE(0x10bd, 0x1427) }, /* ASANTE USB To Ethernet Adapter */ { USB_DEVICE(0x1342, 0x0204) }, /* Mobility USB-Ethernet Adapter */ { USB_DEVICE(0x13d2, 0x0400) }, /* Shark Pocket Adapter */ { USB_DEVICE(0x1485, 0x0001) }, /* Silicom U2E */ { USB_DEVICE(0x1485, 0x0002) }, /* Psion Dacom Gold Port Ethernet */ { USB_DEVICE(0x1645, 0x0005) }, /* Entrega E45 */ { USB_DEVICE(0x1645, 0x0008) }, /* Entrega USB Ethernet Adapter */ { USB_DEVICE(0x1645, 0x8005) }, /* PortGear Ethernet Adapter */ { USB_DEVICE(0x1668, 0x0323) }, /* Actiontec USB Ethernet */ { USB_DEVICE(0x2001, 0x4000) }, /* D-link DSB-650C */ {} /* Null terminator */ }; MODULE_DEVICE_TABLE (usb, usb_klsi_table); /**************************************************************** * kaweth_driver ****************************************************************/ static struct usb_driver kaweth_driver = { .name = driver_name, .probe = kaweth_probe, .disconnect = kaweth_disconnect, .suspend = kaweth_suspend, .resume = kaweth_resume, .id_table = usb_klsi_table, .supports_autosuspend = 1, .disable_hub_initiated_lpm = 1, }; typedef __u8 eth_addr_t[6]; /**************************************************************** * usb_eth_dev ****************************************************************/ struct usb_eth_dev { char *name; __u16 vendor; __u16 device; void *pdata; }; /**************************************************************** * kaweth_ethernet_configuration * Refer Table 8 ****************************************************************/ struct kaweth_ethernet_configuration { __u8 size; __u8 reserved1; __u8 reserved2; eth_addr_t hw_addr; __u32 statistics_mask; __le16 segment_size; __u16 max_multicast_filters; __u8 reserved3; } __packed; /**************************************************************** * kaweth_device ****************************************************************/ struct kaweth_device { spinlock_t device_lock; __u32 status; int end; int suspend_lowmem_rx; int suspend_lowmem_ctrl; int linkstate; int opened; struct delayed_work lowmem_work; struct usb_device *dev; struct usb_interface *intf; struct net_device *net; wait_queue_head_t term_wait; struct urb *rx_urb; struct urb *tx_urb; struct urb *irq_urb; dma_addr_t intbufferhandle; __u8 *intbuffer; dma_addr_t rxbufferhandle; __u8 *rx_buf; struct sk_buff *tx_skb; __u8 *firmware_buf; __u8 scratch[KAWETH_SCRATCH_SIZE]; __u16 packet_filter_bitmap; struct kaweth_ethernet_configuration configuration; }; /**************************************************************** * kaweth_read_configuration ****************************************************************/ static int kaweth_read_configuration(struct kaweth_device *kaweth) { return usb_control_msg(kaweth->dev, usb_rcvctrlpipe(kaweth->dev, 0), KAWETH_COMMAND_GET_ETHERNET_DESC, USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_DEVICE, 0, 0, &kaweth->configuration, sizeof(kaweth->configuration), KAWETH_CONTROL_TIMEOUT); } 
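/*
 * Userspace libusb-1.0 sketch (an assumption, not part of the driver) that
 * issues the same vendor control read as kaweth_read_configuration() above:
 * request 0x00 (KAWETH_COMMAND_GET_ETHERNET_DESC) with a 30 s timeout.  The
 * 0x0565:0x0002 ID is just one entry from usb_klsi_table; in practice the
 * device would first have to be unbound from the kaweth driver.
 */
#include <stdio.h>
#include <stdint.h>
#include <libusb-1.0/libusb.h>

/* Mirrors struct kaweth_ethernet_configuration from the driver. */
struct kaweth_eth_config {
	uint8_t  size;
	uint8_t  reserved1;
	uint8_t  reserved2;
	uint8_t  hw_addr[6];
	uint32_t statistics_mask;
	uint16_t segment_size;		/* little-endian on the wire */
	uint16_t max_multicast_filters;
	uint8_t  reserved3;
} __attribute__((packed));

int main(void)
{
	libusb_device_handle *h;
	struct kaweth_eth_config cfg;
	int ret;

	if (libusb_init(NULL) < 0)
		return 1;
	h = libusb_open_device_with_vid_pid(NULL, 0x0565, 0x0002); /* example ID */
	if (!h) {
		fprintf(stderr, "device not found\n");
		libusb_exit(NULL);
		return 1;
	}
	ret = libusb_control_transfer(h,
			LIBUSB_REQUEST_TYPE_VENDOR | LIBUSB_ENDPOINT_IN |
			LIBUSB_RECIPIENT_DEVICE,
			0x00 /* KAWETH_COMMAND_GET_ETHERNET_DESC */,
			0, 0, (unsigned char *)&cfg, sizeof(cfg), 30000);
	if (ret == (int)sizeof(cfg))
		printf("MAC %02x:%02x:%02x:%02x:%02x:%02x, segment size %u\n",
		       cfg.hw_addr[0], cfg.hw_addr[1], cfg.hw_addr[2],
		       cfg.hw_addr[3], cfg.hw_addr[4], cfg.hw_addr[5],
		       cfg.segment_size);	/* assumes a little-endian host */
	else
		fprintf(stderr, "control transfer failed: %d\n", ret);
	libusb_close(h);
	libusb_exit(NULL);
	return 0;
}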
/**************************************************************** * kaweth_set_urb_size ****************************************************************/ static int kaweth_set_urb_size(struct kaweth_device *kaweth, __u16 urb_size) { netdev_dbg(kaweth->net, "Setting URB size to %d\n", (unsigned)urb_size); return usb_control_msg(kaweth->dev, usb_sndctrlpipe(kaweth->dev, 0), KAWETH_COMMAND_SET_URB_SIZE, USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE, urb_size, 0, &kaweth->scratch, 0, KAWETH_CONTROL_TIMEOUT); } /**************************************************************** * kaweth_set_sofs_wait ****************************************************************/ static int kaweth_set_sofs_wait(struct kaweth_device *kaweth, __u16 sofs_wait) { netdev_dbg(kaweth->net, "Set SOFS wait to %d\n", (unsigned)sofs_wait); return usb_control_msg(kaweth->dev, usb_sndctrlpipe(kaweth->dev, 0), KAWETH_COMMAND_SET_SOFS_WAIT, USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE, sofs_wait, 0, &kaweth->scratch, 0, KAWETH_CONTROL_TIMEOUT); } /**************************************************************** * kaweth_set_receive_filter ****************************************************************/ static int kaweth_set_receive_filter(struct kaweth_device *kaweth, __u16 receive_filter) { netdev_dbg(kaweth->net, "Set receive filter to %d\n", (unsigned)receive_filter); return usb_control_msg(kaweth->dev, usb_sndctrlpipe(kaweth->dev, 0), KAWETH_COMMAND_SET_PACKET_FILTER, USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE, receive_filter, 0, &kaweth->scratch, 0, KAWETH_CONTROL_TIMEOUT); } /**************************************************************** * kaweth_download_firmware ****************************************************************/ static int kaweth_download_firmware(struct kaweth_device *kaweth, const char *fwname, __u8 interrupt, __u8 type) { const struct firmware *fw; int data_len; int ret; ret = request_firmware(&fw, fwname, &kaweth->dev->dev); if (ret) { dev_err(&kaweth->intf->dev, "Firmware request failed\n"); return ret; } if (fw->size > KAWETH_FIRMWARE_BUF_SIZE) { dev_err(&kaweth->intf->dev, "Firmware too big: %zu\n", fw->size); release_firmware(fw); return -ENOSPC; } data_len = fw->size; memcpy(kaweth->firmware_buf, fw->data, fw->size); release_firmware(fw); kaweth->firmware_buf[2] = (data_len & 0xFF) - 7; kaweth->firmware_buf[3] = data_len >> 8; kaweth->firmware_buf[4] = type; kaweth->firmware_buf[5] = interrupt; netdev_dbg(kaweth->net, "High: %i, Low:%i\n", kaweth->firmware_buf[3], kaweth->firmware_buf[2]); netdev_dbg(kaweth->net, "Downloading firmware at %p to kaweth device at %p\n", kaweth->firmware_buf, kaweth); netdev_dbg(kaweth->net, "Firmware length: %d\n", data_len); return usb_control_msg(kaweth->dev, usb_sndctrlpipe(kaweth->dev, 0), KAWETH_COMMAND_SCAN, USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE, 0, 0, kaweth->firmware_buf, data_len, KAWETH_CONTROL_TIMEOUT); } /**************************************************************** * kaweth_trigger_firmware ****************************************************************/ static int kaweth_trigger_firmware(struct kaweth_device *kaweth, __u8 interrupt) { kaweth->firmware_buf[0] = 0xB6; kaweth->firmware_buf[1] = 0xC3; kaweth->firmware_buf[2] = 0x01; kaweth->firmware_buf[3] = 0x00; kaweth->firmware_buf[4] = 0x06; kaweth->firmware_buf[5] = interrupt; kaweth->firmware_buf[6] = 0x00; kaweth->firmware_buf[7] = 0x00; return usb_control_msg(kaweth->dev, usb_sndctrlpipe(kaweth->dev, 0), KAWETH_COMMAND_SCAN, USB_TYPE_VENDOR | USB_DIR_OUT | 
USB_RECIP_DEVICE, 0, 0, (void *)kaweth->firmware_buf, 8, KAWETH_CONTROL_TIMEOUT); } /**************************************************************** * kaweth_reset ****************************************************************/ static int kaweth_reset(struct kaweth_device *kaweth) { int result; result = usb_reset_configuration(kaweth->dev); mdelay(10); netdev_dbg(kaweth->net, "kaweth_reset() returns %d.\n", result); return result; } static void kaweth_usb_receive(struct urb *); static int kaweth_resubmit_rx_urb(struct kaweth_device *, gfp_t); /**************************************************************** int_callback *****************************************************************/ static void kaweth_resubmit_int_urb(struct kaweth_device *kaweth, gfp_t mf) { int status; status = usb_submit_urb (kaweth->irq_urb, mf); if (unlikely(status == -ENOMEM)) { kaweth->suspend_lowmem_ctrl = 1; schedule_delayed_work(&kaweth->lowmem_work, HZ/4); } else { kaweth->suspend_lowmem_ctrl = 0; } if (status) dev_err(&kaweth->intf->dev, "can't resubmit intr, %s-%s, status %d\n", kaweth->dev->bus->bus_name, kaweth->dev->devpath, status); } static void int_callback(struct urb *u) { struct kaweth_device *kaweth = u->context; int act_state; int status = u->status; switch (status) { case 0: /* success */ break; case -ECONNRESET: /* unlink */ case -ENOENT: case -ESHUTDOWN: return; /* -EPIPE: should clear the halt */ default: /* error */ goto resubmit; } /* we check the link state to report changes */ if (kaweth->linkstate != (act_state = ( kaweth->intbuffer[STATE_OFFSET] | STATE_MASK) >> STATE_SHIFT)) { if (act_state) netif_carrier_on(kaweth->net); else netif_carrier_off(kaweth->net); kaweth->linkstate = act_state; } resubmit: kaweth_resubmit_int_urb(kaweth, GFP_ATOMIC); } static void kaweth_resubmit_tl(struct work_struct *work) { struct kaweth_device *kaweth = container_of(work, struct kaweth_device, lowmem_work.work); if (IS_BLOCKED(kaweth->status)) return; if (kaweth->suspend_lowmem_rx) kaweth_resubmit_rx_urb(kaweth, GFP_NOIO); if (kaweth->suspend_lowmem_ctrl) kaweth_resubmit_int_urb(kaweth, GFP_NOIO); } /**************************************************************** * kaweth_resubmit_rx_urb ****************************************************************/ static int kaweth_resubmit_rx_urb(struct kaweth_device *kaweth, gfp_t mem_flags) { int result; usb_fill_bulk_urb(kaweth->rx_urb, kaweth->dev, usb_rcvbulkpipe(kaweth->dev, 1), kaweth->rx_buf, KAWETH_BUF_SIZE, kaweth_usb_receive, kaweth); kaweth->rx_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; kaweth->rx_urb->transfer_dma = kaweth->rxbufferhandle; if((result = usb_submit_urb(kaweth->rx_urb, mem_flags))) { if (result == -ENOMEM) { kaweth->suspend_lowmem_rx = 1; schedule_delayed_work(&kaweth->lowmem_work, HZ/4); } dev_err(&kaweth->intf->dev, "resubmitting rx_urb %d failed\n", result); } else { kaweth->suspend_lowmem_rx = 0; } return result; } static void kaweth_async_set_rx_mode(struct kaweth_device *kaweth, bool may_sleep); /**************************************************************** * kaweth_usb_receive ****************************************************************/ static void kaweth_usb_receive(struct urb *urb) { struct device *dev = &urb->dev->dev; struct kaweth_device *kaweth = urb->context; struct net_device *net = kaweth->net; int status = urb->status; unsigned long flags; int count = urb->actual_length; int count2 = urb->transfer_buffer_length; __u16 pkt_len = le16_to_cpup((__le16 *)kaweth->rx_buf); struct sk_buff *skb; if 
(unlikely(status == -EPIPE)) { net->stats.rx_errors++; kaweth->end = 1; wake_up(&kaweth->term_wait); dev_dbg(dev, "Status was -EPIPE.\n"); return; } if (unlikely(status == -ECONNRESET || status == -ESHUTDOWN)) { /* we are killed - set a flag and wake the disconnect handler */ kaweth->end = 1; wake_up(&kaweth->term_wait); dev_dbg(dev, "Status was -ECONNRESET or -ESHUTDOWN.\n"); return; } if (unlikely(status == -EPROTO || status == -ETIME || status == -EILSEQ)) { net->stats.rx_errors++; dev_dbg(dev, "Status was -EPROTO, -ETIME, or -EILSEQ.\n"); return; } if (unlikely(status == -EOVERFLOW)) { net->stats.rx_errors++; dev_dbg(dev, "Status was -EOVERFLOW.\n"); } spin_lock_irqsave(&kaweth->device_lock, flags); if (IS_BLOCKED(kaweth->status)) { spin_unlock_irqrestore(&kaweth->device_lock, flags); return; } spin_unlock_irqrestore(&kaweth->device_lock, flags); if(status && status != -EREMOTEIO && count != 1) { dev_err(&kaweth->intf->dev, "%s RX status: %d count: %d packet_len: %d\n", net->name, status, count, (int)pkt_len); kaweth_resubmit_rx_urb(kaweth, GFP_ATOMIC); return; } if(kaweth->net && (count > 2)) { if(pkt_len > (count - 2)) { dev_err(&kaweth->intf->dev, "Packet length too long for USB frame (pkt_len: %x, count: %x)\n", pkt_len, count); dev_err(&kaweth->intf->dev, "Packet len & 2047: %x\n", pkt_len & 2047); dev_err(&kaweth->intf->dev, "Count 2: %x\n", count2); kaweth_resubmit_rx_urb(kaweth, GFP_ATOMIC); return; } if(!(skb = dev_alloc_skb(pkt_len+2))) { kaweth_resubmit_rx_urb(kaweth, GFP_ATOMIC); return; } skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ skb_copy_to_linear_data(skb, kaweth->rx_buf + 2, pkt_len); skb_put(skb, pkt_len); skb->protocol = eth_type_trans(skb, net); netif_rx(skb); net->stats.rx_packets++; net->stats.rx_bytes += pkt_len; } kaweth_resubmit_rx_urb(kaweth, GFP_ATOMIC); } /**************************************************************** * kaweth_open ****************************************************************/ static int kaweth_open(struct net_device *net) { struct kaweth_device *kaweth = netdev_priv(net); int res; res = usb_autopm_get_interface(kaweth->intf); if (res) { dev_err(&kaweth->intf->dev, "Interface cannot be resumed.\n"); return -EIO; } res = kaweth_resubmit_rx_urb(kaweth, GFP_KERNEL); if (res) goto err_out; usb_fill_int_urb( kaweth->irq_urb, kaweth->dev, usb_rcvintpipe(kaweth->dev, 3), kaweth->intbuffer, INTBUFFERSIZE, int_callback, kaweth, 250); /* overriding the descriptor */ kaweth->irq_urb->transfer_dma = kaweth->intbufferhandle; kaweth->irq_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; res = usb_submit_urb(kaweth->irq_urb, GFP_KERNEL); if (res) { usb_kill_urb(kaweth->rx_urb); goto err_out; } kaweth->opened = 1; netif_start_queue(net); kaweth_async_set_rx_mode(kaweth, true); return 0; err_out: usb_autopm_put_interface(kaweth->intf); return -EIO; } /**************************************************************** * kaweth_kill_urbs ****************************************************************/ static void kaweth_kill_urbs(struct kaweth_device *kaweth) { usb_kill_urb(kaweth->irq_urb); usb_kill_urb(kaweth->rx_urb); usb_kill_urb(kaweth->tx_urb); cancel_delayed_work_sync(&kaweth->lowmem_work); /* a scheduled work may have resubmitted, we hit them again */ usb_kill_urb(kaweth->irq_urb); usb_kill_urb(kaweth->rx_urb); } /**************************************************************** * kaweth_close ****************************************************************/ static int kaweth_close(struct net_device *net) { struct kaweth_device *kaweth 
= netdev_priv(net); netif_stop_queue(net); kaweth->opened = 0; kaweth->status |= KAWETH_STATUS_CLOSING; kaweth_kill_urbs(kaweth); kaweth->status &= ~KAWETH_STATUS_CLOSING; usb_autopm_put_interface(kaweth->intf); return 0; } static u32 kaweth_get_link(struct net_device *dev) { struct kaweth_device *kaweth = netdev_priv(dev); return kaweth->linkstate; } static const struct ethtool_ops ops = { .get_link = kaweth_get_link }; /**************************************************************** * kaweth_usb_transmit_complete ****************************************************************/ static void kaweth_usb_transmit_complete(struct urb *urb) { struct kaweth_device *kaweth = urb->context; struct sk_buff *skb = kaweth->tx_skb; int status = urb->status; if (unlikely(status != 0)) if (status != -ENOENT) dev_dbg(&urb->dev->dev, "%s: TX status %d.\n", kaweth->net->name, status); netif_wake_queue(kaweth->net); dev_kfree_skb_irq(skb); } /**************************************************************** * kaweth_start_xmit ****************************************************************/ static netdev_tx_t kaweth_start_xmit(struct sk_buff *skb, struct net_device *net) { struct kaweth_device *kaweth = netdev_priv(net); __le16 *private_header; int res; spin_lock_irq(&kaweth->device_lock); kaweth_async_set_rx_mode(kaweth, false); netif_stop_queue(net); if (IS_BLOCKED(kaweth->status)) { goto skip; } /* We now decide whether we can put our special header into the sk_buff */ if (skb_cow_head(skb, 2)) { net->stats.tx_errors++; netif_start_queue(net); spin_unlock_irq(&kaweth->device_lock); dev_kfree_skb_any(skb); return NETDEV_TX_OK; } private_header = __skb_push(skb, 2); *private_header = cpu_to_le16(skb->len-2); kaweth->tx_skb = skb; usb_fill_bulk_urb(kaweth->tx_urb, kaweth->dev, usb_sndbulkpipe(kaweth->dev, 2), private_header, skb->len, kaweth_usb_transmit_complete, kaweth); kaweth->end = 0; if((res = usb_submit_urb(kaweth->tx_urb, GFP_ATOMIC))) { dev_warn(&net->dev, "kaweth failed tx_urb %d\n", res); skip: net->stats.tx_errors++; netif_start_queue(net); dev_kfree_skb_irq(skb); } else { net->stats.tx_packets++; net->stats.tx_bytes += skb->len; } spin_unlock_irq(&kaweth->device_lock); return NETDEV_TX_OK; } /**************************************************************** * kaweth_set_rx_mode ****************************************************************/ static void kaweth_set_rx_mode(struct net_device *net) { struct kaweth_device *kaweth = netdev_priv(net); __u16 packet_filter_bitmap = KAWETH_PACKET_FILTER_DIRECTED | KAWETH_PACKET_FILTER_BROADCAST | KAWETH_PACKET_FILTER_MULTICAST; netdev_dbg(net, "Setting Rx mode to %d\n", packet_filter_bitmap); netif_stop_queue(net); if (net->flags & IFF_PROMISC) { packet_filter_bitmap |= KAWETH_PACKET_FILTER_PROMISCUOUS; } else if (!netdev_mc_empty(net) || (net->flags & IFF_ALLMULTI)) { packet_filter_bitmap |= KAWETH_PACKET_FILTER_ALL_MULTICAST; } kaweth->packet_filter_bitmap = packet_filter_bitmap; netif_wake_queue(net); } /**************************************************************** * kaweth_async_set_rx_mode ****************************************************************/ static void kaweth_async_set_rx_mode(struct kaweth_device *kaweth, bool may_sleep) { int ret; __u16 packet_filter_bitmap = kaweth->packet_filter_bitmap; kaweth->packet_filter_bitmap = 0; if (packet_filter_bitmap == 0) return; if (!may_sleep) return; ret = usb_control_msg(kaweth->dev, usb_sndctrlpipe(kaweth->dev, 0), KAWETH_COMMAND_SET_PACKET_FILTER, USB_TYPE_VENDOR | USB_DIR_OUT | 
USB_RECIP_DEVICE, packet_filter_bitmap, 0, &kaweth->scratch, 0, KAWETH_CONTROL_TIMEOUT); if (ret < 0) dev_err(&kaweth->intf->dev, "Failed to set Rx mode: %d\n", ret); else netdev_dbg(kaweth->net, "Set Rx mode to %d\n", packet_filter_bitmap); } /**************************************************************** * kaweth_tx_timeout ****************************************************************/ static void kaweth_tx_timeout(struct net_device *net, unsigned int txqueue) { struct kaweth_device *kaweth = netdev_priv(net); dev_warn(&net->dev, "%s: Tx timed out. Resetting.\n", net->name); net->stats.tx_errors++; netif_trans_update(net); usb_unlink_urb(kaweth->tx_urb); } /**************************************************************** * kaweth_suspend ****************************************************************/ static int kaweth_suspend(struct usb_interface *intf, pm_message_t message) { struct kaweth_device *kaweth = usb_get_intfdata(intf); unsigned long flags; spin_lock_irqsave(&kaweth->device_lock, flags); kaweth->status |= KAWETH_STATUS_SUSPENDING; spin_unlock_irqrestore(&kaweth->device_lock, flags); kaweth_kill_urbs(kaweth); return 0; } /**************************************************************** * kaweth_resume ****************************************************************/ static int kaweth_resume(struct usb_interface *intf) { struct kaweth_device *kaweth = usb_get_intfdata(intf); unsigned long flags; spin_lock_irqsave(&kaweth->device_lock, flags); kaweth->status &= ~KAWETH_STATUS_SUSPENDING; spin_unlock_irqrestore(&kaweth->device_lock, flags); if (!kaweth->opened) return 0; kaweth_resubmit_rx_urb(kaweth, GFP_NOIO); kaweth_resubmit_int_urb(kaweth, GFP_NOIO); return 0; } /**************************************************************** * kaweth_probe ****************************************************************/ static const struct net_device_ops kaweth_netdev_ops = { .ndo_open = kaweth_open, .ndo_stop = kaweth_close, .ndo_start_xmit = kaweth_start_xmit, .ndo_tx_timeout = kaweth_tx_timeout, .ndo_set_rx_mode = kaweth_set_rx_mode, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; static int kaweth_probe( struct usb_interface *intf, const struct usb_device_id *id /* from id_table */ ) { struct device *dev = &intf->dev; struct usb_device *udev = interface_to_usbdev(intf); struct kaweth_device *kaweth; struct net_device *netdev; const eth_addr_t bcast_addr = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; int result = 0; int rv = -EIO; dev_dbg(dev, "Kawasaki Device Probe (Device number:%d): 0x%4.4x:0x%4.4x:0x%4.4x\n", udev->devnum, le16_to_cpu(udev->descriptor.idVendor), le16_to_cpu(udev->descriptor.idProduct), le16_to_cpu(udev->descriptor.bcdDevice)); dev_dbg(dev, "Device at %p\n", udev); dev_dbg(dev, "Descriptor length: %x type: %x\n", (int)udev->descriptor.bLength, (int)udev->descriptor.bDescriptorType); netdev = alloc_etherdev(sizeof(*kaweth)); if (!netdev) return -ENOMEM; kaweth = netdev_priv(netdev); kaweth->dev = udev; kaweth->net = netdev; kaweth->intf = intf; spin_lock_init(&kaweth->device_lock); init_waitqueue_head(&kaweth->term_wait); dev_dbg(dev, "Resetting.\n"); kaweth_reset(kaweth); /* * If high byte of bcdDevice is nonzero, firmware is already * downloaded. Don't try to do it again, or we'll hang the device. 
*/ if (le16_to_cpu(udev->descriptor.bcdDevice) >> 8) { dev_info(dev, "Firmware present in device.\n"); } else { /* Download the firmware */ dev_info(dev, "Downloading firmware...\n"); kaweth->firmware_buf = (__u8 *)__get_free_page(GFP_KERNEL); if (!kaweth->firmware_buf) { rv = -ENOMEM; goto err_free_netdev; } if ((result = kaweth_download_firmware(kaweth, "kaweth/new_code.bin", 100, 2)) < 0) { dev_err(dev, "Error downloading firmware (%d)\n", result); goto err_fw; } if ((result = kaweth_download_firmware(kaweth, "kaweth/new_code_fix.bin", 100, 3)) < 0) { dev_err(dev, "Error downloading firmware fix (%d)\n", result); goto err_fw; } if ((result = kaweth_download_firmware(kaweth, "kaweth/trigger_code.bin", 126, 2)) < 0) { dev_err(dev, "Error downloading trigger code (%d)\n", result); goto err_fw; } if ((result = kaweth_download_firmware(kaweth, "kaweth/trigger_code_fix.bin", 126, 3)) < 0) { dev_err(dev, "Error downloading trigger code fix (%d)\n", result); goto err_fw; } if ((result = kaweth_trigger_firmware(kaweth, 126)) < 0) { dev_err(dev, "Error triggering firmware (%d)\n", result); goto err_fw; } /* Device will now disappear for a moment... */ dev_info(dev, "Firmware loaded. I'll be back...\n"); err_fw: free_page((unsigned long)kaweth->firmware_buf); free_netdev(netdev); return -EIO; } result = kaweth_read_configuration(kaweth); if(result < 0) { dev_err(dev, "Error reading configuration (%d), no net device created\n", result); goto err_free_netdev; } dev_info(dev, "Statistics collection: %x\n", kaweth->configuration.statistics_mask); dev_info(dev, "Multicast filter limit: %x\n", kaweth->configuration.max_multicast_filters & ((1 << 15) - 1)); dev_info(dev, "MTU: %d\n", le16_to_cpu(kaweth->configuration.segment_size)); dev_info(dev, "Read MAC address %pM\n", kaweth->configuration.hw_addr); if(!memcmp(&kaweth->configuration.hw_addr, &bcast_addr, sizeof(bcast_addr))) { dev_err(dev, "Firmware not functioning properly, no net device created\n"); goto err_free_netdev; } if(kaweth_set_urb_size(kaweth, KAWETH_BUF_SIZE) < 0) { dev_dbg(dev, "Error setting URB size\n"); goto err_free_netdev; } if(kaweth_set_sofs_wait(kaweth, KAWETH_SOFS_TO_WAIT) < 0) { dev_err(dev, "Error setting SOFS wait\n"); goto err_free_netdev; } result = kaweth_set_receive_filter(kaweth, KAWETH_PACKET_FILTER_DIRECTED | KAWETH_PACKET_FILTER_BROADCAST | KAWETH_PACKET_FILTER_MULTICAST); if(result < 0) { dev_err(dev, "Error setting receive filter\n"); goto err_free_netdev; } dev_dbg(dev, "Initializing net device.\n"); kaweth->tx_urb = usb_alloc_urb(0, GFP_KERNEL); if (!kaweth->tx_urb) goto err_free_netdev; kaweth->rx_urb = usb_alloc_urb(0, GFP_KERNEL); if (!kaweth->rx_urb) goto err_only_tx; kaweth->irq_urb = usb_alloc_urb(0, GFP_KERNEL); if (!kaweth->irq_urb) goto err_tx_and_rx; kaweth->intbuffer = usb_alloc_coherent( kaweth->dev, INTBUFFERSIZE, GFP_KERNEL, &kaweth->intbufferhandle); if (!kaweth->intbuffer) goto err_tx_and_rx_and_irq; kaweth->rx_buf = usb_alloc_coherent( kaweth->dev, KAWETH_BUF_SIZE, GFP_KERNEL, &kaweth->rxbufferhandle); if (!kaweth->rx_buf) goto err_all_but_rxbuf; memcpy(netdev->broadcast, &bcast_addr, sizeof(bcast_addr)); eth_hw_addr_set(netdev, (u8 *)&kaweth->configuration.hw_addr); netdev->netdev_ops = &kaweth_netdev_ops; netdev->watchdog_timeo = KAWETH_TX_TIMEOUT; netdev->mtu = le16_to_cpu(kaweth->configuration.segment_size); netdev->ethtool_ops = &ops; /* kaweth is zeroed as part of alloc_netdev */ INIT_DELAYED_WORK(&kaweth->lowmem_work, kaweth_resubmit_tl); usb_set_intfdata(intf, kaweth); 
SET_NETDEV_DEV(netdev, dev); if (register_netdev(netdev) != 0) { dev_err(dev, "Error registering netdev.\n"); goto err_intfdata; } dev_info(dev, "kaweth interface created at %s\n", kaweth->net->name); return 0; err_intfdata: usb_set_intfdata(intf, NULL); usb_free_coherent(kaweth->dev, KAWETH_BUF_SIZE, (void *)kaweth->rx_buf, kaweth->rxbufferhandle); err_all_but_rxbuf: usb_free_coherent(kaweth->dev, INTBUFFERSIZE, (void *)kaweth->intbuffer, kaweth->intbufferhandle); err_tx_and_rx_and_irq: usb_free_urb(kaweth->irq_urb); err_tx_and_rx: usb_free_urb(kaweth->rx_urb); err_only_tx: usb_free_urb(kaweth->tx_urb); err_free_netdev: free_netdev(netdev); return rv; } /**************************************************************** * kaweth_disconnect ****************************************************************/ static void kaweth_disconnect(struct usb_interface *intf) { struct kaweth_device *kaweth = usb_get_intfdata(intf); struct net_device *netdev; usb_set_intfdata(intf, NULL); if (!kaweth) { dev_warn(&intf->dev, "unregistering non-existent device\n"); return; } netdev = kaweth->net; netdev_dbg(kaweth->net, "Unregistering net device\n"); unregister_netdev(netdev); usb_free_urb(kaweth->rx_urb); usb_free_urb(kaweth->tx_urb); usb_free_urb(kaweth->irq_urb); usb_free_coherent(kaweth->dev, KAWETH_BUF_SIZE, (void *)kaweth->rx_buf, kaweth->rxbufferhandle); usb_free_coherent(kaweth->dev, INTBUFFERSIZE, (void *)kaweth->intbuffer, kaweth->intbufferhandle); free_netdev(netdev); } module_usb_driver(kaweth_driver);
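/*
 * Host-side sketch of the framing used by the driver above: every bulk
 * transfer starts with a 16-bit little-endian payload length followed by the
 * Ethernet frame (see kaweth_usb_receive() and kaweth_start_xmit()).  The
 * helper validates a received buffer the same way the driver does; the names
 * and the fabricated test buffer are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

/* Returns the frame length and sets *frame, or -1 if the buffer is invalid. */
static int kaweth_unframe(const uint8_t *buf, size_t count, const uint8_t **frame)
{
	uint16_t pkt_len;

	if (count <= 2)
		return -1;
	pkt_len = (uint16_t)buf[0] | ((uint16_t)buf[1] << 8);	/* le16 header */
	if (pkt_len > count - 2)	/* longer than the USB transfer: drop */
		return -1;
	*frame = buf + 2;
	return pkt_len;
}

int main(void)
{
	/* A fabricated 60-byte frame preceded by its little-endian length. */
	uint8_t buf[2 + 60] = { 60, 0 };
	const uint8_t *frame;
	int len = kaweth_unframe(buf, sizeof(buf), &frame);

	printf("frame length: %d\n", len);	/* prints 60 */
	return 0;
}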
70 68 13 13 12 1 13 3 3 11 11 11 11 11 9 9 9 9 9 9 9 9 9 9 6 6 1 1 8 8 8 8 10 10 10 1 10 1 1 10 1 1 94 2 93 9 93 11 93 89 2 2 3 2 3 1 1 83 12 77 9 86 12 1 9 85 68 1 12 12 9 9 9 9 9 7 7 7 7 7 7 85 85 85 85 85 85 81 65 5 64 4 60 61 1 62 63 1 63 63 1 2 2 66 66 1 1 1 1 1 1 1 1 1 66 6 6 66 65 8 5 4 66 66 8 65 65 8 2 8 66 18 66 46 66 66 6 6 66 60 7 62 4 3 2 63 12 11 51 64 59 58 7 66 65 65 1 64 63 64 8 8 8 8 8 8 8 8 8 2 2 2 2 2 2 63 63 22 63 63 63 3 63 64 64 64 64 64 62 5 64 11 53 64 65 1 1 1 1 1 1 1 1 1 1 65 65 2 1 1 2 7 7 7 7 7 6 5 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 4 2 1 2 5 4 1 1 1 1 1 1 1 1 1 1 4 3 4 1 4 5 1 20 19 20 18 18 18 16 8 16 16 1 8 7 7 7 16 16 7 16 16 8 7 9 8 1 1 2 1 1 2 3 2 2 1 3 2 1 2 4 3 2 4 3 2 1 3 2 1 1 1 1 1 1 2 3 3 2 2 2 2 4 3 3 4 3 2 4 10 10 10 7 4 19 19 11 11 7 8 8 7 10 10 10 21 19 19 21 2 1 1 1 1 1 1 2 1 1 1 1 1 1 1 6 6 6 6 10 9 7 2 1 10 5 5 5 5 3 3 2 2 1 3 7 6 2 6 1 1 7 6 6 5 6 4 3 2 2 1 4 1 4 4 4 3 4 3 2 3 3 3 1 3 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 
675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 1204 1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 1239 1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 1250 1251 1252 1253 1254 1255 1256 1257 1258 1259 1260 1261 1262 1263 1264 1265 1266 1267 1268 1269 1270 1271 1272 1273 1274 1275 1276 1277 1278 1279 1280 1281 1282 1283 1284 1285 1286 1287 1288 1289 1290 1291 1292 1293 1294 1295 1296 1297 1298 1299 1300 1301 1302 1303 1304 1305 1306 1307 1308 1309 1310 1311 1312 1313 1314 1315 1316 1317 1318 1319 1320 1321 1322 1323 1324 1325 1326 1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 1349 1350 1351 1352 1353 1354 1355 1356 1357 1358 1359 1360 1361 1362 1363 1364 1365 1366 1367 1368 1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393 1394 1395 1396 1397 1398 1399 1400 1401 1402 1403 1404 1405 1406 1407 1408 1409 1410 1411 1412 1413 1414 1415 1416 1417 1418 1419 1420 1421 1422 1423 1424 1425 1426 1427 1428 1429 1430 1431 1432 1433 1434 1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447 1448 1449 1450 
// SPDX-License-Identifier: GPL-2.0-only
/*
 * vivid-vid-cap.c - video capture support functions.
 *
 * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/videodev2.h>
#include <linux/prandom.h>
#include <linux/v4l2-dv-timings.h>
#include <media/v4l2-common.h>
#include <media/v4l2-event.h>
#include <media/v4l2-dv-timings.h>
#include <media/v4l2-rect.h>

#include "vivid-core.h"
#include "vivid-vid-common.h"
#include "vivid-kthread-cap.h"
#include "vivid-vid-cap.h"

/* Sizes must be in increasing order */
static const struct v4l2_frmsize_discrete webcam_sizes[] = {
	{  320,  180 },
	{  320,  240 },
	{  640,  360 },
	{  640,  480 },
	{ 1280,  720 },
	{ 1280,  960 },
	{ 1600, 1200 },
	{ 1920, 1080 },
	{ 3840, 2160 },
};

/*
 * Intervals must be in increasing order.
*/ static const struct v4l2_fract webcam_intervals[] = { { 1, 1 }, { 1, 2 }, { 1, 4 }, { 1, 5 }, { 1, 10 }, { 2, 25 }, { 1, 15 }, /* 7 - maximum for 2160p */ { 1, 25 }, { 1, 30 }, /* 9 - maximum for 1080p */ { 1, 40 }, { 1, 50 }, { 1, 60 }, /* 12 - maximum for 720p */ { 1, 120 }, }; /* Limit maximum FPS rates for high resolutions */ #define IVAL_COUNT_720P 12 /* 720p and up is limited to 60 fps */ #define IVAL_COUNT_1080P 9 /* 1080p and up is limited to 30 fps */ #define IVAL_COUNT_2160P 7 /* 2160p and up is limited to 15 fps */ static inline unsigned int webcam_ival_count(const struct vivid_dev *dev, unsigned int frmsize_idx) { if (webcam_sizes[frmsize_idx].height >= 2160) return IVAL_COUNT_2160P; if (webcam_sizes[frmsize_idx].height >= 1080) return IVAL_COUNT_1080P; if (webcam_sizes[frmsize_idx].height >= 720) return IVAL_COUNT_720P; /* For low resolutions, allow all FPS rates */ return ARRAY_SIZE(webcam_intervals); } static int vid_cap_queue_setup(struct vb2_queue *vq, unsigned *nbuffers, unsigned *nplanes, unsigned sizes[], struct device *alloc_devs[]) { struct vivid_dev *dev = vb2_get_drv_priv(vq); unsigned buffers = tpg_g_buffers(&dev->tpg); unsigned h = dev->fmt_cap_rect.height; unsigned p; if (dev->field_cap == V4L2_FIELD_ALTERNATE) { /* * You cannot use read() with FIELD_ALTERNATE since the field * information (TOP/BOTTOM) cannot be passed back to the user. */ if (vb2_fileio_is_active(vq)) return -EINVAL; } if (dev->queue_setup_error) { /* * Error injection: test what happens if queue_setup() returns * an error. */ dev->queue_setup_error = false; return -EINVAL; } if (*nplanes) { /* * Check if the number of requested planes match * the number of buffers in the current format. You can't mix that. */ if (*nplanes != buffers) return -EINVAL; for (p = 0; p < buffers; p++) { if (sizes[p] < tpg_g_line_width(&dev->tpg, p) * h / dev->fmt_cap->vdownsampling[p] + dev->fmt_cap->data_offset[p]) return -EINVAL; } } else { for (p = 0; p < buffers; p++) sizes[p] = (tpg_g_line_width(&dev->tpg, p) * h) / dev->fmt_cap->vdownsampling[p] + dev->fmt_cap->data_offset[p]; } *nplanes = buffers; dprintk(dev, 1, "%s: count=%d\n", __func__, *nbuffers); for (p = 0; p < buffers; p++) dprintk(dev, 1, "%s: size[%u]=%u\n", __func__, p, sizes[p]); return 0; } static int vid_cap_buf_prepare(struct vb2_buffer *vb) { struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue); unsigned long size; unsigned buffers = tpg_g_buffers(&dev->tpg); unsigned p; dprintk(dev, 1, "%s\n", __func__); if (WARN_ON(NULL == dev->fmt_cap)) return -EINVAL; if (dev->buf_prepare_error) { /* * Error injection: test what happens if buf_prepare() returns * an error. */ dev->buf_prepare_error = false; return -EINVAL; } for (p = 0; p < buffers; p++) { size = (tpg_g_line_width(&dev->tpg, p) * dev->fmt_cap_rect.height) / dev->fmt_cap->vdownsampling[p] + dev->fmt_cap->data_offset[p]; if (vb2_plane_size(vb, p) < size) { dprintk(dev, 1, "%s data will not fit into plane %u (%lu < %lu)\n", __func__, p, vb2_plane_size(vb, p), size); return -EINVAL; } vb2_set_plane_payload(vb, p, size); vb->planes[p].data_offset = dev->fmt_cap->data_offset[p]; } return 0; } static void vid_cap_buf_finish(struct vb2_buffer *vb) { struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue); struct v4l2_timecode *tc = &vbuf->timecode; unsigned fps = 25; unsigned seq = vbuf->sequence; if (!vivid_is_sdtv_cap(dev)) return; /* * Set the timecode. Rarely used, so it is interesting to * test this. 
*/ vbuf->flags |= V4L2_BUF_FLAG_TIMECODE; if (dev->std_cap[dev->input] & V4L2_STD_525_60) fps = 30; tc->type = (fps == 30) ? V4L2_TC_TYPE_30FPS : V4L2_TC_TYPE_25FPS; tc->flags = 0; tc->frames = seq % fps; tc->seconds = (seq / fps) % 60; tc->minutes = (seq / (60 * fps)) % 60; tc->hours = (seq / (60 * 60 * fps)) % 24; } static void vid_cap_buf_queue(struct vb2_buffer *vb) { struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue); struct vivid_buffer *buf = container_of(vbuf, struct vivid_buffer, vb); dprintk(dev, 1, "%s\n", __func__); spin_lock(&dev->slock); list_add_tail(&buf->list, &dev->vid_cap_active); spin_unlock(&dev->slock); } static int vid_cap_start_streaming(struct vb2_queue *vq, unsigned count) { struct vivid_dev *dev = vb2_get_drv_priv(vq); unsigned i; int err; dev->vid_cap_seq_count = 0; dprintk(dev, 1, "%s\n", __func__); for (i = 0; i < MAX_VID_CAP_BUFFERS; i++) dev->must_blank[i] = tpg_g_perc_fill(&dev->tpg) < 100; if (dev->start_streaming_error) { dev->start_streaming_error = false; err = -EINVAL; } else { err = vivid_start_generating_vid_cap(dev, &dev->vid_cap_streaming); } if (err) { struct vivid_buffer *buf, *tmp; list_for_each_entry_safe(buf, tmp, &dev->vid_cap_active, list) { list_del(&buf->list); vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED); } } return err; } /* abort streaming and wait for last buffer */ static void vid_cap_stop_streaming(struct vb2_queue *vq) { struct vivid_dev *dev = vb2_get_drv_priv(vq); dprintk(dev, 1, "%s\n", __func__); vivid_stop_generating_vid_cap(dev, &dev->vid_cap_streaming); } static void vid_cap_buf_request_complete(struct vb2_buffer *vb) { struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue); v4l2_ctrl_request_complete(vb->req_obj.req, &dev->ctrl_hdl_vid_cap); } const struct vb2_ops vivid_vid_cap_qops = { .queue_setup = vid_cap_queue_setup, .buf_prepare = vid_cap_buf_prepare, .buf_finish = vid_cap_buf_finish, .buf_queue = vid_cap_buf_queue, .start_streaming = vid_cap_start_streaming, .stop_streaming = vid_cap_stop_streaming, .buf_request_complete = vid_cap_buf_request_complete, }; /* * Determine the 'picture' quality based on the current TV frequency: either * COLOR for a good 'signal', GRAY (grayscale picture) for a slightly off * signal or NOISE for no signal. */ void vivid_update_quality(struct vivid_dev *dev) { unsigned freq_modulus; if (dev->input_is_connected_to_output[dev->input]) { /* * The 'noise' will only be replaced by the actual video * if the output video matches the input video settings. */ tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE, 0); return; } if (vivid_is_hdmi_cap(dev) && VIVID_INVALID_SIGNAL(dev->dv_timings_signal_mode[dev->input])) { tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE, 0); return; } if (vivid_is_sdtv_cap(dev) && VIVID_INVALID_SIGNAL(dev->std_signal_mode[dev->input])) { tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE, 0); return; } if (!vivid_is_tv_cap(dev)) { tpg_s_quality(&dev->tpg, TPG_QUAL_COLOR, 0); return; } /* * There is a fake channel every 6 MHz at 49.25, 55.25, etc. * From +/- 0.25 MHz around the channel there is color, and from * +/- 1 MHz there is grayscale (chroma is lost). * Everywhere else it is just noise. 
*/ freq_modulus = (dev->tv_freq - 676 /* (43.25-1) * 16 */) % (6 * 16); if (freq_modulus > 2 * 16) { tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE, next_pseudo_random32(dev->tv_freq ^ 0x55) & 0x3f); return; } if (freq_modulus < 12 /*0.75 * 16*/ || freq_modulus > 20 /*1.25 * 16*/) tpg_s_quality(&dev->tpg, TPG_QUAL_GRAY, 0); else tpg_s_quality(&dev->tpg, TPG_QUAL_COLOR, 0); } /* * Get the current picture quality and the associated afc value. */ static enum tpg_quality vivid_get_quality(struct vivid_dev *dev, s32 *afc) { unsigned freq_modulus; if (afc) *afc = 0; if (tpg_g_quality(&dev->tpg) == TPG_QUAL_COLOR || tpg_g_quality(&dev->tpg) == TPG_QUAL_NOISE) return tpg_g_quality(&dev->tpg); /* * There is a fake channel every 6 MHz at 49.25, 55.25, etc. * From +/- 0.25 MHz around the channel there is color, and from * +/- 1 MHz there is grayscale (chroma is lost). * Everywhere else it is just gray. */ freq_modulus = (dev->tv_freq - 676 /* (43.25-1) * 16 */) % (6 * 16); if (afc) *afc = freq_modulus - 1 * 16; return TPG_QUAL_GRAY; } enum tpg_video_aspect vivid_get_video_aspect(const struct vivid_dev *dev) { if (vivid_is_sdtv_cap(dev)) return dev->std_aspect_ratio[dev->input]; if (vivid_is_hdmi_cap(dev)) return dev->dv_timings_aspect_ratio[dev->input]; return TPG_VIDEO_ASPECT_IMAGE; } static enum tpg_pixel_aspect vivid_get_pixel_aspect(const struct vivid_dev *dev) { if (vivid_is_sdtv_cap(dev)) return (dev->std_cap[dev->input] & V4L2_STD_525_60) ? TPG_PIXEL_ASPECT_NTSC : TPG_PIXEL_ASPECT_PAL; if (vivid_is_hdmi_cap(dev) && dev->src_rect.width == 720 && dev->src_rect.height <= 576) return dev->src_rect.height == 480 ? TPG_PIXEL_ASPECT_NTSC : TPG_PIXEL_ASPECT_PAL; return TPG_PIXEL_ASPECT_SQUARE; } /* * Called whenever the format has to be reset which can occur when * changing inputs, standard, timings, etc. */ void vivid_update_format_cap(struct vivid_dev *dev, bool keep_controls) { struct v4l2_bt_timings *bt = &dev->dv_timings_cap[dev->input].bt; u32 dims[V4L2_CTRL_MAX_DIMS] = {}; unsigned size; u64 pixelclock; switch (dev->input_type[dev->input]) { case WEBCAM: default: dev->src_rect.width = webcam_sizes[dev->webcam_size_idx].width; dev->src_rect.height = webcam_sizes[dev->webcam_size_idx].height; dev->timeperframe_vid_cap = webcam_intervals[dev->webcam_ival_idx]; dev->field_cap = V4L2_FIELD_NONE; tpg_s_rgb_range(&dev->tpg, V4L2_DV_RGB_RANGE_AUTO); break; case TV: case SVID: dev->field_cap = dev->tv_field_cap; dev->src_rect.width = 720; if (dev->std_cap[dev->input] & V4L2_STD_525_60) { dev->src_rect.height = 480; dev->timeperframe_vid_cap = (struct v4l2_fract) { 1001, 30000 }; dev->service_set_cap = V4L2_SLICED_CAPTION_525; } else { dev->src_rect.height = 576; dev->timeperframe_vid_cap = (struct v4l2_fract) { 1000, 25000 }; dev->service_set_cap = V4L2_SLICED_WSS_625 | V4L2_SLICED_TELETEXT_B; } tpg_s_rgb_range(&dev->tpg, V4L2_DV_RGB_RANGE_AUTO); break; case HDMI: dev->src_rect.width = bt->width; dev->src_rect.height = bt->height; size = V4L2_DV_BT_FRAME_WIDTH(bt) * V4L2_DV_BT_FRAME_HEIGHT(bt); if (dev->reduced_fps && can_reduce_fps(bt)) { pixelclock = div_u64(bt->pixelclock * 1000, 1001); bt->flags |= V4L2_DV_FL_REDUCED_FPS; } else { pixelclock = bt->pixelclock; bt->flags &= ~V4L2_DV_FL_REDUCED_FPS; } dev->timeperframe_vid_cap = (struct v4l2_fract) { size / 100, (u32)pixelclock / 100 }; if (bt->interlaced) dev->field_cap = V4L2_FIELD_ALTERNATE; else dev->field_cap = V4L2_FIELD_NONE; /* * We can be called from within s_ctrl, in that case we can't * set/get controls. Luckily we don't need to in that case. 
*/ if (keep_controls || !dev->colorspace) break; if (bt->flags & V4L2_DV_FL_IS_CE_VIDEO) { if (bt->width == 720 && bt->height <= 576) v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_170M); else v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_709); v4l2_ctrl_s_ctrl(dev->real_rgb_range_cap, 1); } else { v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_SRGB); v4l2_ctrl_s_ctrl(dev->real_rgb_range_cap, 0); } tpg_s_rgb_range(&dev->tpg, v4l2_ctrl_g_ctrl(dev->rgb_range_cap)); break; } vivid_update_quality(dev); tpg_reset_source(&dev->tpg, dev->src_rect.width, dev->src_rect.height, dev->field_cap); dev->crop_cap = dev->src_rect; dev->crop_bounds_cap = dev->src_rect; dev->compose_cap = dev->crop_cap; if (V4L2_FIELD_HAS_T_OR_B(dev->field_cap)) dev->compose_cap.height /= 2; dev->fmt_cap_rect = dev->compose_cap; tpg_s_video_aspect(&dev->tpg, vivid_get_video_aspect(dev)); tpg_s_pixel_aspect(&dev->tpg, vivid_get_pixel_aspect(dev)); tpg_update_mv_step(&dev->tpg); /* * We can be called from within s_ctrl, in that case we can't * modify controls. Luckily we don't need to in that case. */ if (keep_controls) return; dims[0] = roundup(dev->src_rect.width, PIXEL_ARRAY_DIV); dims[1] = roundup(dev->src_rect.height, PIXEL_ARRAY_DIV); v4l2_ctrl_modify_dimensions(dev->pixel_array, dims); } /* Map the field to something that is valid for the current input */ static enum v4l2_field vivid_field_cap(struct vivid_dev *dev, enum v4l2_field field) { if (vivid_is_sdtv_cap(dev)) { switch (field) { case V4L2_FIELD_INTERLACED_TB: case V4L2_FIELD_INTERLACED_BT: case V4L2_FIELD_SEQ_TB: case V4L2_FIELD_SEQ_BT: case V4L2_FIELD_TOP: case V4L2_FIELD_BOTTOM: case V4L2_FIELD_ALTERNATE: return field; case V4L2_FIELD_INTERLACED: default: return V4L2_FIELD_INTERLACED; } } if (vivid_is_hdmi_cap(dev)) return dev->dv_timings_cap[dev->input].bt.interlaced ? 
V4L2_FIELD_ALTERNATE : V4L2_FIELD_NONE; return V4L2_FIELD_NONE; } static unsigned vivid_colorspace_cap(struct vivid_dev *dev) { if (!vivid_input_is_connected_to(dev)) return tpg_g_colorspace(&dev->tpg); return dev->colorspace_out; } static unsigned vivid_xfer_func_cap(struct vivid_dev *dev) { if (!vivid_input_is_connected_to(dev)) return tpg_g_xfer_func(&dev->tpg); return dev->xfer_func_out; } static unsigned vivid_ycbcr_enc_cap(struct vivid_dev *dev) { if (!vivid_input_is_connected_to(dev)) return tpg_g_ycbcr_enc(&dev->tpg); return dev->ycbcr_enc_out; } static unsigned int vivid_hsv_enc_cap(struct vivid_dev *dev) { if (!vivid_input_is_connected_to(dev)) return tpg_g_hsv_enc(&dev->tpg); return dev->hsv_enc_out; } static unsigned vivid_quantization_cap(struct vivid_dev *dev) { if (!vivid_input_is_connected_to(dev)) return tpg_g_quantization(&dev->tpg); return dev->quantization_out; } int vivid_g_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct vivid_dev *dev = video_drvdata(file); struct v4l2_pix_format_mplane *mp = &f->fmt.pix_mp; unsigned p; mp->width = dev->fmt_cap_rect.width; mp->height = dev->fmt_cap_rect.height; mp->field = dev->field_cap; mp->pixelformat = dev->fmt_cap->fourcc; mp->colorspace = vivid_colorspace_cap(dev); mp->xfer_func = vivid_xfer_func_cap(dev); if (dev->fmt_cap->color_enc == TGP_COLOR_ENC_HSV) mp->hsv_enc = vivid_hsv_enc_cap(dev); else mp->ycbcr_enc = vivid_ycbcr_enc_cap(dev); mp->quantization = vivid_quantization_cap(dev); mp->num_planes = dev->fmt_cap->buffers; for (p = 0; p < mp->num_planes; p++) { mp->plane_fmt[p].bytesperline = tpg_g_bytesperline(&dev->tpg, p); mp->plane_fmt[p].sizeimage = (tpg_g_line_width(&dev->tpg, p) * mp->height) / dev->fmt_cap->vdownsampling[p] + dev->fmt_cap->data_offset[p]; } return 0; } int vivid_try_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct v4l2_pix_format_mplane *mp = &f->fmt.pix_mp; struct v4l2_plane_pix_format *pfmt = mp->plane_fmt; struct vivid_dev *dev = video_drvdata(file); const struct vivid_fmt *fmt; unsigned bytesperline, max_bpl; unsigned factor = 1; unsigned w, h; unsigned p; bool user_set_csc = !!(mp->flags & V4L2_PIX_FMT_FLAG_SET_CSC); fmt = vivid_get_format(dev, mp->pixelformat); if (!fmt) { dprintk(dev, 1, "Fourcc format (0x%08x) unknown.\n", mp->pixelformat); mp->pixelformat = V4L2_PIX_FMT_YUYV; fmt = vivid_get_format(dev, mp->pixelformat); } mp->field = vivid_field_cap(dev, mp->field); if (vivid_is_webcam(dev)) { const struct v4l2_frmsize_discrete *sz = v4l2_find_nearest_size(webcam_sizes, ARRAY_SIZE(webcam_sizes), width, height, mp->width, mp->height); w = sz->width; h = sz->height; } else if (vivid_is_sdtv_cap(dev)) { w = 720; h = (dev->std_cap[dev->input] & V4L2_STD_525_60) ? 
480 : 576; } else { w = dev->src_rect.width; h = dev->src_rect.height; } if (V4L2_FIELD_HAS_T_OR_B(mp->field)) factor = 2; if (vivid_is_webcam(dev) || (!dev->has_scaler_cap && !dev->has_crop_cap && !dev->has_compose_cap)) { mp->width = w; mp->height = h / factor; } else { struct v4l2_rect r = { 0, 0, mp->width, mp->height * factor }; v4l2_rect_set_min_size(&r, &vivid_min_rect); v4l2_rect_set_max_size(&r, &vivid_max_rect); if (dev->has_scaler_cap && !dev->has_compose_cap) { struct v4l2_rect max_r = { 0, 0, MAX_ZOOM * w, MAX_ZOOM * h }; v4l2_rect_set_max_size(&r, &max_r); } else if (!dev->has_scaler_cap && dev->has_crop_cap && !dev->has_compose_cap) { v4l2_rect_set_max_size(&r, &dev->src_rect); } else if (!dev->has_scaler_cap && !dev->has_crop_cap) { v4l2_rect_set_min_size(&r, &dev->src_rect); } mp->width = r.width; mp->height = r.height / factor; } /* This driver supports custom bytesperline values */ mp->num_planes = fmt->buffers; for (p = 0; p < fmt->buffers; p++) { /* Calculate the minimum supported bytesperline value */ bytesperline = (mp->width * fmt->bit_depth[p]) >> 3; /* Calculate the maximum supported bytesperline value */ max_bpl = (MAX_ZOOM * MAX_WIDTH * fmt->bit_depth[p]) >> 3; if (pfmt[p].bytesperline > max_bpl) pfmt[p].bytesperline = max_bpl; if (pfmt[p].bytesperline < bytesperline) pfmt[p].bytesperline = bytesperline; pfmt[p].sizeimage = (pfmt[p].bytesperline * mp->height) / fmt->vdownsampling[p] + fmt->data_offset[p]; memset(pfmt[p].reserved, 0, sizeof(pfmt[p].reserved)); } for (p = fmt->buffers; p < fmt->planes; p++) pfmt[0].sizeimage += (pfmt[0].bytesperline * mp->height * (fmt->bit_depth[p] / fmt->vdownsampling[p])) / (fmt->bit_depth[0] / fmt->vdownsampling[0]); if (!user_set_csc || !v4l2_is_colorspace_valid(mp->colorspace)) mp->colorspace = vivid_colorspace_cap(dev); if (!user_set_csc || !v4l2_is_xfer_func_valid(mp->xfer_func)) mp->xfer_func = vivid_xfer_func_cap(dev); if (fmt->color_enc == TGP_COLOR_ENC_HSV) { if (!user_set_csc || !v4l2_is_hsv_enc_valid(mp->hsv_enc)) mp->hsv_enc = vivid_hsv_enc_cap(dev); } else if (fmt->color_enc == TGP_COLOR_ENC_YCBCR) { if (!user_set_csc || !v4l2_is_ycbcr_enc_valid(mp->ycbcr_enc)) mp->ycbcr_enc = vivid_ycbcr_enc_cap(dev); } else { mp->ycbcr_enc = vivid_ycbcr_enc_cap(dev); } if (fmt->color_enc == TGP_COLOR_ENC_YCBCR || fmt->color_enc == TGP_COLOR_ENC_RGB) { if (!user_set_csc || !v4l2_is_quant_valid(mp->quantization)) mp->quantization = vivid_quantization_cap(dev); } else { mp->quantization = vivid_quantization_cap(dev); } memset(mp->reserved, 0, sizeof(mp->reserved)); return 0; } int vivid_s_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct v4l2_pix_format_mplane *mp = &f->fmt.pix_mp; struct vivid_dev *dev = video_drvdata(file); struct v4l2_rect *crop = &dev->crop_cap; struct v4l2_rect *compose = &dev->compose_cap; struct vb2_queue *q = &dev->vb_vid_cap_q; int ret = vivid_try_fmt_vid_cap(file, priv, f); unsigned factor = 1; unsigned p; unsigned i; if (ret < 0) return ret; if (vb2_is_busy(q)) { dprintk(dev, 1, "%s device busy\n", __func__); return -EBUSY; } dev->fmt_cap = vivid_get_format(dev, mp->pixelformat); if (V4L2_FIELD_HAS_T_OR_B(mp->field)) factor = 2; /* Note: the webcam input doesn't support scaling, cropping or composing */ if (!vivid_is_webcam(dev) && (dev->has_scaler_cap || dev->has_crop_cap || dev->has_compose_cap)) { struct v4l2_rect r = { 0, 0, mp->width, mp->height }; if (dev->has_scaler_cap) { if (dev->has_compose_cap) v4l2_rect_map_inside(compose, &r); else *compose = r; if (dev->has_crop_cap 
&& !dev->has_compose_cap) { struct v4l2_rect min_r = { 0, 0, r.width / MAX_ZOOM, factor * r.height / MAX_ZOOM }; struct v4l2_rect max_r = { 0, 0, r.width * MAX_ZOOM, factor * r.height * MAX_ZOOM }; v4l2_rect_set_min_size(crop, &min_r); v4l2_rect_set_max_size(crop, &max_r); v4l2_rect_map_inside(crop, &dev->crop_bounds_cap); } else if (dev->has_crop_cap) { struct v4l2_rect min_r = { 0, 0, compose->width / MAX_ZOOM, factor * compose->height / MAX_ZOOM }; struct v4l2_rect max_r = { 0, 0, compose->width * MAX_ZOOM, factor * compose->height * MAX_ZOOM }; v4l2_rect_set_min_size(crop, &min_r); v4l2_rect_set_max_size(crop, &max_r); v4l2_rect_map_inside(crop, &dev->crop_bounds_cap); } } else if (dev->has_crop_cap && !dev->has_compose_cap) { r.height *= factor; v4l2_rect_set_size_to(crop, &r); v4l2_rect_map_inside(crop, &dev->crop_bounds_cap); r = *crop; r.height /= factor; v4l2_rect_set_size_to(compose, &r); } else if (!dev->has_crop_cap) { v4l2_rect_map_inside(compose, &r); } else { r.height *= factor; v4l2_rect_set_max_size(crop, &r); v4l2_rect_map_inside(crop, &dev->crop_bounds_cap); compose->top *= factor; compose->height *= factor; v4l2_rect_set_size_to(compose, crop); v4l2_rect_map_inside(compose, &r); compose->top /= factor; compose->height /= factor; } } else if (vivid_is_webcam(dev)) { unsigned int ival_sz = webcam_ival_count(dev, dev->webcam_size_idx); /* Guaranteed to be a match */ for (i = 0; i < ARRAY_SIZE(webcam_sizes); i++) if (webcam_sizes[i].width == mp->width && webcam_sizes[i].height == mp->height) break; dev->webcam_size_idx = i; if (dev->webcam_ival_idx >= ival_sz) dev->webcam_ival_idx = ival_sz - 1; vivid_update_format_cap(dev, false); } else { struct v4l2_rect r = { 0, 0, mp->width, mp->height }; v4l2_rect_set_size_to(compose, &r); r.height *= factor; v4l2_rect_set_size_to(crop, &r); } dev->fmt_cap_rect.width = mp->width; dev->fmt_cap_rect.height = mp->height; tpg_s_buf_height(&dev->tpg, mp->height); tpg_s_fourcc(&dev->tpg, dev->fmt_cap->fourcc); for (p = 0; p < tpg_g_buffers(&dev->tpg); p++) tpg_s_bytesperline(&dev->tpg, p, mp->plane_fmt[p].bytesperline); dev->field_cap = mp->field; if (dev->field_cap == V4L2_FIELD_ALTERNATE) tpg_s_field(&dev->tpg, V4L2_FIELD_TOP, true); else tpg_s_field(&dev->tpg, dev->field_cap, false); tpg_s_crop_compose(&dev->tpg, &dev->crop_cap, &dev->compose_cap); if (vivid_is_sdtv_cap(dev)) dev->tv_field_cap = mp->field; tpg_update_mv_step(&dev->tpg); dev->tpg.colorspace = mp->colorspace; dev->tpg.xfer_func = mp->xfer_func; if (dev->fmt_cap->color_enc == TGP_COLOR_ENC_YCBCR) dev->tpg.ycbcr_enc = mp->ycbcr_enc; else dev->tpg.hsv_enc = mp->hsv_enc; dev->tpg.quantization = mp->quantization; return 0; } int vidioc_g_fmt_vid_cap_mplane(struct file *file, void *priv, struct v4l2_format *f) { struct vivid_dev *dev = video_drvdata(file); if (!dev->multiplanar) return -ENOTTY; return vivid_g_fmt_vid_cap(file, priv, f); } int vidioc_try_fmt_vid_cap_mplane(struct file *file, void *priv, struct v4l2_format *f) { struct vivid_dev *dev = video_drvdata(file); if (!dev->multiplanar) return -ENOTTY; return vivid_try_fmt_vid_cap(file, priv, f); } int vidioc_s_fmt_vid_cap_mplane(struct file *file, void *priv, struct v4l2_format *f) { struct vivid_dev *dev = video_drvdata(file); if (!dev->multiplanar) return -ENOTTY; return vivid_s_fmt_vid_cap(file, priv, f); } int vidioc_g_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct vivid_dev *dev = video_drvdata(file); if (dev->multiplanar) return -ENOTTY; return fmt_sp2mp_func(file, priv, f, 
vivid_g_fmt_vid_cap); } int vidioc_try_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct vivid_dev *dev = video_drvdata(file); if (dev->multiplanar) return -ENOTTY; return fmt_sp2mp_func(file, priv, f, vivid_try_fmt_vid_cap); } int vidioc_s_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct vivid_dev *dev = video_drvdata(file); if (dev->multiplanar) return -ENOTTY; return fmt_sp2mp_func(file, priv, f, vivid_s_fmt_vid_cap); } int vivid_vid_cap_g_selection(struct file *file, void *priv, struct v4l2_selection *sel) { struct vivid_dev *dev = video_drvdata(file); if (!dev->has_crop_cap && !dev->has_compose_cap) return -ENOTTY; if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; if (vivid_is_webcam(dev)) return -ENODATA; sel->r.left = sel->r.top = 0; switch (sel->target) { case V4L2_SEL_TGT_CROP: if (!dev->has_crop_cap) return -EINVAL; sel->r = dev->crop_cap; break; case V4L2_SEL_TGT_CROP_DEFAULT: case V4L2_SEL_TGT_CROP_BOUNDS: if (!dev->has_crop_cap) return -EINVAL; sel->r = dev->src_rect; break; case V4L2_SEL_TGT_COMPOSE_BOUNDS: if (!dev->has_compose_cap) return -EINVAL; sel->r = vivid_max_rect; break; case V4L2_SEL_TGT_COMPOSE: if (!dev->has_compose_cap) return -EINVAL; sel->r = dev->compose_cap; break; case V4L2_SEL_TGT_COMPOSE_DEFAULT: if (!dev->has_compose_cap) return -EINVAL; sel->r = dev->fmt_cap_rect; break; default: return -EINVAL; } return 0; } int vivid_vid_cap_s_selection(struct file *file, void *fh, struct v4l2_selection *s) { struct vivid_dev *dev = video_drvdata(file); struct v4l2_rect *crop = &dev->crop_cap; struct v4l2_rect *compose = &dev->compose_cap; unsigned factor = V4L2_FIELD_HAS_T_OR_B(dev->field_cap) ? 2 : 1; int ret; if (!dev->has_crop_cap && !dev->has_compose_cap) return -ENOTTY; if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; if (vivid_is_webcam(dev)) return -ENODATA; switch (s->target) { case V4L2_SEL_TGT_CROP: if (!dev->has_crop_cap) return -EINVAL; ret = vivid_vid_adjust_sel(s->flags, &s->r); if (ret) return ret; v4l2_rect_set_min_size(&s->r, &vivid_min_rect); v4l2_rect_set_max_size(&s->r, &dev->src_rect); v4l2_rect_map_inside(&s->r, &dev->crop_bounds_cap); s->r.top /= factor; s->r.height /= factor; if (dev->has_scaler_cap) { struct v4l2_rect fmt = dev->fmt_cap_rect; struct v4l2_rect max_rect = { 0, 0, s->r.width * MAX_ZOOM, s->r.height * MAX_ZOOM }; struct v4l2_rect min_rect = { 0, 0, s->r.width / MAX_ZOOM, s->r.height / MAX_ZOOM }; v4l2_rect_set_min_size(&fmt, &min_rect); if (!dev->has_compose_cap) v4l2_rect_set_max_size(&fmt, &max_rect); if (!v4l2_rect_same_size(&dev->fmt_cap_rect, &fmt) && vb2_is_busy(&dev->vb_vid_cap_q)) return -EBUSY; if (dev->has_compose_cap) { v4l2_rect_set_min_size(compose, &min_rect); v4l2_rect_set_max_size(compose, &max_rect); } v4l2_rect_map_inside(compose, &fmt); dev->fmt_cap_rect = fmt; tpg_s_buf_height(&dev->tpg, fmt.height); } else if (dev->has_compose_cap) { struct v4l2_rect fmt = dev->fmt_cap_rect; v4l2_rect_set_min_size(&fmt, &s->r); if (!v4l2_rect_same_size(&dev->fmt_cap_rect, &fmt) && vb2_is_busy(&dev->vb_vid_cap_q)) return -EBUSY; dev->fmt_cap_rect = fmt; tpg_s_buf_height(&dev->tpg, fmt.height); v4l2_rect_set_size_to(compose, &s->r); v4l2_rect_map_inside(compose, &dev->fmt_cap_rect); } else { if (!v4l2_rect_same_size(&s->r, &dev->fmt_cap_rect) && vb2_is_busy(&dev->vb_vid_cap_q)) return -EBUSY; v4l2_rect_set_size_to(&dev->fmt_cap_rect, &s->r); v4l2_rect_set_size_to(compose, &s->r); v4l2_rect_map_inside(compose, &dev->fmt_cap_rect); 
tpg_s_buf_height(&dev->tpg, dev->fmt_cap_rect.height); } s->r.top *= factor; s->r.height *= factor; *crop = s->r; break; case V4L2_SEL_TGT_COMPOSE: if (!dev->has_compose_cap) return -EINVAL; ret = vivid_vid_adjust_sel(s->flags, &s->r); if (ret) return ret; v4l2_rect_set_min_size(&s->r, &vivid_min_rect); v4l2_rect_set_max_size(&s->r, &dev->fmt_cap_rect); if (dev->has_scaler_cap) { struct v4l2_rect max_rect = { 0, 0, dev->src_rect.width * MAX_ZOOM, (dev->src_rect.height / factor) * MAX_ZOOM }; v4l2_rect_set_max_size(&s->r, &max_rect); if (dev->has_crop_cap) { struct v4l2_rect min_rect = { 0, 0, s->r.width / MAX_ZOOM, (s->r.height * factor) / MAX_ZOOM }; struct v4l2_rect max_rect = { 0, 0, s->r.width * MAX_ZOOM, (s->r.height * factor) * MAX_ZOOM }; v4l2_rect_set_min_size(crop, &min_rect); v4l2_rect_set_max_size(crop, &max_rect); v4l2_rect_map_inside(crop, &dev->crop_bounds_cap); } } else if (dev->has_crop_cap) { s->r.top *= factor; s->r.height *= factor; v4l2_rect_set_max_size(&s->r, &dev->src_rect); v4l2_rect_set_size_to(crop, &s->r); v4l2_rect_map_inside(crop, &dev->crop_bounds_cap); s->r.top /= factor; s->r.height /= factor; } else { v4l2_rect_set_size_to(&s->r, &dev->src_rect); s->r.height /= factor; } v4l2_rect_map_inside(&s->r, &dev->fmt_cap_rect); *compose = s->r; break; default: return -EINVAL; } tpg_s_crop_compose(&dev->tpg, crop, compose); return 0; } int vivid_vid_cap_g_pixelaspect(struct file *file, void *priv, int type, struct v4l2_fract *f) { struct vivid_dev *dev = video_drvdata(file); if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; switch (vivid_get_pixel_aspect(dev)) { case TPG_PIXEL_ASPECT_NTSC: f->numerator = 11; f->denominator = 10; break; case TPG_PIXEL_ASPECT_PAL: f->numerator = 54; f->denominator = 59; break; default: break; } return 0; } static const struct v4l2_audio vivid_audio_inputs[] = { { 0, "TV", V4L2_AUDCAP_STEREO }, { 1, "Line-In", V4L2_AUDCAP_STEREO }, }; int vidioc_enum_input(struct file *file, void *priv, struct v4l2_input *inp) { struct vivid_dev *dev = video_drvdata(file); if (inp->index >= dev->num_inputs) return -EINVAL; inp->type = V4L2_INPUT_TYPE_CAMERA; switch (dev->input_type[inp->index]) { case WEBCAM: snprintf(inp->name, sizeof(inp->name), "Webcam %03u-%u", dev->inst, dev->input_name_counter[inp->index]); inp->capabilities = 0; break; case TV: snprintf(inp->name, sizeof(inp->name), "TV %03u-%u", dev->inst, dev->input_name_counter[inp->index]); inp->type = V4L2_INPUT_TYPE_TUNER; inp->std = V4L2_STD_ALL; if (dev->has_audio_inputs) inp->audioset = (1 << ARRAY_SIZE(vivid_audio_inputs)) - 1; inp->capabilities = V4L2_IN_CAP_STD; break; case SVID: snprintf(inp->name, sizeof(inp->name), "S-Video %03u-%u", dev->inst, dev->input_name_counter[inp->index]); inp->std = V4L2_STD_ALL; if (dev->has_audio_inputs) inp->audioset = (1 << ARRAY_SIZE(vivid_audio_inputs)) - 1; inp->capabilities = V4L2_IN_CAP_STD; break; case HDMI: snprintf(inp->name, sizeof(inp->name), "HDMI %03u-%u", dev->inst, dev->input_name_counter[inp->index]); inp->capabilities = V4L2_IN_CAP_DV_TIMINGS; if (dev->edid_blocks == 0 || dev->dv_timings_signal_mode[dev->input] == NO_SIGNAL) inp->status |= V4L2_IN_ST_NO_SIGNAL; else if (dev->dv_timings_signal_mode[dev->input] == NO_LOCK || dev->dv_timings_signal_mode[dev->input] == OUT_OF_RANGE) inp->status |= V4L2_IN_ST_NO_H_LOCK; break; } if (dev->sensor_hflip) inp->status |= V4L2_IN_ST_HFLIP; if (dev->sensor_vflip) inp->status |= V4L2_IN_ST_VFLIP; if (dev->input == inp->index && vivid_is_sdtv_cap(dev)) { if (dev->std_signal_mode[dev->input] 
== NO_SIGNAL) { inp->status |= V4L2_IN_ST_NO_SIGNAL; } else if (dev->std_signal_mode[dev->input] == NO_LOCK) { inp->status |= V4L2_IN_ST_NO_H_LOCK; } else if (vivid_is_tv_cap(dev)) { switch (tpg_g_quality(&dev->tpg)) { case TPG_QUAL_GRAY: inp->status |= V4L2_IN_ST_COLOR_KILL; break; case TPG_QUAL_NOISE: inp->status |= V4L2_IN_ST_NO_H_LOCK; break; default: break; } } } return 0; } int vidioc_g_input(struct file *file, void *priv, unsigned *i) { struct vivid_dev *dev = video_drvdata(file); *i = dev->input; return 0; } int vidioc_s_input(struct file *file, void *priv, unsigned i) { struct vivid_dev *dev = video_drvdata(file); struct v4l2_bt_timings *bt = &dev->dv_timings_cap[dev->input].bt; unsigned brightness; if (i >= dev->num_inputs) return -EINVAL; if (i == dev->input) return 0; if (vb2_is_busy(&dev->vb_vid_cap_q) || vb2_is_busy(&dev->vb_vbi_cap_q) || vb2_is_busy(&dev->vb_meta_cap_q)) return -EBUSY; dev->input = i; dev->vid_cap_dev.tvnorms = 0; if (dev->input_type[i] == TV || dev->input_type[i] == SVID) { dev->tv_audio_input = (dev->input_type[i] == TV) ? 0 : 1; dev->vid_cap_dev.tvnorms = V4L2_STD_ALL; } dev->vbi_cap_dev.tvnorms = dev->vid_cap_dev.tvnorms; dev->meta_cap_dev.tvnorms = dev->vid_cap_dev.tvnorms; vivid_update_format_cap(dev, false); if (dev->colorspace) { switch (dev->input_type[i]) { case WEBCAM: v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_SRGB); break; case TV: case SVID: v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_170M); break; case HDMI: if (bt->flags & V4L2_DV_FL_IS_CE_VIDEO) { if (dev->src_rect.width == 720 && dev->src_rect.height <= 576) v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_170M); else v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_709); } else { v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_SRGB); } break; } } /* * Modify the brightness range depending on the input. * This makes it easy to use vivid to test if applications can * handle control range modifications and is also how this is * typically used in practice as different inputs may be hooked * up to different receivers with different control ranges. */ brightness = 128 * i + dev->input_brightness[i]; v4l2_ctrl_modify_range(dev->brightness, 128 * i, 255 + 128 * i, 1, 128 + 128 * i); v4l2_ctrl_s_ctrl(dev->brightness, brightness); /* Restore per-input states. 
*/ v4l2_ctrl_activate(dev->ctrl_dv_timings_signal_mode, vivid_is_hdmi_cap(dev)); v4l2_ctrl_activate(dev->ctrl_dv_timings, vivid_is_hdmi_cap(dev) && dev->dv_timings_signal_mode[dev->input] == SELECTED_DV_TIMINGS); v4l2_ctrl_activate(dev->ctrl_std_signal_mode, vivid_is_sdtv_cap(dev)); v4l2_ctrl_activate(dev->ctrl_standard, vivid_is_sdtv_cap(dev) && dev->std_signal_mode[dev->input]); if (vivid_is_hdmi_cap(dev)) { v4l2_ctrl_s_ctrl(dev->ctrl_dv_timings_signal_mode, dev->dv_timings_signal_mode[dev->input]); v4l2_ctrl_s_ctrl(dev->ctrl_dv_timings, dev->query_dv_timings[dev->input]); } else if (vivid_is_sdtv_cap(dev)) { v4l2_ctrl_s_ctrl(dev->ctrl_std_signal_mode, dev->std_signal_mode[dev->input]); v4l2_ctrl_s_ctrl(dev->ctrl_standard, dev->std_signal_mode[dev->input]); } return 0; } int vidioc_enumaudio(struct file *file, void *fh, struct v4l2_audio *vin) { if (vin->index >= ARRAY_SIZE(vivid_audio_inputs)) return -EINVAL; *vin = vivid_audio_inputs[vin->index]; return 0; } int vidioc_g_audio(struct file *file, void *fh, struct v4l2_audio *vin) { struct vivid_dev *dev = video_drvdata(file); if (!vivid_is_sdtv_cap(dev)) return -EINVAL; *vin = vivid_audio_inputs[dev->tv_audio_input]; return 0; } int vidioc_s_audio(struct file *file, void *fh, const struct v4l2_audio *vin) { struct vivid_dev *dev = video_drvdata(file); if (!vivid_is_sdtv_cap(dev)) return -EINVAL; if (vin->index >= ARRAY_SIZE(vivid_audio_inputs)) return -EINVAL; dev->tv_audio_input = vin->index; return 0; } int vivid_video_g_frequency(struct file *file, void *fh, struct v4l2_frequency *vf) { struct vivid_dev *dev = video_drvdata(file); if (vf->tuner != 0) return -EINVAL; vf->frequency = dev->tv_freq; return 0; } int vivid_video_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *vf) { struct vivid_dev *dev = video_drvdata(file); if (vf->tuner != 0) return -EINVAL; dev->tv_freq = clamp_t(unsigned, vf->frequency, MIN_TV_FREQ, MAX_TV_FREQ); if (vivid_is_tv_cap(dev)) vivid_update_quality(dev); return 0; } int vivid_video_s_tuner(struct file *file, void *fh, const struct v4l2_tuner *vt) { struct vivid_dev *dev = video_drvdata(file); if (vt->index != 0) return -EINVAL; if (vt->audmode > V4L2_TUNER_MODE_LANG1_LANG2) return -EINVAL; dev->tv_audmode = vt->audmode; return 0; } int vivid_video_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt) { struct vivid_dev *dev = video_drvdata(file); enum tpg_quality qual; if (vt->index != 0) return -EINVAL; vt->capability = V4L2_TUNER_CAP_NORM | V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_LANG1 | V4L2_TUNER_CAP_LANG2; vt->audmode = dev->tv_audmode; vt->rangelow = MIN_TV_FREQ; vt->rangehigh = MAX_TV_FREQ; qual = vivid_get_quality(dev, &vt->afc); if (qual == TPG_QUAL_COLOR) vt->signal = 0xffff; else if (qual == TPG_QUAL_GRAY) vt->signal = 0x8000; else vt->signal = 0; if (qual == TPG_QUAL_NOISE) { vt->rxsubchans = 0; } else if (qual == TPG_QUAL_GRAY) { vt->rxsubchans = V4L2_TUNER_SUB_MONO; } else { unsigned int channel_nr = dev->tv_freq / (6 * 16); unsigned int options = (dev->std_cap[dev->input] & V4L2_STD_NTSC_M) ? 
4 : 3; switch (channel_nr % options) { case 0: vt->rxsubchans = V4L2_TUNER_SUB_MONO; break; case 1: vt->rxsubchans = V4L2_TUNER_SUB_STEREO; break; case 2: if (dev->std_cap[dev->input] & V4L2_STD_NTSC_M) vt->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_SAP; else vt->rxsubchans = V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2; break; case 3: vt->rxsubchans = V4L2_TUNER_SUB_STEREO | V4L2_TUNER_SUB_SAP; break; } } strscpy(vt->name, "TV Tuner", sizeof(vt->name)); return 0; } /* Must remain in sync with the vivid_ctrl_standard_strings array */ const v4l2_std_id vivid_standard[] = { V4L2_STD_NTSC_M, V4L2_STD_NTSC_M_JP, V4L2_STD_NTSC_M_KR, V4L2_STD_NTSC_443, V4L2_STD_PAL_BG | V4L2_STD_PAL_H, V4L2_STD_PAL_I, V4L2_STD_PAL_DK, V4L2_STD_PAL_M, V4L2_STD_PAL_N, V4L2_STD_PAL_Nc, V4L2_STD_PAL_60, V4L2_STD_SECAM_B | V4L2_STD_SECAM_G | V4L2_STD_SECAM_H, V4L2_STD_SECAM_DK, V4L2_STD_SECAM_L, V4L2_STD_SECAM_LC, V4L2_STD_UNKNOWN }; /* Must remain in sync with the vivid_standard array */ const char * const vivid_ctrl_standard_strings[] = { "NTSC-M", "NTSC-M-JP", "NTSC-M-KR", "NTSC-443", "PAL-BGH", "PAL-I", "PAL-DK", "PAL-M", "PAL-N", "PAL-Nc", "PAL-60", "SECAM-BGH", "SECAM-DK", "SECAM-L", "SECAM-Lc", NULL, }; int vidioc_querystd(struct file *file, void *priv, v4l2_std_id *id) { struct vivid_dev *dev = video_drvdata(file); unsigned int last = dev->query_std_last[dev->input]; if (!vivid_is_sdtv_cap(dev)) return -ENODATA; if (dev->std_signal_mode[dev->input] == NO_SIGNAL || dev->std_signal_mode[dev->input] == NO_LOCK) { *id = V4L2_STD_UNKNOWN; return 0; } if (vivid_is_tv_cap(dev) && tpg_g_quality(&dev->tpg) == TPG_QUAL_NOISE) { *id = V4L2_STD_UNKNOWN; } else if (dev->std_signal_mode[dev->input] == CURRENT_STD) { *id = dev->std_cap[dev->input]; } else if (dev->std_signal_mode[dev->input] == SELECTED_STD) { *id = dev->query_std[dev->input]; } else { *id = vivid_standard[last]; dev->query_std_last[dev->input] = (last + 1) % ARRAY_SIZE(vivid_standard); } return 0; } int vivid_vid_cap_s_std(struct file *file, void *priv, v4l2_std_id id) { struct vivid_dev *dev = video_drvdata(file); if (!vivid_is_sdtv_cap(dev)) return -ENODATA; if (dev->std_cap[dev->input] == id) return 0; if (vb2_is_busy(&dev->vb_vid_cap_q) || vb2_is_busy(&dev->vb_vbi_cap_q)) return -EBUSY; dev->std_cap[dev->input] = id; vivid_update_format_cap(dev, false); return 0; } static void find_aspect_ratio(u32 width, u32 height, u32 *num, u32 *denom) { if (!(height % 3) && ((height * 4 / 3) == width)) { *num = 4; *denom = 3; } else if (!(height % 9) && ((height * 16 / 9) == width)) { *num = 16; *denom = 9; } else if (!(height % 10) && ((height * 16 / 10) == width)) { *num = 16; *denom = 10; } else if (!(height % 4) && ((height * 5 / 4) == width)) { *num = 5; *denom = 4; } else if (!(height % 9) && ((height * 15 / 9) == width)) { *num = 15; *denom = 9; } else { /* default to 16:9 */ *num = 16; *denom = 9; } } static bool valid_cvt_gtf_timings(struct v4l2_dv_timings *timings) { struct v4l2_bt_timings *bt = &timings->bt; u32 total_h_pixel; u32 total_v_lines; u32 h_freq; if (!v4l2_valid_dv_timings(timings, &vivid_dv_timings_cap, NULL, NULL)) return false; total_h_pixel = V4L2_DV_BT_FRAME_WIDTH(bt); total_v_lines = V4L2_DV_BT_FRAME_HEIGHT(bt); h_freq = (u32)bt->pixelclock / total_h_pixel; if (bt->standards == 0 || (bt->standards & V4L2_DV_BT_STD_CVT)) { struct v4l2_dv_timings cvt = {}; if (v4l2_detect_cvt(total_v_lines, h_freq, bt->vsync, bt->width, bt->polarities, bt->interlaced, &vivid_dv_timings_cap, &cvt) && cvt.bt.width == bt->width && cvt.bt.height == 
bt->height) { *timings = cvt; return true; } } if (bt->standards == 0 || (bt->standards & V4L2_DV_BT_STD_GTF)) { struct v4l2_dv_timings gtf = {}; struct v4l2_fract aspect_ratio; find_aspect_ratio(bt->width, bt->height, &aspect_ratio.numerator, &aspect_ratio.denominator); if (v4l2_detect_gtf(total_v_lines, h_freq, bt->vsync, bt->polarities, bt->interlaced, aspect_ratio, &vivid_dv_timings_cap, &gtf) && gtf.bt.width == bt->width && gtf.bt.height == bt->height) { *timings = gtf; return true; } } return false; } int vivid_vid_cap_s_dv_timings(struct file *file, void *_fh, struct v4l2_dv_timings *timings) { struct vivid_dev *dev = video_drvdata(file); if (!vivid_is_hdmi_cap(dev)) return -ENODATA; if (!v4l2_find_dv_timings_cap(timings, &vivid_dv_timings_cap, 0, NULL, NULL) && !valid_cvt_gtf_timings(timings)) return -EINVAL; if (v4l2_match_dv_timings(timings, &dev->dv_timings_cap[dev->input], 0, false)) return 0; if (vb2_is_busy(&dev->vb_vid_cap_q)) return -EBUSY; dev->dv_timings_cap[dev->input] = *timings; vivid_update_format_cap(dev, false); return 0; } int vidioc_query_dv_timings(struct file *file, void *_fh, struct v4l2_dv_timings *timings) { struct vivid_dev *dev = video_drvdata(file); unsigned int input = dev->input; unsigned int last = dev->query_dv_timings_last[input]; if (!vivid_is_hdmi_cap(dev)) return -ENODATA; if (dev->dv_timings_signal_mode[input] == NO_SIGNAL || dev->edid_blocks == 0) return -ENOLINK; if (dev->dv_timings_signal_mode[input] == NO_LOCK) return -ENOLCK; if (dev->dv_timings_signal_mode[input] == OUT_OF_RANGE) { timings->bt.pixelclock = vivid_dv_timings_cap.bt.max_pixelclock * 2; return -ERANGE; } if (dev->dv_timings_signal_mode[input] == CURRENT_DV_TIMINGS) { *timings = dev->dv_timings_cap[input]; } else if (dev->dv_timings_signal_mode[input] == SELECTED_DV_TIMINGS) { *timings = v4l2_dv_timings_presets[dev->query_dv_timings[input]]; } else { *timings = v4l2_dv_timings_presets[last]; dev->query_dv_timings_last[input] = (last + 1) % dev->query_dv_timings_size; } return 0; } void vivid_update_outputs(struct vivid_dev *dev) { u32 edid_present = 0; if (!dev || !dev->num_outputs) return; for (unsigned int i = 0, j = 0; i < dev->num_outputs; i++) { if (dev->output_type[i] != HDMI) continue; struct vivid_dev *dev_rx = dev->output_to_input_instance[i]; if (dev_rx && dev_rx->edid_blocks) edid_present |= 1 << j; j++; } v4l2_ctrl_s_ctrl(dev->ctrl_tx_edid_present, edid_present); v4l2_ctrl_s_ctrl(dev->ctrl_tx_hotplug, edid_present); v4l2_ctrl_s_ctrl(dev->ctrl_tx_rxsense, edid_present); } void vivid_update_connected_outputs(struct vivid_dev *dev) { u16 phys_addr = cec_get_edid_phys_addr(dev->edid, dev->edid_blocks * 128, NULL); for (unsigned int i = 0, j = 0; i < dev->num_inputs; i++) { unsigned int menu_idx = dev->input_is_connected_to_output[i]; if (dev->input_type[i] != HDMI) continue; j++; if (menu_idx < FIXED_MENU_ITEMS) continue; struct vivid_dev *dev_tx = vivid_ctrl_hdmi_to_output_instance[menu_idx]; unsigned int output = vivid_ctrl_hdmi_to_output_index[menu_idx]; if (!dev_tx) continue; unsigned int hdmi_output = dev_tx->output_to_iface_index[output]; vivid_update_outputs(dev_tx); if (dev->edid_blocks) { cec_s_phys_addr(dev_tx->cec_tx_adap[hdmi_output], v4l2_phys_addr_for_input(phys_addr, j), false); } else { cec_phys_addr_invalidate(dev_tx->cec_tx_adap[hdmi_output]); } } } int vidioc_s_edid(struct file *file, void *_fh, struct v4l2_edid *edid) { struct vivid_dev *dev = video_drvdata(file); u16 phys_addr; int ret; memset(edid->reserved, 0, sizeof(edid->reserved)); if (edid->pad 
>= dev->num_inputs) return -EINVAL; if (dev->input_type[edid->pad] != HDMI || edid->start_block) return -EINVAL; if (edid->blocks == 0) { if (vb2_is_busy(&dev->vb_vid_cap_q)) return -EBUSY; dev->edid_blocks = 0; vivid_update_connected_outputs(dev); return 0; } if (edid->blocks > dev->edid_max_blocks) { edid->blocks = dev->edid_max_blocks; return -E2BIG; } phys_addr = cec_get_edid_phys_addr(edid->edid, edid->blocks * 128, NULL); ret = v4l2_phys_addr_validate(phys_addr, &phys_addr, NULL); if (ret) return ret; if (vb2_is_busy(&dev->vb_vid_cap_q)) return -EBUSY; dev->edid_blocks = edid->blocks; memcpy(dev->edid, edid->edid, edid->blocks * 128); vivid_update_connected_outputs(dev); return 0; } int vidioc_enum_framesizes(struct file *file, void *fh, struct v4l2_frmsizeenum *fsize) { struct vivid_dev *dev = video_drvdata(file); if (!vivid_is_webcam(dev) && !dev->has_scaler_cap) return -EINVAL; if (vivid_get_format(dev, fsize->pixel_format) == NULL) return -EINVAL; if (vivid_is_webcam(dev)) { if (fsize->index >= ARRAY_SIZE(webcam_sizes)) return -EINVAL; fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE; fsize->discrete = webcam_sizes[fsize->index]; return 0; } if (fsize->index) return -EINVAL; fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE; fsize->stepwise.min_width = MIN_WIDTH; fsize->stepwise.max_width = MAX_WIDTH * MAX_ZOOM; fsize->stepwise.step_width = 2; fsize->stepwise.min_height = MIN_HEIGHT; fsize->stepwise.max_height = MAX_HEIGHT * MAX_ZOOM; fsize->stepwise.step_height = 2; return 0; } /* timeperframe is arbitrary and continuous */ int vidioc_enum_frameintervals(struct file *file, void *priv, struct v4l2_frmivalenum *fival) { struct vivid_dev *dev = video_drvdata(file); const struct vivid_fmt *fmt; int i; fmt = vivid_get_format(dev, fival->pixel_format); if (!fmt) return -EINVAL; if (!vivid_is_webcam(dev)) { if (fival->index) return -EINVAL; if (fival->width < MIN_WIDTH || fival->width > MAX_WIDTH * MAX_ZOOM) return -EINVAL; if (fival->height < MIN_HEIGHT || fival->height > MAX_HEIGHT * MAX_ZOOM) return -EINVAL; fival->type = V4L2_FRMIVAL_TYPE_DISCRETE; fival->discrete = dev->timeperframe_vid_cap; return 0; } for (i = 0; i < ARRAY_SIZE(webcam_sizes); i++) if (fival->width == webcam_sizes[i].width && fival->height == webcam_sizes[i].height) break; if (i == ARRAY_SIZE(webcam_sizes)) return -EINVAL; if (fival->index >= webcam_ival_count(dev, i)) return -EINVAL; fival->type = V4L2_FRMIVAL_TYPE_DISCRETE; fival->discrete = webcam_intervals[fival->index]; return 0; } int vivid_vid_cap_g_parm(struct file *file, void *priv, struct v4l2_streamparm *parm) { struct vivid_dev *dev = video_drvdata(file); if (parm->type != (dev->multiplanar ? V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE : V4L2_BUF_TYPE_VIDEO_CAPTURE)) return -EINVAL; parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME; parm->parm.capture.timeperframe = dev->timeperframe_vid_cap; parm->parm.capture.readbuffers = 1; return 0; } int vivid_vid_cap_s_parm(struct file *file, void *priv, struct v4l2_streamparm *parm) { struct vivid_dev *dev = video_drvdata(file); unsigned int ival_sz = webcam_ival_count(dev, dev->webcam_size_idx); struct v4l2_fract tpf; unsigned i; if (parm->type != (dev->multiplanar ? 
V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE : V4L2_BUF_TYPE_VIDEO_CAPTURE)) return -EINVAL; if (!vivid_is_webcam(dev)) return vivid_vid_cap_g_parm(file, priv, parm); tpf = parm->parm.capture.timeperframe; if (tpf.denominator == 0) tpf = webcam_intervals[ival_sz - 1]; for (i = 0; i < ival_sz; i++) if (V4L2_FRACT_COMPARE(tpf, >=, webcam_intervals[i])) break; if (i == ival_sz) i = ival_sz - 1; dev->webcam_ival_idx = i; tpf = webcam_intervals[dev->webcam_ival_idx]; /* resync the thread's timings */ dev->cap_seq_resync = true; dev->timeperframe_vid_cap = tpf; parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME; parm->parm.capture.timeperframe = tpf; parm->parm.capture.readbuffers = 1; return 0; }
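The vivid_vid_cap_s_parm() handler above only honours time-per-frame changes for the webcam input, snapping the request to one of the entries in webcam_intervals[] (further limited by webcam_ival_count() for high resolutions). As a rough illustration only, the following userspace sketch exercises that path through the standard VIDIOC_S_PARM ioctl; it assumes a single-planar vivid capture instance at /dev/video0 with the webcam input selected.

/* Userspace sketch (not part of the driver): request ~30 fps from a
 * vivid capture node and print what the driver actually granted.
 * Assumes /dev/video0 is a single-planar vivid capture device.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_streamparm parm;
	int fd = open("/dev/video0", O_RDWR);

	if (fd < 0)
		return 1;

	memset(&parm, 0, sizeof(parm));
	parm.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	/* Ask for 1/30 s per frame; the driver clamps to a supported interval. */
	parm.parm.capture.timeperframe.numerator = 1;
	parm.parm.capture.timeperframe.denominator = 30;

	if (ioctl(fd, VIDIOC_S_PARM, &parm) == 0)
		printf("granted %u/%u s per frame\n",
		       parm.parm.capture.timeperframe.numerator,
		       parm.parm.capture.timeperframe.denominator);

	close(fd);
	return 0;
}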
// SPDX-License-Identifier: GPL-2.0-only
/*
 * "security" table for IPv6
 *
 * This is for use by Mandatory Access Control (MAC) security models,
 * which need to be able to manage security policy in separate context
 * to DAC.
 *
 * Based on iptable_mangle.c
 *
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2004 Netfilter Core Team <coreteam <at> netfilter.org>
 * Copyright (C) 2008 Red Hat, Inc., James Morris <jmorris <at> redhat.com>
 */
#include <linux/module.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <linux/slab.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("James Morris <jmorris <at> redhat.com>");
MODULE_DESCRIPTION("ip6tables security table, for MAC rules");

#define SECURITY_VALID_HOOKS	(1 << NF_INET_LOCAL_IN) | \
				(1 << NF_INET_FORWARD) | \
				(1 << NF_INET_LOCAL_OUT)

static const struct xt_table security_table = {
	.name		= "security",
	.valid_hooks	= SECURITY_VALID_HOOKS,
	.me		= THIS_MODULE,
	.af		= NFPROTO_IPV6,
	.priority	= NF_IP6_PRI_SECURITY,
};

static struct nf_hook_ops *sectbl_ops __read_mostly;

static int ip6table_security_table_init(struct net *net)
{
	struct ip6t_replace *repl;
	int ret;

	repl = ip6t_alloc_initial_table(&security_table);
	if (repl == NULL)
		return -ENOMEM;
	ret = ip6t_register_table(net, &security_table, repl, sectbl_ops);
	kfree(repl);
	return ret;
}

static void __net_exit ip6table_security_net_pre_exit(struct net *net)
{
	ip6t_unregister_table_pre_exit(net, "security");
}

static void __net_exit ip6table_security_net_exit(struct net *net)
{
	ip6t_unregister_table_exit(net, "security");
}

static struct pernet_operations ip6table_security_net_ops = {
	.pre_exit = ip6table_security_net_pre_exit,
	.exit = ip6table_security_net_exit,
};

static int __init ip6table_security_init(void)
{
	int ret = xt_register_template(&security_table,
				       ip6table_security_table_init);

	if (ret < 0)
		return ret;

	sectbl_ops = xt_hook_ops_alloc(&security_table, ip6t_do_table);
	if (IS_ERR(sectbl_ops)) {
		xt_unregister_template(&security_table);
		return PTR_ERR(sectbl_ops);
	}

	ret = register_pernet_subsys(&ip6table_security_net_ops);
	if (ret < 0) {
		kfree(sectbl_ops);
		xt_unregister_template(&security_table);
		return ret;
	}

	return ret;
}

static void __exit ip6table_security_fini(void)
{
	unregister_pernet_subsys(&ip6table_security_net_ops);
	xt_unregister_template(&security_table);
	kfree(sectbl_ops);
}

module_init(ip6table_security_init);
module_exit(ip6table_security_fini);
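As a side note on the hook mask used above, here is a short worked expansion, assuming the standard enum nf_inet_hooks numbering from <linux/netfilter.h> (LOCAL_IN, FORWARD and LOCAL_OUT are hooks 1, 2 and 3).

/*
 * Worked example of SECURITY_VALID_HOOKS (assumes NF_INET_LOCAL_IN = 1,
 * NF_INET_FORWARD = 2, NF_INET_LOCAL_OUT = 3 as in <linux/netfilter.h>):
 *
 *   (1 << 1) | (1 << 2) | (1 << 3) = 0x02 | 0x04 | 0x08 = 0x0e
 *
 * so the security table only hooks locally delivered, forwarded and
 * locally generated packets; PRE_ROUTING and POST_ROUTING are excluded.
 * The macro body is an unparenthesised OR-expression, which is harmless
 * here because it is only ever used as the .valid_hooks initialiser.
 */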
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 * Copyright Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
 * Copyright Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
 * Copyright Darryl Miles G7LED (dlm@g7led.demon.co.uk)
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/stat.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/uaccess.h>
#include <linux/fcntl.h>
#include <linux/termios.h>	/* For TIOCINQ/OUTQ */
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <net/netrom.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/ip.h>
#include <net/tcp_states.h>
#include <net/arp.h>
#include <linux/init.h>

static int nr_ndevs = 4;

int sysctl_netrom_default_path_quality = NR_DEFAULT_QUAL;
int sysctl_netrom_obsolescence_count_initialiser = NR_DEFAULT_OBS;
int sysctl_netrom_network_ttl_initialiser = NR_DEFAULT_TTL;
int sysctl_netrom_transport_timeout = NR_DEFAULT_T1;
int sysctl_netrom_transport_maximum_tries = NR_DEFAULT_N2;
int sysctl_netrom_transport_acknowledge_delay = NR_DEFAULT_T2;
int sysctl_netrom_transport_busy_delay = NR_DEFAULT_T4;
int sysctl_netrom_transport_requested_window_si