// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/act_sample.c - Packet sampling tc action
 * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <net/net_namespace.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <linux/tc_act/tc_sample.h>
#include <net/tc_act/tc_sample.h>
#include <net/psample.h>
#include <net/pkt_cls.h>
#include <net/tc_wrapper.h>

#include <linux/if_arp.h>

static struct tc_action_ops act_sample_ops;

static const struct nla_policy sample_policy[TCA_SAMPLE_MAX + 1] = {
	[TCA_SAMPLE_PARMS]		= { .len = sizeof(struct tc_sample) },
	[TCA_SAMPLE_RATE]		= { .type = NLA_U32 },
	[TCA_SAMPLE_TRUNC_SIZE]		= { .type = NLA_U32 },
	[TCA_SAMPLE_PSAMPLE_GROUP]	= { .type = NLA_U32 },
};

static int tcf_sample_init(struct net *net, struct nlattr *nla,
			   struct nlattr *est, struct tc_action **a,
			   struct tcf_proto *tp,
			   u32 flags, struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, act_sample_ops.net_id);
	bool bind = flags & TCA_ACT_FLAGS_BIND;
	struct nlattr *tb[TCA_SAMPLE_MAX + 1];
	struct psample_group *psample_group;
	u32 psample_group_num, rate, index;
	struct tcf_chain *goto_ch = NULL;
	struct tc_sample *parm;
	struct tcf_sample *s;
	bool exists = false;
	int ret, err;

	if (!nla)
		return -EINVAL;
	ret = nla_parse_nested_deprecated(tb, TCA_SAMPLE_MAX, nla,
					  sample_policy, NULL);
	if (ret < 0)
		return ret;

	if (!tb[TCA_SAMPLE_PARMS])
		return -EINVAL;

	parm = nla_data(tb[TCA_SAMPLE_PARMS]);
	index = parm->index;
	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (err < 0)
		return err;
	exists = err;
	if (exists && bind)
		return ACT_P_BOUND;

	if (!exists) {
		ret = tcf_idr_create(tn, index, est, a,
				     &act_sample_ops, bind, true, flags);
		if (ret) {
			tcf_idr_cleanup(tn, index);
			return ret;
		}
		ret = ACT_P_CREATED;
	} else if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
		tcf_idr_release(*a, bind);
		return -EEXIST;
	}

	if (!tb[TCA_SAMPLE_RATE] || !tb[TCA_SAMPLE_PSAMPLE_GROUP]) {
		NL_SET_ERR_MSG(extack, "sample rate and group are required");
		err = -EINVAL;
		goto release_idr;
	}

	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto release_idr;

	rate = nla_get_u32(tb[TCA_SAMPLE_RATE]);
	if (!rate) {
		NL_SET_ERR_MSG(extack, "invalid sample rate");
		err = -EINVAL;
		goto put_chain;
	}
	psample_group_num = nla_get_u32(tb[TCA_SAMPLE_PSAMPLE_GROUP]);
	psample_group = psample_group_get(net, psample_group_num);
	if (!psample_group) {
		err = -ENOMEM;
		goto put_chain;
	}

	s = to_sample(*a);

	spin_lock_bh(&s->tcf_lock);
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	s->rate = rate;
	s->psample_group_num = psample_group_num;
	psample_group = rcu_replace_pointer(s->psample_group, psample_group,
					    lockdep_is_held(&s->tcf_lock));

	if (tb[TCA_SAMPLE_TRUNC_SIZE]) {
		s->truncate = true;
		s->trunc_size = nla_get_u32(tb[TCA_SAMPLE_TRUNC_SIZE]);
	}
	spin_unlock_bh(&s->tcf_lock);

	if (psample_group)
		psample_group_put(psample_group);
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);

	return ret;
put_chain:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
release_idr:
	tcf_idr_release(*a, bind);
	return err;
}

static void tcf_sample_cleanup(struct tc_action *a)
{
	struct tcf_sample *s = to_sample(a);
	struct psample_group *psample_group;

	/* last reference to action, no need to lock */
	psample_group = rcu_dereference_protected(s->psample_group, 1);
	RCU_INIT_POINTER(s->psample_group, NULL);
	if (psample_group)
		psample_group_put(psample_group);
}

static bool tcf_sample_dev_ok_push(struct net_device *dev)
{
	switch (dev->type) {
	case ARPHRD_TUNNEL:
	case ARPHRD_TUNNEL6:
	case ARPHRD_SIT:
	case ARPHRD_IPGRE:
	case ARPHRD_IP6GRE:
	case ARPHRD_VOID:
	case ARPHRD_NONE:
		return false;
	default:
		return true;
	}
}

TC_INDIRECT_SCOPE int tcf_sample_act(struct sk_buff *skb,
				     const struct tc_action *a,
				     struct tcf_result *res)
{
	struct tcf_sample *s = to_sample(a);
	struct psample_group *psample_group;
	u8 cookie_data[TC_COOKIE_MAX_SIZE];
	struct psample_metadata md = {};
	struct tc_cookie *user_cookie;
	int retval;

	tcf_lastuse_update(&s->tcf_tm);
	bstats_update(this_cpu_ptr(s->common.cpu_bstats), skb);
	retval = READ_ONCE(s->tcf_action);

	psample_group = rcu_dereference_bh(s->psample_group);

	/* randomly sample packets according to rate */
	if (psample_group && (get_random_u32_below(s->rate) == 0)) {
		if (!skb_at_tc_ingress(skb)) {
			md.in_ifindex = skb->skb_iif;
			md.out_ifindex = skb->dev->ifindex;
		} else {
			md.in_ifindex = skb->dev->ifindex;
		}

		/* on ingress, the mac header gets popped, so push it back */
		if (skb_at_tc_ingress(skb) && tcf_sample_dev_ok_push(skb->dev))
			skb_push(skb, skb->mac_len);

		rcu_read_lock();
		user_cookie = rcu_dereference(a->user_cookie);
		if (user_cookie) {
			memcpy(cookie_data, user_cookie->data,
			       user_cookie->len);
			md.user_cookie = cookie_data;
			md.user_cookie_len = user_cookie->len;
		}
		rcu_read_unlock();

		md.trunc_size = s->truncate ? s->trunc_size : skb->len;
		psample_sample_packet(psample_group, skb, s->rate, &md);

		if (skb_at_tc_ingress(skb) && tcf_sample_dev_ok_push(skb->dev))
			skb_pull(skb, skb->mac_len);
	}

	return retval;
}

static void tcf_sample_stats_update(struct tc_action *a, u64 bytes, u64 packets,
				    u64 drops, u64 lastuse, bool hw)
{
	struct tcf_sample *s = to_sample(a);
	struct tcf_t *tm = &s->tcf_tm;

	tcf_action_update_stats(a, bytes, packets, drops, hw);
	tm->lastuse = max_t(u64, tm->lastuse, lastuse);
}

static int tcf_sample_dump(struct sk_buff *skb, struct tc_action *a,
			   int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_sample *s = to_sample(a);
	struct tc_sample opt = {
		.index   = s->tcf_index,
		.refcnt  = refcount_read(&s->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&s->tcf_bindcnt) - bind,
	};
	struct tcf_t t;

	spin_lock_bh(&s->tcf_lock);
	opt.action = s->tcf_action;
	if (nla_put(skb, TCA_SAMPLE_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	tcf_tm_dump(&t, &s->tcf_tm);
	if (nla_put_64bit(skb, TCA_SAMPLE_TM, sizeof(t), &t, TCA_SAMPLE_PAD))
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_SAMPLE_RATE, s->rate))
		goto nla_put_failure;

	if (s->truncate)
		if (nla_put_u32(skb, TCA_SAMPLE_TRUNC_SIZE, s->trunc_size))
			goto nla_put_failure;

	if (nla_put_u32(skb, TCA_SAMPLE_PSAMPLE_GROUP, s->psample_group_num))
		goto nla_put_failure;
	spin_unlock_bh(&s->tcf_lock);

	return skb->len;

nla_put_failure:
	spin_unlock_bh(&s->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

static void tcf_psample_group_put(void *priv)
{
	struct psample_group *group = priv;

	psample_group_put(group);
}

static struct psample_group *
tcf_sample_get_group(const struct tc_action *a,
		     tc_action_priv_destructor *destructor)
{
	struct tcf_sample *s = to_sample(a);
	struct psample_group *group;

	group = rcu_dereference_protected(s->psample_group,
					  lockdep_is_held(&s->tcf_lock));
	if (group) {
		psample_group_take(group);
		*destructor = tcf_psample_group_put;
	}

	return group;
}

static void tcf_offload_sample_get_group(struct flow_action_entry *entry,
					 const struct tc_action *act)
{
	entry->sample.psample_group =
		act->ops->get_psample_group(act, &entry->destructor);
	entry->destructor_priv = entry->sample.psample_group;
}

static int tcf_sample_offload_act_setup(struct tc_action *act, void *entry_data,
					u32 *index_inc, bool bind,
					struct netlink_ext_ack *extack)
{
	if (bind) {
		struct flow_action_entry *entry = entry_data;

		entry->id = FLOW_ACTION_SAMPLE;
		entry->sample.trunc_size = tcf_sample_trunc_size(act);
		entry->sample.truncate = tcf_sample_truncate(act);
		entry->sample.rate = tcf_sample_rate(act);
		tcf_offload_sample_get_group(entry, act);
		*index_inc = 1;
	} else {
		struct flow_offload_action *fl_action = entry_data;

		fl_action->id = FLOW_ACTION_SAMPLE;
	}

	return 0;
}

static struct tc_action_ops act_sample_ops = {
	.kind		= "sample",
	.id		= TCA_ID_SAMPLE,
	.owner		= THIS_MODULE,
	.act		= tcf_sample_act,
	.stats_update	= tcf_sample_stats_update,
	.dump		= tcf_sample_dump,
	.init		= tcf_sample_init,
	.cleanup	= tcf_sample_cleanup,
	.get_psample_group = tcf_sample_get_group,
	.offload_act_setup = tcf_sample_offload_act_setup,
	.size		= sizeof(struct tcf_sample),
};
MODULE_ALIAS_NET_ACT("sample");

static __net_init int sample_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, act_sample_ops.net_id);

	return tc_action_net_init(net, tn, &act_sample_ops);
}

static void __net_exit sample_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, act_sample_ops.net_id);
}

static struct pernet_operations sample_net_ops = {
	.init = sample_init_net,
	.exit_batch = sample_exit_net,
	.id   = &act_sample_ops.net_id,
	.size = sizeof(struct tc_action_net),
};

static int __init sample_init_module(void)
{
	return tcf_register_action(&act_sample_ops, &sample_net_ops);
}

static void __exit sample_cleanup_module(void)
{
	tcf_unregister_action(&act_sample_ops, &sample_net_ops);
}

module_init(sample_init_module);
module_exit(sample_cleanup_module);

MODULE_AUTHOR("Yotam Gigi <yotam.gi@gmail.com>");
MODULE_DESCRIPTION("Packet sampling action");
MODULE_LICENSE("GPL v2");
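/*
 * Illustrative note (not part of the original source): the sample action is
 * normally attached from userspace with tc(8). The device name, rate, group
 * number and truncation length below are example values, not defaults:
 *
 *	# tc qdisc add dev eth0 clsact
 *	# tc filter add dev eth0 ingress matchall \
 *		action sample rate 100 group 5 trunc 128
 *
 * With such a filter, roughly one in every 100 packets is truncated to 128
 * bytes and delivered to psample group 5, where a netlink listener (e.g. an
 * sFlow agent) can pick it up.
 */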
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * tcp_diag.c	Module for monitoring TCP transport protocols sockets.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

#include <linux/module.h>
#include <linux/net.h>
#include <linux/sock_diag.h>
#include <linux/inet_diag.h>

#include <linux/tcp.h>

#include <net/netlink.h>
#include <net/tcp.h>

static void tcp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
			      void *_info)
{
	struct tcp_info *info = _info;

	if (inet_sk_state_load(sk) == TCP_LISTEN) {
		r->idiag_rqueue = READ_ONCE(sk->sk_ack_backlog);
		r->idiag_wqueue = READ_ONCE(sk->sk_max_ack_backlog);
	} else if (sk->sk_type == SOCK_STREAM) {
		const struct tcp_sock *tp = tcp_sk(sk);

		r->idiag_rqueue = max_t(int, READ_ONCE(tp->rcv_nxt) -
					     READ_ONCE(tp->copied_seq), 0);
		r->idiag_wqueue = READ_ONCE(tp->write_seq) - tp->snd_una;
	}
	if (info)
		tcp_get_info(sk, info);
}

#ifdef CONFIG_TCP_MD5SIG
static void tcp_diag_md5sig_fill(struct tcp_diag_md5sig *info,
				 const struct tcp_md5sig_key *key)
{
	info->tcpm_family = key->family;
	info->tcpm_prefixlen = key->prefixlen;
	info->tcpm_keylen = key->keylen;
	memcpy(info->tcpm_key, key->key, key->keylen);

	if (key->family == AF_INET)
		info->tcpm_addr[0] = key->addr.a4.s_addr;
#if IS_ENABLED(CONFIG_IPV6)
	else if (key->family == AF_INET6)
		memcpy(&info->tcpm_addr, &key->addr.a6,
		       sizeof(info->tcpm_addr));
#endif
}

static int tcp_diag_put_md5sig(struct sk_buff *skb,
			       const struct tcp_md5sig_info *md5sig)
{
	const struct tcp_md5sig_key *key;
	struct tcp_diag_md5sig *info;
	struct nlattr *attr;
	int md5sig_count = 0;

	hlist_for_each_entry_rcu(key, &md5sig->head, node)
		md5sig_count++;
	if (md5sig_count == 0)
		return 0;

	attr = nla_reserve(skb, INET_DIAG_MD5SIG,
			   md5sig_count * sizeof(struct tcp_diag_md5sig));
	if (!attr)
		return -EMSGSIZE;

	info = nla_data(attr);
	memset(info, 0, md5sig_count * sizeof(struct tcp_diag_md5sig));
	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
		tcp_diag_md5sig_fill(info++, key);
		if (--md5sig_count == 0)
			break;
	}

	return 0;
}
#endif

static int tcp_diag_put_ulp(struct sk_buff *skb, struct sock *sk,
			    const struct tcp_ulp_ops *ulp_ops)
{
	struct nlattr *nest;
	int err;

	nest = nla_nest_start_noflag(skb, INET_DIAG_ULP_INFO);
	if (!nest)
		return -EMSGSIZE;

	err = nla_put_string(skb, INET_ULP_INFO_NAME, ulp_ops->name);
	if (err)
		goto nla_failure;

	if (ulp_ops->get_info)
		err = ulp_ops->get_info(sk, skb);
	if (err)
		goto nla_failure;

	nla_nest_end(skb, nest);
	return 0;

nla_failure:
	nla_nest_cancel(skb, nest);
	return err;
}

static int tcp_diag_get_aux(struct sock *sk, bool net_admin,
			    struct sk_buff *skb)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	int err = 0;

#ifdef CONFIG_TCP_MD5SIG
	if (net_admin) {
		struct tcp_md5sig_info *md5sig;

		rcu_read_lock();
		md5sig = rcu_dereference(tcp_sk(sk)->md5sig_info);
		if (md5sig)
			err = tcp_diag_put_md5sig(skb, md5sig);
		rcu_read_unlock();
		if (err < 0)
			return err;
	}
#endif

	if (net_admin) {
		const struct tcp_ulp_ops *ulp_ops;

		ulp_ops = icsk->icsk_ulp_ops;
		if (ulp_ops)
			err = tcp_diag_put_ulp(skb, sk, ulp_ops);
		if (err)
			return err;
	}
	return 0;
}

static size_t tcp_diag_get_aux_size(struct sock *sk, bool net_admin)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	size_t size = 0;

#ifdef CONFIG_TCP_MD5SIG
	if (net_admin && sk_fullsock(sk)) {
		const struct tcp_md5sig_info *md5sig;
		const struct tcp_md5sig_key *key;
		size_t md5sig_count = 0;

		rcu_read_lock();
		md5sig = rcu_dereference(tcp_sk(sk)->md5sig_info);
		if (md5sig) {
			hlist_for_each_entry_rcu(key, &md5sig->head, node)
				md5sig_count++;
		}
		rcu_read_unlock();
		size += nla_total_size(md5sig_count *
				       sizeof(struct tcp_diag_md5sig));
	}
#endif

	if (net_admin && sk_fullsock(sk)) {
		const struct tcp_ulp_ops *ulp_ops;

		ulp_ops = icsk->icsk_ulp_ops;
		if (ulp_ops) {
			size += nla_total_size(0) +
				nla_total_size(TCP_ULP_NAME_MAX);
			if (ulp_ops->get_info_size)
				size += ulp_ops->get_info_size(sk);
		}
	}
	return size;
}

static void tcp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
			  const struct inet_diag_req_v2 *r)
{
	struct inet_hashinfo *hinfo;

	hinfo = sock_net(cb->skb->sk)->ipv4.tcp_death_row.hashinfo;

	inet_diag_dump_icsk(hinfo, skb, cb, r);
}

static int tcp_diag_dump_one(struct netlink_callback *cb,
			     const struct inet_diag_req_v2 *req)
{
	struct inet_hashinfo *hinfo;

	hinfo = sock_net(cb->skb->sk)->ipv4.tcp_death_row.hashinfo;

	return inet_diag_dump_one_icsk(hinfo, cb, req);
}

#ifdef CONFIG_INET_DIAG_DESTROY
static int tcp_diag_destroy(struct sk_buff *in_skb,
			    const struct inet_diag_req_v2 *req)
{
	struct net *net = sock_net(in_skb->sk);
	struct inet_hashinfo *hinfo;
	struct sock *sk;
	int err;

	hinfo = net->ipv4.tcp_death_row.hashinfo;
	sk = inet_diag_find_one_icsk(net, hinfo, req);

	if (IS_ERR(sk))
		return PTR_ERR(sk);

	err = sock_diag_destroy(sk, ECONNABORTED);

	sock_gen_put(sk);

	return err;
}
#endif

static const struct inet_diag_handler tcp_diag_handler = {
	.owner			= THIS_MODULE,
	.dump			= tcp_diag_dump,
	.dump_one		= tcp_diag_dump_one,
	.idiag_get_info		= tcp_diag_get_info,
	.idiag_get_aux		= tcp_diag_get_aux,
	.idiag_get_aux_size	= tcp_diag_get_aux_size,
	.idiag_type		= IPPROTO_TCP,
	.idiag_info_size	= sizeof(struct tcp_info),
#ifdef CONFIG_INET_DIAG_DESTROY
	.destroy		= tcp_diag_destroy,
#endif
};

static int __init tcp_diag_init(void)
{
	return inet_diag_register(&tcp_diag_handler);
}

static void __exit tcp_diag_exit(void)
{
	inet_diag_unregister(&tcp_diag_handler);
}

module_init(tcp_diag_init);
module_exit(tcp_diag_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TCP socket monitoring via SOCK_DIAG");
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2-6 /* AF_INET - IPPROTO_TCP */);
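/*
 * Illustrative note (not part of the original source): this handler answers
 * NETLINK_SOCK_DIAG requests whose sdiag_protocol is IPPROTO_TCP, which is
 * what "ss -ti" sends. A minimal userspace request sketch (field values are
 * example assumptions, error handling omitted):
 *
 *	struct inet_diag_req_v2 req = {
 *		.sdiag_family	= AF_INET,
 *		.sdiag_protocol	= IPPROTO_TCP,
 *		.idiag_ext	= 1 << (INET_DIAG_INFO - 1),
 *		.idiag_states	= -1,		// sockets in all TCP states
 *	};
 *
 * sent as an NLM_F_DUMP request with nlmsg_type SOCK_DIAG_BY_FAMILY; each
 * reply carries an inet_diag_msg whose queue fields and tcp_info attribute
 * are filled in by tcp_diag_get_info() above.
 */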
// SPDX-License-Identifier: GPL-2.0-or-later
/*  Diffie-Hellman Key Agreement Method [RFC2631]
 *
 * Copyright (c) 2016, Intel Corporation
 * Authors: Salvatore Benedetto <salvatore.benedetto@intel.com>
 */

#include <linux/fips.h>
#include <linux/module.h>
#include <crypto/internal/kpp.h>
#include <crypto/kpp.h>
#include <crypto/dh.h>
#include <crypto/rng.h>
#include <linux/mpi.h>

struct dh_ctx {
	MPI p;	/* Value is guaranteed to be set. */
	MPI g;	/* Value is guaranteed to be set. */
	MPI xa;	/* Value is guaranteed to be set. */
};

static void dh_clear_ctx(struct dh_ctx *ctx)
{
	mpi_free(ctx->p);
	mpi_free(ctx->g);
	mpi_free(ctx->xa);
	memset(ctx, 0, sizeof(*ctx));
}

/*
 * If base is g we compute the public key
 *	ya = g^xa mod p; [RFC2631 sec 2.1.1]
 * else if base is the counterpart public key we compute the shared secret
 *	ZZ = yb^xa mod p; [RFC2631 sec 2.1.1]
 */
static int _compute_val(const struct dh_ctx *ctx, MPI base, MPI val)
{
	/* val = base^xa mod p */
	return mpi_powm(val, base, ctx->xa, ctx->p);
}

static inline struct dh_ctx *dh_get_ctx(struct crypto_kpp *tfm)
{
	return kpp_tfm_ctx(tfm);
}

static int dh_check_params_length(unsigned int p_len)
{
	if (fips_enabled)
		return (p_len < 2048) ? -EINVAL : 0;

	return (p_len < 1536) ? -EINVAL : 0;
}

static int dh_set_params(struct dh_ctx *ctx, struct dh *params)
{
	if (dh_check_params_length(params->p_size << 3))
		return -EINVAL;

	ctx->p = mpi_read_raw_data(params->p, params->p_size);
	if (!ctx->p)
		return -EINVAL;

	ctx->g = mpi_read_raw_data(params->g, params->g_size);
	if (!ctx->g)
		return -EINVAL;

	return 0;
}

static int dh_set_secret(struct crypto_kpp *tfm, const void *buf,
			 unsigned int len)
{
	struct dh_ctx *ctx = dh_get_ctx(tfm);
	struct dh params;

	/* Free the old MPI key if any */
	dh_clear_ctx(ctx);

	if (crypto_dh_decode_key(buf, len, &params) < 0)
		goto err_clear_ctx;

	if (dh_set_params(ctx, &params) < 0)
		goto err_clear_ctx;

	ctx->xa = mpi_read_raw_data(params.key, params.key_size);
	if (!ctx->xa)
		goto err_clear_ctx;

	return 0;

err_clear_ctx:
	dh_clear_ctx(ctx);
	return -EINVAL;
}

/*
 * SP800-56A public key verification:
 *
 * * For the safe-prime groups in FIPS mode, Q can be computed
 *   trivially from P and a full validation according to SP800-56A
 *   section 5.6.2.3.1 is performed.
 *
 * * For all other sets of group parameters, only a partial validation
 *   according to SP800-56A section 5.6.2.3.2 is performed.
 */
static int dh_is_pubkey_valid(struct dh_ctx *ctx, MPI y)
{
	MPI val, q;
	int ret;

	if (!fips_enabled)
		return 0;

	if (unlikely(!ctx->p))
		return -EINVAL;

	/*
	 * Step 1: Verify that 2 <= y <= p - 2.
	 *
	 * The upper limit check is actually y < p instead of y < p - 1
	 * in order to save one mpi_sub_ui() invocation here. Note that
	 * p - 1 is the non-trivial element of the subgroup of order 2 and
	 * thus, the check on y^q below would fail if y == p - 1.
	 */
	if (mpi_cmp_ui(y, 1) < 1 || mpi_cmp(y, ctx->p) >= 0)
		return -EINVAL;

	/*
	 * Step 2: Verify that 1 = y^q mod p
	 *
	 * For the safe-prime groups q = (p - 1)/2.
	 */
	val = mpi_alloc(0);
	if (!val)
		return -ENOMEM;

	q = mpi_alloc(mpi_get_nlimbs(ctx->p));
	if (!q) {
		mpi_free(val);
		return -ENOMEM;
	}

	/*
	 * ->p is odd, so no need to explicitly subtract one
	 * from it before shifting to the right.
*/ ret = mpi_rshift(q, ctx->p, 1) ?: mpi_powm(val, y, q, ctx->p); mpi_free(q); if (ret) { mpi_free(val); return ret; } ret = mpi_cmp_ui(val, 1); mpi_free(val); if (ret != 0) return -EINVAL; return 0; } static int dh_compute_value(struct kpp_request *req) { struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); struct dh_ctx *ctx = dh_get_ctx(tfm); MPI base, val = mpi_alloc(0); int ret = 0; int sign; if (!val) return -ENOMEM; if (unlikely(!ctx->xa)) { ret = -EINVAL; goto err_free_val; } if (req->src) { base = mpi_read_raw_from_sgl(req->src, req->src_len); if (!base) { ret = -EINVAL; goto err_free_val; } ret = dh_is_pubkey_valid(ctx, base); if (ret) goto err_free_base; } else { base = ctx->g; } ret = _compute_val(ctx, base, val); if (ret) goto err_free_base; if (fips_enabled) { /* SP800-56A rev3 5.7.1.1 check: Validation of shared secret */ if (req->src) { MPI pone; /* z <= 1 */ if (mpi_cmp_ui(val, 1) < 1) { ret = -EBADMSG; goto err_free_base; } /* z == p - 1 */ pone = mpi_alloc(0); if (!pone) { ret = -ENOMEM; goto err_free_base; } ret = mpi_sub_ui(pone, ctx->p, 1); if (!ret && !mpi_cmp(pone, val)) ret = -EBADMSG; mpi_free(pone); if (ret) goto err_free_base; /* SP800-56A rev 3 5.6.2.1.3 key check */ } else { if (dh_is_pubkey_valid(ctx, val)) { ret = -EAGAIN; goto err_free_val; } } } ret = mpi_write_to_sgl(val, req->dst, req->dst_len, &sign); if (ret) goto err_free_base; if (sign < 0) ret = -EBADMSG; err_free_base: if (req->src) mpi_free(base); err_free_val: mpi_free(val); return ret; } static unsigned int dh_max_size(struct crypto_kpp *tfm) { struct dh_ctx *ctx = dh_get_ctx(tfm); return mpi_get_size(ctx->p); } static void dh_exit_tfm(struct crypto_kpp *tfm) { struct dh_ctx *ctx = dh_get_ctx(tfm); dh_clear_ctx(ctx); } static struct kpp_alg dh = { .set_secret = dh_set_secret, .generate_public_key = dh_compute_value, .compute_shared_secret = dh_compute_value, .max_size = dh_max_size, .exit = dh_exit_tfm, .base = { .cra_name = "dh", .cra_driver_name = "dh-generic", .cra_priority = 100, .cra_module = THIS_MODULE, .cra_ctxsize = sizeof(struct dh_ctx), }, }; struct dh_safe_prime { unsigned int max_strength; unsigned int p_size; const char *p; }; static const char safe_prime_g[] = { 2 }; struct dh_safe_prime_instance_ctx { struct crypto_kpp_spawn dh_spawn; const struct dh_safe_prime *safe_prime; }; struct dh_safe_prime_tfm_ctx { struct crypto_kpp *dh_tfm; }; static void dh_safe_prime_free_instance(struct kpp_instance *inst) { struct dh_safe_prime_instance_ctx *ctx = kpp_instance_ctx(inst); crypto_drop_kpp(&ctx->dh_spawn); kfree(inst); } static inline struct dh_safe_prime_instance_ctx *dh_safe_prime_instance_ctx( struct crypto_kpp *tfm) { return kpp_instance_ctx(kpp_alg_instance(tfm)); } static int dh_safe_prime_init_tfm(struct crypto_kpp *tfm) { struct dh_safe_prime_instance_ctx *inst_ctx = dh_safe_prime_instance_ctx(tfm); struct dh_safe_prime_tfm_ctx *tfm_ctx = kpp_tfm_ctx(tfm); tfm_ctx->dh_tfm = crypto_spawn_kpp(&inst_ctx->dh_spawn); if (IS_ERR(tfm_ctx->dh_tfm)) return PTR_ERR(tfm_ctx->dh_tfm); kpp_set_reqsize(tfm, sizeof(struct kpp_request) + crypto_kpp_reqsize(tfm_ctx->dh_tfm)); return 0; } static void dh_safe_prime_exit_tfm(struct crypto_kpp *tfm) { struct dh_safe_prime_tfm_ctx *tfm_ctx = kpp_tfm_ctx(tfm); crypto_free_kpp(tfm_ctx->dh_tfm); } static u64 __add_u64_to_be(__be64 *dst, unsigned int n, u64 val) { unsigned int i; for (i = n; val && i > 0; --i) { u64 tmp = be64_to_cpu(dst[i - 1]); tmp += val; val = tmp >= val ? 
0 : 1; dst[i - 1] = cpu_to_be64(tmp); } return val; } static void *dh_safe_prime_gen_privkey(const struct dh_safe_prime *safe_prime, unsigned int *key_size) { unsigned int n, oversampling_size; __be64 *key; int err; u64 h, o; /* * Generate a private key following NIST SP800-56Ar3, * sec. 5.6.1.1.1 and 5.6.1.1.3 resp.. * * 5.6.1.1.1: choose key length N such that * 2 * ->max_strength <= N <= log2(q) + 1 = ->p_size * 8 - 1 * with q = (p - 1) / 2 for the safe-prime groups. * Choose the lower bound's next power of two for N in order to * avoid excessively large private keys while still * maintaining some extra reserve beyond the bare minimum in * most cases. Note that for each entry in safe_prime_groups[], * the following holds for such N: * - N >= 256, in particular it is a multiple of 2^6 = 64 * bits and * - N < log2(q) + 1, i.e. N respects the upper bound. */ n = roundup_pow_of_two(2 * safe_prime->max_strength); WARN_ON_ONCE(n & ((1u << 6) - 1)); n >>= 6; /* Convert N into units of u64. */ /* * Reserve one extra u64 to hold the extra random bits * required as per 5.6.1.1.3. */ oversampling_size = (n + 1) * sizeof(__be64); key = kmalloc(oversampling_size, GFP_KERNEL); if (!key) return ERR_PTR(-ENOMEM); /* * 5.6.1.1.3, step 3 (and implicitly step 4): obtain N + 64 * random bits and interpret them as a big endian integer. */ err = -EFAULT; if (crypto_get_default_rng()) goto out_err; err = crypto_rng_get_bytes(crypto_default_rng, (u8 *)key, oversampling_size); crypto_put_default_rng(); if (err) goto out_err; /* * 5.6.1.1.3, step 5 is implicit: 2^N < q and thus, * M = min(2^N, q) = 2^N. * * For step 6, calculate * key = (key[] mod (M - 1)) + 1 = (key[] mod (2^N - 1)) + 1. * * In order to avoid expensive divisions, note that * 2^N mod (2^N - 1) = 1 and thus, for any integer h, * 2^N * h mod (2^N - 1) = h mod (2^N - 1) always holds. * The big endian integer key[] composed of n + 1 64bit words * may be written as key[] = h * 2^N + l, with h = key[0] * representing the 64 most significant bits and l * corresponding to the remaining 2^N bits. With the remark * from above, * h * 2^N + l mod (2^N - 1) = l + h mod (2^N - 1). * As both, l and h are less than 2^N, their sum after * this first reduction is guaranteed to be <= 2^(N + 1) - 2. * Or equivalently, that their sum can again be written as * h' * 2^N + l' with h' now either zero or one and if one, * then l' <= 2^N - 2. Thus, all bits at positions >= N will * be zero after a second reduction: * h' * 2^N + l' mod (2^N - 1) = l' + h' mod (2^N - 1). * At this point, it is still possible that * l' + h' = 2^N - 1, i.e. that l' + h' mod (2^N - 1) * is zero. This condition will be detected below by means of * the final increment overflowing in this case. */ h = be64_to_cpu(key[0]); h = __add_u64_to_be(key + 1, n, h); h = __add_u64_to_be(key + 1, n, h); WARN_ON_ONCE(h); /* Increment to obtain the final result. */ o = __add_u64_to_be(key + 1, n, 1); /* * The overflow bit o from the increment is either zero or * one. If zero, key[1:n] holds the final result in big-endian * order. If one, key[1:n] is zero now, but needs to be set to * one, c.f. above. */ if (o) key[n] = cpu_to_be64(1); /* n is in units of u64, convert to bytes. */ *key_size = n << 3; /* Strip the leading extra __be64, which is (virtually) zero by now. 
*/ memmove(key, &key[1], *key_size); return key; out_err: kfree_sensitive(key); return ERR_PTR(err); } static int dh_safe_prime_set_secret(struct crypto_kpp *tfm, const void *buffer, unsigned int len) { struct dh_safe_prime_instance_ctx *inst_ctx = dh_safe_prime_instance_ctx(tfm); struct dh_safe_prime_tfm_ctx *tfm_ctx = kpp_tfm_ctx(tfm); struct dh params = {}; void *buf = NULL, *key = NULL; unsigned int buf_size; int err; if (buffer) { err = __crypto_dh_decode_key(buffer, len, ¶ms); if (err) return err; if (params.p_size || params.g_size) return -EINVAL; } params.p = inst_ctx->safe_prime->p; params.p_size = inst_ctx->safe_prime->p_size; params.g = safe_prime_g; params.g_size = sizeof(safe_prime_g); if (!params.key_size) { key = dh_safe_prime_gen_privkey(inst_ctx->safe_prime, ¶ms.key_size); if (IS_ERR(key)) return PTR_ERR(key); params.key = key; } buf_size = crypto_dh_key_len(¶ms); buf = kmalloc(buf_size, GFP_KERNEL); if (!buf) { err = -ENOMEM; goto out; } err = crypto_dh_encode_key(buf, buf_size, ¶ms); if (err) goto out; err = crypto_kpp_set_secret(tfm_ctx->dh_tfm, buf, buf_size); out: kfree_sensitive(buf); kfree_sensitive(key); return err; } static void dh_safe_prime_complete_req(void *data, int err) { struct kpp_request *req = data; kpp_request_complete(req, err); } static struct kpp_request *dh_safe_prime_prepare_dh_req(struct kpp_request *req) { struct dh_safe_prime_tfm_ctx *tfm_ctx = kpp_tfm_ctx(crypto_kpp_reqtfm(req)); struct kpp_request *dh_req = kpp_request_ctx(req); kpp_request_set_tfm(dh_req, tfm_ctx->dh_tfm); kpp_request_set_callback(dh_req, req->base.flags, dh_safe_prime_complete_req, req); kpp_request_set_input(dh_req, req->src, req->src_len); kpp_request_set_output(dh_req, req->dst, req->dst_len); return dh_req; } static int dh_safe_prime_generate_public_key(struct kpp_request *req) { struct kpp_request *dh_req = dh_safe_prime_prepare_dh_req(req); return crypto_kpp_generate_public_key(dh_req); } static int dh_safe_prime_compute_shared_secret(struct kpp_request *req) { struct kpp_request *dh_req = dh_safe_prime_prepare_dh_req(req); return crypto_kpp_compute_shared_secret(dh_req); } static unsigned int dh_safe_prime_max_size(struct crypto_kpp *tfm) { struct dh_safe_prime_tfm_ctx *tfm_ctx = kpp_tfm_ctx(tfm); return crypto_kpp_maxsize(tfm_ctx->dh_tfm); } static int __maybe_unused __dh_safe_prime_create( struct crypto_template *tmpl, struct rtattr **tb, const struct dh_safe_prime *safe_prime) { struct kpp_instance *inst; struct dh_safe_prime_instance_ctx *ctx; const char *dh_name; struct kpp_alg *dh_alg; u32 mask; int err; err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_KPP, &mask); if (err) return err; dh_name = crypto_attr_alg_name(tb[1]); if (IS_ERR(dh_name)) return PTR_ERR(dh_name); inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); if (!inst) return -ENOMEM; ctx = kpp_instance_ctx(inst); err = crypto_grab_kpp(&ctx->dh_spawn, kpp_crypto_instance(inst), dh_name, 0, mask); if (err) goto err_free_inst; err = -EINVAL; dh_alg = crypto_spawn_kpp_alg(&ctx->dh_spawn); if (strcmp(dh_alg->base.cra_name, "dh")) goto err_free_inst; ctx->safe_prime = safe_prime; err = crypto_inst_setname(kpp_crypto_instance(inst), tmpl->name, &dh_alg->base); if (err) goto err_free_inst; inst->alg.set_secret = dh_safe_prime_set_secret; inst->alg.generate_public_key = dh_safe_prime_generate_public_key; inst->alg.compute_shared_secret = dh_safe_prime_compute_shared_secret; inst->alg.max_size = dh_safe_prime_max_size; inst->alg.init = dh_safe_prime_init_tfm; inst->alg.exit = dh_safe_prime_exit_tfm; 
inst->alg.base.cra_priority = dh_alg->base.cra_priority; inst->alg.base.cra_module = THIS_MODULE; inst->alg.base.cra_ctxsize = sizeof(struct dh_safe_prime_tfm_ctx); inst->free = dh_safe_prime_free_instance; err = kpp_register_instance(tmpl, inst); if (err) goto err_free_inst; return 0; err_free_inst: dh_safe_prime_free_instance(inst); return err; } #ifdef CONFIG_CRYPTO_DH_RFC7919_GROUPS static const struct dh_safe_prime ffdhe2048_prime = { .max_strength = 112, .p_size = 256, .p = "\xff\xff\xff\xff\xff\xff\xff\xff\xad\xf8\x54\x58\xa2\xbb\x4a\x9a" "\xaf\xdc\x56\x20\x27\x3d\x3c\xf1\xd8\xb9\xc5\x83\xce\x2d\x36\x95" "\xa9\xe1\x36\x41\x14\x64\x33\xfb\xcc\x93\x9d\xce\x24\x9b\x3e\xf9" "\x7d\x2f\xe3\x63\x63\x0c\x75\xd8\xf6\x81\xb2\x02\xae\xc4\x61\x7a" "\xd3\xdf\x1e\xd5\xd5\xfd\x65\x61\x24\x33\xf5\x1f\x5f\x06\x6e\xd0" "\x85\x63\x65\x55\x3d\xed\x1a\xf3\xb5\x57\x13\x5e\x7f\x57\xc9\x35" "\x98\x4f\x0c\x70\xe0\xe6\x8b\x77\xe2\xa6\x89\xda\xf3\xef\xe8\x72" "\x1d\xf1\x58\xa1\x36\xad\xe7\x35\x30\xac\xca\x4f\x48\x3a\x79\x7a" "\xbc\x0a\xb1\x82\xb3\x24\xfb\x61\xd1\x08\xa9\x4b\xb2\xc8\xe3\xfb" "\xb9\x6a\xda\xb7\x60\xd7\xf4\x68\x1d\x4f\x42\xa3\xde\x39\x4d\xf4" "\xae\x56\xed\xe7\x63\x72\xbb\x19\x0b\x07\xa7\xc8\xee\x0a\x6d\x70" "\x9e\x02\xfc\xe1\xcd\xf7\xe2\xec\xc0\x34\x04\xcd\x28\x34\x2f\x61" "\x91\x72\xfe\x9c\xe9\x85\x83\xff\x8e\x4f\x12\x32\xee\xf2\x81\x83" "\xc3\xfe\x3b\x1b\x4c\x6f\xad\x73\x3b\xb5\xfc\xbc\x2e\xc2\x20\x05" "\xc5\x8e\xf1\x83\x7d\x16\x83\xb2\xc6\xf3\x4a\x26\xc1\xb2\xef\xfa" "\x88\x6b\x42\x38\x61\x28\x5c\x97\xff\xff\xff\xff\xff\xff\xff\xff", }; static const struct dh_safe_prime ffdhe3072_prime = { .max_strength = 128, .p_size = 384, .p = "\xff\xff\xff\xff\xff\xff\xff\xff\xad\xf8\x54\x58\xa2\xbb\x4a\x9a" "\xaf\xdc\x56\x20\x27\x3d\x3c\xf1\xd8\xb9\xc5\x83\xce\x2d\x36\x95" "\xa9\xe1\x36\x41\x14\x64\x33\xfb\xcc\x93\x9d\xce\x24\x9b\x3e\xf9" "\x7d\x2f\xe3\x63\x63\x0c\x75\xd8\xf6\x81\xb2\x02\xae\xc4\x61\x7a" "\xd3\xdf\x1e\xd5\xd5\xfd\x65\x61\x24\x33\xf5\x1f\x5f\x06\x6e\xd0" "\x85\x63\x65\x55\x3d\xed\x1a\xf3\xb5\x57\x13\x5e\x7f\x57\xc9\x35" "\x98\x4f\x0c\x70\xe0\xe6\x8b\x77\xe2\xa6\x89\xda\xf3\xef\xe8\x72" "\x1d\xf1\x58\xa1\x36\xad\xe7\x35\x30\xac\xca\x4f\x48\x3a\x79\x7a" "\xbc\x0a\xb1\x82\xb3\x24\xfb\x61\xd1\x08\xa9\x4b\xb2\xc8\xe3\xfb" "\xb9\x6a\xda\xb7\x60\xd7\xf4\x68\x1d\x4f\x42\xa3\xde\x39\x4d\xf4" "\xae\x56\xed\xe7\x63\x72\xbb\x19\x0b\x07\xa7\xc8\xee\x0a\x6d\x70" "\x9e\x02\xfc\xe1\xcd\xf7\xe2\xec\xc0\x34\x04\xcd\x28\x34\x2f\x61" "\x91\x72\xfe\x9c\xe9\x85\x83\xff\x8e\x4f\x12\x32\xee\xf2\x81\x83" "\xc3\xfe\x3b\x1b\x4c\x6f\xad\x73\x3b\xb5\xfc\xbc\x2e\xc2\x20\x05" "\xc5\x8e\xf1\x83\x7d\x16\x83\xb2\xc6\xf3\x4a\x26\xc1\xb2\xef\xfa" "\x88\x6b\x42\x38\x61\x1f\xcf\xdc\xde\x35\x5b\x3b\x65\x19\x03\x5b" "\xbc\x34\xf4\xde\xf9\x9c\x02\x38\x61\xb4\x6f\xc9\xd6\xe6\xc9\x07" "\x7a\xd9\x1d\x26\x91\xf7\xf7\xee\x59\x8c\xb0\xfa\xc1\x86\xd9\x1c" "\xae\xfe\x13\x09\x85\x13\x92\x70\xb4\x13\x0c\x93\xbc\x43\x79\x44" "\xf4\xfd\x44\x52\xe2\xd7\x4d\xd3\x64\xf2\xe2\x1e\x71\xf5\x4b\xff" "\x5c\xae\x82\xab\x9c\x9d\xf6\x9e\xe8\x6d\x2b\xc5\x22\x36\x3a\x0d" "\xab\xc5\x21\x97\x9b\x0d\xea\xda\x1d\xbf\x9a\x42\xd5\xc4\x48\x4e" "\x0a\xbc\xd0\x6b\xfa\x53\xdd\xef\x3c\x1b\x20\xee\x3f\xd5\x9d\x7c" "\x25\xe4\x1d\x2b\x66\xc6\x2e\x37\xff\xff\xff\xff\xff\xff\xff\xff", }; static const struct dh_safe_prime ffdhe4096_prime = { .max_strength = 152, .p_size = 512, .p = "\xff\xff\xff\xff\xff\xff\xff\xff\xad\xf8\x54\x58\xa2\xbb\x4a\x9a" "\xaf\xdc\x56\x20\x27\x3d\x3c\xf1\xd8\xb9\xc5\x83\xce\x2d\x36\x95" 
"\xa9\xe1\x36\x41\x14\x64\x33\xfb\xcc\x93\x9d\xce\x24\x9b\x3e\xf9" "\x7d\x2f\xe3\x63\x63\x0c\x75\xd8\xf6\x81\xb2\x02\xae\xc4\x61\x7a" "\xd3\xdf\x1e\xd5\xd5\xfd\x65\x61\x24\x33\xf5\x1f\x5f\x06\x6e\xd0" "\x85\x63\x65\x55\x3d\xed\x1a\xf3\xb5\x57\x13\x5e\x7f\x57\xc9\x35" "\x98\x4f\x0c\x70\xe0\xe6\x8b\x77\xe2\xa6\x89\xda\xf3\xef\xe8\x72" "\x1d\xf1\x58\xa1\x36\xad\xe7\x35\x30\xac\xca\x4f\x48\x3a\x79\x7a" "\xbc\x0a\xb1\x82\xb3\x24\xfb\x61\xd1\x08\xa9\x4b\xb2\xc8\xe3\xfb" "\xb9\x6a\xda\xb7\x60\xd7\xf4\x68\x1d\x4f\x42\xa3\xde\x39\x4d\xf4" "\xae\x56\xed\xe7\x63\x72\xbb\x19\x0b\x07\xa7\xc8\xee\x0a\x6d\x70" "\x9e\x02\xfc\xe1\xcd\xf7\xe2\xec\xc0\x34\x04\xcd\x28\x34\x2f\x61" "\x91\x72\xfe\x9c\xe9\x85\x83\xff\x8e\x4f\x12\x32\xee\xf2\x81\x83" "\xc3\xfe\x3b\x1b\x4c\x6f\xad\x73\x3b\xb5\xfc\xbc\x2e\xc2\x20\x05" "\xc5\x8e\xf1\x83\x7d\x16\x83\xb2\xc6\xf3\x4a\x26\xc1\xb2\xef\xfa" "\x88\x6b\x42\x38\x61\x1f\xcf\xdc\xde\x35\x5b\x3b\x65\x19\x03\x5b" "\xbc\x34\xf4\xde\xf9\x9c\x02\x38\x61\xb4\x6f\xc9\xd6\xe6\xc9\x07" "\x7a\xd9\x1d\x26\x91\xf7\xf7\xee\x59\x8c\xb0\xfa\xc1\x86\xd9\x1c" "\xae\xfe\x13\x09\x85\x13\x92\x70\xb4\x13\x0c\x93\xbc\x43\x79\x44" "\xf4\xfd\x44\x52\xe2\xd7\x4d\xd3\x64\xf2\xe2\x1e\x71\xf5\x4b\xff" "\x5c\xae\x82\xab\x9c\x9d\xf6\x9e\xe8\x6d\x2b\xc5\x22\x36\x3a\x0d" "\xab\xc5\x21\x97\x9b\x0d\xea\xda\x1d\xbf\x9a\x42\xd5\xc4\x48\x4e" "\x0a\xbc\xd0\x6b\xfa\x53\xdd\xef\x3c\x1b\x20\xee\x3f\xd5\x9d\x7c" "\x25\xe4\x1d\x2b\x66\x9e\x1e\xf1\x6e\x6f\x52\xc3\x16\x4d\xf4\xfb" "\x79\x30\xe9\xe4\xe5\x88\x57\xb6\xac\x7d\x5f\x42\xd6\x9f\x6d\x18" "\x77\x63\xcf\x1d\x55\x03\x40\x04\x87\xf5\x5b\xa5\x7e\x31\xcc\x7a" "\x71\x35\xc8\x86\xef\xb4\x31\x8a\xed\x6a\x1e\x01\x2d\x9e\x68\x32" "\xa9\x07\x60\x0a\x91\x81\x30\xc4\x6d\xc7\x78\xf9\x71\xad\x00\x38" "\x09\x29\x99\xa3\x33\xcb\x8b\x7a\x1a\x1d\xb9\x3d\x71\x40\x00\x3c" "\x2a\x4e\xce\xa9\xf9\x8d\x0a\xcc\x0a\x82\x91\xcd\xce\xc9\x7d\xcf" "\x8e\xc9\xb5\x5a\x7f\x88\xa4\x6b\x4d\xb5\xa8\x51\xf4\x41\x82\xe1" "\xc6\x8a\x00\x7e\x5e\x65\x5f\x6a\xff\xff\xff\xff\xff\xff\xff\xff", }; static const struct dh_safe_prime ffdhe6144_prime = { .max_strength = 176, .p_size = 768, .p = "\xff\xff\xff\xff\xff\xff\xff\xff\xad\xf8\x54\x58\xa2\xbb\x4a\x9a" "\xaf\xdc\x56\x20\x27\x3d\x3c\xf1\xd8\xb9\xc5\x83\xce\x2d\x36\x95" "\xa9\xe1\x36\x41\x14\x64\x33\xfb\xcc\x93\x9d\xce\x24\x9b\x3e\xf9" "\x7d\x2f\xe3\x63\x63\x0c\x75\xd8\xf6\x81\xb2\x02\xae\xc4\x61\x7a" "\xd3\xdf\x1e\xd5\xd5\xfd\x65\x61\x24\x33\xf5\x1f\x5f\x06\x6e\xd0" "\x85\x63\x65\x55\x3d\xed\x1a\xf3\xb5\x57\x13\x5e\x7f\x57\xc9\x35" "\x98\x4f\x0c\x70\xe0\xe6\x8b\x77\xe2\xa6\x89\xda\xf3\xef\xe8\x72" "\x1d\xf1\x58\xa1\x36\xad\xe7\x35\x30\xac\xca\x4f\x48\x3a\x79\x7a" "\xbc\x0a\xb1\x82\xb3\x24\xfb\x61\xd1\x08\xa9\x4b\xb2\xc8\xe3\xfb" "\xb9\x6a\xda\xb7\x60\xd7\xf4\x68\x1d\x4f\x42\xa3\xde\x39\x4d\xf4" "\xae\x56\xed\xe7\x63\x72\xbb\x19\x0b\x07\xa7\xc8\xee\x0a\x6d\x70" "\x9e\x02\xfc\xe1\xcd\xf7\xe2\xec\xc0\x34\x04\xcd\x28\x34\x2f\x61" "\x91\x72\xfe\x9c\xe9\x85\x83\xff\x8e\x4f\x12\x32\xee\xf2\x81\x83" "\xc3\xfe\x3b\x1b\x4c\x6f\xad\x73\x3b\xb5\xfc\xbc\x2e\xc2\x20\x05" "\xc5\x8e\xf1\x83\x7d\x16\x83\xb2\xc6\xf3\x4a\x26\xc1\xb2\xef\xfa" "\x88\x6b\x42\x38\x61\x1f\xcf\xdc\xde\x35\x5b\x3b\x65\x19\x03\x5b" "\xbc\x34\xf4\xde\xf9\x9c\x02\x38\x61\xb4\x6f\xc9\xd6\xe6\xc9\x07" "\x7a\xd9\x1d\x26\x91\xf7\xf7\xee\x59\x8c\xb0\xfa\xc1\x86\xd9\x1c" "\xae\xfe\x13\x09\x85\x13\x92\x70\xb4\x13\x0c\x93\xbc\x43\x79\x44" "\xf4\xfd\x44\x52\xe2\xd7\x4d\xd3\x64\xf2\xe2\x1e\x71\xf5\x4b\xff" "\x5c\xae\x82\xab\x9c\x9d\xf6\x9e\xe8\x6d\x2b\xc5\x22\x36\x3a\x0d" 
"\xab\xc5\x21\x97\x9b\x0d\xea\xda\x1d\xbf\x9a\x42\xd5\xc4\x48\x4e" "\x0a\xbc\xd0\x6b\xfa\x53\xdd\xef\x3c\x1b\x20\xee\x3f\xd5\x9d\x7c" "\x25\xe4\x1d\x2b\x66\x9e\x1e\xf1\x6e\x6f\x52\xc3\x16\x4d\xf4\xfb" "\x79\x30\xe9\xe4\xe5\x88\x57\xb6\xac\x7d\x5f\x42\xd6\x9f\x6d\x18" "\x77\x63\xcf\x1d\x55\x03\x40\x04\x87\xf5\x5b\xa5\x7e\x31\xcc\x7a" "\x71\x35\xc8\x86\xef\xb4\x31\x8a\xed\x6a\x1e\x01\x2d\x9e\x68\x32" "\xa9\x07\x60\x0a\x91\x81\x30\xc4\x6d\xc7\x78\xf9\x71\xad\x00\x38" "\x09\x29\x99\xa3\x33\xcb\x8b\x7a\x1a\x1d\xb9\x3d\x71\x40\x00\x3c" "\x2a\x4e\xce\xa9\xf9\x8d\x0a\xcc\x0a\x82\x91\xcd\xce\xc9\x7d\xcf" "\x8e\xc9\xb5\x5a\x7f\x88\xa4\x6b\x4d\xb5\xa8\x51\xf4\x41\x82\xe1" "\xc6\x8a\x00\x7e\x5e\x0d\xd9\x02\x0b\xfd\x64\xb6\x45\x03\x6c\x7a" "\x4e\x67\x7d\x2c\x38\x53\x2a\x3a\x23\xba\x44\x42\xca\xf5\x3e\xa6" "\x3b\xb4\x54\x32\x9b\x76\x24\xc8\x91\x7b\xdd\x64\xb1\xc0\xfd\x4c" "\xb3\x8e\x8c\x33\x4c\x70\x1c\x3a\xcd\xad\x06\x57\xfc\xcf\xec\x71" "\x9b\x1f\x5c\x3e\x4e\x46\x04\x1f\x38\x81\x47\xfb\x4c\xfd\xb4\x77" "\xa5\x24\x71\xf7\xa9\xa9\x69\x10\xb8\x55\x32\x2e\xdb\x63\x40\xd8" "\xa0\x0e\xf0\x92\x35\x05\x11\xe3\x0a\xbe\xc1\xff\xf9\xe3\xa2\x6e" "\x7f\xb2\x9f\x8c\x18\x30\x23\xc3\x58\x7e\x38\xda\x00\x77\xd9\xb4" "\x76\x3e\x4e\x4b\x94\xb2\xbb\xc1\x94\xc6\x65\x1e\x77\xca\xf9\x92" "\xee\xaa\xc0\x23\x2a\x28\x1b\xf6\xb3\xa7\x39\xc1\x22\x61\x16\x82" "\x0a\xe8\xdb\x58\x47\xa6\x7c\xbe\xf9\xc9\x09\x1b\x46\x2d\x53\x8c" "\xd7\x2b\x03\x74\x6a\xe7\x7f\x5e\x62\x29\x2c\x31\x15\x62\xa8\x46" "\x50\x5d\xc8\x2d\xb8\x54\x33\x8a\xe4\x9f\x52\x35\xc9\x5b\x91\x17" "\x8c\xcf\x2d\xd5\xca\xce\xf4\x03\xec\x9d\x18\x10\xc6\x27\x2b\x04" "\x5b\x3b\x71\xf9\xdc\x6b\x80\xd6\x3f\xdd\x4a\x8e\x9a\xdb\x1e\x69" "\x62\xa6\x95\x26\xd4\x31\x61\xc1\xa4\x1d\x57\x0d\x79\x38\xda\xd4" "\xa4\x0e\x32\x9c\xd0\xe4\x0e\x65\xff\xff\xff\xff\xff\xff\xff\xff", }; static const struct dh_safe_prime ffdhe8192_prime = { .max_strength = 200, .p_size = 1024, .p = "\xff\xff\xff\xff\xff\xff\xff\xff\xad\xf8\x54\x58\xa2\xbb\x4a\x9a" "\xaf\xdc\x56\x20\x27\x3d\x3c\xf1\xd8\xb9\xc5\x83\xce\x2d\x36\x95" "\xa9\xe1\x36\x41\x14\x64\x33\xfb\xcc\x93\x9d\xce\x24\x9b\x3e\xf9" "\x7d\x2f\xe3\x63\x63\x0c\x75\xd8\xf6\x81\xb2\x02\xae\xc4\x61\x7a" "\xd3\xdf\x1e\xd5\xd5\xfd\x65\x61\x24\x33\xf5\x1f\x5f\x06\x6e\xd0" "\x85\x63\x65\x55\x3d\xed\x1a\xf3\xb5\x57\x13\x5e\x7f\x57\xc9\x35" "\x98\x4f\x0c\x70\xe0\xe6\x8b\x77\xe2\xa6\x89\xda\xf3\xef\xe8\x72" "\x1d\xf1\x58\xa1\x36\xad\xe7\x35\x30\xac\xca\x4f\x48\x3a\x79\x7a" "\xbc\x0a\xb1\x82\xb3\x24\xfb\x61\xd1\x08\xa9\x4b\xb2\xc8\xe3\xfb" "\xb9\x6a\xda\xb7\x60\xd7\xf4\x68\x1d\x4f\x42\xa3\xde\x39\x4d\xf4" "\xae\x56\xed\xe7\x63\x72\xbb\x19\x0b\x07\xa7\xc8\xee\x0a\x6d\x70" "\x9e\x02\xfc\xe1\xcd\xf7\xe2\xec\xc0\x34\x04\xcd\x28\x34\x2f\x61" "\x91\x72\xfe\x9c\xe9\x85\x83\xff\x8e\x4f\x12\x32\xee\xf2\x81\x83" "\xc3\xfe\x3b\x1b\x4c\x6f\xad\x73\x3b\xb5\xfc\xbc\x2e\xc2\x20\x05" "\xc5\x8e\xf1\x83\x7d\x16\x83\xb2\xc6\xf3\x4a\x26\xc1\xb2\xef\xfa" "\x88\x6b\x42\x38\x61\x1f\xcf\xdc\xde\x35\x5b\x3b\x65\x19\x03\x5b" "\xbc\x34\xf4\xde\xf9\x9c\x02\x38\x61\xb4\x6f\xc9\xd6\xe6\xc9\x07" "\x7a\xd9\x1d\x26\x91\xf7\xf7\xee\x59\x8c\xb0\xfa\xc1\x86\xd9\x1c" "\xae\xfe\x13\x09\x85\x13\x92\x70\xb4\x13\x0c\x93\xbc\x43\x79\x44" "\xf4\xfd\x44\x52\xe2\xd7\x4d\xd3\x64\xf2\xe2\x1e\x71\xf5\x4b\xff" "\x5c\xae\x82\xab\x9c\x9d\xf6\x9e\xe8\x6d\x2b\xc5\x22\x36\x3a\x0d" "\xab\xc5\x21\x97\x9b\x0d\xea\xda\x1d\xbf\x9a\x42\xd5\xc4\x48\x4e" "\x0a\xbc\xd0\x6b\xfa\x53\xdd\xef\x3c\x1b\x20\xee\x3f\xd5\x9d\x7c" "\x25\xe4\x1d\x2b\x66\x9e\x1e\xf1\x6e\x6f\x52\xc3\x16\x4d\xf4\xfb" 
"\x79\x30\xe9\xe4\xe5\x88\x57\xb6\xac\x7d\x5f\x42\xd6\x9f\x6d\x18" "\x77\x63\xcf\x1d\x55\x03\x40\x04\x87\xf5\x5b\xa5\x7e\x31\xcc\x7a" "\x71\x35\xc8\x86\xef\xb4\x31\x8a\xed\x6a\x1e\x01\x2d\x9e\x68\x32" "\xa9\x07\x60\x0a\x91\x81\x30\xc4\x6d\xc7\x78\xf9\x71\xad\x00\x38" "\x09\x29\x99\xa3\x33\xcb\x8b\x7a\x1a\x1d\xb9\x3d\x71\x40\x00\x3c" "\x2a\x4e\xce\xa9\xf9\x8d\x0a\xcc\x0a\x82\x91\xcd\xce\xc9\x7d\xcf" "\x8e\xc9\xb5\x5a\x7f\x88\xa4\x6b\x4d\xb5\xa8\x51\xf4\x41\x82\xe1" "\xc6\x8a\x00\x7e\x5e\x0d\xd9\x02\x0b\xfd\x64\xb6\x45\x03\x6c\x7a" "\x4e\x67\x7d\x2c\x38\x53\x2a\x3a\x23\xba\x44\x42\xca\xf5\x3e\xa6" "\x3b\xb4\x54\x32\x9b\x76\x24\xc8\x91\x7b\xdd\x64\xb1\xc0\xfd\x4c" "\xb3\x8e\x8c\x33\x4c\x70\x1c\x3a\xcd\xad\x06\x57\xfc\xcf\xec\x71" "\x9b\x1f\x5c\x3e\x4e\x46\x04\x1f\x38\x81\x47\xfb\x4c\xfd\xb4\x77" "\xa5\x24\x71\xf7\xa9\xa9\x69\x10\xb8\x55\x32\x2e\xdb\x63\x40\xd8" "\xa0\x0e\xf0\x92\x35\x05\x11\xe3\x0a\xbe\xc1\xff\xf9\xe3\xa2\x6e" "\x7f\xb2\x9f\x8c\x18\x30\x23\xc3\x58\x7e\x38\xda\x00\x77\xd9\xb4" "\x76\x3e\x4e\x4b\x94\xb2\xbb\xc1\x94\xc6\x65\x1e\x77\xca\xf9\x92" "\xee\xaa\xc0\x23\x2a\x28\x1b\xf6\xb3\xa7\x39\xc1\x22\x61\x16\x82" "\x0a\xe8\xdb\x58\x47\xa6\x7c\xbe\xf9\xc9\x09\x1b\x46\x2d\x53\x8c" "\xd7\x2b\x03\x74\x6a\xe7\x7f\x5e\x62\x29\x2c\x31\x15\x62\xa8\x46" "\x50\x5d\xc8\x2d\xb8\x54\x33\x8a\xe4\x9f\x52\x35\xc9\x5b\x91\x17" "\x8c\xcf\x2d\xd5\xca\xce\xf4\x03\xec\x9d\x18\x10\xc6\x27\x2b\x04" "\x5b\x3b\x71\xf9\xdc\x6b\x80\xd6\x3f\xdd\x4a\x8e\x9a\xdb\x1e\x69" "\x62\xa6\x95\x26\xd4\x31\x61\xc1\xa4\x1d\x57\x0d\x79\x38\xda\xd4" "\xa4\x0e\x32\x9c\xcf\xf4\x6a\xaa\x36\xad\x00\x4c\xf6\x00\xc8\x38" "\x1e\x42\x5a\x31\xd9\x51\xae\x64\xfd\xb2\x3f\xce\xc9\x50\x9d\x43" "\x68\x7f\xeb\x69\xed\xd1\xcc\x5e\x0b\x8c\xc3\xbd\xf6\x4b\x10\xef" "\x86\xb6\x31\x42\xa3\xab\x88\x29\x55\x5b\x2f\x74\x7c\x93\x26\x65" "\xcb\x2c\x0f\x1c\xc0\x1b\xd7\x02\x29\x38\x88\x39\xd2\xaf\x05\xe4" "\x54\x50\x4a\xc7\x8b\x75\x82\x82\x28\x46\xc0\xba\x35\xc3\x5f\x5c" "\x59\x16\x0c\xc0\x46\xfd\x82\x51\x54\x1f\xc6\x8c\x9c\x86\xb0\x22" "\xbb\x70\x99\x87\x6a\x46\x0e\x74\x51\xa8\xa9\x31\x09\x70\x3f\xee" "\x1c\x21\x7e\x6c\x38\x26\xe5\x2c\x51\xaa\x69\x1e\x0e\x42\x3c\xfc" "\x99\xe9\xe3\x16\x50\xc1\x21\x7b\x62\x48\x16\xcd\xad\x9a\x95\xf9" "\xd5\xb8\x01\x94\x88\xd9\xc0\xa0\xa1\xfe\x30\x75\xa5\x77\xe2\x31" "\x83\xf8\x1d\x4a\x3f\x2f\xa4\x57\x1e\xfc\x8c\xe0\xba\x8a\x4f\xe8" "\xb6\x85\x5d\xfe\x72\xb0\xa6\x6e\xde\xd2\xfb\xab\xfb\xe5\x8a\x30" "\xfa\xfa\xbe\x1c\x5d\x71\xa8\x7e\x2f\x74\x1e\xf8\xc1\xfe\x86\xfe" "\xa6\xbb\xfd\xe5\x30\x67\x7f\x0d\x97\xd1\x1d\x49\xf7\xa8\x44\x3d" "\x08\x22\xe5\x06\xa9\xf4\x61\x4e\x01\x1e\x2a\x94\x83\x8f\xf8\x8c" "\xd6\x8c\x8b\xb7\xc5\xc6\x42\x4c\xff\xff\xff\xff\xff\xff\xff\xff", }; static int dh_ffdhe2048_create(struct crypto_template *tmpl, struct rtattr **tb) { return __dh_safe_prime_create(tmpl, tb, &ffdhe2048_prime); } static int dh_ffdhe3072_create(struct crypto_template *tmpl, struct rtattr **tb) { return __dh_safe_prime_create(tmpl, tb, &ffdhe3072_prime); } static int dh_ffdhe4096_create(struct crypto_template *tmpl, struct rtattr **tb) { return __dh_safe_prime_create(tmpl, tb, &ffdhe4096_prime); } static int dh_ffdhe6144_create(struct crypto_template *tmpl, struct rtattr **tb) { return __dh_safe_prime_create(tmpl, tb, &ffdhe6144_prime); } static int dh_ffdhe8192_create(struct crypto_template *tmpl, struct rtattr **tb) { return __dh_safe_prime_create(tmpl, tb, &ffdhe8192_prime); } static struct crypto_template crypto_ffdhe_templates[] = { { .name = "ffdhe2048", .create = dh_ffdhe2048_create, .module = THIS_MODULE, }, { .name = 
"ffdhe3072", .create = dh_ffdhe3072_create, .module = THIS_MODULE, }, { .name = "ffdhe4096", .create = dh_ffdhe4096_create, .module = THIS_MODULE, }, { .name = "ffdhe6144", .create = dh_ffdhe6144_create, .module = THIS_MODULE, }, { .name = "ffdhe8192", .create = dh_ffdhe8192_create, .module = THIS_MODULE, }, }; #else /* ! CONFIG_CRYPTO_DH_RFC7919_GROUPS */ static struct crypto_template crypto_ffdhe_templates[] = {}; #endif /* CONFIG_CRYPTO_DH_RFC7919_GROUPS */ static int __init dh_init(void) { int err; err = crypto_register_kpp(&dh); if (err) return err; err = crypto_register_templates(crypto_ffdhe_templates, ARRAY_SIZE(crypto_ffdhe_templates)); if (err) { crypto_unregister_kpp(&dh); return err; } return 0; } static void __exit dh_exit(void) { crypto_unregister_templates(crypto_ffdhe_templates, ARRAY_SIZE(crypto_ffdhe_templates)); crypto_unregister_kpp(&dh); } subsys_initcall(dh_init); module_exit(dh_exit); MODULE_ALIAS_CRYPTO("dh"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("DH generic algorithm"); |
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * configfs_internal.h - Internal stuff for configfs
 *
 * Based on sysfs:
 * 	sysfs is Copyright (C) 2001, 2002, 2003 Patrick Mochel
 *
 * configfs Copyright (C) 2005 Oracle.  All rights reserved.
 */

#ifdef pr_fmt
#undef pr_fmt
#endif

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct configfs_fragment {
	atomic_t frag_count;
	struct rw_semaphore frag_sem;
	bool frag_dead;
};

void put_fragment(struct configfs_fragment *);
struct configfs_fragment *get_fragment(struct configfs_fragment *);

struct configfs_dirent {
	atomic_t		s_count;
	int			s_dependent_count;
	struct list_head	s_sibling;
	struct list_head	s_children;
	int			s_links;
	void			*s_element;
	int			s_type;
	umode_t			s_mode;
	struct dentry		*s_dentry;
	struct iattr		*s_iattr;
#ifdef CONFIG_LOCKDEP
	int			s_depth;
#endif
	struct configfs_fragment *s_frag;
};

#define CONFIGFS_ROOT		0x0001
#define CONFIGFS_DIR		0x0002
#define CONFIGFS_ITEM_ATTR	0x0004
#define CONFIGFS_ITEM_BIN_ATTR	0x0008
#define CONFIGFS_ITEM_LINK	0x0020
#define CONFIGFS_USET_DIR	0x0040
#define CONFIGFS_USET_DEFAULT	0x0080
#define CONFIGFS_USET_DROPPING	0x0100
#define CONFIGFS_USET_IN_MKDIR	0x0200
#define CONFIGFS_USET_CREATING	0x0400
#define CONFIGFS_NOT_PINNED	(CONFIGFS_ITEM_ATTR | CONFIGFS_ITEM_BIN_ATTR)
#define CONFIGFS_PINNED \
	(CONFIGFS_ROOT | CONFIGFS_DIR | CONFIGFS_ITEM_LINK)

extern struct mutex configfs_symlink_mutex;
extern spinlock_t configfs_dirent_lock;

extern struct kmem_cache *configfs_dir_cachep;

extern int configfs_is_root(struct config_item *item);

extern struct inode *configfs_new_inode(umode_t mode,
					struct configfs_dirent *,
					struct super_block *);
extern struct inode *configfs_create(struct dentry *, umode_t mode);

extern int configfs_create_file(struct config_item *,
				const struct configfs_attribute *);
extern int configfs_create_bin_file(struct config_item *,
				    const struct configfs_bin_attribute *);
extern int configfs_make_dirent(struct configfs_dirent *, struct dentry *,
				void *, umode_t, int,
				struct configfs_fragment *);
extern int configfs_dirent_is_ready(struct configfs_dirent *);

extern const unsigned char *configfs_get_name(struct configfs_dirent *sd);
extern void configfs_drop_dentry(struct configfs_dirent *sd,
				 struct dentry *parent);
extern int configfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
			    struct iattr *iattr);

extern struct dentry *configfs_pin_fs(void);
extern void configfs_release_fs(void);

extern const struct file_operations configfs_dir_operations;
extern const struct file_operations configfs_file_operations;
extern const struct file_operations configfs_bin_file_operations;
extern const struct inode_operations configfs_dir_inode_operations;
extern const struct inode_operations configfs_root_inode_operations;
extern const struct inode_operations configfs_symlink_inode_operations;
extern const struct dentry_operations configfs_dentry_ops;

extern int configfs_symlink(struct mnt_idmap *idmap,
			    struct inode *dir, struct dentry *dentry,
			    const char *symname);
extern int configfs_unlink(struct inode *dir, struct dentry *dentry);

int configfs_create_link(struct configfs_dirent *target, struct dentry *parent,
			 struct dentry *dentry, char *body);

static inline struct config_item *to_item(struct dentry *dentry)
{
	struct configfs_dirent *sd = dentry->d_fsdata;
	return ((struct config_item *)sd->s_element);
}

static inline struct configfs_attribute *to_attr(struct dentry *dentry)
{
	struct configfs_dirent *sd = dentry->d_fsdata;
	return ((struct configfs_attribute *)sd->s_element);
}

static inline struct configfs_bin_attribute *to_bin_attr(struct dentry *dentry)
{
	struct configfs_attribute *attr = to_attr(dentry);

	return container_of(attr, struct configfs_bin_attribute, cb_attr);
}

static inline struct config_item *configfs_get_config_item(struct dentry *dentry)
{
	struct config_item *item = NULL;

	spin_lock(&dentry->d_lock);
	if (!d_unhashed(dentry)) {
		struct configfs_dirent *sd = dentry->d_fsdata;
		item = config_item_get(sd->s_element);
	}
	spin_unlock(&dentry->d_lock);

	return item;
}

static inline void release_configfs_dirent(struct configfs_dirent *sd)
{
	if (!(sd->s_type & CONFIGFS_ROOT)) {
		kfree(sd->s_iattr);
		put_fragment(sd->s_frag);
		kmem_cache_free(configfs_dir_cachep, sd);
	}
}

static inline struct configfs_dirent *configfs_get(struct configfs_dirent *sd)
{
	if (sd) {
		WARN_ON(!atomic_read(&sd->s_count));
		atomic_inc(&sd->s_count);
	}
	return sd;
}

static inline void configfs_put(struct configfs_dirent *sd)
{
	WARN_ON(!atomic_read(&sd->s_count));
	if (atomic_dec_and_test(&sd->s_count))
		release_configfs_dirent(sd);
}
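/*
 * Illustrative note (not part of the original header): configfs_get_config_item()
 * returns a counted reference that a caller is expected to drop again with
 * config_item_put() once done, e.g. (hypothetical caller, checks omitted):
 *
 *	struct config_item *item = configfs_get_config_item(dentry);
 *
 *	if (item) {
 *		pr_info("configfs item: %s\n", config_item_name(item));
 *		config_item_put(item);
 *	}
 */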
// SPDX-License-Identifier: GPL-2.0
/*
 * Qualcomm Serial USB driver
 *
 *	Copyright (c) 2008 QUALCOMM Incorporated.
 *	Copyright (c) 2009 Greg Kroah-Hartman <gregkh@suse.de>
 *	Copyright (c) 2009 Novell Inc.
*/ #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/module.h> #include <linux/usb.h> #include <linux/usb/serial.h> #include <linux/slab.h> #include "usb-wwan.h" #define DRIVER_AUTHOR "Qualcomm Inc" #define DRIVER_DESC "Qualcomm USB Serial driver" #define QUECTEL_EC20_PID 0x9215 /* standard device layouts supported by this driver */ enum qcserial_layouts { QCSERIAL_G2K = 0, /* Gobi 2000 */ QCSERIAL_G1K = 1, /* Gobi 1000 */ QCSERIAL_SWI = 2, /* Sierra Wireless */ QCSERIAL_HWI = 3, /* Huawei */ }; #define DEVICE_G1K(v, p) \ USB_DEVICE(v, p), .driver_info = QCSERIAL_G1K #define DEVICE_SWI(v, p) \ USB_DEVICE(v, p), .driver_info = QCSERIAL_SWI #define DEVICE_HWI(v, p) \ USB_DEVICE(v, p), .driver_info = QCSERIAL_HWI static const struct usb_device_id id_table[] = { /* Gobi 1000 devices */ {DEVICE_G1K(0x05c6, 0x9211)}, /* Acer Gobi QDL device */ {DEVICE_G1K(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ {DEVICE_G1K(0x03f0, 0x1f1d)}, /* HP un2400 Gobi Modem Device */ {DEVICE_G1K(0x03f0, 0x201d)}, /* HP un2400 Gobi QDL Device */ {DEVICE_G1K(0x04da, 0x250d)}, /* Panasonic Gobi Modem device */ {DEVICE_G1K(0x04da, 0x250c)}, /* Panasonic Gobi QDL device */ {DEVICE_G1K(0x413c, 0x8172)}, /* Dell Gobi Modem device */ {DEVICE_G1K(0x413c, 0x8171)}, /* Dell Gobi QDL device */ {DEVICE_G1K(0x1410, 0xa001)}, /* Novatel/Verizon USB-1000 */ {DEVICE_G1K(0x1410, 0xa002)}, /* Novatel Gobi Modem device */ {DEVICE_G1K(0x1410, 0xa003)}, /* Novatel Gobi Modem device */ {DEVICE_G1K(0x1410, 0xa004)}, /* Novatel Gobi Modem device */ {DEVICE_G1K(0x1410, 0xa005)}, /* Novatel Gobi Modem device */ {DEVICE_G1K(0x1410, 0xa006)}, /* Novatel Gobi Modem device */ {DEVICE_G1K(0x1410, 0xa007)}, /* Novatel Gobi Modem device */ {DEVICE_G1K(0x1410, 0xa008)}, /* Novatel Gobi QDL device */ {DEVICE_G1K(0x0b05, 0x1776)}, /* Asus Gobi Modem device */ {DEVICE_G1K(0x0b05, 0x1774)}, /* Asus Gobi QDL device */ {DEVICE_G1K(0x19d2, 0xfff3)}, /* ONDA Gobi Modem device */ {DEVICE_G1K(0x19d2, 0xfff2)}, /* ONDA Gobi QDL device */ {DEVICE_G1K(0x1557, 0x0a80)}, /* OQO Gobi QDL device */ {DEVICE_G1K(0x05c6, 0x9001)}, /* Generic Gobi Modem device */ {DEVICE_G1K(0x05c6, 0x9002)}, /* Generic Gobi Modem device */ {DEVICE_G1K(0x05c6, 0x9202)}, /* Generic Gobi Modem device */ {DEVICE_G1K(0x05c6, 0x9203)}, /* Generic Gobi Modem device */ {DEVICE_G1K(0x05c6, 0x9222)}, /* Generic Gobi Modem device */ {DEVICE_G1K(0x05c6, 0x9008)}, /* Generic Gobi QDL device */ {DEVICE_G1K(0x05c6, 0x9009)}, /* Generic Gobi Modem device */ {DEVICE_G1K(0x05c6, 0x9201)}, /* Generic Gobi QDL device */ {DEVICE_G1K(0x05c6, 0x9221)}, /* Generic Gobi QDL device */ {DEVICE_G1K(0x05c6, 0x9231)}, /* Generic Gobi QDL device */ {DEVICE_G1K(0x1f45, 0x0001)}, /* Unknown Gobi QDL device */ {DEVICE_G1K(0x1bc7, 0x900e)}, /* Telit Gobi QDL device */ /* Gobi 2000 devices */ {USB_DEVICE(0x1410, 0xa010)}, /* Novatel Gobi 2000 QDL device */ {USB_DEVICE(0x1410, 0xa011)}, /* Novatel Gobi 2000 QDL device */ {USB_DEVICE(0x1410, 0xa012)}, /* Novatel Gobi 2000 QDL device */ {USB_DEVICE(0x1410, 0xa013)}, /* Novatel Gobi 2000 QDL device */ {USB_DEVICE(0x1410, 0xa014)}, /* Novatel Gobi 2000 QDL device */ {USB_DEVICE(0x413c, 0x8185)}, /* Dell Gobi 2000 QDL device (N0218, VU936) */ {USB_DEVICE(0x413c, 0x8186)}, /* Dell Gobi 2000 Modem device (N0218, VU936) */ {USB_DEVICE(0x05c6, 0x9208)}, /* Generic Gobi 2000 QDL device */ {USB_DEVICE(0x05c6, 0x920b)}, /* Generic Gobi 2000 Modem device */ {USB_DEVICE(0x05c6, 0x9224)}, /* Sony Gobi 2000 QDL device (N0279, VU730) */ {USB_DEVICE(0x05c6, 0x9225)}, /* 
Sony Gobi 2000 Modem device (N0279, VU730) */ {USB_DEVICE(0x05c6, 0x9244)}, /* Samsung Gobi 2000 QDL device (VL176) */ {USB_DEVICE(0x05c6, 0x9245)}, /* Samsung Gobi 2000 Modem device (VL176) */ {USB_DEVICE(0x03f0, 0x241d)}, /* HP Gobi 2000 QDL device (VP412) */ {USB_DEVICE(0x03f0, 0x251d)}, /* HP Gobi 2000 Modem device (VP412) */ {USB_DEVICE(0x05c6, 0x9214)}, /* Acer Gobi 2000 QDL device (VP413) */ {USB_DEVICE(0x05c6, 0x9215)}, /* Acer Gobi 2000 Modem device (VP413) */ {USB_DEVICE(0x05c6, 0x9264)}, /* Asus Gobi 2000 QDL device (VR305) */ {USB_DEVICE(0x05c6, 0x9265)}, /* Asus Gobi 2000 Modem device (VR305) */ {USB_DEVICE(0x05c6, 0x9234)}, /* Top Global Gobi 2000 QDL device (VR306) */ {USB_DEVICE(0x05c6, 0x9235)}, /* Top Global Gobi 2000 Modem device (VR306) */ {USB_DEVICE(0x05c6, 0x9274)}, /* iRex Technologies Gobi 2000 QDL device (VR307) */ {USB_DEVICE(0x05c6, 0x9275)}, /* iRex Technologies Gobi 2000 Modem device (VR307) */ {USB_DEVICE(0x1199, 0x9000)}, /* Sierra Wireless Gobi 2000 QDL device (VT773) */ {USB_DEVICE(0x1199, 0x9001)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ {USB_DEVICE(0x1199, 0x9002)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ {USB_DEVICE(0x1199, 0x9003)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ {USB_DEVICE(0x1199, 0x9004)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ {USB_DEVICE(0x1199, 0x9005)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ {USB_DEVICE(0x1199, 0x9006)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ {USB_DEVICE(0x1199, 0x9007)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ {USB_DEVICE(0x1199, 0x9008)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ {USB_DEVICE(0x1199, 0x9009)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ {USB_DEVICE(0x1199, 0x900a)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ {USB_DEVICE(0x1199, 0x9011)}, /* Sierra Wireless Gobi 2000 Modem device (MC8305) */ {USB_DEVICE(0x16d8, 0x8001)}, /* CMDTech Gobi 2000 QDL device (VU922) */ {USB_DEVICE(0x16d8, 0x8002)}, /* CMDTech Gobi 2000 Modem device (VU922) */ {USB_DEVICE(0x05c6, 0x9204)}, /* Gobi 2000 QDL device */ {USB_DEVICE(0x05c6, 0x9205)}, /* Gobi 2000 Modem device */ /* Gobi 3000 devices */ {USB_DEVICE(0x03f0, 0x371d)}, /* HP un2430 Gobi 3000 QDL */ {USB_DEVICE(0x05c6, 0x920c)}, /* Gobi 3000 QDL */ {USB_DEVICE(0x05c6, 0x920d)}, /* Gobi 3000 Composite */ {USB_DEVICE(0x1410, 0xa020)}, /* Novatel Gobi 3000 QDL */ {USB_DEVICE(0x1410, 0xa021)}, /* Novatel Gobi 3000 Composite */ {USB_DEVICE(0x413c, 0x8193)}, /* Dell Gobi 3000 QDL */ {USB_DEVICE(0x413c, 0x8194)}, /* Dell Gobi 3000 Composite */ {USB_DEVICE(0x413c, 0x81a6)}, /* Dell DW5570 QDL (MC8805) */ {USB_DEVICE(0x1199, 0x68a4)}, /* Sierra Wireless QDL */ {USB_DEVICE(0x1199, 0x68a5)}, /* Sierra Wireless Modem */ {USB_DEVICE(0x1199, 0x68a8)}, /* Sierra Wireless QDL */ {USB_DEVICE(0x1199, 0x68a9)}, /* Sierra Wireless Modem */ {USB_DEVICE(0x1199, 0x9010)}, /* Sierra Wireless Gobi 3000 QDL */ {USB_DEVICE(0x1199, 0x9012)}, /* Sierra Wireless Gobi 3000 QDL */ {USB_DEVICE(0x1199, 0x9013)}, /* Sierra Wireless Gobi 3000 Modem device (MC8355) */ {USB_DEVICE(0x1199, 0x9014)}, /* Sierra Wireless Gobi 3000 QDL */ {USB_DEVICE(0x1199, 0x9015)}, /* Sierra Wireless Gobi 3000 Modem device */ {USB_DEVICE(0x1199, 0x9018)}, /* Sierra Wireless Gobi 3000 QDL */ {USB_DEVICE(0x1199, 0x9019)}, /* Sierra Wireless Gobi 3000 Modem device */ {USB_DEVICE(0x1199, 0x901b)}, /* Sierra Wireless MC7770 */ {USB_DEVICE(0x12D1, 0x14F0)}, /* Sony Gobi 3000 QDL */ {USB_DEVICE(0x12D1, 
0x14F1)}, /* Sony Gobi 3000 Composite */ {USB_DEVICE(0x0AF0, 0x8120)}, /* Option GTM681W */ /* non-Gobi Sierra Wireless devices */ {DEVICE_SWI(0x03f0, 0x4e1d)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */ {DEVICE_SWI(0x0f3d, 0x68a2)}, /* Sierra Wireless MC7700 */ {DEVICE_SWI(0x114f, 0x68a2)}, /* Sierra Wireless MC7750 */ {DEVICE_SWI(0x1199, 0x68a2)}, /* Sierra Wireless MC7710 */ {DEVICE_SWI(0x1199, 0x68c0)}, /* Sierra Wireless MC7304/MC7354 */ {DEVICE_SWI(0x1199, 0x901c)}, /* Sierra Wireless EM7700 */ {DEVICE_SWI(0x1199, 0x901e)}, /* Sierra Wireless EM7355 QDL */ {DEVICE_SWI(0x1199, 0x901f)}, /* Sierra Wireless EM7355 */ {DEVICE_SWI(0x1199, 0x9040)}, /* Sierra Wireless Modem */ {DEVICE_SWI(0x1199, 0x9041)}, /* Sierra Wireless MC7305/MC7355 */ {DEVICE_SWI(0x1199, 0x9051)}, /* Netgear AirCard 340U */ {DEVICE_SWI(0x1199, 0x9053)}, /* Sierra Wireless Modem */ {DEVICE_SWI(0x1199, 0x9054)}, /* Sierra Wireless Modem */ {DEVICE_SWI(0x1199, 0x9055)}, /* Netgear AirCard 341U */ {DEVICE_SWI(0x1199, 0x9056)}, /* Sierra Wireless Modem */ {DEVICE_SWI(0x1199, 0x9060)}, /* Sierra Wireless Modem */ {DEVICE_SWI(0x1199, 0x9061)}, /* Sierra Wireless Modem */ {DEVICE_SWI(0x1199, 0x9062)}, /* Sierra Wireless EM7305 QDL */ {DEVICE_SWI(0x1199, 0x9063)}, /* Sierra Wireless EM7305 */ {DEVICE_SWI(0x1199, 0x9070)}, /* Sierra Wireless MC74xx */ {DEVICE_SWI(0x1199, 0x9071)}, /* Sierra Wireless MC74xx */ {DEVICE_SWI(0x1199, 0x9078)}, /* Sierra Wireless EM74xx */ {DEVICE_SWI(0x1199, 0x9079)}, /* Sierra Wireless EM74xx */ {DEVICE_SWI(0x1199, 0x907a)}, /* Sierra Wireless EM74xx QDL */ {DEVICE_SWI(0x1199, 0x907b)}, /* Sierra Wireless EM74xx */ {DEVICE_SWI(0x1199, 0x9090)}, /* Sierra Wireless EM7565 QDL */ {DEVICE_SWI(0x1199, 0x9091)}, /* Sierra Wireless EM7565 */ {DEVICE_SWI(0x1199, 0x90d2)}, /* Sierra Wireless EM9191 QDL */ {DEVICE_SWI(0x1199, 0x90e4)}, /* Sierra Wireless EM86xx QDL*/ {DEVICE_SWI(0x1199, 0x90e5)}, /* Sierra Wireless EM86xx */ {DEVICE_SWI(0x1199, 0xc080)}, /* Sierra Wireless EM7590 QDL */ {DEVICE_SWI(0x1199, 0xc081)}, /* Sierra Wireless EM7590 */ {DEVICE_SWI(0x413c, 0x81a2)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */ {DEVICE_SWI(0x413c, 0x81a3)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */ {DEVICE_SWI(0x413c, 0x81a4)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */ {DEVICE_SWI(0x413c, 0x81a8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */ {DEVICE_SWI(0x413c, 0x81a9)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */ {DEVICE_SWI(0x413c, 0x81b1)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */ {DEVICE_SWI(0x413c, 0x81b3)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */ {DEVICE_SWI(0x413c, 0x81b5)}, /* Dell Wireless 5811e QDL */ {DEVICE_SWI(0x413c, 0x81b6)}, /* Dell Wireless 5811e QDL */ {DEVICE_SWI(0x413c, 0x81c2)}, /* Dell Wireless 5811e */ {DEVICE_SWI(0x413c, 0x81cb)}, /* Dell Wireless 5816e QDL */ {DEVICE_SWI(0x413c, 0x81cc)}, /* Dell Wireless 5816e */ {DEVICE_SWI(0x413c, 0x81cf)}, /* Dell Wireless 5819 */ {DEVICE_SWI(0x413c, 0x81d0)}, /* Dell Wireless 5819 */ {DEVICE_SWI(0x413c, 0x81d1)}, /* Dell Wireless 5818 */ {DEVICE_SWI(0x413c, 0x81d2)}, /* Dell Wireless 5818 */ {DEVICE_SWI(0x413c, 0x8217)}, /* Dell Wireless DW5826e */ {DEVICE_SWI(0x413c, 0x8218)}, /* Dell Wireless DW5826e QDL */ /* Huawei devices */ {DEVICE_HWI(0x03f0, 0x581d)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */ { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, id_table); static int 
handle_quectel_ec20(struct device *dev, int ifnum) { int altsetting = 0; /* * Quectel EC20 Mini PCIe LTE module layout: * 0: DM/DIAG (use libqcdm from ModemManager for communication) * 1: NMEA * 2: AT-capable modem port * 3: Modem interface * 4: NDIS */ switch (ifnum) { case 0: dev_dbg(dev, "Quectel EC20 DM/DIAG interface found\n"); break; case 1: dev_dbg(dev, "Quectel EC20 NMEA GPS interface found\n"); break; case 2: case 3: dev_dbg(dev, "Quectel EC20 Modem port found\n"); break; case 4: /* Don't claim the QMI/net interface */ altsetting = -1; break; } return altsetting; } static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id) { struct usb_host_interface *intf = serial->interface->cur_altsetting; struct device *dev = &serial->dev->dev; int retval = -ENODEV; __u8 nintf; __u8 ifnum; int altsetting = -1; bool sendsetup = false; /* we only support vendor specific functions */ if (intf->desc.bInterfaceClass != USB_CLASS_VENDOR_SPEC) goto done; nintf = serial->dev->actconfig->desc.bNumInterfaces; dev_dbg(dev, "Num Interfaces = %d\n", nintf); ifnum = intf->desc.bInterfaceNumber; dev_dbg(dev, "This Interface = %d\n", ifnum); if (nintf == 1) { /* QDL mode */ /* Gobi 2000 has a single altsetting, older ones have two */ if (serial->interface->num_altsetting == 2) intf = usb_altnum_to_altsetting(serial->interface, 1); else if (serial->interface->num_altsetting > 2) goto done; if (intf && intf->desc.bNumEndpoints == 2 && usb_endpoint_is_bulk_in(&intf->endpoint[0].desc) && usb_endpoint_is_bulk_out(&intf->endpoint[1].desc)) { dev_dbg(dev, "QDL port found\n"); if (serial->interface->num_altsetting == 1) retval = 0; /* Success */ else altsetting = 1; } goto done; } /* default to enabling interface */ altsetting = 0; /* * Composite mode; don't bind to the QMI/net interface as that * gets handled by other drivers. 
	 */
	switch (id->driver_info) {
	case QCSERIAL_G1K:
		/*
		 * Gobi 1K USB layout:
		 * 0: DM/DIAG (use libqcdm from ModemManager for communication)
		 * 1: serial port (doesn't respond)
		 * 2: AT-capable modem port
		 * 3: QMI/net
		 */
		if (nintf < 3 || nintf > 4) {
			dev_err(dev, "unknown number of interfaces: %d\n", nintf);
			altsetting = -1;
			goto done;
		}

		if (ifnum == 0) {
			dev_dbg(dev, "Gobi 1K DM/DIAG interface found\n");
			altsetting = 1;
		} else if (ifnum == 2)
			dev_dbg(dev, "Modem port found\n");
		else
			altsetting = -1;
		break;
	case QCSERIAL_G2K:
		/* handle non-standard layouts */
		if (nintf == 5 && id->idProduct == QUECTEL_EC20_PID) {
			altsetting = handle_quectel_ec20(dev, ifnum);
			goto done;
		}

		/*
		 * Gobi 2K+ USB layout:
		 * 0: QMI/net
		 * 1: DM/DIAG (use libqcdm from ModemManager for communication)
		 * 2: AT-capable modem port
		 * 3: NMEA
		 */
		if (nintf < 3 || nintf > 4) {
			dev_err(dev, "unknown number of interfaces: %d\n", nintf);
			altsetting = -1;
			goto done;
		}

		switch (ifnum) {
		case 0:
			/* Don't claim the QMI/net interface */
			altsetting = -1;
			break;
		case 1:
			dev_dbg(dev, "Gobi 2K+ DM/DIAG interface found\n");
			break;
		case 2:
			dev_dbg(dev, "Modem port found\n");
			break;
		case 3:
			/*
			 * NMEA (serial line 9600 8N1)
			 * # echo "\$GPS_START" > /dev/ttyUSBx
			 * # echo "\$GPS_STOP" > /dev/ttyUSBx
			 */
			dev_dbg(dev, "Gobi 2K+ NMEA GPS interface found\n");
			break;
		}
		break;
	case QCSERIAL_SWI:
		/*
		 * Sierra Wireless layout:
		 * 0: DM/DIAG (use libqcdm from ModemManager for communication)
		 * 2: NMEA
		 * 3: AT-capable modem port
		 * 8: QMI/net
		 */
		switch (ifnum) {
		case 0:
			dev_dbg(dev, "DM/DIAG interface found\n");
			break;
		case 2:
			dev_dbg(dev, "NMEA GPS interface found\n");
			sendsetup = true;
			break;
		case 3:
			dev_dbg(dev, "Modem port found\n");
			sendsetup = true;
			break;
		default:
			/* don't claim any unsupported interface */
			altsetting = -1;
			break;
		}
		break;
	case QCSERIAL_HWI:
		/*
		 * Huawei devices map functions by subclass + protocol
		 * instead of interface numbers. The protocol identifies
		 * a specific function, while the subclass indicates a
		 * specific firmware source.
		 *
		 * This is a list of functions known to be non-serial.
		 * The rest are assumed to be serial and will be handled
		 * by this driver.
		 */
		switch (intf->desc.bInterfaceProtocol) {
		/* QMI combined (qmi_wwan) */
		case 0x07:
		case 0x37:
		case 0x67:
		/* QMI data (qmi_wwan) */
		case 0x08:
		case 0x38:
		case 0x68:
		/* QMI control (qmi_wwan) */
		case 0x09:
		case 0x39:
		case 0x69:
		/* NCM like (huawei_cdc_ncm) */
		case 0x16:
		case 0x46:
		case 0x76:
			altsetting = -1;
			break;
		default:
			dev_dbg(dev, "Huawei type serial port found (%02x/%02x/%02x)\n",
				intf->desc.bInterfaceClass,
				intf->desc.bInterfaceSubClass,
				intf->desc.bInterfaceProtocol);
		}
		break;
	default:
		dev_err(dev, "unsupported device layout type: %lu\n",
			id->driver_info);
		break;
	}

done:
	if (altsetting >= 0) {
		retval = usb_set_interface(serial->dev, ifnum, altsetting);
		if (retval < 0) {
			dev_err(dev, "Could not set interface, error %d\n",
				retval);
			retval = -ENODEV;
		}
	}

	if (!retval)
		usb_set_serial_data(serial, (void *)(unsigned long)sendsetup);

	return retval;
}

static int qc_attach(struct usb_serial *serial)
{
	struct usb_wwan_intf_private *data;
	bool sendsetup;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	sendsetup = !!(unsigned long)(usb_get_serial_data(serial));
	if (sendsetup)
		data->use_send_setup = 1;

	spin_lock_init(&data->susp_lock);

	usb_set_serial_data(serial, data);

	return 0;
}

static void qc_release(struct usb_serial *serial)
{
	struct usb_wwan_intf_private *priv = usb_get_serial_data(serial);

	usb_set_serial_data(serial, NULL);
	kfree(priv);
}

static struct usb_serial_driver qcdevice = {
	.driver = {
		.name		= "qcserial",
	},
	.description		= "Qualcomm USB modem",
	.id_table		= id_table,
	.num_ports		= 1,
	.probe			= qcprobe,
	.open			= usb_wwan_open,
	.close			= usb_wwan_close,
	.dtr_rts		= usb_wwan_dtr_rts,
	.write			= usb_wwan_write,
	.write_room		= usb_wwan_write_room,
	.chars_in_buffer	= usb_wwan_chars_in_buffer,
	.tiocmget		= usb_wwan_tiocmget,
	.tiocmset		= usb_wwan_tiocmset,
	.attach			= qc_attach,
	.release		= qc_release,
	.port_probe		= usb_wwan_port_probe,
	.port_remove		= usb_wwan_port_remove,
#ifdef CONFIG_PM
	.suspend		= usb_wwan_suspend,
	.resume			= usb_wwan_resume,
#endif
};

static struct usb_serial_driver * const serial_drivers[] = {
	&qcdevice, NULL
};

module_usb_serial_driver(serial_drivers, id_table);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL v2");
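One detail of the driver above that is easy to miss: qcprobe() runs before any per-device state exists, so it records the sendsetup decision by casting the boolean itself into the pointer slot behind usb_set_serial_data(), and qc_attach() later reads it back before installing the real usb_wwan_intf_private allocation in the same slot. A minimal standalone sketch of that integer-in-pointer handoff (the slot variable and function names here are illustrative, not part of the driver):

#include <stdio.h>

static void *slot;	/* stands in for the usb_{set,get}_serial_data() storage */

static void store_flag(int sendsetup)
{
	/* encode the boolean in the pointer value itself; no allocation */
	slot = (void *)(unsigned long)sendsetup;
}

static int load_flag(void)
{
	/* recover the boolean before the slot is reused for real data */
	return !!(unsigned long)slot;
}

int main(void)
{
	store_flag(1);
	printf("sendsetup = %d\n", load_flag());	/* prints 1 */
	return 0;
}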
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@netfilter.org> */

/* Kernel module implementing an IP set type: the hash:net,port type */

#include <linux/jhash.h>
#include <linux/module.h>
#include <linux/ip.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/random.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/ipset/pfxlen.h>
#include <linux/netfilter/ipset/ip_set.h>
#include <linux/netfilter/ipset/ip_set_getport.h>
#include <linux/netfilter/ipset/ip_set_hash.h>

#define IPSET_TYPE_REV_MIN	0
/*				1    SCTP and UDPLITE support added */
/*				2    Range as input support for IPv4 added */
/*				3    nomatch flag support added */
/*				4    Counters support added */
/*				5    Comments support added */
/*				6    Forceadd support added */
/*				7    skbinfo support added */
#define IPSET_TYPE_REV_MAX	8 /* bucketsize, initval support added */

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@netfilter.org>");
IP_SET_MODULE_DESC("hash:net,port", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
MODULE_ALIAS("ip_set_hash:net,port");

/* Type specific function prefix */
#define HTYPE		hash_netport
#define IP_SET_HASH_WITH_PROTO
#define IP_SET_HASH_WITH_NETS

/* We squeeze the "nomatch" flag into cidr: we don't support cidr == 0.
 * However this way we have to store internally cidr - 1,
 * dancing back and forth.
*/ #define IP_SET_HASH_WITH_NETS_PACKED /* IPv4 variant */ /* Member elements */ struct hash_netport4_elem { __be32 ip; __be16 port; u8 proto; u8 cidr:7; u8 nomatch:1; }; /* Common functions */ static bool hash_netport4_data_equal(const struct hash_netport4_elem *ip1, const struct hash_netport4_elem *ip2, u32 *multi) { return ip1->ip == ip2->ip && ip1->port == ip2->port && ip1->proto == ip2->proto && ip1->cidr == ip2->cidr; } static int hash_netport4_do_data_match(const struct hash_netport4_elem *elem) { return elem->nomatch ? -ENOTEMPTY : 1; } static void hash_netport4_data_set_flags(struct hash_netport4_elem *elem, u32 flags) { elem->nomatch = !!((flags >> 16) & IPSET_FLAG_NOMATCH); } static void hash_netport4_data_reset_flags(struct hash_netport4_elem *elem, u8 *flags) { swap(*flags, elem->nomatch); } static void hash_netport4_data_netmask(struct hash_netport4_elem *elem, u8 cidr) { elem->ip &= ip_set_netmask(cidr); elem->cidr = cidr - 1; } static bool hash_netport4_data_list(struct sk_buff *skb, const struct hash_netport4_elem *data) { u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0; if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) || nla_put_net16(skb, IPSET_ATTR_PORT, data->port) || nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr + 1) || nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) || (flags && nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)))) goto nla_put_failure; return false; nla_put_failure: return true; } static void hash_netport4_data_next(struct hash_netport4_elem *next, const struct hash_netport4_elem *d) { next->ip = d->ip; next->port = d->port; } #define MTYPE hash_netport4 #define HOST_MASK 32 #include "ip_set_hash_gen.h" static int hash_netport4_kadt(struct ip_set *set, const struct sk_buff *skb, const struct xt_action_param *par, enum ipset_adt adt, struct ip_set_adt_opt *opt) { const struct hash_netport4 *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_netport4_elem e = { .cidr = INIT_CIDR(h->nets[0].cidr[0], HOST_MASK), }; struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); if (adt == IPSET_TEST) e.cidr = HOST_MASK - 1; if (!ip_set_get_ip4_port(skb, opt->flags & IPSET_DIM_TWO_SRC, &e.port, &e.proto)) return -EINVAL; ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip); e.ip &= ip_set_netmask(e.cidr + 1); return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags); } static int hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[], enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) { struct hash_netport4 *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_netport4_elem e = { .cidr = HOST_MASK - 1 }; struct ip_set_ext ext = IP_SET_INIT_UEXT(set); u32 port, port_to, p = 0, ip = 0, ip_to = 0, i = 0; bool with_ports = false; u8 cidr; int ret; if (tb[IPSET_ATTR_LINENO]) *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); if (unlikely(!tb[IPSET_ATTR_IP] || !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) || !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) || !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS))) return -IPSET_ERR_PROTOCOL; ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip); if (ret) return ret; ret = ip_set_get_extensions(set, tb, &ext); if (ret) return ret; if (tb[IPSET_ATTR_CIDR]) { cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); if (!cidr || cidr > HOST_MASK) return -IPSET_ERR_INVALID_CIDR; e.cidr = cidr - 1; } e.port = nla_get_be16(tb[IPSET_ATTR_PORT]); if (tb[IPSET_ATTR_PROTO]) { e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]); with_ports = ip_set_proto_with_ports(e.proto); if (e.proto == 0) return 
-IPSET_ERR_INVALID_PROTO; } else { return -IPSET_ERR_MISSING_PROTO; } if (!(with_ports || e.proto == IPPROTO_ICMP)) e.port = 0; with_ports = with_ports && tb[IPSET_ATTR_PORT_TO]; if (tb[IPSET_ATTR_CADT_FLAGS]) { u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]); if (cadt_flags & IPSET_FLAG_NOMATCH) flags |= (IPSET_FLAG_NOMATCH << 16); } if (adt == IPSET_TEST || !(with_ports || tb[IPSET_ATTR_IP_TO])) { e.ip = htonl(ip & ip_set_hostmask(e.cidr + 1)); ret = adtfn(set, &e, &ext, &ext, flags); return ip_set_enomatch(ret, flags, adt, set) ? -ret : ip_set_eexist(ret, flags) ? 0 : ret; } port = port_to = ntohs(e.port); if (tb[IPSET_ATTR_PORT_TO]) { port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]); if (port_to < port) swap(port, port_to); } if (tb[IPSET_ATTR_IP_TO]) { ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to); if (ret) return ret; if (ip_to < ip) swap(ip, ip_to); if (ip + UINT_MAX == ip_to) return -IPSET_ERR_HASH_RANGE; } else { ip_set_mask_from_to(ip, ip_to, e.cidr + 1); } if (retried) { ip = ntohl(h->next.ip); p = ntohs(h->next.port); } else { p = port; } do { e.ip = htonl(ip); ip = ip_set_range_to_cidr(ip, ip_to, &cidr); e.cidr = cidr - 1; for (; p <= port_to; p++, i++) { e.port = htons(p); if (i > IPSET_MAX_RANGE) { hash_netport4_data_next(&h->next, &e); return -ERANGE; } ret = adtfn(set, &e, &ext, &ext, flags); if (ret && !ip_set_eexist(ret, flags)) return ret; ret = 0; } p = port; } while (ip++ < ip_to); return ret; } /* IPv6 variant */ struct hash_netport6_elem { union nf_inet_addr ip; __be16 port; u8 proto; u8 cidr:7; u8 nomatch:1; }; /* Common functions */ static bool hash_netport6_data_equal(const struct hash_netport6_elem *ip1, const struct hash_netport6_elem *ip2, u32 *multi) { return ipv6_addr_equal(&ip1->ip.in6, &ip2->ip.in6) && ip1->port == ip2->port && ip1->proto == ip2->proto && ip1->cidr == ip2->cidr; } static int hash_netport6_do_data_match(const struct hash_netport6_elem *elem) { return elem->nomatch ? -ENOTEMPTY : 1; } static void hash_netport6_data_set_flags(struct hash_netport6_elem *elem, u32 flags) { elem->nomatch = !!((flags >> 16) & IPSET_FLAG_NOMATCH); } static void hash_netport6_data_reset_flags(struct hash_netport6_elem *elem, u8 *flags) { swap(*flags, elem->nomatch); } static void hash_netport6_data_netmask(struct hash_netport6_elem *elem, u8 cidr) { ip6_netmask(&elem->ip, cidr); elem->cidr = cidr - 1; } static bool hash_netport6_data_list(struct sk_buff *skb, const struct hash_netport6_elem *data) { u32 flags = data->nomatch ? 
IPSET_FLAG_NOMATCH : 0; if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) || nla_put_net16(skb, IPSET_ATTR_PORT, data->port) || nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr + 1) || nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) || (flags && nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)))) goto nla_put_failure; return false; nla_put_failure: return true; } static void hash_netport6_data_next(struct hash_netport6_elem *next, const struct hash_netport6_elem *d) { next->port = d->port; } #undef MTYPE #undef HOST_MASK #define MTYPE hash_netport6 #define HOST_MASK 128 #define IP_SET_EMIT_CREATE #include "ip_set_hash_gen.h" static int hash_netport6_kadt(struct ip_set *set, const struct sk_buff *skb, const struct xt_action_param *par, enum ipset_adt adt, struct ip_set_adt_opt *opt) { const struct hash_netport6 *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_netport6_elem e = { .cidr = INIT_CIDR(h->nets[0].cidr[0], HOST_MASK), }; struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); if (adt == IPSET_TEST) e.cidr = HOST_MASK - 1; if (!ip_set_get_ip6_port(skb, opt->flags & IPSET_DIM_TWO_SRC, &e.port, &e.proto)) return -EINVAL; ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip.in6); ip6_netmask(&e.ip, e.cidr + 1); return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags); } static int hash_netport6_uadt(struct ip_set *set, struct nlattr *tb[], enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) { const struct hash_netport6 *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_netport6_elem e = { .cidr = HOST_MASK - 1 }; struct ip_set_ext ext = IP_SET_INIT_UEXT(set); u32 port, port_to; bool with_ports = false; u8 cidr; int ret; if (tb[IPSET_ATTR_LINENO]) *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); if (unlikely(!tb[IPSET_ATTR_IP] || !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) || !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) || !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS))) return -IPSET_ERR_PROTOCOL; if (unlikely(tb[IPSET_ATTR_IP_TO])) return -IPSET_ERR_HASH_RANGE_UNSUPPORTED; ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip); if (ret) return ret; ret = ip_set_get_extensions(set, tb, &ext); if (ret) return ret; if (tb[IPSET_ATTR_CIDR]) { cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); if (!cidr || cidr > HOST_MASK) return -IPSET_ERR_INVALID_CIDR; e.cidr = cidr - 1; } ip6_netmask(&e.ip, e.cidr + 1); e.port = nla_get_be16(tb[IPSET_ATTR_PORT]); if (tb[IPSET_ATTR_PROTO]) { e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]); with_ports = ip_set_proto_with_ports(e.proto); if (e.proto == 0) return -IPSET_ERR_INVALID_PROTO; } else { return -IPSET_ERR_MISSING_PROTO; } if (!(with_ports || e.proto == IPPROTO_ICMPV6)) e.port = 0; if (tb[IPSET_ATTR_CADT_FLAGS]) { u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]); if (cadt_flags & IPSET_FLAG_NOMATCH) flags |= (IPSET_FLAG_NOMATCH << 16); } if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) { ret = adtfn(set, &e, &ext, &ext, flags); return ip_set_enomatch(ret, flags, adt, set) ? -ret : ip_set_eexist(ret, flags) ? 
		       0 : ret;
	}

	port = ntohs(e.port);
	port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
	if (port > port_to)
		swap(port, port_to);

	if (retried)
		port = ntohs(h->next.port);
	for (; port <= port_to; port++) {
		e.port = htons(port);
		ret = adtfn(set, &e, &ext, &ext, flags);

		if (ret && !ip_set_eexist(ret, flags))
			return ret;

		ret = 0;
	}
	return ret;
}

static struct ip_set_type hash_netport_type __read_mostly = {
	.name		= "hash:net,port",
	.protocol	= IPSET_PROTOCOL,
	.features	= IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_NOMATCH,
	.dimension	= IPSET_DIM_TWO,
	.family		= NFPROTO_UNSPEC,
	.revision_min	= IPSET_TYPE_REV_MIN,
	.revision_max	= IPSET_TYPE_REV_MAX,
	.create_flags[IPSET_TYPE_REV_MAX] = IPSET_CREATE_FLAG_BUCKETSIZE,
	.create		= hash_netport_create,
	.create_policy	= {
		[IPSET_ATTR_HASHSIZE]	= { .type = NLA_U32 },
		[IPSET_ATTR_MAXELEM]	= { .type = NLA_U32 },
		[IPSET_ATTR_INITVAL]	= { .type = NLA_U32 },
		[IPSET_ATTR_BUCKETSIZE]	= { .type = NLA_U8 },
		[IPSET_ATTR_RESIZE]	= { .type = NLA_U8 },
		[IPSET_ATTR_PROTO]	= { .type = NLA_U8 },
		[IPSET_ATTR_TIMEOUT]	= { .type = NLA_U32 },
		[IPSET_ATTR_CADT_FLAGS]	= { .type = NLA_U32 },
	},
	.adt_policy	= {
		[IPSET_ATTR_IP]		= { .type = NLA_NESTED },
		[IPSET_ATTR_IP_TO]	= { .type = NLA_NESTED },
		[IPSET_ATTR_PORT]	= { .type = NLA_U16 },
		[IPSET_ATTR_PORT_TO]	= { .type = NLA_U16 },
		[IPSET_ATTR_PROTO]	= { .type = NLA_U8 },
		[IPSET_ATTR_CIDR]	= { .type = NLA_U8 },
		[IPSET_ATTR_TIMEOUT]	= { .type = NLA_U32 },
		[IPSET_ATTR_LINENO]	= { .type = NLA_U32 },
		[IPSET_ATTR_CADT_FLAGS]	= { .type = NLA_U32 },
		[IPSET_ATTR_BYTES]	= { .type = NLA_U64 },
		[IPSET_ATTR_PACKETS]	= { .type = NLA_U64 },
		[IPSET_ATTR_COMMENT]	= { .type = NLA_NUL_STRING,
					    .len  = IPSET_MAX_COMMENT_SIZE },
		[IPSET_ATTR_SKBMARK]	= { .type = NLA_U64 },
		[IPSET_ATTR_SKBPRIO]	= { .type = NLA_U32 },
		[IPSET_ATTR_SKBQUEUE]	= { .type = NLA_U16 },
	},
	.me		= THIS_MODULE,
};

static int __init
hash_netport_init(void)
{
	return ip_set_type_register(&hash_netport_type);
}

static void __exit
hash_netport_fini(void)
{
	rcu_barrier();
	ip_set_type_unregister(&hash_netport_type);
}

module_init(hash_netport_init);
module_exit(hash_netport_fini);
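Both element types above rely on the packing described in the header comment: a /0 network is unsupported, so the 7-bit cidr field always holds the real prefix length minus one, and every producer and consumer (hash_netport4_data_netmask(), the *_data_list() dump helpers, the uadt parsers) adds or subtracts that bias. A standalone sketch of the round trip, with hypothetical helper names not taken from the module:

#include <assert.h>
#include <stdio.h>

/* mirrors struct hash_netport4_elem's packing: 7 bits of cidr, 1 of nomatch */
struct elem {
	unsigned char cidr:7;	/* stores real_cidr - 1, so 1..128 fits in 7 bits */
	unsigned char nomatch:1;
};

static void set_cidr(struct elem *e, unsigned int cidr)
{
	assert(cidr >= 1 && cidr <= 128);	/* cidr == 0 is unsupported */
	e->cidr = cidr - 1;
}

static unsigned int get_cidr(const struct elem *e)
{
	return e->cidr + 1;	/* undo the bias on the way out */
}

int main(void)
{
	struct elem e = { 0 };

	set_cidr(&e, 128);	/* an IPv6 host route still fits: stored as 127 */
	e.nomatch = 1;
	printf("cidr=%u nomatch=%d\n", get_cidr(&e), e.nomatch);
	return 0;
}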
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * irq_domain - IRQ translation domains
 *
 * Translation infrastructure between hw and linux irq numbers. This is
 * helpful for interrupt controllers to implement mapping between hardware
 * irq numbers and the Linux irq number space.
 *
 * irq_domains also have hooks for translating device tree or other
 * firmware interrupt representations into a hardware irq number that
 * can be mapped back to a Linux irq number without any extra platform
 * support code.
 *
 * Interrupt controller "domain" data structure.
This could be defined as a * irq domain controller. That is, it handles the mapping between hardware * and virtual interrupt numbers for a given interrupt domain. The domain * structure is generally created by the PIC code for a given PIC instance * (though a domain can cover more than one PIC if they have a flat number * model). It's the domain callbacks that are responsible for setting the * irq_chip on a given irq_desc after it's been mapped. * * The host code and data structures use a fwnode_handle pointer to * identify the domain. In some cases, and in order to preserve source * code compatibility, this fwnode pointer is "upgraded" to a DT * device_node. For those firmware infrastructures that do not provide * a unique identifier for an interrupt controller, the irq_domain * code offers a fwnode allocator. */ #ifndef _LINUX_IRQDOMAIN_H #define _LINUX_IRQDOMAIN_H #include <linux/types.h> #include <linux/irqdomain_defs.h> #include <linux/irqhandler.h> #include <linux/of.h> #include <linux/mutex.h> #include <linux/radix-tree.h> struct device_node; struct fwnode_handle; struct irq_domain; struct irq_chip; struct irq_data; struct irq_desc; struct cpumask; struct seq_file; struct irq_affinity_desc; struct msi_parent_ops; #define IRQ_DOMAIN_IRQ_SPEC_PARAMS 16 /** * struct irq_fwspec - generic IRQ specifier structure * * @fwnode: Pointer to a firmware-specific descriptor * @param_count: Number of device-specific parameters * @param: Device-specific parameters * * This structure, directly modeled after of_phandle_args, is used to * pass a device-specific description of an interrupt. */ struct irq_fwspec { struct fwnode_handle *fwnode; int param_count; u32 param[IRQ_DOMAIN_IRQ_SPEC_PARAMS]; }; /* Conversion function from of_phandle_args fields to fwspec */ void of_phandle_args_to_fwspec(struct device_node *np, const u32 *args, unsigned int count, struct irq_fwspec *fwspec); /** * struct irq_domain_ops - Methods for irq_domain objects * @match: Match an interrupt controller device node to a host, returns * 1 on a match * @select: Match an interrupt controller fw specification. It is more generic * than @match as it receives a complete struct irq_fwspec. Therefore, * @select is preferred if provided. Returns 1 on a match. * @map: Create or update a mapping between a virtual irq number and a hw * irq number. This is called only once for a given mapping. * @unmap: Dispose of such a mapping * @xlate: Given a device tree node and interrupt specifier, decode * the hardware irq number and linux irq type value. * @alloc: Allocate @nr_irqs interrupts starting from @virq. * @free: Free @nr_irqs interrupts starting from @virq. * @activate: Activate one interrupt in HW (@irqd). If @reserve is set, only * reserve the vector. If unset, assign the vector (called from * request_irq()). * @deactivate: Disarm one interrupt (@irqd). * @translate: Given @fwspec, decode the hardware irq number (@out_hwirq) and * linux irq type value (@out_type). This is a generalised @xlate * (over struct irq_fwspec) and is preferred if provided. * @debug_show: For domains to show specific data for an interrupt in debugfs. * * Functions below are provided by the driver and called whenever a new mapping * is created or an old mapping is disposed. The driver can then proceed to * whatever internal data structures management is required. It also needs * to setup the irq_desc when returning from map(). 
*/ struct irq_domain_ops { int (*match)(struct irq_domain *d, struct device_node *node, enum irq_domain_bus_token bus_token); int (*select)(struct irq_domain *d, struct irq_fwspec *fwspec, enum irq_domain_bus_token bus_token); int (*map)(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw); void (*unmap)(struct irq_domain *d, unsigned int virq); int (*xlate)(struct irq_domain *d, struct device_node *node, const u32 *intspec, unsigned int intsize, unsigned long *out_hwirq, unsigned int *out_type); #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY /* extended V2 interfaces to support hierarchy irq_domains */ int (*alloc)(struct irq_domain *d, unsigned int virq, unsigned int nr_irqs, void *arg); void (*free)(struct irq_domain *d, unsigned int virq, unsigned int nr_irqs); int (*activate)(struct irq_domain *d, struct irq_data *irqd, bool reserve); void (*deactivate)(struct irq_domain *d, struct irq_data *irq_data); int (*translate)(struct irq_domain *d, struct irq_fwspec *fwspec, unsigned long *out_hwirq, unsigned int *out_type); #endif #ifdef CONFIG_GENERIC_IRQ_DEBUGFS void (*debug_show)(struct seq_file *m, struct irq_domain *d, struct irq_data *irqd, int ind); #endif }; extern const struct irq_domain_ops irq_generic_chip_ops; struct irq_domain_chip_generic; /** * struct irq_domain - Hardware interrupt number translation object * @link: Element in global irq_domain list. * @name: Name of interrupt domain * @ops: Pointer to irq_domain methods * @host_data: Private data pointer for use by owner. Not touched by irq_domain * core code. * @flags: Per irq_domain flags * @mapcount: The number of mapped interrupts * @mutex: Domain lock, hierarchical domains use root domain's lock * @root: Pointer to root domain, or containing structure if non-hierarchical * * Optional elements: * @fwnode: Pointer to firmware node associated with the irq_domain. Pretty easy * to swap it for the of_node via the irq_domain_get_of_node accessor * @bus_token: @fwnode's device_node might be used for several irq domains. But * in connection with @bus_token, the pair shall be unique in a * system. * @gc: Pointer to a list of generic chips. There is a helper function for * setting up one or more generic chips for interrupt controllers * drivers using the generic chip library which uses this pointer. * @dev: Pointer to the device which instantiated the irqdomain * With per device irq domains this is not necessarily the same * as @pm_dev. * @pm_dev: Pointer to a device that can be utilized for power management * purposes related to the irq domain. * @parent: Pointer to parent irq_domain to support hierarchy irq_domains * @msi_parent_ops: Pointer to MSI parent domain methods for per device domain init * @exit: Function called when the domain is destroyed * * Revmap data, used internally by the irq domain code: * @hwirq_max: Top limit for the HW irq number. Especially to avoid * conflicts/failures with reserved HW irqs. Can be ~0. 
* @revmap_size: Size of the linear map table @revmap * @revmap_tree: Radix map tree for hwirqs that don't fit in the linear map * @revmap: Linear table of irq_data pointers */ struct irq_domain { struct list_head link; const char *name; const struct irq_domain_ops *ops; void *host_data; unsigned int flags; unsigned int mapcount; struct mutex mutex; struct irq_domain *root; /* Optional data */ struct fwnode_handle *fwnode; enum irq_domain_bus_token bus_token; struct irq_domain_chip_generic *gc; struct device *dev; struct device *pm_dev; #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY struct irq_domain *parent; #endif #ifdef CONFIG_GENERIC_MSI_IRQ const struct msi_parent_ops *msi_parent_ops; #endif void (*exit)(struct irq_domain *d); /* reverse map data. The linear map gets appended to the irq_domain */ irq_hw_number_t hwirq_max; unsigned int revmap_size; struct radix_tree_root revmap_tree; struct irq_data __rcu *revmap[] __counted_by(revmap_size); }; /* Irq domain flags */ enum { /* Irq domain is hierarchical */ IRQ_DOMAIN_FLAG_HIERARCHY = (1 << 0), /* Irq domain name was allocated internally */ IRQ_DOMAIN_NAME_ALLOCATED = (1 << 1), /* Irq domain is an IPI domain with virq per cpu */ IRQ_DOMAIN_FLAG_IPI_PER_CPU = (1 << 2), /* Irq domain is an IPI domain with single virq */ IRQ_DOMAIN_FLAG_IPI_SINGLE = (1 << 3), /* Irq domain implements MSIs */ IRQ_DOMAIN_FLAG_MSI = (1 << 4), /* * Irq domain implements isolated MSI, see msi_device_has_isolated_msi() */ IRQ_DOMAIN_FLAG_ISOLATED_MSI = (1 << 5), /* Irq domain doesn't translate anything */ IRQ_DOMAIN_FLAG_NO_MAP = (1 << 6), /* Irq domain is a MSI parent domain */ IRQ_DOMAIN_FLAG_MSI_PARENT = (1 << 8), /* Irq domain is a MSI device domain */ IRQ_DOMAIN_FLAG_MSI_DEVICE = (1 << 9), /* Irq domain must destroy generic chips when removed */ IRQ_DOMAIN_FLAG_DESTROY_GC = (1 << 10), /* * Flags starting from IRQ_DOMAIN_FLAG_NONCORE are reserved * for implementation specific purposes and ignored by the * core code. 
	 */
	IRQ_DOMAIN_FLAG_NONCORE		= (1 << 16),
};

static inline struct device_node *irq_domain_get_of_node(struct irq_domain *d)
{
	return to_of_node(d->fwnode);
}

static inline void irq_domain_set_pm_device(struct irq_domain *d,
					    struct device *dev)
{
	if (d)
		d->pm_dev = dev;
}

#ifdef CONFIG_IRQ_DOMAIN
struct fwnode_handle *__irq_domain_alloc_fwnode(unsigned int type, int id,
						const char *name,
						phys_addr_t *pa);

enum {
	IRQCHIP_FWNODE_REAL,
	IRQCHIP_FWNODE_NAMED,
	IRQCHIP_FWNODE_NAMED_ID,
};

static inline struct fwnode_handle *irq_domain_alloc_named_fwnode(const char *name)
{
	return __irq_domain_alloc_fwnode(IRQCHIP_FWNODE_NAMED, 0, name, NULL);
}

static inline struct fwnode_handle *irq_domain_alloc_named_id_fwnode(const char *name, int id)
{
	return __irq_domain_alloc_fwnode(IRQCHIP_FWNODE_NAMED_ID, id, name,
					 NULL);
}

static inline struct fwnode_handle *irq_domain_alloc_fwnode(phys_addr_t *pa)
{
	return __irq_domain_alloc_fwnode(IRQCHIP_FWNODE_REAL, 0, NULL, pa);
}

void irq_domain_free_fwnode(struct fwnode_handle *fwnode);

struct irq_domain_chip_generic_info;

/**
 * struct irq_domain_info - Domain information structure
 * @fwnode:		firmware node for the interrupt controller
 * @domain_flags:	Additional flags to add to the domain flags
 * @size:		Size of linear map; 0 for radix mapping only
 * @hwirq_max:		Maximum number of interrupts supported by controller
 * @direct_max:		Maximum value of direct maps;
 *			Use ~0 for no limit; 0 for no direct mapping
 * @hwirq_base:		The first hardware interrupt number (legacy domains only)
 * @virq_base:		The first Linux interrupt number for legacy domains to
 *			immediately associate the interrupts after domain creation
 * @bus_token:		Domain bus token
 * @name_suffix:	Optional name suffix to avoid collisions when multiple
 *			domains are added using same fwnode
 * @ops:		Domain operation callbacks
 * @host_data:		Controller private data pointer
 * @dgc_info:		Generic chip information structure pointer used to
 *			create generic chips for the domain if not NULL
 * @init:		Function called when the domain is created.
 *			Allows additional domain initialisation.
 * @exit:		Function called when the domain is destroyed.
 *			Allows additional cleanup operations.
*/ struct irq_domain_info { struct fwnode_handle *fwnode; unsigned int domain_flags; unsigned int size; irq_hw_number_t hwirq_max; int direct_max; unsigned int hwirq_base; unsigned int virq_base; enum irq_domain_bus_token bus_token; const char *name_suffix; const struct irq_domain_ops *ops; void *host_data; #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY /** * @parent: Pointer to the parent irq domain used in a hierarchy domain */ struct irq_domain *parent; #endif struct irq_domain_chip_generic_info *dgc_info; int (*init)(struct irq_domain *d); void (*exit)(struct irq_domain *d); }; struct irq_domain *irq_domain_instantiate(const struct irq_domain_info *info); struct irq_domain *devm_irq_domain_instantiate(struct device *dev, const struct irq_domain_info *info); struct irq_domain *irq_domain_create_simple(struct fwnode_handle *fwnode, unsigned int size, unsigned int first_irq, const struct irq_domain_ops *ops, void *host_data); struct irq_domain *irq_domain_add_legacy(struct device_node *of_node, unsigned int size, unsigned int first_irq, irq_hw_number_t first_hwirq, const struct irq_domain_ops *ops, void *host_data); struct irq_domain *irq_domain_create_legacy(struct fwnode_handle *fwnode, unsigned int size, unsigned int first_irq, irq_hw_number_t first_hwirq, const struct irq_domain_ops *ops, void *host_data); extern struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec, enum irq_domain_bus_token bus_token); extern void irq_set_default_host(struct irq_domain *host); extern struct irq_domain *irq_get_default_host(void); extern int irq_domain_alloc_descs(int virq, unsigned int nr_irqs, irq_hw_number_t hwirq, int node, const struct irq_affinity_desc *affinity); static inline struct fwnode_handle *of_node_to_fwnode(struct device_node *node) { return node ? &node->fwnode : NULL; } extern const struct fwnode_operations irqchip_fwnode_ops; static inline bool is_fwnode_irqchip(const struct fwnode_handle *fwnode) { return fwnode && fwnode->ops == &irqchip_fwnode_ops; } extern void irq_domain_update_bus_token(struct irq_domain *domain, enum irq_domain_bus_token bus_token); static inline struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle *fwnode, enum irq_domain_bus_token bus_token) { struct irq_fwspec fwspec = { .fwnode = fwnode, }; return irq_find_matching_fwspec(&fwspec, bus_token); } static inline struct irq_domain *irq_find_matching_host(struct device_node *node, enum irq_domain_bus_token bus_token) { return irq_find_matching_fwnode(of_node_to_fwnode(node), bus_token); } static inline struct irq_domain *irq_find_host(struct device_node *node) { struct irq_domain *d; d = irq_find_matching_host(node, DOMAIN_BUS_WIRED); if (!d) d = irq_find_matching_host(node, DOMAIN_BUS_ANY); return d; } static inline struct irq_domain *irq_domain_add_simple(struct device_node *of_node, unsigned int size, unsigned int first_irq, const struct irq_domain_ops *ops, void *host_data) { return irq_domain_create_simple(of_node_to_fwnode(of_node), size, first_irq, ops, host_data); } /** * irq_domain_add_linear() - Allocate and register a linear revmap irq_domain. * @of_node: pointer to interrupt controller's device tree node. * @size: Number of interrupts in the domain. 
* @ops: map/unmap domain callbacks * @host_data: Controller private data pointer */ static inline struct irq_domain *irq_domain_add_linear(struct device_node *of_node, unsigned int size, const struct irq_domain_ops *ops, void *host_data) { struct irq_domain_info info = { .fwnode = of_node_to_fwnode(of_node), .size = size, .hwirq_max = size, .ops = ops, .host_data = host_data, }; struct irq_domain *d; d = irq_domain_instantiate(&info); return IS_ERR(d) ? NULL : d; } #ifdef CONFIG_IRQ_DOMAIN_NOMAP static inline struct irq_domain *irq_domain_add_nomap(struct device_node *of_node, unsigned int max_irq, const struct irq_domain_ops *ops, void *host_data) { struct irq_domain_info info = { .fwnode = of_node_to_fwnode(of_node), .hwirq_max = max_irq, .direct_max = max_irq, .ops = ops, .host_data = host_data, }; struct irq_domain *d; d = irq_domain_instantiate(&info); return IS_ERR(d) ? NULL : d; } extern unsigned int irq_create_direct_mapping(struct irq_domain *host); #endif static inline struct irq_domain *irq_domain_add_tree(struct device_node *of_node, const struct irq_domain_ops *ops, void *host_data) { struct irq_domain_info info = { .fwnode = of_node_to_fwnode(of_node), .hwirq_max = ~0U, .ops = ops, .host_data = host_data, }; struct irq_domain *d; d = irq_domain_instantiate(&info); return IS_ERR(d) ? NULL : d; } static inline struct irq_domain *irq_domain_create_linear(struct fwnode_handle *fwnode, unsigned int size, const struct irq_domain_ops *ops, void *host_data) { struct irq_domain_info info = { .fwnode = fwnode, .size = size, .hwirq_max = size, .ops = ops, .host_data = host_data, }; struct irq_domain *d; d = irq_domain_instantiate(&info); return IS_ERR(d) ? NULL : d; } static inline struct irq_domain *irq_domain_create_tree(struct fwnode_handle *fwnode, const struct irq_domain_ops *ops, void *host_data) { struct irq_domain_info info = { .fwnode = fwnode, .hwirq_max = ~0, .ops = ops, .host_data = host_data, }; struct irq_domain *d; d = irq_domain_instantiate(&info); return IS_ERR(d) ? NULL : d; } extern void irq_domain_remove(struct irq_domain *host); extern int irq_domain_associate(struct irq_domain *domain, unsigned int irq, irq_hw_number_t hwirq); extern void irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base, irq_hw_number_t hwirq_base, int count); extern unsigned int irq_create_mapping_affinity(struct irq_domain *host, irq_hw_number_t hwirq, const struct irq_affinity_desc *affinity); extern unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec); extern void irq_dispose_mapping(unsigned int virq); static inline unsigned int irq_create_mapping(struct irq_domain *host, irq_hw_number_t hwirq) { return irq_create_mapping_affinity(host, hwirq, NULL); } extern struct irq_desc *__irq_resolve_mapping(struct irq_domain *domain, irq_hw_number_t hwirq, unsigned int *irq); static inline struct irq_desc *irq_resolve_mapping(struct irq_domain *domain, irq_hw_number_t hwirq) { return __irq_resolve_mapping(domain, hwirq, NULL); } /** * irq_find_mapping() - Find a linux irq from a hw irq number. 
* @domain: domain owning this hardware interrupt * @hwirq: hardware irq number in that domain space */ static inline unsigned int irq_find_mapping(struct irq_domain *domain, irq_hw_number_t hwirq) { unsigned int irq; if (__irq_resolve_mapping(domain, hwirq, &irq)) return irq; return 0; } static inline unsigned int irq_linear_revmap(struct irq_domain *domain, irq_hw_number_t hwirq) { return irq_find_mapping(domain, hwirq); } extern const struct irq_domain_ops irq_domain_simple_ops; /* stock xlate functions */ int irq_domain_xlate_onecell(struct irq_domain *d, struct device_node *ctrlr, const u32 *intspec, unsigned int intsize, irq_hw_number_t *out_hwirq, unsigned int *out_type); int irq_domain_xlate_twocell(struct irq_domain *d, struct device_node *ctrlr, const u32 *intspec, unsigned int intsize, irq_hw_number_t *out_hwirq, unsigned int *out_type); int irq_domain_xlate_onetwocell(struct irq_domain *d, struct device_node *ctrlr, const u32 *intspec, unsigned int intsize, irq_hw_number_t *out_hwirq, unsigned int *out_type); int irq_domain_translate_twocell(struct irq_domain *d, struct irq_fwspec *fwspec, unsigned long *out_hwirq, unsigned int *out_type); int irq_domain_translate_onecell(struct irq_domain *d, struct irq_fwspec *fwspec, unsigned long *out_hwirq, unsigned int *out_type); /* IPI functions */ int irq_reserve_ipi(struct irq_domain *domain, const struct cpumask *dest); int irq_destroy_ipi(unsigned int irq, const struct cpumask *dest); /* V2 interfaces to support hierarchy IRQ domains. */ extern struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain, unsigned int virq); extern void irq_domain_set_info(struct irq_domain *domain, unsigned int virq, irq_hw_number_t hwirq, const struct irq_chip *chip, void *chip_data, irq_flow_handler_t handler, void *handler_data, const char *handler_name); extern void irq_domain_reset_irq_data(struct irq_data *irq_data); #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY extern struct irq_domain *irq_domain_create_hierarchy(struct irq_domain *parent, unsigned int flags, unsigned int size, struct fwnode_handle *fwnode, const struct irq_domain_ops *ops, void *host_data); static inline struct irq_domain *irq_domain_add_hierarchy(struct irq_domain *parent, unsigned int flags, unsigned int size, struct device_node *node, const struct irq_domain_ops *ops, void *host_data) { return irq_domain_create_hierarchy(parent, flags, size, of_node_to_fwnode(node), ops, host_data); } extern int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base, unsigned int nr_irqs, int node, void *arg, bool realloc, const struct irq_affinity_desc *affinity); extern void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs); extern int irq_domain_activate_irq(struct irq_data *irq_data, bool early); extern void irq_domain_deactivate_irq(struct irq_data *irq_data); static inline int irq_domain_alloc_irqs(struct irq_domain *domain, unsigned int nr_irqs, int node, void *arg) { return __irq_domain_alloc_irqs(domain, -1, nr_irqs, node, arg, false, NULL); } extern int irq_domain_alloc_irqs_hierarchy(struct irq_domain *domain, unsigned int irq_base, unsigned int nr_irqs, void *arg); extern int irq_domain_set_hwirq_and_chip(struct irq_domain *domain, unsigned int virq, irq_hw_number_t hwirq, const struct irq_chip *chip, void *chip_data); extern void irq_domain_free_irqs_common(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs); extern void irq_domain_free_irqs_top(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs); extern int 
irq_domain_push_irq(struct irq_domain *domain, int virq, void *arg); extern int irq_domain_pop_irq(struct irq_domain *domain, int virq); extern int irq_domain_alloc_irqs_parent(struct irq_domain *domain, unsigned int irq_base, unsigned int nr_irqs, void *arg); extern void irq_domain_free_irqs_parent(struct irq_domain *domain, unsigned int irq_base, unsigned int nr_irqs); extern int irq_domain_disconnect_hierarchy(struct irq_domain *domain, unsigned int virq); static inline bool irq_domain_is_hierarchy(struct irq_domain *domain) { return domain->flags & IRQ_DOMAIN_FLAG_HIERARCHY; } static inline bool irq_domain_is_ipi(struct irq_domain *domain) { return domain->flags & (IRQ_DOMAIN_FLAG_IPI_PER_CPU | IRQ_DOMAIN_FLAG_IPI_SINGLE); } static inline bool irq_domain_is_ipi_per_cpu(struct irq_domain *domain) { return domain->flags & IRQ_DOMAIN_FLAG_IPI_PER_CPU; } static inline bool irq_domain_is_ipi_single(struct irq_domain *domain) { return domain->flags & IRQ_DOMAIN_FLAG_IPI_SINGLE; } static inline bool irq_domain_is_msi(struct irq_domain *domain) { return domain->flags & IRQ_DOMAIN_FLAG_MSI; } static inline bool irq_domain_is_msi_parent(struct irq_domain *domain) { return domain->flags & IRQ_DOMAIN_FLAG_MSI_PARENT; } static inline bool irq_domain_is_msi_device(struct irq_domain *domain) { return domain->flags & IRQ_DOMAIN_FLAG_MSI_DEVICE; } #else /* CONFIG_IRQ_DOMAIN_HIERARCHY */ static inline int irq_domain_alloc_irqs(struct irq_domain *domain, unsigned int nr_irqs, int node, void *arg) { return -1; } static inline void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs) { } static inline bool irq_domain_is_hierarchy(struct irq_domain *domain) { return false; } static inline bool irq_domain_is_ipi(struct irq_domain *domain) { return false; } static inline bool irq_domain_is_ipi_per_cpu(struct irq_domain *domain) { return false; } static inline bool irq_domain_is_ipi_single(struct irq_domain *domain) { return false; } static inline bool irq_domain_is_msi(struct irq_domain *domain) { return false; } static inline bool irq_domain_is_msi_parent(struct irq_domain *domain) { return false; } static inline bool irq_domain_is_msi_device(struct irq_domain *domain) { return false; } #endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */ #ifdef CONFIG_GENERIC_MSI_IRQ int msi_device_domain_alloc_wired(struct irq_domain *domain, unsigned int hwirq, unsigned int type); void msi_device_domain_free_wired(struct irq_domain *domain, unsigned int virq); #else static inline int msi_device_domain_alloc_wired(struct irq_domain *domain, unsigned int hwirq, unsigned int type) { WARN_ON_ONCE(1); return -EINVAL; } static inline void msi_device_domain_free_wired(struct irq_domain *domain, unsigned int virq) { WARN_ON_ONCE(1); } #endif #else /* CONFIG_IRQ_DOMAIN */ static inline void irq_dispose_mapping(unsigned int virq) { } static inline struct irq_domain *irq_find_matching_fwnode( struct fwnode_handle *fwnode, enum irq_domain_bus_token bus_token) { return NULL; } #endif /* !CONFIG_IRQ_DOMAIN */ #endif /* _LINUX_IRQDOMAIN_H */ |
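As a rough illustration of the linear-revmap API declared above, here is a hedged sketch of a driver-side user: the stock irq_domain_xlate_onecell() as the one-cell xlate, a map() callback that installs the flow handler, and a mapping lookup. The chip, the ops instance, and demo_probe() are hypothetical; only the irq_domain_* and irq_* calls come from this header and <linux/irq.h>.

#include <linux/irq.h>
#include <linux/irqdomain.h>

static struct irq_chip demo_chip = {
	.name = "demo",		/* hypothetical controller */
};

/* Wire each hwirq to a flow handler as its mapping is created. */
static int demo_map(struct irq_domain *d, unsigned int virq,
		    irq_hw_number_t hw)
{
	irq_set_chip_and_handler(virq, &demo_chip, handle_level_irq);
	return 0;
}

static const struct irq_domain_ops demo_ops = {
	.map	= demo_map,
	.xlate	= irq_domain_xlate_onecell,	/* one-cell DT specifier */
};

static int demo_probe(struct device_node *np)
{
	struct irq_domain *d;
	unsigned int virq;

	/* 32 hwirqs, linear reverse map */
	d = irq_domain_add_linear(np, 32, &demo_ops, NULL);
	if (!d)
		return -ENOMEM;

	/* hwirq 5 -> Linux irq number; demo_map() runs here */
	virq = irq_create_mapping(d, 5);

	return virq ? 0 : -EINVAL;
}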
// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/usb/core/endpoint.c
 *
 * (C) Copyright 2002,2004,2006 Greg Kroah-Hartman
 * (C) Copyright 2002,2004 IBM Corp.
 * (C) Copyright 2006 Novell Inc.
 *
 * Released under the GPLv2 only.
 *
 * Endpoint sysfs stuff
 */

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/usb.h>
#include "usb.h"

struct ep_device {
	struct usb_endpoint_descriptor *desc;
	struct usb_device *udev;
	struct device dev;
};

#define to_ep_device(_dev) \
	container_of(_dev, struct ep_device, dev)

struct ep_attribute {
	struct attribute attr;
	ssize_t (*show)(struct usb_device *,
			struct usb_endpoint_descriptor *, char *);
};

#define to_ep_attribute(_attr) \
	container_of(_attr, struct ep_attribute, attr)

#define usb_ep_attr(field, format_string)			\
static ssize_t field##_show(struct device *dev,			\
			    struct device_attribute *attr,	\
			    char *buf)				\
{								\
	struct ep_device *ep = to_ep_device(dev);		\
	return sysfs_emit(buf, format_string, ep->desc->field);	\
}								\
static DEVICE_ATTR_RO(field)

usb_ep_attr(bLength, "%02x\n");
usb_ep_attr(bEndpointAddress, "%02x\n");
usb_ep_attr(bmAttributes, "%02x\n");
usb_ep_attr(bInterval, "%02x\n");

static ssize_t wMaxPacketSize_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct ep_device *ep = to_ep_device(dev);

	return sysfs_emit(buf, "%04x\n", usb_endpoint_maxp(ep->desc));
}
static DEVICE_ATTR_RO(wMaxPacketSize);

static ssize_t type_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct ep_device *ep = to_ep_device(dev);
	char *type = "unknown";

	switch (usb_endpoint_type(ep->desc)) {
	case USB_ENDPOINT_XFER_CONTROL:
		type = "Control";
		break;
	case USB_ENDPOINT_XFER_ISOC:
		type = "Isoc";
		break;
	case USB_ENDPOINT_XFER_BULK:
		type = "Bulk";
		break;
	case USB_ENDPOINT_XFER_INT:
		type = "Interrupt";
		break;
	}
	return sysfs_emit(buf, "%s\n", type);
}
static DEVICE_ATTR_RO(type);

static ssize_t interval_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct ep_device *ep = to_ep_device(dev);
	unsigned int interval;
	char unit;

	interval = usb_decode_interval(ep->desc, ep->udev->speed);
	if (interval % 1000) {
		unit = 'u';
	} else {
		unit = 'm';
		interval /= 1000;
	}

	return sysfs_emit(buf, "%d%cs\n", interval, unit);
}
static DEVICE_ATTR_RO(interval);

static ssize_t direction_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct ep_device *ep = to_ep_device(dev);
	char *direction;

	if (usb_endpoint_xfer_control(ep->desc))
		direction = "both";
	else if (usb_endpoint_dir_in(ep->desc))
		direction = "in";
	else
		direction = "out";

	return sysfs_emit(buf, "%s\n", direction);
}
static DEVICE_ATTR_RO(direction);

static struct attribute *ep_dev_attrs[] = {
	&dev_attr_bLength.attr,
	&dev_attr_bEndpointAddress.attr,
	&dev_attr_bmAttributes.attr,
	&dev_attr_bInterval.attr,
	&dev_attr_wMaxPacketSize.attr,
	&dev_attr_interval.attr,
	&dev_attr_type.attr,
	&dev_attr_direction.attr,
	NULL,
};

static const struct attribute_group ep_dev_attr_grp = {
	.attrs = ep_dev_attrs,
};

static const struct attribute_group *ep_dev_groups[] = {
	&ep_dev_attr_grp,
	NULL
};

static void ep_device_release(struct device *dev)
{
	struct ep_device *ep_dev = to_ep_device(dev);

	kfree(ep_dev);
}

const struct device_type usb_ep_device_type = {
	.name = "usb_endpoint",
	.release = ep_device_release,
};

int usb_create_ep_devs(struct device *parent,
		       struct usb_host_endpoint *endpoint,
		       struct usb_device *udev)
{
	struct ep_device *ep_dev;
	int retval;

	ep_dev = kzalloc(sizeof(*ep_dev), GFP_KERNEL);
	if (!ep_dev) {
		retval = -ENOMEM;
		goto exit;
	}

	ep_dev->desc = &endpoint->desc;
	ep_dev->udev = udev;
	ep_dev->dev.groups = ep_dev_groups;
	ep_dev->dev.type = &usb_ep_device_type;
	ep_dev->dev.parent = parent;
	dev_set_name(&ep_dev->dev, "ep_%02x", endpoint->desc.bEndpointAddress);

	retval = device_register(&ep_dev->dev);
	if (retval)
		goto error_register;

	device_enable_async_suspend(&ep_dev->dev);
	endpoint->ep_dev = ep_dev;
	return retval;

error_register:
	put_device(&ep_dev->dev);
exit:
	return retval;
}

void usb_remove_ep_devs(struct usb_host_endpoint *endpoint)
{
	struct ep_device *ep_dev = endpoint->ep_dev;

	if (ep_dev) {
		device_unregister(&ep_dev->dev);
		endpoint->ep_dev = NULL;
	}
}
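/*
 * Illustrative userspace sketch (not part of the original file): the
 * attributes registered above show up under the endpoint's sysfs directory,
 * e.g. .../ep_81/ for endpoint address 0x81.  The device path used here is
 * hypothetical; the real path depends on the bus topology of the machine.
 */
#include <stdio.h>

int main(void)
{
	const char *attr = "/sys/bus/usb/devices/1-1/1-1:1.0/ep_81/type";
	char buf[32];
	FILE *f = fopen(attr, "r");

	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		printf("endpoint type: %s", buf);	/* e.g. "Interrupt" */
	fclose(f);
	return 0;
}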
// SPDX-License-Identifier: GPL-2.0
/*
 * The USB Monitor, inspired by Dave Harding's USBMon.
 *
 * This is a binary format reader.
 *
 * Copyright (C) 2006 Paolo Abeni (paolo.abeni@email.it)
 * Copyright (C) 2006,2007 Pete Zaitcev (zaitcev@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/export.h>
#include <linux/usb.h>
#include <linux/poll.h>
#include <linux/compat.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/time64.h>
#include <linux/uaccess.h>

#include "usb_mon.h"

/*
 * Defined by USB 2.0 clause 9.3, table 9.2.
*/ #define SETUP_LEN 8 /* ioctl macros */ #define MON_IOC_MAGIC 0x92 #define MON_IOCQ_URB_LEN _IO(MON_IOC_MAGIC, 1) /* #2 used to be MON_IOCX_URB, removed before it got into Linus tree */ #define MON_IOCG_STATS _IOR(MON_IOC_MAGIC, 3, struct mon_bin_stats) #define MON_IOCT_RING_SIZE _IO(MON_IOC_MAGIC, 4) #define MON_IOCQ_RING_SIZE _IO(MON_IOC_MAGIC, 5) #define MON_IOCX_GET _IOW(MON_IOC_MAGIC, 6, struct mon_bin_get) #define MON_IOCX_MFETCH _IOWR(MON_IOC_MAGIC, 7, struct mon_bin_mfetch) #define MON_IOCH_MFLUSH _IO(MON_IOC_MAGIC, 8) /* #9 was MON_IOCT_SETAPI */ #define MON_IOCX_GETX _IOW(MON_IOC_MAGIC, 10, struct mon_bin_get) #ifdef CONFIG_COMPAT #define MON_IOCX_GET32 _IOW(MON_IOC_MAGIC, 6, struct mon_bin_get32) #define MON_IOCX_MFETCH32 _IOWR(MON_IOC_MAGIC, 7, struct mon_bin_mfetch32) #define MON_IOCX_GETX32 _IOW(MON_IOC_MAGIC, 10, struct mon_bin_get32) #endif /* * Some architectures have enormous basic pages (16KB for ia64, 64KB for ppc). * But it's all right. Just use a simple way to make sure the chunk is never * smaller than a page. * * N.B. An application does not know our chunk size. * * Woops, get_zeroed_page() returns a single page. I guess we're stuck with * page-sized chunks for the time being. */ #define CHUNK_SIZE PAGE_SIZE #define CHUNK_ALIGN(x) (((x)+CHUNK_SIZE-1) & ~(CHUNK_SIZE-1)) /* * The magic limit was calculated so that it allows the monitoring * application to pick data once in two ticks. This way, another application, * which presumably drives the bus, gets to hog CPU, yet we collect our data. * If HZ is 100, a 480 mbit/s bus drives 614 KB every jiffy. USB has an * enormous overhead built into the bus protocol, so we need about 1000 KB. * * This is still too much for most cases, where we just snoop a few * descriptor fetches for enumeration. So, the default is a "reasonable" * amount for systems with HZ=250 and incomplete bus saturation. * * XXX What about multi-megabyte URBs which take minutes to transfer? */ #define BUFF_MAX CHUNK_ALIGN(1200*1024) #define BUFF_DFL CHUNK_ALIGN(300*1024) #define BUFF_MIN CHUNK_ALIGN(8*1024) /* * The per-event API header (2 per URB). * * This structure is seen in userland as defined by the documentation. */ struct mon_bin_hdr { u64 id; /* URB ID - from submission to callback */ unsigned char type; /* Same as in text API; extensible. */ unsigned char xfer_type; /* ISO, Intr, Control, Bulk */ unsigned char epnum; /* Endpoint number and transfer direction */ unsigned char devnum; /* Device address */ unsigned short busnum; /* Bus number */ char flag_setup; char flag_data; s64 ts_sec; /* ktime_get_real_ts64 */ s32 ts_usec; /* ktime_get_real_ts64 */ int status; unsigned int len_urb; /* Length of data (submitted or actual) */ unsigned int len_cap; /* Delivered length */ union { unsigned char setup[SETUP_LEN]; /* Only for Control S-type */ struct iso_rec { int error_count; int numdesc; } iso; } s; int interval; int start_frame; unsigned int xfer_flags; unsigned int ndesc; /* Actual number of ISO descriptors */ }; /* * ISO vector, packed into the head of data stream. * This has to take 16 bytes to make sure that the end of buffer * wrap is not happening in the middle of a descriptor. */ struct mon_bin_isodesc { int iso_status; unsigned int iso_off; unsigned int iso_len; u32 _pad; }; /* per file statistic */ struct mon_bin_stats { u32 queued; u32 dropped; }; struct mon_bin_get { struct mon_bin_hdr __user *hdr; /* Can be 48 bytes or 64. 
*/ void __user *data; size_t alloc; /* Length of data (can be zero) */ }; struct mon_bin_mfetch { u32 __user *offvec; /* Vector of events fetched */ u32 nfetch; /* Number of events to fetch (out: fetched) */ u32 nflush; /* Number of events to flush */ }; #ifdef CONFIG_COMPAT struct mon_bin_get32 { u32 hdr32; u32 data32; u32 alloc32; }; struct mon_bin_mfetch32 { u32 offvec32; u32 nfetch32; u32 nflush32; }; #endif /* Having these two values same prevents wrapping of the mon_bin_hdr */ #define PKT_ALIGN 64 #define PKT_SIZE 64 #define PKT_SZ_API0 48 /* API 0 (2.6.20) size */ #define PKT_SZ_API1 64 /* API 1 size: extra fields */ #define ISODESC_MAX 128 /* Same number as usbfs allows, 2048 bytes. */ /* max number of USB bus supported */ #define MON_BIN_MAX_MINOR 128 /* * The buffer: map of used pages. */ struct mon_pgmap { struct page *pg; unsigned char *ptr; /* XXX just use page_to_virt everywhere? */ }; /* * This gets associated with an open file struct. */ struct mon_reader_bin { /* The buffer: one per open. */ spinlock_t b_lock; /* Protect b_cnt, b_in */ unsigned int b_size; /* Current size of the buffer - bytes */ unsigned int b_cnt; /* Bytes used */ unsigned int b_in, b_out; /* Offsets into buffer - bytes */ unsigned int b_read; /* Amount of read data in curr. pkt. */ struct mon_pgmap *b_vec; /* The map array */ wait_queue_head_t b_wait; /* Wait for data here */ struct mutex fetch_lock; /* Protect b_read, b_out */ int mmap_active; /* A list of these is needed for "bus 0". Some time later. */ struct mon_reader r; /* Stats */ unsigned int cnt_lost; }; static inline struct mon_bin_hdr *MON_OFF2HDR(const struct mon_reader_bin *rp, unsigned int offset) { return (struct mon_bin_hdr *) (rp->b_vec[offset / CHUNK_SIZE].ptr + offset % CHUNK_SIZE); } #define MON_RING_EMPTY(rp) ((rp)->b_cnt == 0) static unsigned char xfer_to_pipe[4] = { PIPE_CONTROL, PIPE_ISOCHRONOUS, PIPE_BULK, PIPE_INTERRUPT }; static const struct class mon_bin_class = { .name = "usbmon", }; static dev_t mon_bin_dev0; static struct cdev mon_bin_cdev; static void mon_buff_area_fill(const struct mon_reader_bin *rp, unsigned int offset, unsigned int size); static int mon_bin_wait_event(struct file *file, struct mon_reader_bin *rp); static int mon_alloc_buff(struct mon_pgmap *map, int npages); static void mon_free_buff(struct mon_pgmap *map, int npages); /* * This is a "chunked memcpy". It does not manipulate any counters. */ static unsigned int mon_copy_to_buff(const struct mon_reader_bin *this, unsigned int off, const unsigned char *from, unsigned int length) { unsigned int step_len; unsigned char *buf; unsigned int in_page; while (length) { /* * Determine step_len. */ step_len = length; in_page = CHUNK_SIZE - (off & (CHUNK_SIZE-1)); if (in_page < step_len) step_len = in_page; /* * Copy data and advance pointers. */ buf = this->b_vec[off / CHUNK_SIZE].ptr + off % CHUNK_SIZE; memcpy(buf, from, step_len); if ((off += step_len) >= this->b_size) off = 0; from += step_len; length -= step_len; } return off; } /* * This is a little worse than the above because it's "chunked copy_to_user". * The return value is an error code, not an offset. */ static int copy_from_buf(const struct mon_reader_bin *this, unsigned int off, char __user *to, int length) { unsigned int step_len; unsigned char *buf; unsigned int in_page; while (length) { /* * Determine step_len. */ step_len = length; in_page = CHUNK_SIZE - (off & (CHUNK_SIZE-1)); if (in_page < step_len) step_len = in_page; /* * Copy data and advance pointers. 
*/ buf = this->b_vec[off / CHUNK_SIZE].ptr + off % CHUNK_SIZE; if (copy_to_user(to, buf, step_len)) return -EINVAL; if ((off += step_len) >= this->b_size) off = 0; to += step_len; length -= step_len; } return 0; } /* * Allocate an (aligned) area in the buffer. * This is called under b_lock. * Returns ~0 on failure. */ static unsigned int mon_buff_area_alloc(struct mon_reader_bin *rp, unsigned int size) { unsigned int offset; size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1); if (rp->b_cnt + size > rp->b_size) return ~0; offset = rp->b_in; rp->b_cnt += size; if ((rp->b_in += size) >= rp->b_size) rp->b_in -= rp->b_size; return offset; } /* * This is the same thing as mon_buff_area_alloc, only it does not allow * buffers to wrap. This is needed by applications which pass references * into mmap-ed buffers up their stacks (libpcap can do that). * * Currently, we always have the header stuck with the data, although * it is not strictly speaking necessary. * * When a buffer would wrap, we place a filler packet to mark the space. */ static unsigned int mon_buff_area_alloc_contiguous(struct mon_reader_bin *rp, unsigned int size) { unsigned int offset; unsigned int fill_size; size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1); if (rp->b_cnt + size > rp->b_size) return ~0; if (rp->b_in + size > rp->b_size) { /* * This would wrap. Find if we still have space after * skipping to the end of the buffer. If we do, place * a filler packet and allocate a new packet. */ fill_size = rp->b_size - rp->b_in; if (rp->b_cnt + size + fill_size > rp->b_size) return ~0; mon_buff_area_fill(rp, rp->b_in, fill_size); offset = 0; rp->b_in = size; rp->b_cnt += size + fill_size; } else if (rp->b_in + size == rp->b_size) { offset = rp->b_in; rp->b_in = 0; rp->b_cnt += size; } else { offset = rp->b_in; rp->b_in += size; rp->b_cnt += size; } return offset; } /* * Return a few (kilo-)bytes to the head of the buffer. * This is used if a data fetch fails. */ static void mon_buff_area_shrink(struct mon_reader_bin *rp, unsigned int size) { /* size &= ~(PKT_ALIGN-1); -- we're called with aligned size */ rp->b_cnt -= size; if (rp->b_in < size) rp->b_in += rp->b_size; rp->b_in -= size; } /* * This has to be called under both b_lock and fetch_lock, because * it accesses both b_cnt and b_out. 
*/ static void mon_buff_area_free(struct mon_reader_bin *rp, unsigned int size) { size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1); rp->b_cnt -= size; if ((rp->b_out += size) >= rp->b_size) rp->b_out -= rp->b_size; } static void mon_buff_area_fill(const struct mon_reader_bin *rp, unsigned int offset, unsigned int size) { struct mon_bin_hdr *ep; ep = MON_OFF2HDR(rp, offset); memset(ep, 0, PKT_SIZE); ep->type = '@'; ep->len_cap = size - PKT_SIZE; } static inline char mon_bin_get_setup(unsigned char *setupb, const struct urb *urb, char ev_type) { if (urb->setup_packet == NULL) return 'Z'; memcpy(setupb, urb->setup_packet, SETUP_LEN); return 0; } static unsigned int mon_bin_get_data(const struct mon_reader_bin *rp, unsigned int offset, struct urb *urb, unsigned int length, char *flag) { int i; struct scatterlist *sg; unsigned int this_len; *flag = 0; if (urb->num_sgs == 0) { if (urb->transfer_buffer == NULL) { *flag = 'Z'; return length; } mon_copy_to_buff(rp, offset, urb->transfer_buffer, length); length = 0; } else { /* If IOMMU coalescing occurred, we cannot trust sg_page */ if (urb->transfer_flags & URB_DMA_SG_COMBINED) { *flag = 'D'; return length; } /* Copy up to the first non-addressable segment */ for_each_sg(urb->sg, sg, urb->num_sgs, i) { if (length == 0 || PageHighMem(sg_page(sg))) break; this_len = min_t(unsigned int, sg->length, length); offset = mon_copy_to_buff(rp, offset, sg_virt(sg), this_len); length -= this_len; } if (i == 0) *flag = 'D'; } return length; } /* * This is the look-ahead pass in case of 'C Zi', when actual_length cannot * be used to determine the length of the whole contiguous buffer. */ static unsigned int mon_bin_collate_isodesc(const struct mon_reader_bin *rp, struct urb *urb, unsigned int ndesc) { struct usb_iso_packet_descriptor *fp; unsigned int length; length = 0; fp = urb->iso_frame_desc; while (ndesc-- != 0) { if (fp->actual_length != 0) { if (fp->offset + fp->actual_length > length) length = fp->offset + fp->actual_length; } fp++; } return length; } static void mon_bin_get_isodesc(const struct mon_reader_bin *rp, unsigned int offset, struct urb *urb, char ev_type, unsigned int ndesc) { struct mon_bin_isodesc *dp; struct usb_iso_packet_descriptor *fp; fp = urb->iso_frame_desc; while (ndesc-- != 0) { dp = (struct mon_bin_isodesc *) (rp->b_vec[offset / CHUNK_SIZE].ptr + offset % CHUNK_SIZE); dp->iso_status = fp->status; dp->iso_off = fp->offset; dp->iso_len = (ev_type == 'S') ? fp->length : fp->actual_length; dp->_pad = 0; if ((offset += sizeof(struct mon_bin_isodesc)) >= rp->b_size) offset = 0; fp++; } } static void mon_bin_event(struct mon_reader_bin *rp, struct urb *urb, char ev_type, int status) { const struct usb_endpoint_descriptor *epd = &urb->ep->desc; struct timespec64 ts; unsigned long flags; unsigned int urb_length; unsigned int offset; unsigned int length; unsigned int delta; unsigned int ndesc, lendesc; unsigned char dir; struct mon_bin_hdr *ep; char data_tag = 0; ktime_get_real_ts64(&ts); spin_lock_irqsave(&rp->b_lock, flags); /* * Find the maximum allowable length, then allocate space. */ urb_length = (ev_type == 'S') ? 
urb->transfer_buffer_length : urb->actual_length; length = urb_length; if (usb_endpoint_xfer_isoc(epd)) { if (urb->number_of_packets < 0) { ndesc = 0; } else if (urb->number_of_packets >= ISODESC_MAX) { ndesc = ISODESC_MAX; } else { ndesc = urb->number_of_packets; } if (ev_type == 'C' && usb_urb_dir_in(urb)) length = mon_bin_collate_isodesc(rp, urb, ndesc); } else { ndesc = 0; } lendesc = ndesc*sizeof(struct mon_bin_isodesc); /* not an issue unless there's a subtle bug in a HCD somewhere */ if (length >= urb->transfer_buffer_length) length = urb->transfer_buffer_length; if (length >= rp->b_size/5) length = rp->b_size/5; if (usb_urb_dir_in(urb)) { if (ev_type == 'S') { length = 0; data_tag = '<'; } /* Cannot rely on endpoint number in case of control ep.0 */ dir = USB_DIR_IN; } else { if (ev_type == 'C') { length = 0; data_tag = '>'; } dir = 0; } if (rp->mmap_active) { offset = mon_buff_area_alloc_contiguous(rp, length + PKT_SIZE + lendesc); } else { offset = mon_buff_area_alloc(rp, length + PKT_SIZE + lendesc); } if (offset == ~0) { rp->cnt_lost++; spin_unlock_irqrestore(&rp->b_lock, flags); return; } ep = MON_OFF2HDR(rp, offset); if ((offset += PKT_SIZE) >= rp->b_size) offset = 0; /* * Fill the allocated area. */ memset(ep, 0, PKT_SIZE); ep->type = ev_type; ep->xfer_type = xfer_to_pipe[usb_endpoint_type(epd)]; ep->epnum = dir | usb_endpoint_num(epd); ep->devnum = urb->dev->devnum; ep->busnum = urb->dev->bus->busnum; ep->id = (unsigned long) urb; ep->ts_sec = ts.tv_sec; ep->ts_usec = ts.tv_nsec / NSEC_PER_USEC; ep->status = status; ep->len_urb = urb_length; ep->len_cap = length + lendesc; ep->xfer_flags = urb->transfer_flags; if (usb_endpoint_xfer_int(epd)) { ep->interval = urb->interval; } else if (usb_endpoint_xfer_isoc(epd)) { ep->interval = urb->interval; ep->start_frame = urb->start_frame; ep->s.iso.error_count = urb->error_count; ep->s.iso.numdesc = urb->number_of_packets; } if (usb_endpoint_xfer_control(epd) && ev_type == 'S') { ep->flag_setup = mon_bin_get_setup(ep->s.setup, urb, ev_type); } else { ep->flag_setup = '-'; } if (ndesc != 0) { ep->ndesc = ndesc; mon_bin_get_isodesc(rp, offset, urb, ev_type, ndesc); if ((offset += lendesc) >= rp->b_size) offset -= rp->b_size; } if (length != 0) { length = mon_bin_get_data(rp, offset, urb, length, &ep->flag_data); if (length > 0) { delta = (ep->len_cap + PKT_ALIGN-1) & ~(PKT_ALIGN-1); ep->len_cap -= length; delta -= (ep->len_cap + PKT_ALIGN-1) & ~(PKT_ALIGN-1); mon_buff_area_shrink(rp, delta); } } else { ep->flag_data = data_tag; } spin_unlock_irqrestore(&rp->b_lock, flags); wake_up(&rp->b_wait); } static void mon_bin_submit(void *data, struct urb *urb) { struct mon_reader_bin *rp = data; mon_bin_event(rp, urb, 'S', -EINPROGRESS); } static void mon_bin_complete(void *data, struct urb *urb, int status) { struct mon_reader_bin *rp = data; mon_bin_event(rp, urb, 'C', status); } static void mon_bin_error(void *data, struct urb *urb, int error) { struct mon_reader_bin *rp = data; struct timespec64 ts; unsigned long flags; unsigned int offset; struct mon_bin_hdr *ep; ktime_get_real_ts64(&ts); spin_lock_irqsave(&rp->b_lock, flags); offset = mon_buff_area_alloc(rp, PKT_SIZE); if (offset == ~0) { /* Not incrementing cnt_lost. Just because. */ spin_unlock_irqrestore(&rp->b_lock, flags); return; } ep = MON_OFF2HDR(rp, offset); memset(ep, 0, PKT_SIZE); ep->type = 'E'; ep->xfer_type = xfer_to_pipe[usb_endpoint_type(&urb->ep->desc)]; ep->epnum = usb_urb_dir_in(urb) ? 
USB_DIR_IN : 0; ep->epnum |= usb_endpoint_num(&urb->ep->desc); ep->devnum = urb->dev->devnum; ep->busnum = urb->dev->bus->busnum; ep->id = (unsigned long) urb; ep->ts_sec = ts.tv_sec; ep->ts_usec = ts.tv_nsec / NSEC_PER_USEC; ep->status = error; ep->flag_setup = '-'; ep->flag_data = 'E'; spin_unlock_irqrestore(&rp->b_lock, flags); wake_up(&rp->b_wait); } static int mon_bin_open(struct inode *inode, struct file *file) { struct mon_bus *mbus; struct mon_reader_bin *rp; size_t size; int rc; mutex_lock(&mon_lock); mbus = mon_bus_lookup(iminor(inode)); if (mbus == NULL) { mutex_unlock(&mon_lock); return -ENODEV; } if (mbus != &mon_bus0 && mbus->u_bus == NULL) { printk(KERN_ERR TAG ": consistency error on open\n"); mutex_unlock(&mon_lock); return -ENODEV; } rp = kzalloc(sizeof(struct mon_reader_bin), GFP_KERNEL); if (rp == NULL) { rc = -ENOMEM; goto err_alloc; } spin_lock_init(&rp->b_lock); init_waitqueue_head(&rp->b_wait); mutex_init(&rp->fetch_lock); rp->b_size = BUFF_DFL; size = sizeof(struct mon_pgmap) * (rp->b_size/CHUNK_SIZE); if ((rp->b_vec = kzalloc(size, GFP_KERNEL)) == NULL) { rc = -ENOMEM; goto err_allocvec; } if ((rc = mon_alloc_buff(rp->b_vec, rp->b_size/CHUNK_SIZE)) < 0) goto err_allocbuff; rp->r.m_bus = mbus; rp->r.r_data = rp; rp->r.rnf_submit = mon_bin_submit; rp->r.rnf_error = mon_bin_error; rp->r.rnf_complete = mon_bin_complete; mon_reader_add(mbus, &rp->r); file->private_data = rp; mutex_unlock(&mon_lock); return 0; err_allocbuff: kfree(rp->b_vec); err_allocvec: kfree(rp); err_alloc: mutex_unlock(&mon_lock); return rc; } /* * Extract an event from buffer and copy it to user space. * Wait if there is no event ready. * Returns zero or error. */ static int mon_bin_get_event(struct file *file, struct mon_reader_bin *rp, struct mon_bin_hdr __user *hdr, unsigned int hdrbytes, void __user *data, unsigned int nbytes) { unsigned long flags; struct mon_bin_hdr *ep; size_t step_len; unsigned int offset; int rc; mutex_lock(&rp->fetch_lock); if ((rc = mon_bin_wait_event(file, rp)) < 0) { mutex_unlock(&rp->fetch_lock); return rc; } ep = MON_OFF2HDR(rp, rp->b_out); if (copy_to_user(hdr, ep, hdrbytes)) { mutex_unlock(&rp->fetch_lock); return -EFAULT; } step_len = min(ep->len_cap, nbytes); if ((offset = rp->b_out + PKT_SIZE) >= rp->b_size) offset = 0; if (copy_from_buf(rp, offset, data, step_len)) { mutex_unlock(&rp->fetch_lock); return -EFAULT; } spin_lock_irqsave(&rp->b_lock, flags); mon_buff_area_free(rp, PKT_SIZE + ep->len_cap); spin_unlock_irqrestore(&rp->b_lock, flags); rp->b_read = 0; mutex_unlock(&rp->fetch_lock); return 0; } static int mon_bin_release(struct inode *inode, struct file *file) { struct mon_reader_bin *rp = file->private_data; struct mon_bus* mbus = rp->r.m_bus; mutex_lock(&mon_lock); if (mbus->nreaders <= 0) { printk(KERN_ERR TAG ": consistency error on close\n"); mutex_unlock(&mon_lock); return 0; } mon_reader_del(mbus, &rp->r); mon_free_buff(rp->b_vec, rp->b_size/CHUNK_SIZE); kfree(rp->b_vec); kfree(rp); mutex_unlock(&mon_lock); return 0; } static ssize_t mon_bin_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) { struct mon_reader_bin *rp = file->private_data; unsigned int hdrbytes = PKT_SZ_API0; unsigned long flags; struct mon_bin_hdr *ep; unsigned int offset; size_t step_len; char *ptr; ssize_t done = 0; int rc; mutex_lock(&rp->fetch_lock); if ((rc = mon_bin_wait_event(file, rp)) < 0) { mutex_unlock(&rp->fetch_lock); return rc; } ep = MON_OFF2HDR(rp, rp->b_out); if (rp->b_read < hdrbytes) { step_len = min_t(size_t, nbytes, hdrbytes - 
rp->b_read); ptr = ((char *)ep) + rp->b_read; if (step_len && copy_to_user(buf, ptr, step_len)) { mutex_unlock(&rp->fetch_lock); return -EFAULT; } nbytes -= step_len; buf += step_len; rp->b_read += step_len; done += step_len; } if (rp->b_read >= hdrbytes) { step_len = ep->len_cap; step_len -= rp->b_read - hdrbytes; if (step_len > nbytes) step_len = nbytes; offset = rp->b_out + PKT_SIZE; offset += rp->b_read - hdrbytes; if (offset >= rp->b_size) offset -= rp->b_size; if (copy_from_buf(rp, offset, buf, step_len)) { mutex_unlock(&rp->fetch_lock); return -EFAULT; } nbytes -= step_len; buf += step_len; rp->b_read += step_len; done += step_len; } /* * Check if whole packet was read, and if so, jump to the next one. */ if (rp->b_read >= hdrbytes + ep->len_cap) { spin_lock_irqsave(&rp->b_lock, flags); mon_buff_area_free(rp, PKT_SIZE + ep->len_cap); spin_unlock_irqrestore(&rp->b_lock, flags); rp->b_read = 0; } mutex_unlock(&rp->fetch_lock); return done; } /* * Remove at most nevents from chunked buffer. * Returns the number of removed events. */ static int mon_bin_flush(struct mon_reader_bin *rp, unsigned nevents) { unsigned long flags; struct mon_bin_hdr *ep; int i; mutex_lock(&rp->fetch_lock); spin_lock_irqsave(&rp->b_lock, flags); for (i = 0; i < nevents; ++i) { if (MON_RING_EMPTY(rp)) break; ep = MON_OFF2HDR(rp, rp->b_out); mon_buff_area_free(rp, PKT_SIZE + ep->len_cap); } spin_unlock_irqrestore(&rp->b_lock, flags); rp->b_read = 0; mutex_unlock(&rp->fetch_lock); return i; } /* * Fetch at most max event offsets into the buffer and put them into vec. * The events are usually freed later with mon_bin_flush. * Return the effective number of events fetched. */ static int mon_bin_fetch(struct file *file, struct mon_reader_bin *rp, u32 __user *vec, unsigned int max) { unsigned int cur_out; unsigned int bytes, avail; unsigned int size; unsigned int nevents; struct mon_bin_hdr *ep; unsigned long flags; int rc; mutex_lock(&rp->fetch_lock); if ((rc = mon_bin_wait_event(file, rp)) < 0) { mutex_unlock(&rp->fetch_lock); return rc; } spin_lock_irqsave(&rp->b_lock, flags); avail = rp->b_cnt; spin_unlock_irqrestore(&rp->b_lock, flags); cur_out = rp->b_out; nevents = 0; bytes = 0; while (bytes < avail) { if (nevents >= max) break; ep = MON_OFF2HDR(rp, cur_out); if (put_user(cur_out, &vec[nevents])) { mutex_unlock(&rp->fetch_lock); return -EFAULT; } nevents++; size = ep->len_cap + PKT_SIZE; size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1); if ((cur_out += size) >= rp->b_size) cur_out -= rp->b_size; bytes += size; } mutex_unlock(&rp->fetch_lock); return nevents; } /* * Count events. This is almost the same as the above mon_bin_fetch, * only we do not store offsets into user vector, and we have no limit. 
*/ static int mon_bin_queued(struct mon_reader_bin *rp) { unsigned int cur_out; unsigned int bytes, avail; unsigned int size; unsigned int nevents; struct mon_bin_hdr *ep; unsigned long flags; mutex_lock(&rp->fetch_lock); spin_lock_irqsave(&rp->b_lock, flags); avail = rp->b_cnt; spin_unlock_irqrestore(&rp->b_lock, flags); cur_out = rp->b_out; nevents = 0; bytes = 0; while (bytes < avail) { ep = MON_OFF2HDR(rp, cur_out); nevents++; size = ep->len_cap + PKT_SIZE; size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1); if ((cur_out += size) >= rp->b_size) cur_out -= rp->b_size; bytes += size; } mutex_unlock(&rp->fetch_lock); return nevents; } /* */ static long mon_bin_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct mon_reader_bin *rp = file->private_data; // struct mon_bus* mbus = rp->r.m_bus; int ret = 0; struct mon_bin_hdr *ep; unsigned long flags; switch (cmd) { case MON_IOCQ_URB_LEN: /* * N.B. This only returns the size of data, without the header. */ spin_lock_irqsave(&rp->b_lock, flags); if (!MON_RING_EMPTY(rp)) { ep = MON_OFF2HDR(rp, rp->b_out); ret = ep->len_cap; } spin_unlock_irqrestore(&rp->b_lock, flags); break; case MON_IOCQ_RING_SIZE: mutex_lock(&rp->fetch_lock); ret = rp->b_size; mutex_unlock(&rp->fetch_lock); break; case MON_IOCT_RING_SIZE: /* * Changing the buffer size will flush it's contents; the new * buffer is allocated before releasing the old one to be sure * the device will stay functional also in case of memory * pressure. */ { int size; struct mon_pgmap *vec; if (arg < BUFF_MIN || arg > BUFF_MAX) return -EINVAL; size = CHUNK_ALIGN(arg); vec = kcalloc(size / CHUNK_SIZE, sizeof(struct mon_pgmap), GFP_KERNEL); if (vec == NULL) { ret = -ENOMEM; break; } ret = mon_alloc_buff(vec, size/CHUNK_SIZE); if (ret < 0) { kfree(vec); break; } mutex_lock(&rp->fetch_lock); spin_lock_irqsave(&rp->b_lock, flags); if (rp->mmap_active) { mon_free_buff(vec, size/CHUNK_SIZE); kfree(vec); ret = -EBUSY; } else { mon_free_buff(rp->b_vec, rp->b_size/CHUNK_SIZE); kfree(rp->b_vec); rp->b_vec = vec; rp->b_size = size; rp->b_read = rp->b_in = rp->b_out = rp->b_cnt = 0; rp->cnt_lost = 0; } spin_unlock_irqrestore(&rp->b_lock, flags); mutex_unlock(&rp->fetch_lock); } break; case MON_IOCH_MFLUSH: ret = mon_bin_flush(rp, arg); break; case MON_IOCX_GET: case MON_IOCX_GETX: { struct mon_bin_get getb; if (copy_from_user(&getb, (void __user *)arg, sizeof(struct mon_bin_get))) return -EFAULT; if (getb.alloc > 0x10000000) /* Want to cast to u32 */ return -EINVAL; ret = mon_bin_get_event(file, rp, getb.hdr, (cmd == MON_IOCX_GET)? 
PKT_SZ_API0: PKT_SZ_API1, getb.data, (unsigned int)getb.alloc); } break; case MON_IOCX_MFETCH: { struct mon_bin_mfetch mfetch; struct mon_bin_mfetch __user *uptr; uptr = (struct mon_bin_mfetch __user *)arg; if (copy_from_user(&mfetch, uptr, sizeof(mfetch))) return -EFAULT; if (mfetch.nflush) { ret = mon_bin_flush(rp, mfetch.nflush); if (ret < 0) return ret; if (put_user(ret, &uptr->nflush)) return -EFAULT; } ret = mon_bin_fetch(file, rp, mfetch.offvec, mfetch.nfetch); if (ret < 0) return ret; if (put_user(ret, &uptr->nfetch)) return -EFAULT; ret = 0; } break; case MON_IOCG_STATS: { struct mon_bin_stats __user *sp; unsigned int nevents; unsigned int ndropped; spin_lock_irqsave(&rp->b_lock, flags); ndropped = rp->cnt_lost; rp->cnt_lost = 0; spin_unlock_irqrestore(&rp->b_lock, flags); nevents = mon_bin_queued(rp); sp = (struct mon_bin_stats __user *)arg; if (put_user(ndropped, &sp->dropped)) return -EFAULT; if (put_user(nevents, &sp->queued)) return -EFAULT; } break; default: return -ENOTTY; } return ret; } #ifdef CONFIG_COMPAT static long mon_bin_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct mon_reader_bin *rp = file->private_data; int ret; switch (cmd) { case MON_IOCX_GET32: case MON_IOCX_GETX32: { struct mon_bin_get32 getb; if (copy_from_user(&getb, (void __user *)arg, sizeof(struct mon_bin_get32))) return -EFAULT; ret = mon_bin_get_event(file, rp, compat_ptr(getb.hdr32), (cmd == MON_IOCX_GET32)? PKT_SZ_API0: PKT_SZ_API1, compat_ptr(getb.data32), getb.alloc32); if (ret < 0) return ret; } return 0; case MON_IOCX_MFETCH32: { struct mon_bin_mfetch32 mfetch; struct mon_bin_mfetch32 __user *uptr; uptr = (struct mon_bin_mfetch32 __user *) compat_ptr(arg); if (copy_from_user(&mfetch, uptr, sizeof(mfetch))) return -EFAULT; if (mfetch.nflush32) { ret = mon_bin_flush(rp, mfetch.nflush32); if (ret < 0) return ret; if (put_user(ret, &uptr->nflush32)) return -EFAULT; } ret = mon_bin_fetch(file, rp, compat_ptr(mfetch.offvec32), mfetch.nfetch32); if (ret < 0) return ret; if (put_user(ret, &uptr->nfetch32)) return -EFAULT; } return 0; case MON_IOCG_STATS: return mon_bin_ioctl(file, cmd, (unsigned long) compat_ptr(arg)); case MON_IOCQ_URB_LEN: case MON_IOCQ_RING_SIZE: case MON_IOCT_RING_SIZE: case MON_IOCH_MFLUSH: return mon_bin_ioctl(file, cmd, arg); default: ; } return -ENOTTY; } #endif /* CONFIG_COMPAT */ static __poll_t mon_bin_poll(struct file *file, struct poll_table_struct *wait) { struct mon_reader_bin *rp = file->private_data; __poll_t mask = 0; unsigned long flags; if (file->f_mode & FMODE_READ) poll_wait(file, &rp->b_wait, wait); spin_lock_irqsave(&rp->b_lock, flags); if (!MON_RING_EMPTY(rp)) mask |= EPOLLIN | EPOLLRDNORM; /* readable */ spin_unlock_irqrestore(&rp->b_lock, flags); return mask; } /* * open and close: just keep track of how many times the device is * mapped, to use the proper memory allocation function. */ static void mon_bin_vma_open(struct vm_area_struct *vma) { struct mon_reader_bin *rp = vma->vm_private_data; unsigned long flags; spin_lock_irqsave(&rp->b_lock, flags); rp->mmap_active++; spin_unlock_irqrestore(&rp->b_lock, flags); } static void mon_bin_vma_close(struct vm_area_struct *vma) { unsigned long flags; struct mon_reader_bin *rp = vma->vm_private_data; spin_lock_irqsave(&rp->b_lock, flags); rp->mmap_active--; spin_unlock_irqrestore(&rp->b_lock, flags); } /* * Map ring pages to user space. 
*/ static vm_fault_t mon_bin_vma_fault(struct vm_fault *vmf) { struct mon_reader_bin *rp = vmf->vma->vm_private_data; unsigned long offset, chunk_idx; struct page *pageptr; unsigned long flags; spin_lock_irqsave(&rp->b_lock, flags); offset = vmf->pgoff << PAGE_SHIFT; if (offset >= rp->b_size) { spin_unlock_irqrestore(&rp->b_lock, flags); return VM_FAULT_SIGBUS; } chunk_idx = offset / CHUNK_SIZE; pageptr = rp->b_vec[chunk_idx].pg; get_page(pageptr); vmf->page = pageptr; spin_unlock_irqrestore(&rp->b_lock, flags); return 0; } static const struct vm_operations_struct mon_bin_vm_ops = { .open = mon_bin_vma_open, .close = mon_bin_vma_close, .fault = mon_bin_vma_fault, }; static int mon_bin_mmap(struct file *filp, struct vm_area_struct *vma) { /* don't do anything here: "fault" will set up page table entries */ vma->vm_ops = &mon_bin_vm_ops; if (vma->vm_flags & VM_WRITE) return -EPERM; vm_flags_mod(vma, VM_DONTEXPAND | VM_DONTDUMP, VM_MAYWRITE); vma->vm_private_data = filp->private_data; mon_bin_vma_open(vma); return 0; } static const struct file_operations mon_fops_binary = { .owner = THIS_MODULE, .open = mon_bin_open, .read = mon_bin_read, /* .write = mon_text_write, */ .poll = mon_bin_poll, .unlocked_ioctl = mon_bin_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = mon_bin_compat_ioctl, #endif .release = mon_bin_release, .mmap = mon_bin_mmap, }; static int mon_bin_wait_event(struct file *file, struct mon_reader_bin *rp) { DECLARE_WAITQUEUE(waita, current); unsigned long flags; add_wait_queue(&rp->b_wait, &waita); set_current_state(TASK_INTERRUPTIBLE); spin_lock_irqsave(&rp->b_lock, flags); while (MON_RING_EMPTY(rp)) { spin_unlock_irqrestore(&rp->b_lock, flags); if (file->f_flags & O_NONBLOCK) { set_current_state(TASK_RUNNING); remove_wait_queue(&rp->b_wait, &waita); return -EWOULDBLOCK; /* Same as EAGAIN in Linux */ } schedule(); if (signal_pending(current)) { remove_wait_queue(&rp->b_wait, &waita); return -EINTR; } set_current_state(TASK_INTERRUPTIBLE); spin_lock_irqsave(&rp->b_lock, flags); } spin_unlock_irqrestore(&rp->b_lock, flags); set_current_state(TASK_RUNNING); remove_wait_queue(&rp->b_wait, &waita); return 0; } static int mon_alloc_buff(struct mon_pgmap *map, int npages) { int n; unsigned long vaddr; for (n = 0; n < npages; n++) { vaddr = get_zeroed_page(GFP_KERNEL); if (vaddr == 0) { while (n-- != 0) free_page((unsigned long) map[n].ptr); return -ENOMEM; } map[n].ptr = (unsigned char *) vaddr; map[n].pg = virt_to_page((void *) vaddr); } return 0; } static void mon_free_buff(struct mon_pgmap *map, int npages) { int n; for (n = 0; n < npages; n++) free_page((unsigned long) map[n].ptr); } int mon_bin_add(struct mon_bus *mbus, const struct usb_bus *ubus) { struct device *dev; unsigned minor = ubus? ubus->busnum: 0; if (minor >= MON_BIN_MAX_MINOR) return 0; dev = device_create(&mon_bin_class, ubus ? 
ubus->controller : NULL, MKDEV(MAJOR(mon_bin_dev0), minor), NULL, "usbmon%d", minor); if (IS_ERR(dev)) return 0; mbus->classdev = dev; return 1; } void mon_bin_del(struct mon_bus *mbus) { device_destroy(&mon_bin_class, mbus->classdev->devt); } int __init mon_bin_init(void) { int rc; rc = class_register(&mon_bin_class); if (rc) goto err_class; rc = alloc_chrdev_region(&mon_bin_dev0, 0, MON_BIN_MAX_MINOR, "usbmon"); if (rc < 0) goto err_dev; cdev_init(&mon_bin_cdev, &mon_fops_binary); mon_bin_cdev.owner = THIS_MODULE; rc = cdev_add(&mon_bin_cdev, mon_bin_dev0, MON_BIN_MAX_MINOR); if (rc < 0) goto err_add; return 0; err_add: unregister_chrdev_region(mon_bin_dev0, MON_BIN_MAX_MINOR); err_dev: class_unregister(&mon_bin_class); err_class: return rc; } void mon_bin_exit(void) { cdev_del(&mon_bin_cdev); unregister_chrdev_region(mon_bin_dev0, MON_BIN_MAX_MINOR); class_unregister(&mon_bin_class); } |
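/*
 * Illustrative userspace sketch (not part of the original file): fetching one
 * event through the binary API defined above via MON_IOCX_GETX.  The
 * structures below are a userland mirror of mon_bin_hdr/mon_bin_get from this
 * file and assume a 64-bit build with the native ABI; the names usbmon_hdr,
 * usbmon_get and the /dev/usbmon0 node ("bus 0", i.e. all buses) are the
 * assumptions here.  Opening the node usually requires root, and the ioctl
 * blocks until an event arrives unless O_NONBLOCK is used.
 */
#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>

#define MON_IOC_MAGIC	0x92

struct usbmon_hdr {		/* layout of mon_bin_hdr, API 1 (64 bytes) */
	uint64_t id;
	unsigned char type, xfer_type, epnum, devnum;
	unsigned short busnum;
	char flag_setup, flag_data;
	int64_t ts_sec;
	int32_t ts_usec;
	int32_t status;
	unsigned int len_urb, len_cap;
	unsigned char setup[8];	/* stands in for the setup/iso union */
	int32_t interval, start_frame;
	uint32_t xfer_flags, ndesc;
};

struct usbmon_get {		/* layout of mon_bin_get */
	struct usbmon_hdr *hdr;
	void *data;
	size_t alloc;
};

#define MON_IOCX_GETX	_IOW(MON_IOC_MAGIC, 10, struct usbmon_get)

int main(void)
{
	struct usbmon_hdr hdr;
	unsigned char data[4096];
	struct usbmon_get getb = { .hdr = &hdr, .data = data, .alloc = sizeof(data) };
	int fd = open("/dev/usbmon0", O_RDONLY);

	if (fd < 0 || ioctl(fd, MON_IOCX_GETX, &getb) < 0)
		return 1;
	printf("bus %u dev %u ep 0x%02x type %c len %u\n",
	       (unsigned)hdr.busnum, (unsigned)hdr.devnum,
	       (unsigned)hdr.epnum, hdr.type, hdr.len_cap);
	close(fd);
	return 0;
}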
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 */

#ifndef _NET_BATMAN_ADV_HARD_INTERFACE_H_
#define _NET_BATMAN_ADV_HARD_INTERFACE_H_

#include "main.h"

#include <linux/compiler.h>
#include <linux/kref.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/rcupdate.h>
#include <linux/stddef.h>
#include <linux/types.h>

/**
 * enum batadv_hard_if_state - State of a hard interface
 */
enum batadv_hard_if_state {
	/**
	 * @BATADV_IF_NOT_IN_USE: interface is not used as slave interface of a
	 * batman-adv soft interface
	 */
	BATADV_IF_NOT_IN_USE,

	/**
	 * @BATADV_IF_TO_BE_REMOVED: interface will be removed from soft
	 * interface
	 */
	BATADV_IF_TO_BE_REMOVED,

	/** @BATADV_IF_INACTIVE: interface is deactivated */
	BATADV_IF_INACTIVE,

	/** @BATADV_IF_ACTIVE: interface is used */
	BATADV_IF_ACTIVE,

	/** @BATADV_IF_TO_BE_ACTIVATED: interface is getting activated */
	BATADV_IF_TO_BE_ACTIVATED,
};

/**
 * enum batadv_hard_if_bcast - broadcast avoidance options
 */
enum batadv_hard_if_bcast {
	/** @BATADV_HARDIF_BCAST_OK: Do broadcast on according hard interface */
	BATADV_HARDIF_BCAST_OK = 0,

	/**
	 * @BATADV_HARDIF_BCAST_NORECIPIENT: Broadcast not needed, there is no
	 * recipient
	 */
	BATADV_HARDIF_BCAST_NORECIPIENT,

	/**
	 * @BATADV_HARDIF_BCAST_DUPFWD: There is just the neighbor we got it
	 * from
	 */
	BATADV_HARDIF_BCAST_DUPFWD,

	/** @BATADV_HARDIF_BCAST_DUPORIG: There is just the originator */
	BATADV_HARDIF_BCAST_DUPORIG,
};

extern struct notifier_block batadv_hard_if_notifier;

struct net_device *batadv_get_real_netdev(struct net_device *net_device);
bool batadv_is_cfg80211_hardif(struct batadv_hard_iface *hard_iface);
bool batadv_is_wifi_hardif(struct batadv_hard_iface *hard_iface);
struct batadv_hard_iface*
batadv_hardif_get_by_netdev(const struct net_device *net_dev);
int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
				   struct net_device *soft_iface);
void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface);
int batadv_hardif_min_mtu(struct net_device *soft_iface);
void batadv_update_min_mtu(struct net_device *soft_iface);
void batadv_hardif_release(struct kref *ref);
int batadv_hardif_no_broadcast(struct batadv_hard_iface *if_outgoing,
			       u8 *orig_addr, u8 *orig_neigh);

/**
 * batadv_hardif_put() - decrement the hard interface refcounter and possibly
 *  release it
 * @hard_iface: the hard interface to free
 */
static inline void batadv_hardif_put(struct batadv_hard_iface *hard_iface)
{
	if (!hard_iface)
		return;

	kref_put(&hard_iface->refcount, batadv_hardif_release);
}

/**
 * batadv_primary_if_get_selected() - Get reference to primary interface
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Return: primary interface (with increased refcnt), otherwise NULL
 */
static inline struct batadv_hard_iface *
batadv_primary_if_get_selected(struct batadv_priv *bat_priv)
{
	struct batadv_hard_iface *hard_iface;

	rcu_read_lock();
	hard_iface = rcu_dereference(bat_priv->primary_if);
	if (!hard_iface)
		goto out;

	if (!kref_get_unless_zero(&hard_iface->refcount))
		hard_iface = NULL;

out:
	rcu_read_unlock();
	return hard_iface;
}

#endif /* _NET_BATMAN_ADV_HARD_INTERFACE_H_ */
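/*
 * Illustrative sketch (not part of the original header): the usual
 * reference-counting pattern for the helpers above.  A caller takes the
 * primary interface with batadv_primary_if_get_selected(), which grabs a
 * kref under RCU, and must drop it again with batadv_hardif_put().  The
 * function name example_use_primary_if() is made up for illustration.
 */
#include "hard-interface.h"

static void example_use_primary_if(struct batadv_priv *bat_priv)
{
	struct batadv_hard_iface *primary_if;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		return;

	/* ... use the interface, e.g. primary_if->net_dev ... */

	batadv_hardif_put(primary_if);	/* release the reference taken above */
}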
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * ip_vs_ftp.c: IPVS ftp application module
 *
 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
 *
 * Changes:
 *
 * Most code here is taken from ip_masq_ftp.c in kernel 2.2. The difference
 * is that ip_vs_ftp module handles the reverse direction to ip_masq_ftp.
* * IP_MASQ_FTP ftp masquerading module * * Version: @(#)ip_masq_ftp.c 0.04 02/05/96 * * Author: Wouter Gadeyne */ #define KMSG_COMPONENT "IPVS" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/kernel.h> #include <linux/skbuff.h> #include <linux/ctype.h> #include <linux/inet.h> #include <linux/in.h> #include <linux/ip.h> #include <linux/netfilter.h> #include <net/netfilter/nf_conntrack.h> #include <net/netfilter/nf_conntrack_expect.h> #include <net/netfilter/nf_nat.h> #include <net/netfilter/nf_nat_helper.h> #include <linux/gfp.h> #include <net/protocol.h> #include <net/tcp.h> #include <linux/unaligned.h> #include <net/ip_vs.h> #define SERVER_STRING_PASV "227 " #define CLIENT_STRING_PORT "PORT" #define SERVER_STRING_EPSV "229 " #define CLIENT_STRING_EPRT "EPRT" enum { IP_VS_FTP_ACTIVE = 0, IP_VS_FTP_PORT = 0, IP_VS_FTP_PASV, IP_VS_FTP_EPRT, IP_VS_FTP_EPSV, }; /* * List of ports (up to IP_VS_APP_MAX_PORTS) to be handled by helper * First port is set to the default port. */ static unsigned int ports_count = 1; static unsigned short ports[IP_VS_APP_MAX_PORTS] = {21, 0}; module_param_array(ports, ushort, &ports_count, 0444); MODULE_PARM_DESC(ports, "Ports to monitor for FTP control commands"); static char *ip_vs_ftp_data_ptr(struct sk_buff *skb, struct ip_vs_iphdr *ipvsh) { struct tcphdr *th = (struct tcphdr *)((char *)skb->data + ipvsh->len); if ((th->doff << 2) < sizeof(struct tcphdr)) return NULL; return (char *)th + (th->doff << 2); } static int ip_vs_ftp_init_conn(struct ip_vs_app *app, struct ip_vs_conn *cp) { /* We use connection tracking for the command connection */ cp->flags |= IP_VS_CONN_F_NFCT; return 0; } static int ip_vs_ftp_done_conn(struct ip_vs_app *app, struct ip_vs_conn *cp) { return 0; } /* Get <addr,port> from the string "xxx.xxx.xxx.xxx,ppp,ppp", started * with the "pattern". <addr,port> is in network order. * Parse extended format depending on ext. In this case addr can be pre-set. */ static int ip_vs_ftp_get_addrport(char *data, char *data_limit, const char *pattern, size_t plen, char skip, bool ext, int mode, union nf_inet_addr *addr, __be16 *port, __u16 af, char **start, char **end) { char *s, c; unsigned char p[6]; char edelim; __u16 hport; int i = 0; if (data_limit - data < plen) { /* check if there is partial match */ if (strncasecmp(data, pattern, data_limit - data) == 0) return -1; else return 0; } if (strncasecmp(data, pattern, plen) != 0) { return 0; } s = data + plen; if (skip) { bool found = false; for (;; s++) { if (s == data_limit) return -1; if (!found) { /* "(" is optional for non-extended format, * so catch the start of IPv4 address */ if (!ext && isdigit(*s)) break; if (*s == skip) found = true; } else if (*s != skip) { break; } } } /* Old IPv4-only format? 
*/ if (!ext) { p[0] = 0; for (data = s; ; data++) { if (data == data_limit) return -1; c = *data; if (isdigit(c)) { p[i] = p[i]*10 + c - '0'; } else if (c == ',' && i < 5) { i++; p[i] = 0; } else { /* unexpected character or terminator */ break; } } if (i != 5) return -1; *start = s; *end = data; addr->ip = get_unaligned((__be32 *) p); *port = get_unaligned((__be16 *) (p + 4)); return 1; } if (s == data_limit) return -1; *start = s; edelim = *s++; if (edelim < 33 || edelim > 126) return -1; if (s == data_limit) return -1; if (*s == edelim) { /* Address family is usually missing for EPSV response */ if (mode != IP_VS_FTP_EPSV) return -1; s++; if (s == data_limit) return -1; /* Then address should be missing too */ if (*s != edelim) return -1; /* Caller can pre-set addr, if needed */ s++; } else { const char *ep; /* We allow address only from same family */ if (af == AF_INET6 && *s != '2') return -1; if (af == AF_INET && *s != '1') return -1; s++; if (s == data_limit) return -1; if (*s != edelim) return -1; s++; if (s == data_limit) return -1; if (af == AF_INET6) { if (in6_pton(s, data_limit - s, (u8 *)addr, edelim, &ep) <= 0) return -1; } else { if (in4_pton(s, data_limit - s, (u8 *)addr, edelim, &ep) <= 0) return -1; } s = (char *) ep; if (s == data_limit) return -1; if (*s != edelim) return -1; s++; } for (hport = 0; ; s++) { if (s == data_limit) return -1; if (!isdigit(*s)) break; hport = hport * 10 + *s - '0'; } if (s == data_limit || !hport || *s != edelim) return -1; s++; *end = s; *port = htons(hport); return 1; } /* Look at outgoing ftp packets to catch the response to a PASV/EPSV command * from the server (inside-to-outside). * When we see one, we build a connection entry with the client address, * client port 0 (unknown at the moment), the server address and the * server port. Mark the current connection entry as a control channel * of the new entry. All this work is just to make the data connection * can be scheduled to the right server later. * * The outgoing packet should be something like * "227 Entering Passive Mode (xxx,xxx,xxx,xxx,ppp,ppp)". * xxx,xxx,xxx,xxx is the server address, ppp,ppp is the server port number. * The extended format for EPSV response provides usually only port: * "229 Entering Extended Passive Mode (|||ppp|)" */ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp, struct sk_buff *skb, int *diff, struct ip_vs_iphdr *ipvsh) { char *data, *data_limit; char *start, *end; union nf_inet_addr from; __be16 port; struct ip_vs_conn *n_cp; char buf[24]; /* xxx.xxx.xxx.xxx,ppp,ppp\000 */ unsigned int buf_len; int ret = 0; enum ip_conntrack_info ctinfo; struct nf_conn *ct; *diff = 0; /* Only useful for established sessions */ if (cp->state != IP_VS_TCP_S_ESTABLISHED) return 1; /* Linear packets are much easier to deal with. 
*/ if (skb_ensure_writable(skb, skb->len)) return 0; if (cp->app_data == (void *) IP_VS_FTP_PASV) { data = ip_vs_ftp_data_ptr(skb, ipvsh); data_limit = skb_tail_pointer(skb); if (!data || data >= data_limit) return 1; if (ip_vs_ftp_get_addrport(data, data_limit, SERVER_STRING_PASV, sizeof(SERVER_STRING_PASV)-1, '(', false, IP_VS_FTP_PASV, &from, &port, cp->af, &start, &end) != 1) return 1; IP_VS_DBG(7, "PASV response (%pI4:%u) -> %pI4:%u detected\n", &from.ip, ntohs(port), &cp->caddr.ip, 0); } else if (cp->app_data == (void *) IP_VS_FTP_EPSV) { data = ip_vs_ftp_data_ptr(skb, ipvsh); data_limit = skb_tail_pointer(skb); if (!data || data >= data_limit) return 1; /* Usually, data address is not specified but * we support different address, so pre-set it. */ from = cp->daddr; if (ip_vs_ftp_get_addrport(data, data_limit, SERVER_STRING_EPSV, sizeof(SERVER_STRING_EPSV)-1, '(', true, IP_VS_FTP_EPSV, &from, &port, cp->af, &start, &end) != 1) return 1; IP_VS_DBG_BUF(7, "EPSV response (%s:%u) -> %s:%u detected\n", IP_VS_DBG_ADDR(cp->af, &from), ntohs(port), IP_VS_DBG_ADDR(cp->af, &cp->caddr), 0); } else { return 1; } /* Now update or create a connection entry for it */ { struct ip_vs_conn_param p; ip_vs_conn_fill_param(cp->ipvs, cp->af, ipvsh->protocol, &from, port, &cp->caddr, 0, &p); n_cp = ip_vs_conn_out_get(&p); } if (!n_cp) { struct ip_vs_conn_param p; ip_vs_conn_fill_param(cp->ipvs, cp->af, ipvsh->protocol, &cp->caddr, 0, &cp->vaddr, port, &p); n_cp = ip_vs_conn_new(&p, cp->af, &from, port, IP_VS_CONN_F_NO_CPORT | IP_VS_CONN_F_NFCT, cp->dest, skb->mark); if (!n_cp) return 0; /* add its controller */ ip_vs_control_add(n_cp, cp); } /* Replace the old passive address with the new one */ if (cp->app_data == (void *) IP_VS_FTP_PASV) { from.ip = n_cp->vaddr.ip; port = n_cp->vport; snprintf(buf, sizeof(buf), "%u,%u,%u,%u,%u,%u", ((unsigned char *)&from.ip)[0], ((unsigned char *)&from.ip)[1], ((unsigned char *)&from.ip)[2], ((unsigned char *)&from.ip)[3], ntohs(port) >> 8, ntohs(port) & 0xFF); } else if (cp->app_data == (void *) IP_VS_FTP_EPSV) { from = n_cp->vaddr; port = n_cp->vport; /* Only port, client will use VIP for the data connection */ snprintf(buf, sizeof(buf), "|||%u|", ntohs(port)); } else { *buf = 0; } buf_len = strlen(buf); ct = nf_ct_get(skb, &ctinfo); if (ct) { bool mangled; /* If mangling fails this function will return 0 * which will cause the packet to be dropped. * Mangling can only fail under memory pressure, * hopefully it will succeed on the retransmitted * packet. */ mangled = nf_nat_mangle_tcp_packet(skb, ct, ctinfo, ipvsh->len, start - data, end - start, buf, buf_len); if (mangled) { ip_vs_nfct_expect_related(skb, ct, n_cp, ipvsh->protocol, 0, 0); if (skb->ip_summed == CHECKSUM_COMPLETE) skb->ip_summed = CHECKSUM_UNNECESSARY; /* csum is updated */ ret = 1; } } /* Not setting 'diff' is intentional, otherwise the sequence * would be adjusted twice. */ cp->app_data = (void *) IP_VS_FTP_ACTIVE; ip_vs_tcp_conn_listen(n_cp); ip_vs_conn_put(n_cp); return ret; } /* Look at incoming ftp packets to catch the PASV/PORT/EPRT/EPSV command * (outside-to-inside). * * The incoming packet having the PORT command should be something like * "PORT xxx,xxx,xxx,xxx,ppp,ppp\n". * xxx,xxx,xxx,xxx is the client address, ppp,ppp is the client port number. * In this case, we create a connection entry using the client address and * port, so that the active ftp data connection from the server can reach * the client. 
* Extended format: * "EPSV\r\n" when client requests server address from same family * "EPSV 1\r\n" when client requests IPv4 server address * "EPSV 2\r\n" when client requests IPv6 server address * "EPSV ALL\r\n" - not supported * EPRT with specified delimiter (ASCII 33..126), "|" by default: * "EPRT |1|IPv4ADDR|PORT|\r\n" when client provides IPv4 addrport * "EPRT |2|IPv6ADDR|PORT|\r\n" when client provides IPv6 addrport */ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp, struct sk_buff *skb, int *diff, struct ip_vs_iphdr *ipvsh) { char *data, *data_start, *data_limit; char *start, *end; union nf_inet_addr to; __be16 port; struct ip_vs_conn *n_cp; /* no diff required for incoming packets */ *diff = 0; /* Only useful for established sessions */ if (cp->state != IP_VS_TCP_S_ESTABLISHED) return 1; /* Linear packets are much easier to deal with. */ if (skb_ensure_writable(skb, skb->len)) return 0; data = data_start = ip_vs_ftp_data_ptr(skb, ipvsh); data_limit = skb_tail_pointer(skb); if (!data || data >= data_limit) return 1; while (data <= data_limit - 6) { if (cp->af == AF_INET && strncasecmp(data, "PASV\r\n", 6) == 0) { /* Passive mode on */ IP_VS_DBG(7, "got PASV at %td of %td\n", data - data_start, data_limit - data_start); cp->app_data = (void *) IP_VS_FTP_PASV; return 1; } /* EPSV or EPSV<space><net-prt> */ if (strncasecmp(data, "EPSV", 4) == 0 && (data[4] == ' ' || data[4] == '\r')) { if (data[4] == ' ') { char proto = data[5]; if (data > data_limit - 7 || data[6] != '\r') return 1; #ifdef CONFIG_IP_VS_IPV6 if (cp->af == AF_INET6 && proto == '2') { } else #endif if (cp->af == AF_INET && proto == '1') { } else { return 1; } } /* Extended Passive mode on */ IP_VS_DBG(7, "got EPSV at %td of %td\n", data - data_start, data_limit - data_start); cp->app_data = (void *) IP_VS_FTP_EPSV; return 1; } data++; } /* * To support virtual FTP server, the scenerio is as follows: * FTP client ----> Load Balancer ----> FTP server * First detect the port number in the application data, * then create a new connection entry for the coming data * connection. 
*/ if (cp->af == AF_INET && ip_vs_ftp_get_addrport(data_start, data_limit, CLIENT_STRING_PORT, sizeof(CLIENT_STRING_PORT)-1, ' ', false, IP_VS_FTP_PORT, &to, &port, cp->af, &start, &end) == 1) { IP_VS_DBG(7, "PORT %pI4:%u detected\n", &to.ip, ntohs(port)); /* Now update or create a connection entry for it */ IP_VS_DBG(7, "protocol %s %pI4:%u %pI4:%u\n", ip_vs_proto_name(ipvsh->protocol), &to.ip, ntohs(port), &cp->vaddr.ip, ntohs(cp->vport)-1); } else if (ip_vs_ftp_get_addrport(data_start, data_limit, CLIENT_STRING_EPRT, sizeof(CLIENT_STRING_EPRT)-1, ' ', true, IP_VS_FTP_EPRT, &to, &port, cp->af, &start, &end) == 1) { IP_VS_DBG_BUF(7, "EPRT %s:%u detected\n", IP_VS_DBG_ADDR(cp->af, &to), ntohs(port)); /* Now update or create a connection entry for it */ IP_VS_DBG_BUF(7, "protocol %s %s:%u %s:%u\n", ip_vs_proto_name(ipvsh->protocol), IP_VS_DBG_ADDR(cp->af, &to), ntohs(port), IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport)-1); } else { return 1; } /* Passive mode off */ cp->app_data = (void *) IP_VS_FTP_ACTIVE; { struct ip_vs_conn_param p; ip_vs_conn_fill_param(cp->ipvs, cp->af, ipvsh->protocol, &to, port, &cp->vaddr, htons(ntohs(cp->vport)-1), &p); n_cp = ip_vs_conn_in_get(&p); if (!n_cp) { n_cp = ip_vs_conn_new(&p, cp->af, &cp->daddr, htons(ntohs(cp->dport)-1), IP_VS_CONN_F_NFCT, cp->dest, skb->mark); if (!n_cp) return 0; /* add its controller */ ip_vs_control_add(n_cp, cp); } } /* * Move tunnel to listen state */ ip_vs_tcp_conn_listen(n_cp); ip_vs_conn_put(n_cp); return 1; } static struct ip_vs_app ip_vs_ftp = { .name = "ftp", .type = IP_VS_APP_TYPE_FTP, .protocol = IPPROTO_TCP, .module = THIS_MODULE, .incs_list = LIST_HEAD_INIT(ip_vs_ftp.incs_list), .init_conn = ip_vs_ftp_init_conn, .done_conn = ip_vs_ftp_done_conn, .bind_conn = NULL, .unbind_conn = NULL, .pkt_out = ip_vs_ftp_out, .pkt_in = ip_vs_ftp_in, }; /* * per netns ip_vs_ftp initialization */ static int __net_init __ip_vs_ftp_init(struct net *net) { int i, ret; struct ip_vs_app *app; struct netns_ipvs *ipvs = net_ipvs(net); if (!ipvs) return -ENOENT; app = register_ip_vs_app(ipvs, &ip_vs_ftp); if (IS_ERR(app)) return PTR_ERR(app); for (i = 0; i < ports_count; i++) { if (!ports[i]) continue; ret = register_ip_vs_app_inc(ipvs, app, app->protocol, ports[i]); if (ret) goto err_unreg; } return 0; err_unreg: unregister_ip_vs_app(ipvs, &ip_vs_ftp); return ret; } /* * netns exit */ static void __ip_vs_ftp_exit(struct net *net) { struct netns_ipvs *ipvs = net_ipvs(net); if (!ipvs) return; unregister_ip_vs_app(ipvs, &ip_vs_ftp); } static struct pernet_operations ip_vs_ftp_ops = { .init = __ip_vs_ftp_init, .exit = __ip_vs_ftp_exit, }; static int __init ip_vs_ftp_init(void) { /* rcu_barrier() is called by netns on error */ return register_pernet_subsys(&ip_vs_ftp_ops); } /* * ip_vs_ftp finish. */ static void __exit ip_vs_ftp_exit(void) { unregister_pernet_subsys(&ip_vs_ftp_ops); /* rcu_barrier() is called by netns */ } module_init(ip_vs_ftp_init); module_exit(ip_vs_ftp_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("ipvs ftp helper");
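/*
 * Editor's illustrative sketch (userspace, not part of ip_vs_ftp.c): the
 * PASV payload that ip_vs_ftp_out() rewrites above is the sextet
 * "h1,h2,h3,h4,p1,p2" - the four IPv4 address octets followed by the TCP
 * port split into high and low bytes.  The address and port below are
 * example values only.
 */
#include <stdio.h>
#include <arpa/inet.h>

int main(void)
{
	struct in_addr vip;
	unsigned short port = 51234;	/* data port, host byte order */
	char buf[28];

	inet_pton(AF_INET, "192.0.2.10", &vip);
	snprintf(buf, sizeof(buf), "%u,%u,%u,%u,%u,%u",
		 ((unsigned char *)&vip.s_addr)[0],
		 ((unsigned char *)&vip.s_addr)[1],
		 ((unsigned char *)&vip.s_addr)[2],
		 ((unsigned char *)&vip.s_addr)[3],
		 port >> 8, port & 0xFF);
	/* prints "227 Entering Passive Mode (192,0,2,10,200,34)";
	 * the client recovers the port as 200 * 256 + 34 == 51234
	 */
	printf("227 Entering Passive Mode (%s)\n", buf);
	return 0;
}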
// SPDX-License-Identifier: GPL-2.0+ /* * Copyright (c) 2001-2002 by David Brownell */ #ifndef __USB_CORE_HCD_H #define __USB_CORE_HCD_H #ifdef __KERNEL__ #include <linux/rwsem.h> #include <linux/interrupt.h> #include <linux/idr.h> #define MAX_TOPO_LEVEL 6 /* This file contains declarations of usbcore internals that are mostly * used or exposed by Host Controller Drivers.
*/ /* * USB Packet IDs (PIDs) */ #define USB_PID_EXT 0xf0 /* USB 2.0 LPM ECN */ #define USB_PID_OUT 0xe1 #define USB_PID_ACK 0xd2 #define USB_PID_DATA0 0xc3 #define USB_PID_PING 0xb4 /* USB 2.0 */ #define USB_PID_SOF 0xa5 #define USB_PID_NYET 0x96 /* USB 2.0 */ #define USB_PID_DATA2 0x87 /* USB 2.0 */ #define USB_PID_SPLIT 0x78 /* USB 2.0 */ #define USB_PID_IN 0x69 #define USB_PID_NAK 0x5a #define USB_PID_DATA1 0x4b #define USB_PID_PREAMBLE 0x3c /* Token mode */ #define USB_PID_ERR 0x3c /* USB 2.0: handshake mode */ #define USB_PID_SETUP 0x2d #define USB_PID_STALL 0x1e #define USB_PID_MDATA 0x0f /* USB 2.0 */ /*-------------------------------------------------------------------------*/ /* * USB Host Controller Driver (usb_hcd) framework * * Since "struct usb_bus" is so thin, you can't share much code in it. * This framework is a layer over that, and should be more shareable. */ /*-------------------------------------------------------------------------*/ struct giveback_urb_bh { bool running; bool high_prio; spinlock_t lock; struct list_head head; struct work_struct bh; struct usb_host_endpoint *completing_ep; }; enum usb_dev_authorize_policy { USB_DEVICE_AUTHORIZE_NONE = 0, USB_DEVICE_AUTHORIZE_ALL = 1, USB_DEVICE_AUTHORIZE_INTERNAL = 2, }; struct usb_hcd { /* * housekeeping */ struct usb_bus self; /* hcd is-a bus */ struct kref kref; /* reference counter */ const char *product_desc; /* product/vendor string */ int speed; /* Speed for this roothub. * May be different from * hcd->driver->flags & HCD_MASK */ char irq_descr[24]; /* driver + bus # */ struct timer_list rh_timer; /* drives root-hub polling */ struct urb *status_urb; /* the current status urb */ #ifdef CONFIG_PM struct work_struct wakeup_work; /* for remote wakeup */ #endif struct work_struct died_work; /* for when the device dies */ /* * hardware info/state */ const struct hc_driver *driver; /* hw-specific hooks */ /* * OTG and some Host controllers need software interaction with phys; * other external phys should be software-transparent */ struct usb_phy *usb_phy; struct usb_phy_roothub *phy_roothub; /* Flags that need to be manipulated atomically because they can * change while the host controller is running. Always use * set_bit() or clear_bit() to change their values. */ unsigned long flags; #define HCD_FLAG_HW_ACCESSIBLE 0 /* at full power */ #define HCD_FLAG_POLL_RH 2 /* poll for rh status? */ #define HCD_FLAG_POLL_PENDING 3 /* status has changed? */ #define HCD_FLAG_WAKEUP_PENDING 4 /* root hub is resuming? */ #define HCD_FLAG_RH_RUNNING 5 /* root hub is running? */ #define HCD_FLAG_DEAD 6 /* controller has died? */ #define HCD_FLAG_INTF_AUTHORIZED 7 /* authorize interfaces? */ #define HCD_FLAG_DEFER_RH_REGISTER 8 /* Defer roothub registration */ /* The flags can be tested using these macros; they are likely to * be slightly faster than test_bit(). 
*/ #define HCD_HW_ACCESSIBLE(hcd) ((hcd)->flags & (1U << HCD_FLAG_HW_ACCESSIBLE)) #define HCD_POLL_RH(hcd) ((hcd)->flags & (1U << HCD_FLAG_POLL_RH)) #define HCD_POLL_PENDING(hcd) ((hcd)->flags & (1U << HCD_FLAG_POLL_PENDING)) #define HCD_WAKEUP_PENDING(hcd) ((hcd)->flags & (1U << HCD_FLAG_WAKEUP_PENDING)) #define HCD_RH_RUNNING(hcd) ((hcd)->flags & (1U << HCD_FLAG_RH_RUNNING)) #define HCD_DEAD(hcd) ((hcd)->flags & (1U << HCD_FLAG_DEAD)) #define HCD_DEFER_RH_REGISTER(hcd) ((hcd)->flags & (1U << HCD_FLAG_DEFER_RH_REGISTER)) /* * Specifies if interfaces are authorized by default * or they require explicit user space authorization; this bit is * settable through /sys/class/usb_host/X/interface_authorized_default */ #define HCD_INTF_AUTHORIZED(hcd) \ ((hcd)->flags & (1U << HCD_FLAG_INTF_AUTHORIZED)) /* * Specifies if devices are authorized by default * or they require explicit user space authorization; this bit is * settable through /sys/class/usb_host/X/authorized_default */ enum usb_dev_authorize_policy dev_policy; /* Flags that get set only during HCD registration or removal. */ unsigned rh_registered:1;/* is root hub registered? */ unsigned rh_pollable:1; /* may we poll the root hub? */ unsigned msix_enabled:1; /* driver has MSI-X enabled? */ unsigned msi_enabled:1; /* driver has MSI enabled? */ /* * do not manage the PHY state in the HCD core, instead let the driver * handle this (for example if the PHY can only be turned on after a * specific event) */ unsigned skip_phy_initialization:1; /* The next flag is a stopgap, to be removed when all the HCDs * support the new root-hub polling mechanism. */ unsigned uses_new_polling:1; unsigned has_tt:1; /* Integrated TT in root hub */ unsigned amd_resume_bug:1; /* AMD remote wakeup quirk */ unsigned can_do_streams:1; /* HC supports streams */ unsigned tpl_support:1; /* OTG & EH TPL support */ unsigned cant_recv_wakeups:1; /* wakeup requests from downstream aren't received */ unsigned int irq; /* irq allocated */ void __iomem *regs; /* device memory/io */ resource_size_t rsrc_start; /* memory/io resource start */ resource_size_t rsrc_len; /* memory/io resource length */ unsigned power_budget; /* in mA, 0 = no limit */ struct giveback_urb_bh high_prio_bh; struct giveback_urb_bh low_prio_bh; /* bandwidth_mutex should be taken before adding or removing * any new bus bandwidth constraints: * 1. Before adding a configuration for a new device. * 2. Before removing the configuration to put the device into * the addressed state. * 3. Before selecting a different configuration. * 4. Before selecting an alternate interface setting. * * bandwidth_mutex should be dropped after a successful control message * to the device, or resetting the bandwidth after a failed attempt. 
*/ struct mutex *address0_mutex; struct mutex *bandwidth_mutex; struct usb_hcd *shared_hcd; struct usb_hcd *primary_hcd; #define HCD_BUFFER_POOLS 4 struct dma_pool *pool[HCD_BUFFER_POOLS]; int state; # define __ACTIVE 0x01 # define __SUSPEND 0x04 # define __TRANSIENT 0x80 # define HC_STATE_HALT 0 # define HC_STATE_RUNNING (__ACTIVE) # define HC_STATE_QUIESCING (__SUSPEND|__TRANSIENT|__ACTIVE) # define HC_STATE_RESUMING (__SUSPEND|__TRANSIENT) # define HC_STATE_SUSPENDED (__SUSPEND) #define HC_IS_RUNNING(state) ((state) & __ACTIVE) #define HC_IS_SUSPENDED(state) ((state) & __SUSPEND) /* memory pool for HCs having local memory, or %NULL */ struct gen_pool *localmem_pool; /* more shared queuing code would be good; it should support * smarter scheduling, handle transaction translators, etc; * input size of periodic table to an interrupt scheduler. * (ohci 32, uhci 1024, ehci 256/512/1024). */ /* The HC driver's private data is stored at the end of * this structure. */ unsigned long hcd_priv[] __attribute__ ((aligned(sizeof(s64)))); }; /* 2.4 does this a bit differently ... */ static inline struct usb_bus *hcd_to_bus(struct usb_hcd *hcd) { return &hcd->self; } static inline struct usb_hcd *bus_to_hcd(struct usb_bus *bus) { return container_of(bus, struct usb_hcd, self); } /*-------------------------------------------------------------------------*/ struct hc_driver { const char *description; /* "ehci-hcd" etc */ const char *product_desc; /* product/vendor string */ size_t hcd_priv_size; /* size of private data */ /* irq handler */ irqreturn_t (*irq) (struct usb_hcd *hcd); int flags; #define HCD_MEMORY 0x0001 /* HC regs use memory (else I/O) */ #define HCD_DMA 0x0002 /* HC uses DMA */ #define HCD_SHARED 0x0004 /* Two (or more) usb_hcds share HW */ #define HCD_USB11 0x0010 /* USB 1.1 */ #define HCD_USB2 0x0020 /* USB 2.0 */ #define HCD_USB3 0x0040 /* USB 3.0 */ #define HCD_USB31 0x0050 /* USB 3.1 */ #define HCD_USB32 0x0060 /* USB 3.2 */ #define HCD_MASK 0x0070 #define HCD_BH 0x0100 /* URB complete in BH context */ /* called to init HCD and root hub */ int (*reset) (struct usb_hcd *hcd); int (*start) (struct usb_hcd *hcd); /* NOTE: these suspend/resume calls relate to the HC as * a whole, not just the root hub; they're for PCI bus glue. */ /* called after suspending the hub, before entering D3 etc */ int (*pci_suspend)(struct usb_hcd *hcd, bool do_wakeup); /* called after entering D0 (etc), before resuming the hub */ int (*pci_resume)(struct usb_hcd *hcd, pm_message_t state); /* called just before hibernate final D3 state, allows host to poweroff parts */ int (*pci_poweroff_late)(struct usb_hcd *hcd, bool do_wakeup); /* cleanly make HCD stop writing memory and doing I/O */ void (*stop) (struct usb_hcd *hcd); /* shutdown HCD */ void (*shutdown) (struct usb_hcd *hcd); /* return current frame number */ int (*get_frame_number) (struct usb_hcd *hcd); /* manage i/o requests, device state */ int (*urb_enqueue)(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags); int (*urb_dequeue)(struct usb_hcd *hcd, struct urb *urb, int status); /* * (optional) these hooks allow an HCD to override the default DMA * mapping and unmapping routines. In general, they shouldn't be * necessary unless the host controller has special DMA requirements, * such as alignment constraints. 
If these are not specified, the * general usb_hcd_(un)?map_urb_for_dma functions will be used instead * (and it may be a good idea to call these functions in your HCD * implementation) */ int (*map_urb_for_dma)(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags); void (*unmap_urb_for_dma)(struct usb_hcd *hcd, struct urb *urb); /* hw synch, freeing endpoint resources that urb_dequeue can't */ void (*endpoint_disable)(struct usb_hcd *hcd, struct usb_host_endpoint *ep); /* (optional) reset any endpoint state such as sequence number and current window */ void (*endpoint_reset)(struct usb_hcd *hcd, struct usb_host_endpoint *ep); /* root hub support */ int (*hub_status_data) (struct usb_hcd *hcd, char *buf); int (*hub_control) (struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex, char *buf, u16 wLength); int (*bus_suspend)(struct usb_hcd *); int (*bus_resume)(struct usb_hcd *); int (*start_port_reset)(struct usb_hcd *, unsigned port_num); unsigned long (*get_resuming_ports)(struct usb_hcd *); /* force handover of high-speed port to full-speed companion */ void (*relinquish_port)(struct usb_hcd *, int); /* has a port been handed over to a companion? */ int (*port_handed_over)(struct usb_hcd *, int); /* CLEAR_TT_BUFFER completion callback */ void (*clear_tt_buffer_complete)(struct usb_hcd *, struct usb_host_endpoint *); /* xHCI specific functions */ /* Called by usb_alloc_dev to alloc HC device structures */ int (*alloc_dev)(struct usb_hcd *, struct usb_device *); /* Called by usb_disconnect to free HC device structures */ void (*free_dev)(struct usb_hcd *, struct usb_device *); /* Change a group of bulk endpoints to support multiple stream IDs */ int (*alloc_streams)(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint **eps, unsigned int num_eps, unsigned int num_streams, gfp_t mem_flags); /* Reverts a group of bulk endpoints back to not using stream IDs. * Can fail if we run out of memory. */ int (*free_streams)(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint **eps, unsigned int num_eps, gfp_t mem_flags); /* Bandwidth computation functions */ /* Note that add_endpoint() can only be called once per endpoint before * check_bandwidth() or reset_bandwidth() must be called. * drop_endpoint() can only be called once per endpoint also. * A call to xhci_drop_endpoint() followed by a call to * xhci_add_endpoint() will add the endpoint to the schedule with * possibly new parameters denoted by a different endpoint descriptor * in usb_host_endpoint. A call to xhci_add_endpoint() followed by a * call to xhci_drop_endpoint() is not allowed. */ /* Allocate endpoint resources and add them to a new schedule */ int (*add_endpoint)(struct usb_hcd *, struct usb_device *, struct usb_host_endpoint *); /* Drop an endpoint from a new schedule */ int (*drop_endpoint)(struct usb_hcd *, struct usb_device *, struct usb_host_endpoint *); /* Check that a new hardware configuration, set using * endpoint_enable and endpoint_disable, does not exceed bus * bandwidth. This must be called before any set configuration * or set interface requests are sent to the device. */ int (*check_bandwidth)(struct usb_hcd *, struct usb_device *); /* Reset the device schedule to the last known good schedule, * which was set from a previous successful call to * check_bandwidth(). This reverts any add_endpoint() and * drop_endpoint() calls since that last successful call. * Used for when a check_bandwidth() call fails due to resource * or bandwidth constraints. 
*/ void (*reset_bandwidth)(struct usb_hcd *, struct usb_device *); /* Set the hardware-chosen device address */ int (*address_device)(struct usb_hcd *, struct usb_device *udev, unsigned int timeout_ms); /* prepares the hardware to send commands to the device */ int (*enable_device)(struct usb_hcd *, struct usb_device *udev); /* Notifies the HCD after a hub descriptor is fetched. * Will block. */ int (*update_hub_device)(struct usb_hcd *, struct usb_device *hdev, struct usb_tt *tt, gfp_t mem_flags); int (*reset_device)(struct usb_hcd *, struct usb_device *); /* Notifies the HCD after a device is connected and its * address is set */ int (*update_device)(struct usb_hcd *, struct usb_device *); int (*set_usb2_hw_lpm)(struct usb_hcd *, struct usb_device *, int); /* USB 3.0 Link Power Management */ /* Returns the USB3 hub-encoded value for the U1/U2 timeout. */ int (*enable_usb3_lpm_timeout)(struct usb_hcd *, struct usb_device *, enum usb3_link_state state); /* The xHCI host controller can still fail the command to * disable the LPM timeouts, so this can return an error code. */ int (*disable_usb3_lpm_timeout)(struct usb_hcd *, struct usb_device *, enum usb3_link_state state); int (*find_raw_port_number)(struct usb_hcd *, int); /* Call for power on/off the port if necessary */ int (*port_power)(struct usb_hcd *hcd, int portnum, bool enable); /* Call for SINGLE_STEP_SET_FEATURE Test for USB2 EH certification */ #define EHSET_TEST_SINGLE_STEP_SET_FEATURE 0x06 int (*submit_single_step_set_feature)(struct usb_hcd *, struct urb *, int); }; static inline int hcd_giveback_urb_in_bh(struct usb_hcd *hcd) { return hcd->driver->flags & HCD_BH; } static inline bool hcd_periodic_completion_in_progress(struct usb_hcd *hcd, struct usb_host_endpoint *ep) { return hcd->high_prio_bh.completing_ep == ep; } static inline bool hcd_uses_dma(struct usb_hcd *hcd) { return IS_ENABLED(CONFIG_HAS_DMA) && (hcd->driver->flags & HCD_DMA); } extern int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb); extern int usb_hcd_check_unlink_urb(struct usb_hcd *hcd, struct urb *urb, int status); extern void usb_hcd_unlink_urb_from_ep(struct usb_hcd *hcd, struct urb *urb); extern int usb_hcd_submit_urb(struct urb *urb, gfp_t mem_flags); extern int usb_hcd_unlink_urb(struct urb *urb, int status); extern void usb_hcd_giveback_urb(struct usb_hcd *hcd, struct urb *urb, int status); extern int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags); extern void usb_hcd_unmap_urb_setup_for_dma(struct usb_hcd *, struct urb *); extern void usb_hcd_unmap_urb_for_dma(struct usb_hcd *, struct urb *); extern void usb_hcd_flush_endpoint(struct usb_device *udev, struct usb_host_endpoint *ep); extern void usb_hcd_disable_endpoint(struct usb_device *udev, struct usb_host_endpoint *ep); extern void usb_hcd_reset_endpoint(struct usb_device *udev, struct usb_host_endpoint *ep); extern void usb_hcd_synchronize_unlinks(struct usb_device *udev); extern int usb_hcd_alloc_bandwidth(struct usb_device *udev, struct usb_host_config *new_config, struct usb_host_interface *old_alt, struct usb_host_interface *new_alt); extern int usb_hcd_get_frame_number(struct usb_device *udev); struct usb_hcd *__usb_create_hcd(const struct hc_driver *driver, struct device *sysdev, struct device *dev, const char *bus_name, struct usb_hcd *primary_hcd); extern struct usb_hcd *usb_create_hcd(const struct hc_driver *driver, struct device *dev, const char *bus_name); extern struct usb_hcd *usb_create_shared_hcd(const struct hc_driver *driver, struct 
device *dev, const char *bus_name, struct usb_hcd *shared_hcd); extern struct usb_hcd *usb_get_hcd(struct usb_hcd *hcd); extern void usb_put_hcd(struct usb_hcd *hcd); extern int usb_hcd_is_primary_hcd(struct usb_hcd *hcd); extern int usb_add_hcd(struct usb_hcd *hcd, unsigned int irqnum, unsigned long irqflags); extern void usb_remove_hcd(struct usb_hcd *hcd); extern int usb_hcd_find_raw_port_number(struct usb_hcd *hcd, int port1); int usb_hcd_setup_local_mem(struct usb_hcd *hcd, phys_addr_t phys_addr, dma_addr_t dma, size_t size); struct platform_device; extern void usb_hcd_platform_shutdown(struct platform_device *dev); #ifdef CONFIG_USB_HCD_TEST_MODE extern int ehset_single_step_set_feature(struct usb_hcd *hcd, int port); #else static inline int ehset_single_step_set_feature(struct usb_hcd *hcd, int port) { return 0; } #endif /* CONFIG_USB_HCD_TEST_MODE */ #ifdef CONFIG_USB_PCI struct pci_dev; struct pci_device_id; extern int usb_hcd_pci_probe(struct pci_dev *dev, const struct hc_driver *driver); extern void usb_hcd_pci_remove(struct pci_dev *dev); extern void usb_hcd_pci_shutdown(struct pci_dev *dev); #ifdef CONFIG_USB_PCI_AMD extern int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *dev); static inline bool usb_hcd_amd_resume_bug(struct pci_dev *dev, const struct hc_driver *driver) { if (!usb_hcd_amd_remote_wakeup_quirk(dev)) return false; if (driver->flags & (HCD_USB11 | HCD_USB3)) return true; return false; } #else /* CONFIG_USB_PCI_AMD */ static inline bool usb_hcd_amd_resume_bug(struct pci_dev *dev, const struct hc_driver *driver) { return false; } #endif extern const struct dev_pm_ops usb_hcd_pci_pm_ops; #endif /* CONFIG_USB_PCI */ /* pci-ish (pdev null is ok) buffer alloc/mapping support */ void usb_init_pool_max(void); int hcd_buffer_create(struct usb_hcd *hcd); void hcd_buffer_destroy(struct usb_hcd *hcd); void *hcd_buffer_alloc(struct usb_bus *bus, size_t size, gfp_t mem_flags, dma_addr_t *dma); void hcd_buffer_free(struct usb_bus *bus, size_t size, void *addr, dma_addr_t dma); void *hcd_buffer_alloc_pages(struct usb_hcd *hcd, size_t size, gfp_t mem_flags, dma_addr_t *dma); void hcd_buffer_free_pages(struct usb_hcd *hcd, size_t size, void *addr, dma_addr_t dma); /* generic bus glue, needed for host controllers that don't use PCI */ extern irqreturn_t usb_hcd_irq(int irq, void *__hcd); extern void usb_hc_died(struct usb_hcd *hcd); extern void usb_hcd_poll_rh_status(struct usb_hcd *hcd); extern void usb_wakeup_notification(struct usb_device *hdev, unsigned int portnum); extern void usb_hcd_start_port_resume(struct usb_bus *bus, int portnum); extern void usb_hcd_end_port_resume(struct usb_bus *bus, int portnum); /* The D0/D1 toggle bits ... 
USE WITH CAUTION (they're almost hcd-internal) */ #define usb_gettoggle(dev, ep, out) (((dev)->toggle[out] >> (ep)) & 1) #define usb_dotoggle(dev, ep, out) ((dev)->toggle[out] ^= (1 << (ep))) #define usb_settoggle(dev, ep, out, bit) \ ((dev)->toggle[out] = ((dev)->toggle[out] & ~(1 << (ep))) | \ ((bit) << (ep))) /* -------------------------------------------------------------------------- */ /* Enumeration is only for the hub driver, or HCD virtual root hubs */ extern struct usb_device *usb_alloc_dev(struct usb_device *parent, struct usb_bus *, unsigned port); extern int usb_new_device(struct usb_device *dev); extern void usb_disconnect(struct usb_device **); extern int usb_get_configuration(struct usb_device *dev); extern void usb_destroy_configuration(struct usb_device *dev); /*-------------------------------------------------------------------------*/ /* * HCD Root Hub support */ #include <linux/usb/ch11.h> /* * As of USB 2.0, full/low speed devices are segregated into trees. * One type grows from USB 1.1 host controllers (OHCI, UHCI etc). * The other type grows from high speed hubs when they connect to * full/low speed devices using "Transaction Translators" (TTs). * * TTs should only be known to the hub driver, and high speed bus * drivers (only EHCI for now). They affect periodic scheduling and * sometimes control/bulk error recovery. */ struct usb_device; struct usb_tt { struct usb_device *hub; /* upstream highspeed hub */ int multi; /* true means one TT per port */ unsigned think_time; /* think time in ns */ void *hcpriv; /* HCD private data */ /* for control/bulk error recovery (CLEAR_TT_BUFFER) */ spinlock_t lock; struct list_head clear_list; /* of usb_tt_clear */ struct work_struct clear_work; }; struct usb_tt_clear { struct list_head clear_list; unsigned tt; u16 devinfo; struct usb_hcd *hcd; struct usb_host_endpoint *ep; }; extern int usb_hub_clear_tt_buffer(struct urb *urb); extern void usb_ep0_reinit(struct usb_device *); /* (shifted) direction/type/recipient from the USB 2.0 spec, table 9.2 */ #define DeviceRequest \ ((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_DEVICE)<<8) #define DeviceOutRequest \ ((USB_DIR_OUT|USB_TYPE_STANDARD|USB_RECIP_DEVICE)<<8) #define InterfaceRequest \ ((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_INTERFACE)<<8) #define EndpointRequest \ ((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_ENDPOINT)<<8) #define EndpointOutRequest \ ((USB_DIR_OUT|USB_TYPE_STANDARD|USB_RECIP_ENDPOINT)<<8) /* class requests from the USB 2.0 hub spec, table 11-15 */ #define HUB_CLASS_REQ(dir, type, request) ((((dir) | (type)) << 8) | (request)) /* GetBusState and SetHubDescriptor are optional, omitted */ #define ClearHubFeature HUB_CLASS_REQ(USB_DIR_OUT, USB_RT_HUB, USB_REQ_CLEAR_FEATURE) #define ClearPortFeature HUB_CLASS_REQ(USB_DIR_OUT, USB_RT_PORT, USB_REQ_CLEAR_FEATURE) #define GetHubDescriptor HUB_CLASS_REQ(USB_DIR_IN, USB_RT_HUB, USB_REQ_GET_DESCRIPTOR) #define GetHubStatus HUB_CLASS_REQ(USB_DIR_IN, USB_RT_HUB, USB_REQ_GET_STATUS) #define GetPortStatus HUB_CLASS_REQ(USB_DIR_IN, USB_RT_PORT, USB_REQ_GET_STATUS) #define SetHubFeature HUB_CLASS_REQ(USB_DIR_OUT, USB_RT_HUB, USB_REQ_SET_FEATURE) #define SetPortFeature HUB_CLASS_REQ(USB_DIR_OUT, USB_RT_PORT, USB_REQ_SET_FEATURE) #define ClearTTBuffer HUB_CLASS_REQ(USB_DIR_OUT, USB_RT_PORT, HUB_CLEAR_TT_BUFFER) #define ResetTT HUB_CLASS_REQ(USB_DIR_OUT, USB_RT_PORT, HUB_RESET_TT) #define GetTTState HUB_CLASS_REQ(USB_DIR_IN, USB_RT_PORT, HUB_GET_TT_STATE) #define StopTT HUB_CLASS_REQ(USB_DIR_OUT, USB_RT_PORT, HUB_STOP_TT) 
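/*
 * Editor's note: a hedged sketch of how the hub request constants above
 * are consumed.  HUB_CLASS_REQ() packs the shifted bmRequestType into the
 * high byte and bRequest into the low byte, which is exactly the typeReq
 * value passed to the hc_driver hub_control() hook.  With USB_DIR_IN =
 * 0x80, USB_RT_PORT = 0x23 and USB_REQ_GET_STATUS = 0x00, GetPortStatus
 * works out to ((0x80 | 0x23) << 8) | 0x00 = 0xa300.  The function below
 * is a hypothetical skeleton for illustration, not a real HCD:
 */
static int example_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
			       u16 wIndex, char *buf, u16 wLength)
{
	switch (typeReq) {
	case GetPortStatus:	/* 0xa300 */
		/* write wPortStatus/wPortChange for port wIndex into buf */
		return 0;
	case ClearPortFeature:	/* 0x2301 */
		/* clear feature wValue on port wIndex */
		return 0;
	default:
		return -EPIPE;	/* unsupported request -> stall ep0 */
	}
}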
/*-------------------------------------------------------------------------*/ /* class requests from USB 3.1 hub spec, table 10-7 */ #define SetHubDepth HUB_CLASS_REQ(USB_DIR_OUT, USB_RT_HUB, HUB_SET_DEPTH) #define GetPortErrorCount HUB_CLASS_REQ(USB_DIR_IN, USB_RT_PORT, HUB_GET_PORT_ERR_COUNT) /* * Generic bandwidth allocation constants/support */ #define FRAME_TIME_USECS 1000L #define BitTime(bytecount) (7 * 8 * bytecount / 6) /* with integer truncation */ /* Trying not to use worst-case bit-stuffing * of (7/6 * 8 * bytecount) = 9.33 * bytecount */ /* bytecount = data payload byte count */ #define NS_TO_US(ns) DIV_ROUND_UP(ns, 1000L) /* convert nanoseconds to microseconds, rounding up */ /* * Full/low speed bandwidth allocation constants/support. */ #define BW_HOST_DELAY 1000L /* nanoseconds */ #define BW_HUB_LS_SETUP 333L /* nanoseconds */ /* 4 full-speed bit times (est.) */ #define FRAME_TIME_BITS 12000L /* frame = 1 millisecond */ #define FRAME_TIME_MAX_BITS_ALLOC (90L * FRAME_TIME_BITS / 100L) #define FRAME_TIME_MAX_USECS_ALLOC (90L * FRAME_TIME_USECS / 100L) /* * Ceiling [nano/micro]seconds (typical) for that many bytes at high speed * ISO is a bit less, no ACK ... from USB 2.0 spec, 5.11.3 (and needed * to preallocate bandwidth) */ #define USB2_HOST_DELAY 5 /* nsec, guess */ #define HS_NSECS(bytes) (((55 * 8 * 2083) \ + (2083UL * (3 + BitTime(bytes))))/1000 \ + USB2_HOST_DELAY) #define HS_NSECS_ISO(bytes) (((38 * 8 * 2083) \ + (2083UL * (3 + BitTime(bytes))))/1000 \ + USB2_HOST_DELAY) #define HS_USECS(bytes) NS_TO_US(HS_NSECS(bytes)) #define HS_USECS_ISO(bytes) NS_TO_US(HS_NSECS_ISO(bytes)) extern long usb_calc_bus_time(int speed, int is_input, int isoc, int bytecount); /*-------------------------------------------------------------------------*/ extern void usb_set_device_state(struct usb_device *udev, enum usb_device_state new_state); /*-------------------------------------------------------------------------*/ /* exported only within usbcore */ extern struct idr usb_bus_idr; extern struct mutex usb_bus_idr_lock; extern wait_queue_head_t usb_kill_urb_queue; #define usb_endpoint_out(ep_dir) (!((ep_dir) & USB_DIR_IN)) #ifdef CONFIG_PM extern unsigned usb_wakeup_enabled_descendants(struct usb_device *udev); extern void usb_root_hub_lost_power(struct usb_device *rhdev); extern int hcd_bus_suspend(struct usb_device *rhdev, pm_message_t msg); extern int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg); extern void usb_hcd_resume_root_hub(struct usb_hcd *hcd); #else static inline unsigned usb_wakeup_enabled_descendants(struct usb_device *udev) { return 0; } static inline void usb_hcd_resume_root_hub(struct usb_hcd *hcd) { return; } #endif /* CONFIG_PM */ /*-------------------------------------------------------------------------*/ #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE) struct usb_mon_operations { void (*urb_submit)(struct usb_bus *bus, struct urb *urb); void (*urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err); void (*urb_complete)(struct usb_bus *bus, struct urb *urb, int status); /* void (*urb_unlink)(struct usb_bus *bus, struct urb *urb); */ }; extern const struct usb_mon_operations *mon_ops; static inline void usbmon_urb_submit(struct usb_bus *bus, struct urb *urb) { if (bus->monitored) (*mon_ops->urb_submit)(bus, urb); } static inline void usbmon_urb_submit_error(struct usb_bus *bus, struct urb *urb, int error) { if (bus->monitored) (*mon_ops->urb_submit_error)(bus, urb, error); } static inline void usbmon_urb_complete(struct usb_bus 
*bus, struct urb *urb, int status) { if (bus->monitored) (*mon_ops->urb_complete)(bus, urb, status); } int usb_mon_register(const struct usb_mon_operations *ops); void usb_mon_deregister(void); #else static inline void usbmon_urb_submit(struct usb_bus *bus, struct urb *urb) {} static inline void usbmon_urb_submit_error(struct usb_bus *bus, struct urb *urb, int error) {} static inline void usbmon_urb_complete(struct usb_bus *bus, struct urb *urb, int status) {} #endif /* CONFIG_USB_MON || CONFIG_USB_MON_MODULE */ /*-------------------------------------------------------------------------*/ /* random stuff */ /* This rwsem is for use only by the hub driver and ehci-hcd. * Nobody else should touch it. */ extern struct rw_semaphore ehci_cf_port_reset_rwsem; /* Keep track of which host controller drivers are loaded */ #define USB_UHCI_LOADED 0 #define USB_OHCI_LOADED 1 #define USB_EHCI_LOADED 2 extern unsigned long usb_hcds_loaded; #endif /* __KERNEL__ */ #endif /* __USB_CORE_HCD_H */
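/*
 * Editor's worked example for the bandwidth macros defined earlier in this
 * header (a sketch of the arithmetic, using an illustrative 64-byte
 * high-speed transaction):
 *
 *	BitTime(64)  = 7 * 8 * 64 / 6 = 597 bit times (integer truncation
 *	               approximates the worst-case 7/6 bit-stuffing overhead)
 *	HS_NSECS(64) = ((55 * 8 * 2083) + 2083 * (3 + 597)) / 1000 + 5
 *	             = (916520 + 1249800) / 1000 + 5 = 2171 ns
 *	HS_USECS(64) = NS_TO_US(2171) = DIV_ROUND_UP(2171, 1000) = 3 us
 *
 * The constant 2083 is the high-speed bit time in picoseconds (1/480 MHz),
 * so the division by 1000 yields nanoseconds; USB2_HOST_DELAY adds the
 * guessed 5 ns of host-controller overhead.
 */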
// SPDX-License-Identifier: GPL-2.0-or-later /* Copyright (c) 2014 Mahesh Bandewar <maheshb@google.com> */ #include <net/inet_dscp.h> #include <net/ip.h> #include "ipvlan.h" static u32 ipvlan_jhash_secret __read_mostly; void ipvlan_init_secret(void) { net_get_random_once(&ipvlan_jhash_secret, sizeof(ipvlan_jhash_secret)); } void ipvlan_count_rx(const struct ipvl_dev *ipvlan, unsigned int len, bool success, bool mcast) { if (likely(success)) { struct
ipvl_pcpu_stats *pcptr; pcptr = this_cpu_ptr(ipvlan->pcpu_stats); u64_stats_update_begin(&pcptr->syncp); u64_stats_inc(&pcptr->rx_pkts); u64_stats_add(&pcptr->rx_bytes, len); if (mcast) u64_stats_inc(&pcptr->rx_mcast); u64_stats_update_end(&pcptr->syncp); } else { this_cpu_inc(ipvlan->pcpu_stats->rx_errs); } } EXPORT_SYMBOL_GPL(ipvlan_count_rx); #if IS_ENABLED(CONFIG_IPV6) static u8 ipvlan_get_v6_hash(const void *iaddr) { const struct in6_addr *ip6_addr = iaddr; return __ipv6_addr_jhash(ip6_addr, ipvlan_jhash_secret) & IPVLAN_HASH_MASK; } #else static u8 ipvlan_get_v6_hash(const void *iaddr) { return 0; } #endif static u8 ipvlan_get_v4_hash(const void *iaddr) { const struct in_addr *ip4_addr = iaddr; return jhash_1word(ip4_addr->s_addr, ipvlan_jhash_secret) & IPVLAN_HASH_MASK; } static bool addr_equal(bool is_v6, struct ipvl_addr *addr, const void *iaddr) { if (!is_v6 && addr->atype == IPVL_IPV4) { struct in_addr *i4addr = (struct in_addr *)iaddr; return addr->ip4addr.s_addr == i4addr->s_addr; #if IS_ENABLED(CONFIG_IPV6) } else if (is_v6 && addr->atype == IPVL_IPV6) { struct in6_addr *i6addr = (struct in6_addr *)iaddr; return ipv6_addr_equal(&addr->ip6addr, i6addr); #endif } return false; } static struct ipvl_addr *ipvlan_ht_addr_lookup(const struct ipvl_port *port, const void *iaddr, bool is_v6) { struct ipvl_addr *addr; u8 hash; hash = is_v6 ? ipvlan_get_v6_hash(iaddr) : ipvlan_get_v4_hash(iaddr); hlist_for_each_entry_rcu(addr, &port->hlhead[hash], hlnode) if (addr_equal(is_v6, addr, iaddr)) return addr; return NULL; } void ipvlan_ht_addr_add(struct ipvl_dev *ipvlan, struct ipvl_addr *addr) { struct ipvl_port *port = ipvlan->port; u8 hash; hash = (addr->atype == IPVL_IPV6) ? ipvlan_get_v6_hash(&addr->ip6addr) : ipvlan_get_v4_hash(&addr->ip4addr); if (hlist_unhashed(&addr->hlnode)) hlist_add_head_rcu(&addr->hlnode, &port->hlhead[hash]); } void ipvlan_ht_addr_del(struct ipvl_addr *addr) { hlist_del_init_rcu(&addr->hlnode); } struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan, const void *iaddr, bool is_v6) { struct ipvl_addr *addr, *ret = NULL; rcu_read_lock(); list_for_each_entry_rcu(addr, &ipvlan->addrs, anode) { if (addr_equal(is_v6, addr, iaddr)) { ret = addr; break; } } rcu_read_unlock(); return ret; } bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6) { struct ipvl_dev *ipvlan; bool ret = false; rcu_read_lock(); list_for_each_entry_rcu(ipvlan, &port->ipvlans, pnode) { if (ipvlan_find_addr(ipvlan, iaddr, is_v6)) { ret = true; break; } } rcu_read_unlock(); return ret; } void *ipvlan_get_L3_hdr(struct ipvl_port *port, struct sk_buff *skb, int *type) { void *lyr3h = NULL; switch (skb->protocol) { case htons(ETH_P_ARP): { struct arphdr *arph; if (unlikely(!pskb_may_pull(skb, arp_hdr_len(port->dev)))) return NULL; arph = arp_hdr(skb); *type = IPVL_ARP; lyr3h = arph; break; } case htons(ETH_P_IP): { u32 pktlen; struct iphdr *ip4h; if (unlikely(!pskb_may_pull(skb, sizeof(*ip4h)))) return NULL; ip4h = ip_hdr(skb); pktlen = skb_ip_totlen(skb); if (ip4h->ihl < 5 || ip4h->version != 4) return NULL; if (skb->len < pktlen || pktlen < (ip4h->ihl * 4)) return NULL; *type = IPVL_IPV4; lyr3h = ip4h; break; } #if IS_ENABLED(CONFIG_IPV6) case htons(ETH_P_IPV6): { struct ipv6hdr *ip6h; if (unlikely(!pskb_may_pull(skb, sizeof(*ip6h)))) return NULL; ip6h = ipv6_hdr(skb); if (ip6h->version != 6) return NULL; *type = IPVL_IPV6; lyr3h = ip6h; /* Only Neighbour Solicitation pkts need different treatment */ if (ipv6_addr_any(&ip6h->saddr) && ip6h->nexthdr == NEXTHDR_ICMP) { 
struct icmp6hdr *icmph; if (unlikely(!pskb_may_pull(skb, sizeof(*ip6h) + sizeof(*icmph)))) return NULL; ip6h = ipv6_hdr(skb); icmph = (struct icmp6hdr *)(ip6h + 1); if (icmph->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) { /* Need to access the ipv6 address in body */ if (unlikely(!pskb_may_pull(skb, sizeof(*ip6h) + sizeof(*icmph) + sizeof(struct in6_addr)))) return NULL; ip6h = ipv6_hdr(skb); icmph = (struct icmp6hdr *)(ip6h + 1); } *type = IPVL_ICMPV6; lyr3h = icmph; } break; } #endif default: return NULL; } return lyr3h; } unsigned int ipvlan_mac_hash(const unsigned char *addr) { u32 hash = jhash_1word(__get_unaligned_cpu32(addr+2), ipvlan_jhash_secret); return hash & IPVLAN_MAC_FILTER_MASK; } void ipvlan_process_multicast(struct work_struct *work) { struct ipvl_port *port = container_of(work, struct ipvl_port, wq); struct ethhdr *ethh; struct ipvl_dev *ipvlan; struct sk_buff *skb, *nskb; struct sk_buff_head list; unsigned int len; unsigned int mac_hash; int ret; u8 pkt_type; bool tx_pkt; __skb_queue_head_init(&list); spin_lock_bh(&port->backlog.lock); skb_queue_splice_tail_init(&port->backlog, &list); spin_unlock_bh(&port->backlog.lock); while ((skb = __skb_dequeue(&list)) != NULL) { struct net_device *dev = skb->dev; bool consumed = false; ethh = eth_hdr(skb); tx_pkt = IPVL_SKB_CB(skb)->tx_pkt; mac_hash = ipvlan_mac_hash(ethh->h_dest); if (ether_addr_equal(ethh->h_dest, port->dev->broadcast)) pkt_type = PACKET_BROADCAST; else pkt_type = PACKET_MULTICAST; rcu_read_lock(); list_for_each_entry_rcu(ipvlan, &port->ipvlans, pnode) { if (tx_pkt && (ipvlan->dev == skb->dev)) continue; if (!test_bit(mac_hash, ipvlan->mac_filters)) continue; if (!(ipvlan->dev->flags & IFF_UP)) continue; ret = NET_RX_DROP; len = skb->len + ETH_HLEN; nskb = skb_clone(skb, GFP_ATOMIC); local_bh_disable(); if (nskb) { consumed = true; nskb->pkt_type = pkt_type; nskb->dev = ipvlan->dev; if (tx_pkt) ret = dev_forward_skb(ipvlan->dev, nskb); else ret = netif_rx(nskb); } ipvlan_count_rx(ipvlan, len, ret == NET_RX_SUCCESS, true); local_bh_enable(); } rcu_read_unlock(); if (tx_pkt) { /* If the packet originated here, send it out. */ skb->dev = port->dev; skb->pkt_type = pkt_type; dev_queue_xmit(skb); } else { if (consumed) consume_skb(skb); else kfree_skb(skb); } dev_put(dev); cond_resched(); } } static void ipvlan_skb_crossing_ns(struct sk_buff *skb, struct net_device *dev) { bool xnet = true; if (dev) xnet = !net_eq(dev_net(skb->dev), dev_net(dev)); skb_scrub_packet(skb, xnet); if (dev) skb->dev = dev; } static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff **pskb, bool local) { struct ipvl_dev *ipvlan = addr->master; struct net_device *dev = ipvlan->dev; unsigned int len; rx_handler_result_t ret = RX_HANDLER_CONSUMED; bool success = false; struct sk_buff *skb = *pskb; len = skb->len + ETH_HLEN; /* Only packets exchanged between two local slaves need to have * device-up check as well as skb-share check. 
*/ if (local) { if (unlikely(!(dev->flags & IFF_UP))) { kfree_skb(skb); goto out; } skb = skb_share_check(skb, GFP_ATOMIC); if (!skb) goto out; *pskb = skb; } if (local) { skb->pkt_type = PACKET_HOST; if (dev_forward_skb(ipvlan->dev, skb) == NET_RX_SUCCESS) success = true; } else { skb->dev = dev; ret = RX_HANDLER_ANOTHER; success = true; } out: ipvlan_count_rx(ipvlan, len, success, false); return ret; } struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port, void *lyr3h, int addr_type, bool use_dest) { struct ipvl_addr *addr = NULL; switch (addr_type) { #if IS_ENABLED(CONFIG_IPV6) case IPVL_IPV6: { struct ipv6hdr *ip6h; struct in6_addr *i6addr; ip6h = (struct ipv6hdr *)lyr3h; i6addr = use_dest ? &ip6h->daddr : &ip6h->saddr; addr = ipvlan_ht_addr_lookup(port, i6addr, true); break; } case IPVL_ICMPV6: { struct nd_msg *ndmh; struct in6_addr *i6addr; /* Make sure that the NeighborSolicitation ICMPv6 packets * are handled to avoid DAD issue. */ ndmh = (struct nd_msg *)lyr3h; if (ndmh->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) { i6addr = &ndmh->target; addr = ipvlan_ht_addr_lookup(port, i6addr, true); } break; } #endif case IPVL_IPV4: { struct iphdr *ip4h; __be32 *i4addr; ip4h = (struct iphdr *)lyr3h; i4addr = use_dest ? &ip4h->daddr : &ip4h->saddr; addr = ipvlan_ht_addr_lookup(port, i4addr, false); break; } case IPVL_ARP: { struct arphdr *arph; unsigned char *arp_ptr; __be32 dip; arph = (struct arphdr *)lyr3h; arp_ptr = (unsigned char *)(arph + 1); if (use_dest) arp_ptr += (2 * port->dev->addr_len) + 4; else arp_ptr += port->dev->addr_len; memcpy(&dip, arp_ptr, 4); addr = ipvlan_ht_addr_lookup(port, &dip, false); break; } } return addr; } static noinline_for_stack int ipvlan_process_v4_outbound(struct sk_buff *skb) { const struct iphdr *ip4h = ip_hdr(skb); struct net_device *dev = skb->dev; struct net *net = dev_net(dev); struct rtable *rt; int err, ret = NET_XMIT_DROP; struct flowi4 fl4 = { .flowi4_oif = dev->ifindex, .flowi4_tos = inet_dscp_to_dsfield(ip4h_dscp(ip4h)), .flowi4_flags = FLOWI_FLAG_ANYSRC, .flowi4_mark = skb->mark, .daddr = ip4h->daddr, .saddr = ip4h->saddr, }; rt = ip_route_output_flow(net, &fl4, NULL); if (IS_ERR(rt)) goto err; if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) { ip_rt_put(rt); goto err; } skb_dst_set(skb, &rt->dst); memset(IPCB(skb), 0, sizeof(*IPCB(skb))); err = ip_local_out(net, NULL, skb); if (unlikely(net_xmit_eval(err))) DEV_STATS_INC(dev, tx_errors); else ret = NET_XMIT_SUCCESS; goto out; err: DEV_STATS_INC(dev, tx_errors); kfree_skb(skb); out: return ret; } #if IS_ENABLED(CONFIG_IPV6) static noinline_for_stack int ipvlan_route_v6_outbound(struct net_device *dev, struct sk_buff *skb) { const struct ipv6hdr *ip6h = ipv6_hdr(skb); struct flowi6 fl6 = { .flowi6_oif = dev->ifindex, .daddr = ip6h->daddr, .saddr = ip6h->saddr, .flowi6_flags = FLOWI_FLAG_ANYSRC, .flowlabel = ip6_flowinfo(ip6h), .flowi6_mark = skb->mark, .flowi6_proto = ip6h->nexthdr, }; struct dst_entry *dst; int err; dst = ip6_route_output(dev_net(dev), NULL, &fl6); err = dst->error; if (err) { dst_release(dst); return err; } skb_dst_set(skb, dst); return 0; } static int ipvlan_process_v6_outbound(struct sk_buff *skb) { struct net_device *dev = skb->dev; int err, ret = NET_XMIT_DROP; err = ipvlan_route_v6_outbound(dev, skb); if (unlikely(err)) { DEV_STATS_INC(dev, tx_errors); kfree_skb(skb); return err; } memset(IP6CB(skb), 0, sizeof(*IP6CB(skb))); err = ip6_local_out(dev_net(dev), NULL, skb); if (unlikely(net_xmit_eval(err))) DEV_STATS_INC(dev, tx_errors); else ret = 
NET_XMIT_SUCCESS; return ret; } #else static int ipvlan_process_v6_outbound(struct sk_buff *skb) { return NET_XMIT_DROP; } #endif static int ipvlan_process_outbound(struct sk_buff *skb) { int ret = NET_XMIT_DROP; /* The ipvlan is a pseudo-L2 device, so the packets that we receive * will have L2, which needs to be discarded before the packet is * processed further in the net-ns of the main-device. */ if (skb_mac_header_was_set(skb)) { /* In this mode we don't care about * multicast and broadcast traffic */ struct ethhdr *ethh = eth_hdr(skb); if (is_multicast_ether_addr(ethh->h_dest)) { pr_debug_ratelimited( "Dropped {multi|broad}cast of type=[%x]\n", ntohs(skb->protocol)); kfree_skb(skb); goto out; } skb_pull(skb, sizeof(*ethh)); skb->mac_header = (typeof(skb->mac_header))~0U; skb_reset_network_header(skb); } if (skb->protocol == htons(ETH_P_IPV6)) ret = ipvlan_process_v6_outbound(skb); else if (skb->protocol == htons(ETH_P_IP)) ret = ipvlan_process_v4_outbound(skb); else { pr_warn_ratelimited("Dropped outbound packet type=%x\n", ntohs(skb->protocol)); kfree_skb(skb); } out: return ret; } static void ipvlan_multicast_enqueue(struct ipvl_port *port, struct sk_buff *skb, bool tx_pkt) { if (skb->protocol == htons(ETH_P_PAUSE)) { kfree_skb(skb); return; } /* Record whether the deferred packet is from the TX or RX path. * Deciding this by looking at the mac addresses on the packet would * lead to erroneous decisions. (This would be true for a * loopback-mode on the master device or a hair-pin mode of the * switch.) */ IPVL_SKB_CB(skb)->tx_pkt = tx_pkt; spin_lock(&port->backlog.lock); if (skb_queue_len(&port->backlog) < IPVLAN_QBACKLOG_LIMIT) { dev_hold(skb->dev); __skb_queue_tail(&port->backlog, skb); spin_unlock(&port->backlog.lock); schedule_work(&port->wq); } else { spin_unlock(&port->backlog.lock); dev_core_stats_rx_dropped_inc(skb->dev); kfree_skb(skb); } } static int ipvlan_xmit_mode_l3(struct sk_buff *skb, struct net_device *dev) { const struct ipvl_dev *ipvlan = netdev_priv(dev); void *lyr3h; struct ipvl_addr *addr; int addr_type; lyr3h = ipvlan_get_L3_hdr(ipvlan->port, skb, &addr_type); if (!lyr3h) goto out; if (!ipvlan_is_vepa(ipvlan->port)) { addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true); if (addr) { if (ipvlan_is_private(ipvlan->port)) { consume_skb(skb); return NET_XMIT_DROP; } ipvlan_rcv_frame(addr, &skb, true); return NET_XMIT_SUCCESS; } } out: ipvlan_skb_crossing_ns(skb, ipvlan->phy_dev); return ipvlan_process_outbound(skb); } static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev) { const struct ipvl_dev *ipvlan = netdev_priv(dev); struct ethhdr *eth = skb_eth_hdr(skb); struct ipvl_addr *addr; void *lyr3h; int addr_type; if (!ipvlan_is_vepa(ipvlan->port) && ether_addr_equal(eth->h_dest, eth->h_source)) { lyr3h = ipvlan_get_L3_hdr(ipvlan->port, skb, &addr_type); if (lyr3h) { addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true); if (addr) { if (ipvlan_is_private(ipvlan->port)) { consume_skb(skb); return NET_XMIT_DROP; } ipvlan_rcv_frame(addr, &skb, true); return NET_XMIT_SUCCESS; } } skb = skb_share_check(skb, GFP_ATOMIC); if (!skb) return NET_XMIT_DROP; /* Packet definitely does not belong to any of the * virtual devices, but the dest is local. So forward * the skb for the main-dev. At the RX side we just return * RX_PASS for it to be processed further on the stack.
*/ dev_forward_skb(ipvlan->phy_dev, skb); return NET_XMIT_SUCCESS; } else if (is_multicast_ether_addr(eth->h_dest)) { skb_reset_mac_header(skb); ipvlan_skb_crossing_ns(skb, NULL); ipvlan_multicast_enqueue(ipvlan->port, skb, true); return NET_XMIT_SUCCESS; } skb->dev = ipvlan->phy_dev; return dev_queue_xmit(skb); } int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev) { struct ipvl_dev *ipvlan = netdev_priv(dev); struct ipvl_port *port = ipvlan_port_get_rcu_bh(ipvlan->phy_dev); if (!port) goto out; if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr)))) goto out; switch (port->mode) { case IPVLAN_MODE_L2: return ipvlan_xmit_mode_l2(skb, dev); case IPVLAN_MODE_L3: #ifdef CONFIG_IPVLAN_L3S case IPVLAN_MODE_L3S: #endif return ipvlan_xmit_mode_l3(skb, dev); } /* Should not reach here */ WARN_ONCE(true, "%s called for mode = [%x]\n", __func__, port->mode); out: kfree_skb(skb); return NET_XMIT_DROP; } static bool ipvlan_external_frame(struct sk_buff *skb, struct ipvl_port *port) { struct ethhdr *eth = eth_hdr(skb); struct ipvl_addr *addr; void *lyr3h; int addr_type; if (ether_addr_equal(eth->h_source, skb->dev->dev_addr)) { lyr3h = ipvlan_get_L3_hdr(port, skb, &addr_type); if (!lyr3h) return true; addr = ipvlan_addr_lookup(port, lyr3h, addr_type, false); if (addr) return false; } return true; } static rx_handler_result_t ipvlan_handle_mode_l3(struct sk_buff **pskb, struct ipvl_port *port) { void *lyr3h; int addr_type; struct ipvl_addr *addr; struct sk_buff *skb = *pskb; rx_handler_result_t ret = RX_HANDLER_PASS; lyr3h = ipvlan_get_L3_hdr(port, skb, &addr_type); if (!lyr3h) goto out; addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true); if (addr) ret = ipvlan_rcv_frame(addr, pskb, false); out: return ret; } static rx_handler_result_t ipvlan_handle_mode_l2(struct sk_buff **pskb, struct ipvl_port *port) { struct sk_buff *skb = *pskb; struct ethhdr *eth = eth_hdr(skb); rx_handler_result_t ret = RX_HANDLER_PASS; if (is_multicast_ether_addr(eth->h_dest)) { if (ipvlan_external_frame(skb, port)) { struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); /* External frames are queued for device local * distribution, but a copy is given to master * straight away to avoid sending duplicates later * when work-queue processes this frame. This is * achieved by returning RX_HANDLER_PASS. */ if (nskb) { ipvlan_skb_crossing_ns(nskb, NULL); ipvlan_multicast_enqueue(port, nskb, false); } } } else { /* Perform like l3 mode for non-multicast packet */ ret = ipvlan_handle_mode_l3(pskb, port); } return ret; } rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb) { struct sk_buff *skb = *pskb; struct ipvl_port *port = ipvlan_port_get_rcu(skb->dev); if (!port) return RX_HANDLER_PASS; switch (port->mode) { case IPVLAN_MODE_L2: return ipvlan_handle_mode_l2(pskb, port); case IPVLAN_MODE_L3: return ipvlan_handle_mode_l3(pskb, port); #ifdef CONFIG_IPVLAN_L3S case IPVLAN_MODE_L3S: return RX_HANDLER_PASS; #endif } /* Should not reach here */ WARN_ONCE(true, "%s called for mode = [%x]\n", __func__, port->mode); kfree_skb(skb); return RX_HANDLER_CONSUMED; }
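/*
 * Editor's illustrative sketch (userspace, not kernel code) of the bucket
 * scheme behind ipvlan_ht_addr_lookup() above: hash the address together
 * with a per-boot secret, then mask down to the table size so each lookup
 * scans a single short hlist.  The hash function and constants below are
 * stand-ins for jhash_1word()/IPVLAN_HASH_MASK, chosen only for this
 * demonstration.
 */
#include <stdint.h>
#include <stdio.h>

#define TOY_HASH_BITS	8
#define TOY_HASH_SIZE	(1u << TOY_HASH_BITS)
#define TOY_HASH_MASK	(TOY_HASH_SIZE - 1)

/* stand-in for jhash_1word(); any well-mixed 32-bit hash would do */
static uint32_t toy_v4_hash(uint32_t addr, uint32_t secret)
{
	uint32_t h = (addr ^ secret) * 0x9e3779b1u;	/* Fibonacci hashing */

	return (h >> (32 - TOY_HASH_BITS)) & TOY_HASH_MASK;
}

int main(void)
{
	uint32_t addr = 0xc000020a;	/* 192.0.2.10, big-endian example */
	uint32_t secret = 0x12345678;	/* ipvlan_jhash_secret analogue */

	printf("address 0xc000020a -> bucket %u of %u\n",
	       toy_v4_hash(addr, secret), TOY_HASH_SIZE);
	return 0;
}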
// SPDX-License-Identifier: GPL-2.0-or-later /* * Surface2.0/SUR40/PixelSense input driver * * Copyright (c) 2014 by Florian 'floe' Echtler <floe@butterbrot.org> * * Derived from the USB Skeleton driver 1.1, * Copyright (c) 2003 Greg Kroah-Hartman (greg@kroah.com) * * and from the Apple USB BCM5974 multitouch driver, * Copyright (c) 2008 Henrik Rydberg (rydberg@euromail.se) * * and from the generic hid-multitouch driver, * Copyright (c) 2010-2012 Stephane Chatty <chatty@enac.fr> * * and from the v4l2-pci-skeleton driver, * Copyright (c) 2014 Cisco Systems, Inc. */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/completion.h> #include <linux/uaccess.h> #include <linux/usb.h> #include <linux/printk.h> #include <linux/input.h> #include <linux/input/mt.h> #include <linux/usb/input.h> #include <linux/videodev2.h> #include <media/v4l2-device.h> #include <media/v4l2-dev.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-ctrls.h> #include <media/videobuf2-v4l2.h> #include <media/videobuf2-dma-sg.h> /* read 512 bytes from endpoint 0x86 -> get header + blobs */ struct sur40_header { __le16 type; /* always 0x0001 */ __le16 count; /* count of blobs (if 0: continue prev. packet) */ __le32 packet_id; /* unique ID for all packets in one frame */ __le32 timestamp; /* milliseconds (inc. by 16 or 17 each frame) */ __le32 unknown; /* "epoch?" always 02/03 00 00 00 */ } __packed; struct sur40_blob { __le16 blob_id; u8 action; /* 0x02 = enter/exit, 0x03 = update (?) */ u8 type; /* bitmask (0x01 blob, 0x02 touch, 0x04 tag) */ __le16 bb_pos_x; /* upper left corner of bounding box */ __le16 bb_pos_y; __le16 bb_size_x; /* size of bounding box */ __le16 bb_size_y; __le16 pos_x; /* finger tip position */ __le16 pos_y; __le16 ctr_x; /* centroid position */ __le16 ctr_y; __le16 axis_x; /* somehow related to major/minor axis, mostly: */ __le16 axis_y; /* axis_x == bb_size_y && axis_y == bb_size_x */ __le32 angle; /* orientation in radians relative to x axis - actually an IEEE754 float, don't use in kernel */ __le32 area; /* size in pixels/pressure (?)
*/ u8 padding[24]; __le32 tag_id; /* valid when type == 0x04 (SUR40_TAG) */ __le32 unknown; } __packed; /* combined header/blob data */ struct sur40_data { struct sur40_header header; struct sur40_blob blobs[]; } __packed; /* read 512 bytes from endpoint 0x82 -> get header below * continue reading 16k blocks until header.size bytes read */ struct sur40_image_header { __le32 magic; /* "SUBF" */ __le32 packet_id; __le32 size; /* always 0x0007e900 = 960x540 */ __le32 timestamp; /* milliseconds (increases by 16 or 17 each frame) */ __le32 unknown; /* "epoch?" always 02/03 00 00 00 */ } __packed; /* version information */ #define DRIVER_SHORT "sur40" #define DRIVER_LONG "Samsung SUR40" #define DRIVER_AUTHOR "Florian 'floe' Echtler <floe@butterbrot.org>" #define DRIVER_DESC "Surface2.0/SUR40/PixelSense input driver" /* vendor and device IDs */ #define ID_MICROSOFT 0x045e #define ID_SUR40 0x0775 /* sensor resolution */ #define SENSOR_RES_X 1920 #define SENSOR_RES_Y 1080 /* touch data endpoint */ #define TOUCH_ENDPOINT 0x86 /* video data endpoint */ #define VIDEO_ENDPOINT 0x82 /* video header fields */ #define VIDEO_HEADER_MAGIC 0x46425553 #define VIDEO_PACKET_SIZE 16384 /* polling interval (ms) */ #define POLL_INTERVAL 1 /* maximum number of contacts FIXME: this is a guess? */ #define MAX_CONTACTS 64 /* control commands */ #define SUR40_GET_VERSION 0xb0 /* 12 bytes string */ #define SUR40_ACCEL_CAPS 0xb3 /* 5 bytes */ #define SUR40_SENSOR_CAPS 0xc1 /* 24 bytes */ #define SUR40_POKE 0xc5 /* poke register byte */ #define SUR40_PEEK 0xc4 /* 48 bytes registers */ #define SUR40_GET_STATE 0xc5 /* 4 bytes state (?) */ #define SUR40_GET_SENSORS 0xb1 /* 8 bytes sensors */ #define SUR40_BLOB 0x01 #define SUR40_TOUCH 0x02 #define SUR40_TAG 0x04 /* video controls */ #define SUR40_BRIGHTNESS_MAX 0xff #define SUR40_BRIGHTNESS_MIN 0x00 #define SUR40_BRIGHTNESS_DEF 0xff #define SUR40_CONTRAST_MAX 0x0f #define SUR40_CONTRAST_MIN 0x00 #define SUR40_CONTRAST_DEF 0x0a #define SUR40_GAIN_MAX 0x09 #define SUR40_GAIN_MIN 0x00 #define SUR40_GAIN_DEF 0x08 #define SUR40_BACKLIGHT_MAX 0x01 #define SUR40_BACKLIGHT_MIN 0x00 #define SUR40_BACKLIGHT_DEF 0x01 #define sur40_str(s) #s #define SUR40_PARAM_RANGE(lo, hi) " (range " sur40_str(lo) "-" sur40_str(hi) ")" /* module parameters */ static uint brightness = SUR40_BRIGHTNESS_DEF; module_param(brightness, uint, 0644); MODULE_PARM_DESC(brightness, "set initial brightness" SUR40_PARAM_RANGE(SUR40_BRIGHTNESS_MIN, SUR40_BRIGHTNESS_MAX)); static uint contrast = SUR40_CONTRAST_DEF; module_param(contrast, uint, 0644); MODULE_PARM_DESC(contrast, "set initial contrast" SUR40_PARAM_RANGE(SUR40_CONTRAST_MIN, SUR40_CONTRAST_MAX)); static uint gain = SUR40_GAIN_DEF; module_param(gain, uint, 0644); MODULE_PARM_DESC(gain, "set initial gain" SUR40_PARAM_RANGE(SUR40_GAIN_MIN, SUR40_GAIN_MAX)); static const struct v4l2_pix_format sur40_pix_format[] = { { .pixelformat = V4L2_TCH_FMT_TU08, .width = SENSOR_RES_X / 2, .height = SENSOR_RES_Y / 2, .field = V4L2_FIELD_NONE, .colorspace = V4L2_COLORSPACE_RAW, .bytesperline = SENSOR_RES_X / 2, .sizeimage = (SENSOR_RES_X/2) * (SENSOR_RES_Y/2), }, { .pixelformat = V4L2_PIX_FMT_GREY, .width = SENSOR_RES_X / 2, .height = SENSOR_RES_Y / 2, .field = V4L2_FIELD_NONE, .colorspace = V4L2_COLORSPACE_RAW, .bytesperline = SENSOR_RES_X / 2, .sizeimage = (SENSOR_RES_X/2) * (SENSOR_RES_Y/2), } }; /* master device state */ struct sur40_state { struct usb_device *usbdev; struct device *dev; struct input_dev *input; struct v4l2_device v4l2; struct video_device vdev; 
struct mutex lock; struct v4l2_pix_format pix_fmt; struct v4l2_ctrl_handler hdl; struct vb2_queue queue; struct list_head buf_list; spinlock_t qlock; int sequence; struct sur40_data *bulk_in_buffer; size_t bulk_in_size; u8 bulk_in_epaddr; u8 vsvideo; char phys[64]; }; struct sur40_buffer { struct vb2_v4l2_buffer vb; struct list_head list; }; /* forward declarations */ static const struct video_device sur40_video_device; static const struct vb2_queue sur40_queue; static void sur40_process_video(struct sur40_state *sur40); static int sur40_s_ctrl(struct v4l2_ctrl *ctrl); static const struct v4l2_ctrl_ops sur40_ctrl_ops = { .s_ctrl = sur40_s_ctrl, }; /* * Note: an earlier, non-public version of this driver used USB_RECIP_ENDPOINT * here by mistake which is very likely to have corrupted the firmware EEPROM * on two separate SUR40 devices. Thanks to Alan Stern who spotted this bug. * Should you ever run into a similar problem, the background story to this * incident and instructions on how to fix the corrupted EEPROM are available * at https://floe.butterbrot.org/matrix/hacking/surface/brick.html */ /* command wrapper */ static int sur40_command(struct sur40_state *dev, u8 command, u16 index, void *buffer, u16 size) { return usb_control_msg(dev->usbdev, usb_rcvctrlpipe(dev->usbdev, 0), command, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, 0x00, index, buffer, size, 1000); } /* poke a byte in the panel register space */ static int sur40_poke(struct sur40_state *dev, u8 offset, u8 value) { int result; u8 index = 0x96; // 0xae for permanent write result = usb_control_msg(dev->usbdev, usb_sndctrlpipe(dev->usbdev, 0), SUR40_POKE, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT, 0x32, index, NULL, 0, 1000); if (result < 0) goto error; msleep(5); result = usb_control_msg(dev->usbdev, usb_sndctrlpipe(dev->usbdev, 0), SUR40_POKE, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT, 0x72, offset, NULL, 0, 1000); if (result < 0) goto error; msleep(5); result = usb_control_msg(dev->usbdev, usb_sndctrlpipe(dev->usbdev, 0), SUR40_POKE, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT, 0xb2, value, NULL, 0, 1000); if (result < 0) goto error; msleep(5); error: return result; } static int sur40_set_preprocessor(struct sur40_state *dev, u8 value) { u8 setting_07[2] = { 0x01, 0x00 }; u8 setting_17[2] = { 0x85, 0x80 }; int result; if (value > 1) return -ERANGE; result = usb_control_msg(dev->usbdev, usb_sndctrlpipe(dev->usbdev, 0), SUR40_POKE, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT, 0x07, setting_07[value], NULL, 0, 1000); if (result < 0) goto error; msleep(5); result = usb_control_msg(dev->usbdev, usb_sndctrlpipe(dev->usbdev, 0), SUR40_POKE, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT, 0x17, setting_17[value], NULL, 0, 1000); if (result < 0) goto error; msleep(5); error: return result; } static void sur40_set_vsvideo(struct sur40_state *handle, u8 value) { int i; for (i = 0; i < 4; i++) sur40_poke(handle, 0x1c+i, value); handle->vsvideo = value; } static void sur40_set_irlevel(struct sur40_state *handle, u8 value) { int i; for (i = 0; i < 8; i++) sur40_poke(handle, 0x08+(2*i), value); } /* Initialization routine, called from sur40_open */ static int sur40_init(struct sur40_state *dev) { int result; u8 *buffer; buffer = kmalloc(24, GFP_KERNEL); if (!buffer) { result = -ENOMEM; goto error; } /* stupidly replay the original MS driver init sequence */ result = sur40_command(dev, SUR40_GET_VERSION, 0x00, buffer, 12); if (result < 0) goto error; result = sur40_command(dev, SUR40_GET_VERSION, 0x01, 
buffer, 12); if (result < 0) goto error; result = sur40_command(dev, SUR40_GET_VERSION, 0x02, buffer, 12); if (result < 0) goto error; result = sur40_command(dev, SUR40_SENSOR_CAPS, 0x00, buffer, 24); if (result < 0) goto error; result = sur40_command(dev, SUR40_ACCEL_CAPS, 0x00, buffer, 5); if (result < 0) goto error; result = sur40_command(dev, SUR40_GET_VERSION, 0x03, buffer, 12); if (result < 0) goto error; result = 0; /* * Discard the result buffer - no known data inside except * some version strings, maybe extract these sometime... */ error: kfree(buffer); return result; } /* * Callback routines from input_dev */ /* Enable the device, polling will now start. */ static int sur40_open(struct input_dev *input) { struct sur40_state *sur40 = input_get_drvdata(input); dev_dbg(sur40->dev, "open\n"); return sur40_init(sur40); } /* Disable device, polling has stopped. */ static void sur40_close(struct input_dev *input) { struct sur40_state *sur40 = input_get_drvdata(input); dev_dbg(sur40->dev, "close\n"); /* * There is no known way to stop the device, so we simply * stop polling. */ } /* * This function is called when a whole contact has been processed, * so that it can assign it to a slot and store the data there. */ static void sur40_report_blob(struct sur40_blob *blob, struct input_dev *input) { int wide, major, minor; int bb_size_x, bb_size_y, pos_x, pos_y, ctr_x, ctr_y, slotnum; if (blob->type != SUR40_TOUCH) return; slotnum = input_mt_get_slot_by_key(input, le16_to_cpu(blob->blob_id)); if (slotnum < 0 || slotnum >= MAX_CONTACTS) return; bb_size_x = le16_to_cpu(blob->bb_size_x); bb_size_y = le16_to_cpu(blob->bb_size_y); pos_x = le16_to_cpu(blob->pos_x); pos_y = le16_to_cpu(blob->pos_y); ctr_x = le16_to_cpu(blob->ctr_x); ctr_y = le16_to_cpu(blob->ctr_y); input_mt_slot(input, slotnum); input_mt_report_slot_state(input, MT_TOOL_FINGER, 1); wide = (bb_size_x > bb_size_y); major = max(bb_size_x, bb_size_y); minor = min(bb_size_x, bb_size_y); input_report_abs(input, ABS_MT_POSITION_X, pos_x); input_report_abs(input, ABS_MT_POSITION_Y, pos_y); input_report_abs(input, ABS_MT_TOOL_X, ctr_x); input_report_abs(input, ABS_MT_TOOL_Y, ctr_y); /* TODO: use a better orientation measure */ input_report_abs(input, ABS_MT_ORIENTATION, wide); input_report_abs(input, ABS_MT_TOUCH_MAJOR, major); input_report_abs(input, ABS_MT_TOUCH_MINOR, minor); } /* core function: poll for new input data */ static void sur40_poll(struct input_dev *input) { struct sur40_state *sur40 = input_get_drvdata(input); int result, bulk_read, need_blobs, packet_blobs, i; struct sur40_header *header = &sur40->bulk_in_buffer->header; struct sur40_blob *inblob = &sur40->bulk_in_buffer->blobs[0]; dev_dbg(sur40->dev, "poll\n"); need_blobs = -1; do { /* perform a blocking bulk read to get data from the device */ result = usb_bulk_msg(sur40->usbdev, usb_rcvbulkpipe(sur40->usbdev, sur40->bulk_in_epaddr), sur40->bulk_in_buffer, sur40->bulk_in_size, &bulk_read, 1000); dev_dbg(sur40->dev, "received %d bytes\n", bulk_read); if (result < 0) { dev_err(sur40->dev, "error in usb_bulk_read\n"); return; } result = bulk_read - sizeof(struct sur40_header); if (result % sizeof(struct sur40_blob) != 0) { dev_err(sur40->dev, "transfer size mismatch\n"); return; } /* first packet? */ if (need_blobs == -1) { need_blobs = le16_to_cpu(header->count); dev_dbg(sur40->dev, "need %d blobs\n", need_blobs); /* packet_id = le32_to_cpu(header->packet_id); */ } /* * Sanity check. 
when video data is also being retrieved, the * packet ID will usually increase in the middle of a series * instead of at the end. However, the data is still consistent, * so the packet ID is probably just valid for the first packet * in a series. if (packet_id != le32_to_cpu(header->packet_id)) dev_dbg(sur40->dev, "packet ID mismatch\n"); */ packet_blobs = result / sizeof(struct sur40_blob); dev_dbg(sur40->dev, "received %d blobs\n", packet_blobs); /* packets always contain at least 4 blobs, even if empty */ if (packet_blobs > need_blobs) packet_blobs = need_blobs; for (i = 0; i < packet_blobs; i++) { need_blobs--; dev_dbg(sur40->dev, "processing blob\n"); sur40_report_blob(&(inblob[i]), input); } } while (need_blobs > 0); input_mt_sync_frame(input); input_sync(input); sur40_process_video(sur40); } /* deal with video data */ static void sur40_process_video(struct sur40_state *sur40) { struct sur40_image_header *img = (void *)(sur40->bulk_in_buffer); struct sur40_buffer *new_buf; struct usb_sg_request sgr; struct sg_table *sgt; int result, bulk_read; if (!vb2_start_streaming_called(&sur40->queue)) return; /* get a new buffer from the list */ spin_lock(&sur40->qlock); if (list_empty(&sur40->buf_list)) { dev_dbg(sur40->dev, "buffer queue empty\n"); spin_unlock(&sur40->qlock); return; } new_buf = list_entry(sur40->buf_list.next, struct sur40_buffer, list); list_del(&new_buf->list); spin_unlock(&sur40->qlock); dev_dbg(sur40->dev, "buffer acquired\n"); /* retrieve data via bulk read */ result = usb_bulk_msg(sur40->usbdev, usb_rcvbulkpipe(sur40->usbdev, VIDEO_ENDPOINT), sur40->bulk_in_buffer, sur40->bulk_in_size, &bulk_read, 1000); if (result < 0) { dev_err(sur40->dev, "error in usb_bulk_read\n"); goto err_poll; } if (bulk_read != sizeof(struct sur40_image_header)) { dev_err(sur40->dev, "received %d bytes (%zd expected)\n", bulk_read, sizeof(struct sur40_image_header)); goto err_poll; } if (le32_to_cpu(img->magic) != VIDEO_HEADER_MAGIC) { dev_err(sur40->dev, "image magic mismatch\n"); goto err_poll; } if (le32_to_cpu(img->size) != sur40->pix_fmt.sizeimage) { dev_err(sur40->dev, "image size mismatch\n"); goto err_poll; } dev_dbg(sur40->dev, "header acquired\n"); sgt = vb2_dma_sg_plane_desc(&new_buf->vb.vb2_buf, 0); result = usb_sg_init(&sgr, sur40->usbdev, usb_rcvbulkpipe(sur40->usbdev, VIDEO_ENDPOINT), 0, sgt->sgl, sgt->nents, sur40->pix_fmt.sizeimage, 0); if (result < 0) { dev_err(sur40->dev, "error %d in usb_sg_init\n", result); goto err_poll; } usb_sg_wait(&sgr); if (sgr.status < 0) { dev_err(sur40->dev, "error %d in usb_sg_wait\n", sgr.status); goto err_poll; } dev_dbg(sur40->dev, "image acquired\n"); /* return error if streaming was stopped in the meantime */ if (sur40->sequence == -1) return; /* mark as finished */ new_buf->vb.vb2_buf.timestamp = ktime_get_ns(); new_buf->vb.sequence = sur40->sequence++; new_buf->vb.field = V4L2_FIELD_NONE; vb2_buffer_done(&new_buf->vb.vb2_buf, VB2_BUF_STATE_DONE); dev_dbg(sur40->dev, "buffer marked done\n"); return; err_poll: vb2_buffer_done(&new_buf->vb.vb2_buf, VB2_BUF_STATE_ERROR); } /* Initialize input device parameters. 
*/ static int sur40_input_setup_events(struct input_dev *input_dev) { int error; input_set_abs_params(input_dev, ABS_MT_POSITION_X, 0, SENSOR_RES_X, 0, 0); input_set_abs_params(input_dev, ABS_MT_POSITION_Y, 0, SENSOR_RES_Y, 0, 0); input_set_abs_params(input_dev, ABS_MT_TOOL_X, 0, SENSOR_RES_X, 0, 0); input_set_abs_params(input_dev, ABS_MT_TOOL_Y, 0, SENSOR_RES_Y, 0, 0); /* max value unknown, but major/minor axis * can never be larger than screen */ input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, 0, SENSOR_RES_X, 0, 0); input_set_abs_params(input_dev, ABS_MT_TOUCH_MINOR, 0, SENSOR_RES_Y, 0, 0); input_set_abs_params(input_dev, ABS_MT_ORIENTATION, 0, 1, 0, 0); error = input_mt_init_slots(input_dev, MAX_CONTACTS, INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED); if (error) { dev_err(input_dev->dev.parent, "failed to set up slots\n"); return error; } return 0; } /* Check candidate USB interface. */ static int sur40_probe(struct usb_interface *interface, const struct usb_device_id *id) { struct usb_device *usbdev = interface_to_usbdev(interface); struct sur40_state *sur40; struct usb_host_interface *iface_desc; struct usb_endpoint_descriptor *endpoint; struct input_dev *input; int error; /* Check if we really have the right interface. */ iface_desc = interface->cur_altsetting; if (iface_desc->desc.bInterfaceClass != 0xFF) return -ENODEV; if (iface_desc->desc.bNumEndpoints < 5) return -ENODEV; /* Use endpoint #4 (0x86). */ endpoint = &iface_desc->endpoint[4].desc; if (endpoint->bEndpointAddress != TOUCH_ENDPOINT) return -ENODEV; /* Allocate memory for our device state and initialize it. */ sur40 = kzalloc(sizeof(*sur40), GFP_KERNEL); if (!sur40) return -ENOMEM; input = input_allocate_device(); if (!input) { error = -ENOMEM; goto err_free_dev; } /* initialize locks/lists */ INIT_LIST_HEAD(&sur40->buf_list); spin_lock_init(&sur40->qlock); mutex_init(&sur40->lock); /* Set up regular input device structure */ input->name = DRIVER_LONG; usb_to_input_id(usbdev, &input->id); usb_make_path(usbdev, sur40->phys, sizeof(sur40->phys)); strlcat(sur40->phys, "/input0", sizeof(sur40->phys)); input->phys = sur40->phys; input->dev.parent = &interface->dev; input->open = sur40_open; input->close = sur40_close; error = sur40_input_setup_events(input); if (error) goto err_free_input; input_set_drvdata(input, sur40); error = input_setup_polling(input, sur40_poll); if (error) { dev_err(&interface->dev, "failed to set up polling"); goto err_free_input; } input_set_poll_interval(input, POLL_INTERVAL); sur40->usbdev = usbdev; sur40->dev = &interface->dev; sur40->input = input; /* use the bulk-in endpoint tested above */ sur40->bulk_in_size = usb_endpoint_maxp(endpoint); sur40->bulk_in_epaddr = endpoint->bEndpointAddress; sur40->bulk_in_buffer = kmalloc(sur40->bulk_in_size, GFP_KERNEL); if (!sur40->bulk_in_buffer) { dev_err(&interface->dev, "Unable to allocate input buffer."); error = -ENOMEM; goto err_free_input; } /* register the polled input device */ error = input_register_device(input); if (error) { dev_err(&interface->dev, "Unable to register polled input device."); goto err_free_buffer; } /* register the video master device */ snprintf(sur40->v4l2.name, sizeof(sur40->v4l2.name), "%s", DRIVER_LONG); error = v4l2_device_register(sur40->dev, &sur40->v4l2); if (error) { dev_err(&interface->dev, "Unable to register video master device."); goto err_unreg_v4l2; } /* initialize the lock and subdevice */ sur40->queue = sur40_queue; sur40->queue.drv_priv = sur40; sur40->queue.lock = &sur40->lock; sur40->queue.dev = sur40->dev; /* 
initialize the queue */ error = vb2_queue_init(&sur40->queue); if (error) goto err_unreg_v4l2; sur40->pix_fmt = sur40_pix_format[0]; sur40->vdev = sur40_video_device; sur40->vdev.v4l2_dev = &sur40->v4l2; sur40->vdev.lock = &sur40->lock; sur40->vdev.queue = &sur40->queue; video_set_drvdata(&sur40->vdev, sur40); /* initialize the control handler for 4 controls */ v4l2_ctrl_handler_init(&sur40->hdl, 4); sur40->v4l2.ctrl_handler = &sur40->hdl; sur40->vsvideo = (SUR40_CONTRAST_DEF << 4) | SUR40_GAIN_DEF; v4l2_ctrl_new_std(&sur40->hdl, &sur40_ctrl_ops, V4L2_CID_BRIGHTNESS, SUR40_BRIGHTNESS_MIN, SUR40_BRIGHTNESS_MAX, 1, clamp(brightness, (uint)SUR40_BRIGHTNESS_MIN, (uint)SUR40_BRIGHTNESS_MAX)); v4l2_ctrl_new_std(&sur40->hdl, &sur40_ctrl_ops, V4L2_CID_CONTRAST, SUR40_CONTRAST_MIN, SUR40_CONTRAST_MAX, 1, clamp(contrast, (uint)SUR40_CONTRAST_MIN, (uint)SUR40_CONTRAST_MAX)); v4l2_ctrl_new_std(&sur40->hdl, &sur40_ctrl_ops, V4L2_CID_GAIN, SUR40_GAIN_MIN, SUR40_GAIN_MAX, 1, clamp(gain, (uint)SUR40_GAIN_MIN, (uint)SUR40_GAIN_MAX)); v4l2_ctrl_new_std(&sur40->hdl, &sur40_ctrl_ops, V4L2_CID_BACKLIGHT_COMPENSATION, SUR40_BACKLIGHT_MIN, SUR40_BACKLIGHT_MAX, 1, SUR40_BACKLIGHT_DEF); v4l2_ctrl_handler_setup(&sur40->hdl); if (sur40->hdl.error) { dev_err(&interface->dev, "Unable to register video controls."); v4l2_ctrl_handler_free(&sur40->hdl); error = sur40->hdl.error; goto err_unreg_v4l2; } error = video_register_device(&sur40->vdev, VFL_TYPE_TOUCH, -1); if (error) { dev_err(&interface->dev, "Unable to register video subdevice."); goto err_unreg_video; } /* we can register the device now, as it is ready */ usb_set_intfdata(interface, sur40); dev_dbg(&interface->dev, "%s is now attached\n", DRIVER_DESC); return 0; err_unreg_video: video_unregister_device(&sur40->vdev); err_unreg_v4l2: v4l2_device_unregister(&sur40->v4l2); err_free_buffer: kfree(sur40->bulk_in_buffer); err_free_input: input_free_device(input); err_free_dev: kfree(sur40); return error; } /* Unregister device & clean up. */ static void sur40_disconnect(struct usb_interface *interface) { struct sur40_state *sur40 = usb_get_intfdata(interface); v4l2_ctrl_handler_free(&sur40->hdl); video_unregister_device(&sur40->vdev); v4l2_device_unregister(&sur40->v4l2); input_unregister_device(sur40->input); kfree(sur40->bulk_in_buffer); kfree(sur40); usb_set_intfdata(interface, NULL); dev_dbg(&interface->dev, "%s is now disconnected\n", DRIVER_DESC); } /* * Setup the constraints of the queue: besides setting the number of planes * per buffer and the size and allocation context of each plane, it also * checks if sufficient buffers have been allocated. Usually 3 is a good * minimum number: many DMA engines need a minimum of 2 buffers in the * queue and you need to have another available for userspace processing. */ static int sur40_queue_setup(struct vb2_queue *q, unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], struct device *alloc_devs[]) { struct sur40_state *sur40 = vb2_get_drv_priv(q); unsigned int q_num_bufs = vb2_get_num_buffers(q); if (q_num_bufs + *nbuffers < 3) *nbuffers = 3 - q_num_bufs; if (*nplanes) return sizes[0] < sur40->pix_fmt.sizeimage ? -EINVAL : 0; *nplanes = 1; sizes[0] = sur40->pix_fmt.sizeimage; return 0; } /* * Prepare the buffer for queueing to the DMA engine: check and set the * payload size. 
*/ static int sur40_buffer_prepare(struct vb2_buffer *vb) { struct sur40_state *sur40 = vb2_get_drv_priv(vb->vb2_queue); unsigned long size = sur40->pix_fmt.sizeimage; if (vb2_plane_size(vb, 0) < size) { dev_err(&sur40->usbdev->dev, "buffer too small (%lu < %lu)\n", vb2_plane_size(vb, 0), size); return -EINVAL; } vb2_set_plane_payload(vb, 0, size); return 0; } /* * Queue this buffer to the DMA engine. */ static void sur40_buffer_queue(struct vb2_buffer *vb) { struct sur40_state *sur40 = vb2_get_drv_priv(vb->vb2_queue); struct sur40_buffer *buf = (struct sur40_buffer *)vb; spin_lock(&sur40->qlock); list_add_tail(&buf->list, &sur40->buf_list); spin_unlock(&sur40->qlock); } static void return_all_buffers(struct sur40_state *sur40, enum vb2_buffer_state state) { struct sur40_buffer *buf, *node; spin_lock(&sur40->qlock); list_for_each_entry_safe(buf, node, &sur40->buf_list, list) { vb2_buffer_done(&buf->vb.vb2_buf, state); list_del(&buf->list); } spin_unlock(&sur40->qlock); } /* * Start streaming. First check if the minimum number of buffers have been * queued. If not, then return -ENOBUFS and the vb2 framework will call * this function again the next time a buffer has been queued until enough * buffers are available to actually start the DMA engine. */ static int sur40_start_streaming(struct vb2_queue *vq, unsigned int count) { struct sur40_state *sur40 = vb2_get_drv_priv(vq); sur40->sequence = 0; return 0; } /* * Stop the DMA engine. Any remaining buffers in the DMA queue are dequeued * and passed on to the vb2 framework marked as STATE_ERROR. */ static void sur40_stop_streaming(struct vb2_queue *vq) { struct sur40_state *sur40 = vb2_get_drv_priv(vq); vb2_wait_for_all_buffers(vq); sur40->sequence = -1; /* Release all active buffers */ return_all_buffers(sur40, VB2_BUF_STATE_ERROR); } /* V4L ioctl */ static int sur40_vidioc_querycap(struct file *file, void *priv, struct v4l2_capability *cap) { struct sur40_state *sur40 = video_drvdata(file); strscpy(cap->driver, DRIVER_SHORT, sizeof(cap->driver)); strscpy(cap->card, DRIVER_LONG, sizeof(cap->card)); usb_make_path(sur40->usbdev, cap->bus_info, sizeof(cap->bus_info)); return 0; } static int sur40_vidioc_enum_input(struct file *file, void *priv, struct v4l2_input *i) { if (i->index != 0) return -EINVAL; i->type = V4L2_INPUT_TYPE_TOUCH; i->std = V4L2_STD_UNKNOWN; strscpy(i->name, "In-Cell Sensor", sizeof(i->name)); i->capabilities = 0; return 0; } static int sur40_vidioc_s_input(struct file *file, void *priv, unsigned int i) { return (i == 0) ? 
0 : -EINVAL; } static int sur40_vidioc_g_input(struct file *file, void *priv, unsigned int *i) { *i = 0; return 0; } static int sur40_vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f) { switch (f->fmt.pix.pixelformat) { case V4L2_PIX_FMT_GREY: f->fmt.pix = sur40_pix_format[1]; break; default: f->fmt.pix = sur40_pix_format[0]; break; } return 0; } static int sur40_vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f) { struct sur40_state *sur40 = video_drvdata(file); switch (f->fmt.pix.pixelformat) { case V4L2_PIX_FMT_GREY: sur40->pix_fmt = sur40_pix_format[1]; break; default: sur40->pix_fmt = sur40_pix_format[0]; break; } f->fmt.pix = sur40->pix_fmt; return 0; } static int sur40_vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f) { struct sur40_state *sur40 = video_drvdata(file); f->fmt.pix = sur40->pix_fmt; return 0; } static int sur40_s_ctrl(struct v4l2_ctrl *ctrl) { struct sur40_state *sur40 = container_of(ctrl->handler, struct sur40_state, hdl); u8 value = sur40->vsvideo; switch (ctrl->id) { case V4L2_CID_BRIGHTNESS: sur40_set_irlevel(sur40, ctrl->val); break; case V4L2_CID_CONTRAST: value = (value & 0x0f) | (ctrl->val << 4); sur40_set_vsvideo(sur40, value); break; case V4L2_CID_GAIN: value = (value & 0xf0) | (ctrl->val); sur40_set_vsvideo(sur40, value); break; case V4L2_CID_BACKLIGHT_COMPENSATION: sur40_set_preprocessor(sur40, ctrl->val); break; } return 0; } static int sur40_ioctl_parm(struct file *file, void *priv, struct v4l2_streamparm *p) { if (p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; p->parm.capture.capability = V4L2_CAP_TIMEPERFRAME; p->parm.capture.timeperframe.numerator = 1; p->parm.capture.timeperframe.denominator = 60; p->parm.capture.readbuffers = 3; return 0; } static int sur40_vidioc_enum_fmt(struct file *file, void *priv, struct v4l2_fmtdesc *f) { if (f->index >= ARRAY_SIZE(sur40_pix_format)) return -EINVAL; f->pixelformat = sur40_pix_format[f->index].pixelformat; f->flags = 0; return 0; } static int sur40_vidioc_enum_framesizes(struct file *file, void *priv, struct v4l2_frmsizeenum *f) { struct sur40_state *sur40 = video_drvdata(file); if ((f->index != 0) || ((f->pixel_format != V4L2_TCH_FMT_TU08) && (f->pixel_format != V4L2_PIX_FMT_GREY))) return -EINVAL; f->type = V4L2_FRMSIZE_TYPE_DISCRETE; f->discrete.width = sur40->pix_fmt.width; f->discrete.height = sur40->pix_fmt.height; return 0; } static int sur40_vidioc_enum_frameintervals(struct file *file, void *priv, struct v4l2_frmivalenum *f) { struct sur40_state *sur40 = video_drvdata(file); if ((f->index > 0) || ((f->pixel_format != V4L2_TCH_FMT_TU08) && (f->pixel_format != V4L2_PIX_FMT_GREY)) || (f->width != sur40->pix_fmt.width) || (f->height != sur40->pix_fmt.height)) return -EINVAL; f->type = V4L2_FRMIVAL_TYPE_DISCRETE; f->discrete.denominator = 60; f->discrete.numerator = 1; return 0; } static const struct usb_device_id sur40_table[] = { { USB_DEVICE(ID_MICROSOFT, ID_SUR40) }, /* Samsung SUR40 */ { } /* terminating null entry */ }; MODULE_DEVICE_TABLE(usb, sur40_table); /* V4L2 structures */ static const struct vb2_ops sur40_queue_ops = { .queue_setup = sur40_queue_setup, .buf_prepare = sur40_buffer_prepare, .buf_queue = sur40_buffer_queue, .start_streaming = sur40_start_streaming, .stop_streaming = sur40_stop_streaming, .wait_prepare = vb2_ops_wait_prepare, .wait_finish = vb2_ops_wait_finish, }; static const struct vb2_queue sur40_queue = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE, /* * VB2_USERPTR in currently not enabled: passing a user pointer to * 
dma-sg will result in segment sizes that are not a multiple of * 512 bytes, which is required by the host controller. */ .io_modes = VB2_MMAP | VB2_READ | VB2_DMABUF, .buf_struct_size = sizeof(struct sur40_buffer), .ops = &sur40_queue_ops, .mem_ops = &vb2_dma_sg_memops, .timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC, .min_queued_buffers = 3, }; static const struct v4l2_file_operations sur40_video_fops = { .owner = THIS_MODULE, .open = v4l2_fh_open, .release = vb2_fop_release, .unlocked_ioctl = video_ioctl2, .read = vb2_fop_read, .mmap = vb2_fop_mmap, .poll = vb2_fop_poll, }; static const struct v4l2_ioctl_ops sur40_video_ioctl_ops = { .vidioc_querycap = sur40_vidioc_querycap, .vidioc_enum_fmt_vid_cap = sur40_vidioc_enum_fmt, .vidioc_try_fmt_vid_cap = sur40_vidioc_try_fmt, .vidioc_s_fmt_vid_cap = sur40_vidioc_s_fmt, .vidioc_g_fmt_vid_cap = sur40_vidioc_g_fmt, .vidioc_enum_framesizes = sur40_vidioc_enum_framesizes, .vidioc_enum_frameintervals = sur40_vidioc_enum_frameintervals, .vidioc_g_parm = sur40_ioctl_parm, .vidioc_s_parm = sur40_ioctl_parm, .vidioc_enum_input = sur40_vidioc_enum_input, .vidioc_g_input = sur40_vidioc_g_input, .vidioc_s_input = sur40_vidioc_s_input, .vidioc_reqbufs = vb2_ioctl_reqbufs, .vidioc_create_bufs = vb2_ioctl_create_bufs, .vidioc_querybuf = vb2_ioctl_querybuf, .vidioc_qbuf = vb2_ioctl_qbuf, .vidioc_dqbuf = vb2_ioctl_dqbuf, .vidioc_expbuf = vb2_ioctl_expbuf, .vidioc_streamon = vb2_ioctl_streamon, .vidioc_streamoff = vb2_ioctl_streamoff, }; static const struct video_device sur40_video_device = { .name = DRIVER_LONG, .fops = &sur40_video_fops, .ioctl_ops = &sur40_video_ioctl_ops, .release = video_device_release_empty, .device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_TOUCH | V4L2_CAP_READWRITE | V4L2_CAP_STREAMING, }; /* USB-specific object needed to register this driver with the USB subsystem. */ static struct usb_driver sur40_driver = { .name = DRIVER_SHORT, .probe = sur40_probe, .disconnect = sur40_disconnect, .id_table = sur40_table, }; module_usb_driver(sur40_driver); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); |
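/*
 * A minimal userspace sketch (not part of the driver above): it opens the
 * V4L2 touch node that sur40 registers via VFL_TYPE_TOUCH and prints the
 * negotiated capture format.  The device path and the bare-bones error
 * handling are illustrative assumptions only.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_capability cap;
	struct v4l2_format fmt;
	int fd = open("/dev/v4l-touch0", O_RDWR);	/* assumed node name */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&cap, 0, sizeof(cap));
	if (ioctl(fd, VIDIOC_QUERYCAP, &cap) == 0)
		printf("driver: %s, card: %s\n",
		       (char *)cap.driver, (char *)cap.card);

	memset(&fmt, 0, sizeof(fmt));
	fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	if (ioctl(fd, VIDIOC_G_FMT, &fmt) == 0)
		printf("format: %ux%u, %u bytes/line, %u bytes/image\n",
		       fmt.fmt.pix.width, fmt.fmt.pix.height,
		       fmt.fmt.pix.bytesperline, fmt.fmt.pix.sizeimage);

	close(fd);
	return 0;
}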
497 496 494 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 | // SPDX-License-Identifier: GPL-2.0-or-later /* * Devmem TCP * * Authors: Mina Almasry <almasrymina@google.com> * Willem de Bruijn <willemdebruijn.kernel@gmail.com> * Kaiyuan Zhang <kaiyuanz@google.com */ #include <linux/dma-buf.h> #include <linux/genalloc.h> #include <linux/mm.h> #include <linux/netdevice.h> #include <linux/types.h> #include <net/netdev_queues.h> #include <net/netdev_rx_queue.h> #include <net/page_pool/helpers.h> #include <trace/events/page_pool.h> #include "devmem.h" #include "mp_dmabuf_devmem.h" #include "page_pool_priv.h" /* Device memory support */ /* Protected by rtnl_lock() */ static DEFINE_XARRAY_FLAGS(net_devmem_dmabuf_bindings, XA_FLAGS_ALLOC1); static void net_devmem_dmabuf_free_chunk_owner(struct gen_pool *genpool, struct gen_pool_chunk *chunk, void *not_used) { struct dmabuf_genpool_chunk_owner *owner = chunk->owner; kvfree(owner->niovs); kfree(owner); } static dma_addr_t net_devmem_get_dma_addr(const struct net_iov *niov) { struct dmabuf_genpool_chunk_owner *owner = net_iov_owner(niov); return owner->base_dma_addr + ((dma_addr_t)net_iov_idx(niov) << PAGE_SHIFT); } void __net_devmem_dmabuf_binding_free(struct net_devmem_dmabuf_binding *binding) { size_t size, avail; gen_pool_for_each_chunk(binding->chunk_pool, net_devmem_dmabuf_free_chunk_owner, NULL); size = gen_pool_size(binding->chunk_pool); avail = gen_pool_avail(binding->chunk_pool); if (!WARN(size != avail, "can't destroy genpool. 
size=%zu, avail=%zu", size, avail)) gen_pool_destroy(binding->chunk_pool); dma_buf_unmap_attachment_unlocked(binding->attachment, binding->sgt, DMA_FROM_DEVICE); dma_buf_detach(binding->dmabuf, binding->attachment); dma_buf_put(binding->dmabuf); xa_destroy(&binding->bound_rxqs); kfree(binding); } struct net_iov * net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding) { struct dmabuf_genpool_chunk_owner *owner; unsigned long dma_addr; struct net_iov *niov; ssize_t offset; ssize_t index; dma_addr = gen_pool_alloc_owner(binding->chunk_pool, PAGE_SIZE, (void **)&owner); if (!dma_addr) return NULL; offset = dma_addr - owner->base_dma_addr; index = offset / PAGE_SIZE; niov = &owner->niovs[index]; niov->pp_magic = 0; niov->pp = NULL; atomic_long_set(&niov->pp_ref_count, 0); return niov; } void net_devmem_free_dmabuf(struct net_iov *niov) { struct net_devmem_dmabuf_binding *binding = net_iov_binding(niov); unsigned long dma_addr = net_devmem_get_dma_addr(niov); if (WARN_ON(!gen_pool_has_addr(binding->chunk_pool, dma_addr, PAGE_SIZE))) return; gen_pool_free(binding->chunk_pool, dma_addr, PAGE_SIZE); } void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding) { struct netdev_rx_queue *rxq; unsigned long xa_idx; unsigned int rxq_idx; if (binding->list.next) list_del(&binding->list); xa_for_each(&binding->bound_rxqs, xa_idx, rxq) { WARN_ON(rxq->mp_params.mp_priv != binding); rxq->mp_params.mp_priv = NULL; rxq_idx = get_netdev_rx_queue_index(rxq); WARN_ON(netdev_rx_queue_restart(binding->dev, rxq_idx)); } xa_erase(&net_devmem_dmabuf_bindings, binding->id); net_devmem_dmabuf_binding_put(binding); } int net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx, struct net_devmem_dmabuf_binding *binding, struct netlink_ext_ack *extack) { struct netdev_rx_queue *rxq; u32 xa_idx; int err; if (rxq_idx >= dev->real_num_rx_queues) { NL_SET_ERR_MSG(extack, "rx queue index out of range"); return -ERANGE; } rxq = __netif_get_rx_queue(dev, rxq_idx); if (rxq->mp_params.mp_priv) { NL_SET_ERR_MSG(extack, "designated queue already memory provider bound"); return -EEXIST; } #ifdef CONFIG_XDP_SOCKETS if (rxq->pool) { NL_SET_ERR_MSG(extack, "designated queue already in use by AF_XDP"); return -EBUSY; } #endif err = xa_alloc(&binding->bound_rxqs, &xa_idx, rxq, xa_limit_32b, GFP_KERNEL); if (err) return err; rxq->mp_params.mp_priv = binding; err = netdev_rx_queue_restart(dev, rxq_idx); if (err) goto err_xa_erase; return 0; err_xa_erase: rxq->mp_params.mp_priv = NULL; xa_erase(&binding->bound_rxqs, xa_idx); return err; } struct net_devmem_dmabuf_binding * net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd, struct netlink_ext_ack *extack) { struct net_devmem_dmabuf_binding *binding; static u32 id_alloc_next; struct scatterlist *sg; struct dma_buf *dmabuf; unsigned int sg_idx, i; unsigned long virtual; int err; dmabuf = dma_buf_get(dmabuf_fd); if (IS_ERR(dmabuf)) return ERR_CAST(dmabuf); binding = kzalloc_node(sizeof(*binding), GFP_KERNEL, dev_to_node(&dev->dev)); if (!binding) { err = -ENOMEM; goto err_put_dmabuf; } binding->dev = dev; err = xa_alloc_cyclic(&net_devmem_dmabuf_bindings, &binding->id, binding, xa_limit_32b, &id_alloc_next, GFP_KERNEL); if (err < 0) goto err_free_binding; xa_init_flags(&binding->bound_rxqs, XA_FLAGS_ALLOC); refcount_set(&binding->ref, 1); binding->dmabuf = dmabuf; binding->attachment = dma_buf_attach(binding->dmabuf, dev->dev.parent); if (IS_ERR(binding->attachment)) { err = PTR_ERR(binding->attachment); NL_SET_ERR_MSG(extack, "Failed to 
bind dmabuf to device"); goto err_free_id; } binding->sgt = dma_buf_map_attachment_unlocked(binding->attachment, DMA_FROM_DEVICE); if (IS_ERR(binding->sgt)) { err = PTR_ERR(binding->sgt); NL_SET_ERR_MSG(extack, "Failed to map dmabuf attachment"); goto err_detach; } /* For simplicity we expect to make PAGE_SIZE allocations, but the * binding can be much more flexible than that. We may be able to * allocate MTU sized chunks here. Leave that for future work... */ binding->chunk_pool = gen_pool_create(PAGE_SHIFT, dev_to_node(&dev->dev)); if (!binding->chunk_pool) { err = -ENOMEM; goto err_unmap; } virtual = 0; for_each_sgtable_dma_sg(binding->sgt, sg, sg_idx) { dma_addr_t dma_addr = sg_dma_address(sg); struct dmabuf_genpool_chunk_owner *owner; size_t len = sg_dma_len(sg); struct net_iov *niov; owner = kzalloc_node(sizeof(*owner), GFP_KERNEL, dev_to_node(&dev->dev)); if (!owner) { err = -ENOMEM; goto err_free_chunks; } owner->base_virtual = virtual; owner->base_dma_addr = dma_addr; owner->num_niovs = len / PAGE_SIZE; owner->binding = binding; err = gen_pool_add_owner(binding->chunk_pool, dma_addr, dma_addr, len, dev_to_node(&dev->dev), owner); if (err) { kfree(owner); err = -EINVAL; goto err_free_chunks; } owner->niovs = kvmalloc_array(owner->num_niovs, sizeof(*owner->niovs), GFP_KERNEL); if (!owner->niovs) { err = -ENOMEM; goto err_free_chunks; } for (i = 0; i < owner->num_niovs; i++) { niov = &owner->niovs[i]; niov->owner = owner; page_pool_set_dma_addr_netmem(net_iov_to_netmem(niov), net_devmem_get_dma_addr(niov)); } virtual += len; } return binding; err_free_chunks: gen_pool_for_each_chunk(binding->chunk_pool, net_devmem_dmabuf_free_chunk_owner, NULL); gen_pool_destroy(binding->chunk_pool); err_unmap: dma_buf_unmap_attachment_unlocked(binding->attachment, binding->sgt, DMA_FROM_DEVICE); err_detach: dma_buf_detach(dmabuf, binding->attachment); err_free_id: xa_erase(&net_devmem_dmabuf_bindings, binding->id); err_free_binding: kfree(binding); err_put_dmabuf: dma_buf_put(dmabuf); return ERR_PTR(err); } void dev_dmabuf_uninstall(struct net_device *dev) { struct net_devmem_dmabuf_binding *binding; struct netdev_rx_queue *rxq; unsigned long xa_idx; unsigned int i; for (i = 0; i < dev->real_num_rx_queues; i++) { binding = dev->_rx[i].mp_params.mp_priv; if (!binding) continue; xa_for_each(&binding->bound_rxqs, xa_idx, rxq) if (rxq == &dev->_rx[i]) { xa_erase(&binding->bound_rxqs, xa_idx); break; } } } /*** "Dmabuf devmem memory provider" ***/ int mp_dmabuf_devmem_init(struct page_pool *pool) { struct net_devmem_dmabuf_binding *binding = pool->mp_priv; if (!binding) return -EINVAL; if (!pool->dma_map) return -EOPNOTSUPP; if (pool->dma_sync) return -EOPNOTSUPP; if (pool->p.order != 0) return -E2BIG; net_devmem_dmabuf_binding_get(binding); return 0; } netmem_ref mp_dmabuf_devmem_alloc_netmems(struct page_pool *pool, gfp_t gfp) { struct net_devmem_dmabuf_binding *binding = pool->mp_priv; struct net_iov *niov; netmem_ref netmem; niov = net_devmem_alloc_dmabuf(binding); if (!niov) return 0; netmem = net_iov_to_netmem(niov); page_pool_set_pp_info(pool, netmem); pool->pages_state_hold_cnt++; trace_page_pool_state_hold(pool, netmem, pool->pages_state_hold_cnt); return netmem; } void mp_dmabuf_devmem_destroy(struct page_pool *pool) { struct net_devmem_dmabuf_binding *binding = pool->mp_priv; net_devmem_dmabuf_binding_put(binding); } bool mp_dmabuf_devmem_release_page(struct page_pool *pool, netmem_ref netmem) { long refcount = atomic_long_read(netmem_get_pp_ref_count_ref(netmem)); if 
(WARN_ON_ONCE(!netmem_is_net_iov(netmem))) return false; if (WARN_ON_ONCE(refcount != 1)) return false; page_pool_clear_pp_info(netmem); net_devmem_free_dmabuf(netmem_to_net_iov(netmem)); /* We don't want the page pool put_page()ing our net_iovs. */ return false; } |
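/*
 * A standalone sketch (plain userspace C, not kernel code) of the address
 * arithmetic used by the chunk owners above: a net_iov at index i inside a
 * contiguous DMA range lives at base_dma_addr + (i << PAGE_SHIFT), and the
 * allocator recovers the index by dividing the offset by PAGE_SIZE.  The
 * base address and the EX_PAGE_SHIFT value are assumptions for illustration.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define EX_PAGE_SHIFT 12
#define EX_PAGE_SIZE  (1UL << EX_PAGE_SHIFT)

/* forward mapping, mirrors net_devmem_get_dma_addr() */
static uint64_t niov_dma_addr(uint64_t base_dma_addr, size_t idx)
{
	return base_dma_addr + ((uint64_t)idx << EX_PAGE_SHIFT);
}

/* reverse mapping, mirrors the offset/index math in net_devmem_alloc_dmabuf() */
static size_t niov_index(uint64_t base_dma_addr, uint64_t dma_addr)
{
	return (dma_addr - base_dma_addr) / EX_PAGE_SIZE;
}

int main(void)
{
	uint64_t base = 0x80000000ULL;	/* made-up chunk base address */
	uint64_t addr = niov_dma_addr(base, 3);

	assert(niov_index(base, addr) == 3);
	printf("niov 3 -> dma addr 0x%llx\n", (unsigned long long)addr);
	return 0;
}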
5293 182 3144 5144 5145 5155 5147 401 399 15 3878 21 3878 3898 63 3875 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 | // SPDX-License-Identifier: GPL-2.0 /* sysfs entries for device PM */ #include <linux/device.h> #include <linux/kobject.h> #include <linux/string.h> #include <linux/export.h> #include <linux/pm_qos.h> #include <linux/pm_runtime.h> 
#include <linux/pm_wakeup.h> #include <linux/atomic.h> #include <linux/jiffies.h> #include "power.h" /* * control - Report/change current runtime PM setting of the device * * Runtime power management of a device can be blocked with the help of * this attribute. All devices have one of the following two values for * the power/control file: * * + "auto\n" to allow the device to be power managed at run time; * + "on\n" to prevent the device from being power managed at run time; * * The default for all devices is "auto", which means that devices may be * subject to automatic power management, depending on their drivers. * Changing this attribute to "on" prevents the driver from power managing * the device at run time. Doing that while the device is suspended causes * it to be woken up. * * wakeup - Report/change current wakeup option for device * * Some devices support "wakeup" events, which are hardware signals * used to activate devices from suspended or low power states. Such * devices have one of three values for the sysfs power/wakeup file: * * + "enabled\n" to issue the events; * + "disabled\n" not to do so; or * + "\n" for temporary or permanent inability to issue wakeup. * * (For example, unconfigured USB devices can't issue wakeups.) * * Familiar examples of devices that can issue wakeup events include * keyboards and mice (both PS2 and USB styles), power buttons, modems, * "Wake-On-LAN" Ethernet links, GPIO lines, and more. Some events * will wake the entire system from a suspend state; others may just * wake up the device (if the system as a whole is already active). * Some wakeup events use normal IRQ lines; other use special out * of band signaling. * * It is the responsibility of device drivers to enable (or disable) * wakeup signaling as part of changing device power states, respecting * the policy choices provided through the driver model. * * Devices may not be able to generate wakeup events from all power * states. Also, the events may be ignored in some configurations; * for example, they might need help from other devices that aren't * active, or which may have wakeup disabled. Some drivers rely on * wakeup events internally (unless they are disabled), keeping * their hardware in low power modes whenever they're unused. This * saves runtime power, without requiring system-wide sleep states. * * async - Report/change current async suspend setting for the device * * Asynchronous suspend and resume of the device during system-wide power * state transitions can be enabled by writing "enabled" to this file. * Analogously, if "disabled" is written to this file, the device will be * suspended and resumed synchronously. * * All devices have one of the following two values for power/async: * * + "enabled\n" to permit the asynchronous suspend/resume of the device; * + "disabled\n" to forbid it; * * NOTE: It generally is unsafe to permit the asynchronous suspend/resume * of a device unless it is certain that all of the PM dependencies of the * device are known to the PM core. However, for some devices this * attribute is set to "enabled" by bus type code or device drivers and in * that cases it should be safe to leave the default value. * * autosuspend_delay_ms - Report/change a device's autosuspend_delay value * * Some drivers don't want to carry out a runtime suspend as soon as a * device becomes idle; they want it always to remain idle for some period * of time before suspending it. 
This period is the autosuspend_delay * value (expressed in milliseconds) and it can be controlled by the user. * If the value is negative then the device will never be runtime * suspended. * * NOTE: The autosuspend_delay_ms attribute and the autosuspend_delay * value are used only if the driver calls pm_runtime_use_autosuspend(). * * wakeup_count - Report the number of wakeup events related to the device */ const char power_group_name[] = "power"; EXPORT_SYMBOL_GPL(power_group_name); static const char ctrl_auto[] = "auto"; static const char ctrl_on[] = "on"; static ssize_t control_show(struct device *dev, struct device_attribute *attr, char *buf) { return sysfs_emit(buf, "%s\n", dev->power.runtime_auto ? ctrl_auto : ctrl_on); } static ssize_t control_store(struct device * dev, struct device_attribute *attr, const char * buf, size_t n) { device_lock(dev); if (sysfs_streq(buf, ctrl_auto)) pm_runtime_allow(dev); else if (sysfs_streq(buf, ctrl_on)) pm_runtime_forbid(dev); else n = -EINVAL; device_unlock(dev); return n; } static DEVICE_ATTR_RW(control); static ssize_t runtime_active_time_show(struct device *dev, struct device_attribute *attr, char *buf) { u64 tmp = pm_runtime_active_time(dev); do_div(tmp, NSEC_PER_MSEC); return sysfs_emit(buf, "%llu\n", tmp); } static DEVICE_ATTR_RO(runtime_active_time); static ssize_t runtime_suspended_time_show(struct device *dev, struct device_attribute *attr, char *buf) { u64 tmp = pm_runtime_suspended_time(dev); do_div(tmp, NSEC_PER_MSEC); return sysfs_emit(buf, "%llu\n", tmp); } static DEVICE_ATTR_RO(runtime_suspended_time); static ssize_t runtime_status_show(struct device *dev, struct device_attribute *attr, char *buf) { const char *output; if (dev->power.runtime_error) { output = "error"; } else if (dev->power.disable_depth) { output = "unsupported"; } else { switch (dev->power.runtime_status) { case RPM_SUSPENDED: output = "suspended"; break; case RPM_SUSPENDING: output = "suspending"; break; case RPM_RESUMING: output = "resuming"; break; case RPM_ACTIVE: output = "active"; break; default: return -EIO; } } return sysfs_emit(buf, "%s\n", output); } static DEVICE_ATTR_RO(runtime_status); static ssize_t autosuspend_delay_ms_show(struct device *dev, struct device_attribute *attr, char *buf) { if (!dev->power.use_autosuspend) return -EIO; return sysfs_emit(buf, "%d\n", dev->power.autosuspend_delay); } static ssize_t autosuspend_delay_ms_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t n) { long delay; if (!dev->power.use_autosuspend) return -EIO; if (kstrtol(buf, 10, &delay) != 0 || delay != (int) delay) return -EINVAL; device_lock(dev); pm_runtime_set_autosuspend_delay(dev, delay); device_unlock(dev); return n; } static DEVICE_ATTR_RW(autosuspend_delay_ms); static ssize_t pm_qos_resume_latency_us_show(struct device *dev, struct device_attribute *attr, char *buf) { s32 value = dev_pm_qos_requested_resume_latency(dev); if (value == 0) return sysfs_emit(buf, "n/a\n"); if (value == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT) value = 0; return sysfs_emit(buf, "%d\n", value); } static ssize_t pm_qos_resume_latency_us_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t n) { s32 value; int ret; if (!kstrtos32(buf, 0, &value)) { /* * Prevent users from writing negative or "no constraint" values * directly. 
*/ if (value < 0 || value == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT) return -EINVAL; if (value == 0) value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT; } else if (sysfs_streq(buf, "n/a")) { value = 0; } else { return -EINVAL; } ret = dev_pm_qos_update_request(dev->power.qos->resume_latency_req, value); return ret < 0 ? ret : n; } static DEVICE_ATTR_RW(pm_qos_resume_latency_us); static ssize_t pm_qos_latency_tolerance_us_show(struct device *dev, struct device_attribute *attr, char *buf) { s32 value = dev_pm_qos_get_user_latency_tolerance(dev); if (value < 0) return sysfs_emit(buf, "%s\n", "auto"); if (value == PM_QOS_LATENCY_ANY) return sysfs_emit(buf, "%s\n", "any"); return sysfs_emit(buf, "%d\n", value); } static ssize_t pm_qos_latency_tolerance_us_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t n) { s32 value; int ret; if (kstrtos32(buf, 0, &value) == 0) { /* Users can't write negative values directly */ if (value < 0) return -EINVAL; } else { if (sysfs_streq(buf, "auto")) value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT; else if (sysfs_streq(buf, "any")) value = PM_QOS_LATENCY_ANY; else return -EINVAL; } ret = dev_pm_qos_update_user_latency_tolerance(dev, value); return ret < 0 ? ret : n; } static DEVICE_ATTR_RW(pm_qos_latency_tolerance_us); static ssize_t pm_qos_no_power_off_show(struct device *dev, struct device_attribute *attr, char *buf) { return sysfs_emit(buf, "%d\n", !!(dev_pm_qos_requested_flags(dev) & PM_QOS_FLAG_NO_POWER_OFF)); } static ssize_t pm_qos_no_power_off_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t n) { int ret; if (kstrtoint(buf, 0, &ret)) return -EINVAL; if (ret != 0 && ret != 1) return -EINVAL; ret = dev_pm_qos_update_flags(dev, PM_QOS_FLAG_NO_POWER_OFF, ret); return ret < 0 ? ret : n; } static DEVICE_ATTR_RW(pm_qos_no_power_off); #ifdef CONFIG_PM_SLEEP static const char _enabled[] = "enabled"; static const char _disabled[] = "disabled"; static ssize_t wakeup_show(struct device *dev, struct device_attribute *attr, char *buf) { return sysfs_emit(buf, "%s\n", device_can_wakeup(dev) ? (device_may_wakeup(dev) ? 
_enabled : _disabled) : ""); } static ssize_t wakeup_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t n) { if (!device_can_wakeup(dev)) return -EINVAL; if (sysfs_streq(buf, _enabled)) device_set_wakeup_enable(dev, 1); else if (sysfs_streq(buf, _disabled)) device_set_wakeup_enable(dev, 0); else return -EINVAL; return n; } static DEVICE_ATTR_RW(wakeup); static ssize_t wakeup_count_show(struct device *dev, struct device_attribute *attr, char *buf) { unsigned long count; bool enabled = false; spin_lock_irq(&dev->power.lock); if (dev->power.wakeup) { count = dev->power.wakeup->wakeup_count; enabled = true; } spin_unlock_irq(&dev->power.lock); if (!enabled) return sysfs_emit(buf, "\n"); return sysfs_emit(buf, "%lu\n", count); } static DEVICE_ATTR_RO(wakeup_count); static ssize_t wakeup_active_count_show(struct device *dev, struct device_attribute *attr, char *buf) { unsigned long count; bool enabled = false; spin_lock_irq(&dev->power.lock); if (dev->power.wakeup) { count = dev->power.wakeup->active_count; enabled = true; } spin_unlock_irq(&dev->power.lock); if (!enabled) return sysfs_emit(buf, "\n"); return sysfs_emit(buf, "%lu\n", count); } static DEVICE_ATTR_RO(wakeup_active_count); static ssize_t wakeup_abort_count_show(struct device *dev, struct device_attribute *attr, char *buf) { unsigned long count; bool enabled = false; spin_lock_irq(&dev->power.lock); if (dev->power.wakeup) { count = dev->power.wakeup->wakeup_count; enabled = true; } spin_unlock_irq(&dev->power.lock); if (!enabled) return sysfs_emit(buf, "\n"); return sysfs_emit(buf, "%lu\n", count); } static DEVICE_ATTR_RO(wakeup_abort_count); static ssize_t wakeup_expire_count_show(struct device *dev, struct device_attribute *attr, char *buf) { unsigned long count; bool enabled = false; spin_lock_irq(&dev->power.lock); if (dev->power.wakeup) { count = dev->power.wakeup->expire_count; enabled = true; } spin_unlock_irq(&dev->power.lock); if (!enabled) return sysfs_emit(buf, "\n"); return sysfs_emit(buf, "%lu\n", count); } static DEVICE_ATTR_RO(wakeup_expire_count); static ssize_t wakeup_active_show(struct device *dev, struct device_attribute *attr, char *buf) { unsigned int active; bool enabled = false; spin_lock_irq(&dev->power.lock); if (dev->power.wakeup) { active = dev->power.wakeup->active; enabled = true; } spin_unlock_irq(&dev->power.lock); if (!enabled) return sysfs_emit(buf, "\n"); return sysfs_emit(buf, "%u\n", active); } static DEVICE_ATTR_RO(wakeup_active); static ssize_t wakeup_total_time_ms_show(struct device *dev, struct device_attribute *attr, char *buf) { s64 msec; bool enabled = false; spin_lock_irq(&dev->power.lock); if (dev->power.wakeup) { msec = ktime_to_ms(dev->power.wakeup->total_time); enabled = true; } spin_unlock_irq(&dev->power.lock); if (!enabled) return sysfs_emit(buf, "\n"); return sysfs_emit(buf, "%lld\n", msec); } static DEVICE_ATTR_RO(wakeup_total_time_ms); static ssize_t wakeup_max_time_ms_show(struct device *dev, struct device_attribute *attr, char *buf) { s64 msec; bool enabled = false; spin_lock_irq(&dev->power.lock); if (dev->power.wakeup) { msec = ktime_to_ms(dev->power.wakeup->max_time); enabled = true; } spin_unlock_irq(&dev->power.lock); if (!enabled) return sysfs_emit(buf, "\n"); return sysfs_emit(buf, "%lld\n", msec); } static DEVICE_ATTR_RO(wakeup_max_time_ms); static ssize_t wakeup_last_time_ms_show(struct device *dev, struct device_attribute *attr, char *buf) { s64 msec; bool enabled = false; spin_lock_irq(&dev->power.lock); if (dev->power.wakeup) { msec = 
ktime_to_ms(dev->power.wakeup->last_time); enabled = true; } spin_unlock_irq(&dev->power.lock); if (!enabled) return sysfs_emit(buf, "\n"); return sysfs_emit(buf, "%lld\n", msec); } static DEVICE_ATTR_RO(wakeup_last_time_ms); #ifdef CONFIG_PM_AUTOSLEEP static ssize_t wakeup_prevent_sleep_time_ms_show(struct device *dev, struct device_attribute *attr, char *buf) { s64 msec; bool enabled = false; spin_lock_irq(&dev->power.lock); if (dev->power.wakeup) { msec = ktime_to_ms(dev->power.wakeup->prevent_sleep_time); enabled = true; } spin_unlock_irq(&dev->power.lock); if (!enabled) return sysfs_emit(buf, "\n"); return sysfs_emit(buf, "%lld\n", msec); } static DEVICE_ATTR_RO(wakeup_prevent_sleep_time_ms); #endif /* CONFIG_PM_AUTOSLEEP */ static inline int dpm_sysfs_wakeup_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid) { if (dev->power.wakeup && dev->power.wakeup->dev) return device_change_owner(dev->power.wakeup->dev, kuid, kgid); return 0; } #else /* CONFIG_PM_SLEEP */ static inline int dpm_sysfs_wakeup_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid) { return 0; } #endif #ifdef CONFIG_PM_ADVANCED_DEBUG static ssize_t runtime_usage_show(struct device *dev, struct device_attribute *attr, char *buf) { return sysfs_emit(buf, "%d\n", atomic_read(&dev->power.usage_count)); } static DEVICE_ATTR_RO(runtime_usage); static ssize_t runtime_active_kids_show(struct device *dev, struct device_attribute *attr, char *buf) { return sysfs_emit(buf, "%d\n", dev->power.ignore_children ? 0 : atomic_read(&dev->power.child_count)); } static DEVICE_ATTR_RO(runtime_active_kids); static ssize_t runtime_enabled_show(struct device *dev, struct device_attribute *attr, char *buf) { const char *output; if (dev->power.disable_depth && !dev->power.runtime_auto) output = "disabled & forbidden"; else if (dev->power.disable_depth) output = "disabled"; else if (!dev->power.runtime_auto) output = "forbidden"; else output = "enabled"; return sysfs_emit(buf, "%s\n", output); } static DEVICE_ATTR_RO(runtime_enabled); #ifdef CONFIG_PM_SLEEP static ssize_t async_show(struct device *dev, struct device_attribute *attr, char *buf) { return sysfs_emit(buf, "%s\n", device_async_suspend_enabled(dev) ? 
_enabled : _disabled); } static ssize_t async_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t n) { if (sysfs_streq(buf, _enabled)) device_enable_async_suspend(dev); else if (sysfs_streq(buf, _disabled)) device_disable_async_suspend(dev); else return -EINVAL; return n; } static DEVICE_ATTR_RW(async); #endif /* CONFIG_PM_SLEEP */ #endif /* CONFIG_PM_ADVANCED_DEBUG */ static struct attribute *power_attrs[] = { #ifdef CONFIG_PM_ADVANCED_DEBUG #ifdef CONFIG_PM_SLEEP &dev_attr_async.attr, #endif &dev_attr_runtime_status.attr, &dev_attr_runtime_usage.attr, &dev_attr_runtime_active_kids.attr, &dev_attr_runtime_enabled.attr, #endif /* CONFIG_PM_ADVANCED_DEBUG */ NULL, }; static const struct attribute_group pm_attr_group = { .name = power_group_name, .attrs = power_attrs, }; static struct attribute *wakeup_attrs[] = { #ifdef CONFIG_PM_SLEEP &dev_attr_wakeup.attr, &dev_attr_wakeup_count.attr, &dev_attr_wakeup_active_count.attr, &dev_attr_wakeup_abort_count.attr, &dev_attr_wakeup_expire_count.attr, &dev_attr_wakeup_active.attr, &dev_attr_wakeup_total_time_ms.attr, &dev_attr_wakeup_max_time_ms.attr, &dev_attr_wakeup_last_time_ms.attr, #ifdef CONFIG_PM_AUTOSLEEP &dev_attr_wakeup_prevent_sleep_time_ms.attr, #endif #endif NULL, }; static const struct attribute_group pm_wakeup_attr_group = { .name = power_group_name, .attrs = wakeup_attrs, }; static struct attribute *runtime_attrs[] = { #ifndef CONFIG_PM_ADVANCED_DEBUG &dev_attr_runtime_status.attr, #endif &dev_attr_control.attr, &dev_attr_runtime_suspended_time.attr, &dev_attr_runtime_active_time.attr, &dev_attr_autosuspend_delay_ms.attr, NULL, }; static const struct attribute_group pm_runtime_attr_group = { .name = power_group_name, .attrs = runtime_attrs, }; static struct attribute *pm_qos_resume_latency_attrs[] = { &dev_attr_pm_qos_resume_latency_us.attr, NULL, }; static const struct attribute_group pm_qos_resume_latency_attr_group = { .name = power_group_name, .attrs = pm_qos_resume_latency_attrs, }; static struct attribute *pm_qos_latency_tolerance_attrs[] = { &dev_attr_pm_qos_latency_tolerance_us.attr, NULL, }; static const struct attribute_group pm_qos_latency_tolerance_attr_group = { .name = power_group_name, .attrs = pm_qos_latency_tolerance_attrs, }; static struct attribute *pm_qos_flags_attrs[] = { &dev_attr_pm_qos_no_power_off.attr, NULL, }; static const struct attribute_group pm_qos_flags_attr_group = { .name = power_group_name, .attrs = pm_qos_flags_attrs, }; int dpm_sysfs_add(struct device *dev) { int rc; /* No need to create PM sysfs if explicitly disabled. 
*/ if (device_pm_not_required(dev)) return 0; rc = sysfs_create_group(&dev->kobj, &pm_attr_group); if (rc) return rc; if (!pm_runtime_has_no_callbacks(dev)) { rc = sysfs_merge_group(&dev->kobj, &pm_runtime_attr_group); if (rc) goto err_out; } if (device_can_wakeup(dev)) { rc = sysfs_merge_group(&dev->kobj, &pm_wakeup_attr_group); if (rc) goto err_runtime; } if (dev->power.set_latency_tolerance) { rc = sysfs_merge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group); if (rc) goto err_wakeup; } rc = pm_wakeup_source_sysfs_add(dev); if (rc) goto err_latency; return 0; err_latency: sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group); err_wakeup: sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group); err_runtime: sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group); err_out: sysfs_remove_group(&dev->kobj, &pm_attr_group); return rc; } int dpm_sysfs_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid) { int rc; if (device_pm_not_required(dev)) return 0; rc = sysfs_group_change_owner(&dev->kobj, &pm_attr_group, kuid, kgid); if (rc) return rc; if (!pm_runtime_has_no_callbacks(dev)) { rc = sysfs_group_change_owner( &dev->kobj, &pm_runtime_attr_group, kuid, kgid); if (rc) return rc; } if (device_can_wakeup(dev)) { rc = sysfs_group_change_owner(&dev->kobj, &pm_wakeup_attr_group, kuid, kgid); if (rc) return rc; rc = dpm_sysfs_wakeup_change_owner(dev, kuid, kgid); if (rc) return rc; } if (dev->power.set_latency_tolerance) { rc = sysfs_group_change_owner( &dev->kobj, &pm_qos_latency_tolerance_attr_group, kuid, kgid); if (rc) return rc; } return 0; } int wakeup_sysfs_add(struct device *dev) { int ret = sysfs_merge_group(&dev->kobj, &pm_wakeup_attr_group); if (!ret) kobject_uevent(&dev->kobj, KOBJ_CHANGE); return ret; } void wakeup_sysfs_remove(struct device *dev) { sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group); kobject_uevent(&dev->kobj, KOBJ_CHANGE); } int pm_qos_sysfs_add_resume_latency(struct device *dev) { return sysfs_merge_group(&dev->kobj, &pm_qos_resume_latency_attr_group); } void pm_qos_sysfs_remove_resume_latency(struct device *dev) { sysfs_unmerge_group(&dev->kobj, &pm_qos_resume_latency_attr_group); } int pm_qos_sysfs_add_flags(struct device *dev) { return sysfs_merge_group(&dev->kobj, &pm_qos_flags_attr_group); } void pm_qos_sysfs_remove_flags(struct device *dev) { sysfs_unmerge_group(&dev->kobj, &pm_qos_flags_attr_group); } int pm_qos_sysfs_add_latency_tolerance(struct device *dev) { return sysfs_merge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group); } void pm_qos_sysfs_remove_latency_tolerance(struct device *dev) { sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group); } void rpm_sysfs_remove(struct device *dev) { sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group); } void dpm_sysfs_remove(struct device *dev) { if (device_pm_not_required(dev)) return; sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group); dev_pm_qos_constraints_destroy(dev); rpm_sysfs_remove(dev); sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group); sysfs_remove_group(&dev->kobj, &pm_attr_group); }
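/*
 * Editor's note: the wakeup_*_show() routines above deliberately emit a
 * bare newline when no wakeup source is attached, so user space must
 * distinguish "empty" from "0". A minimal user-space sketch of reading
 * one of these attributes under that convention; the sysfs path is a
 * hypothetical example, not something defined by this file.
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *path = "/sys/devices/platform/example-dev/power/wakeup_count";
	char line[64];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (!fgets(line, sizeof(line), f)) {
		fclose(f);
		return 1;
	}
	fclose(f);

	if (line[0] == '\n')	/* sysfs_emit(buf, "\n"): no wakeup source */
		puts("wakeup not enabled for this device");
	else
		printf("wakeup_count = %lu\n", strtoul(line, NULL, 10));
	return 0;
}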
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_X86_DESC_H #define _ASM_X86_DESC_H #include <asm/desc_defs.h> #include <asm/ldt.h> #include <asm/mmu.h> #include <asm/fixmap.h> #include <asm/irq_vectors.h> #include <asm/cpu_entry_area.h> #include <linux/debug_locks.h> #include <linux/smp.h> #include <linux/percpu.h> static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *info) { desc->limit0 = info->limit & 0x0ffff; desc->base0 = (info->base_addr & 0x0000ffff); desc->base1 = (info->base_addr & 0x00ff0000) >> 16; desc->type = (info->read_exec_only ^ 1) << 1; desc->type |= info->contents << 2; /* Set the ACCESS bit so it can be mapped RO */ desc->type |= 1; desc->s = 1; desc->dpl = 0x3; desc->p = info->seg_not_present ^ 1; desc->limit1 = (info->limit & 0xf0000) >> 16; desc->avl = info->useable; desc->d = info->seg_32bit; desc->g = info->limit_in_pages; desc->base2 = (info->base_addr & 0xff000000) >> 24; /* * Don't allow setting of the lm bit. It would confuse * user_64bit_mode and would get overridden by sysret anyway.
*/ desc->l = 0; } struct gdt_page { struct desc_struct gdt[GDT_ENTRIES]; } __attribute__((aligned(PAGE_SIZE))); DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page); DECLARE_INIT_PER_CPU(gdt_page); /* Provide the original GDT */ static inline struct desc_struct *get_cpu_gdt_rw(unsigned int cpu) { return per_cpu(gdt_page, cpu).gdt; } /* Provide the current original GDT */ static inline struct desc_struct *get_current_gdt_rw(void) { return this_cpu_ptr(&gdt_page)->gdt; } /* Provide the fixmap address of the remapped GDT */ static inline struct desc_struct *get_cpu_gdt_ro(int cpu) { return (struct desc_struct *)&get_cpu_entry_area(cpu)->gdt; } /* Provide the current read-only GDT */ static inline struct desc_struct *get_current_gdt_ro(void) { return get_cpu_gdt_ro(smp_processor_id()); } /* Provide the physical address of the GDT page. */ static inline phys_addr_t get_cpu_gdt_paddr(unsigned int cpu) { return per_cpu_ptr_to_phys(get_cpu_gdt_rw(cpu)); } static inline void pack_gate(gate_desc *gate, unsigned type, unsigned long func, unsigned dpl, unsigned ist, unsigned seg) { gate->offset_low = (u16) func; gate->bits.p = 1; gate->bits.dpl = dpl; gate->bits.zero = 0; gate->bits.type = type; gate->offset_middle = (u16) (func >> 16); #ifdef CONFIG_X86_64 gate->segment = __KERNEL_CS; gate->bits.ist = ist; gate->reserved = 0; gate->offset_high = (u32) (func >> 32); #else gate->segment = seg; gate->bits.ist = 0; #endif } static inline int desc_empty(const void *ptr) { const u32 *desc = ptr; return !(desc[0] | desc[1]); } #ifdef CONFIG_PARAVIRT_XXL #include <asm/paravirt.h> #else #define load_TR_desc() native_load_tr_desc() #define load_gdt(dtr) native_load_gdt(dtr) #define load_idt(dtr) native_load_idt(dtr) #define load_tr(tr) asm volatile("ltr %0"::"m" (tr)) #define load_ldt(ldt) asm volatile("lldt %0"::"m" (ldt)) #define store_gdt(dtr) native_store_gdt(dtr) #define store_tr(tr) (tr = native_store_tr()) #define load_TLS(t, cpu) native_load_tls(t, cpu) #define set_ldt native_set_ldt #define write_ldt_entry(dt, entry, desc) native_write_ldt_entry(dt, entry, desc) #define write_gdt_entry(dt, entry, desc, type) native_write_gdt_entry(dt, entry, desc, type) #define write_idt_entry(dt, entry, g) native_write_idt_entry(dt, entry, g) static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries) { } static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries) { } #endif /* CONFIG_PARAVIRT_XXL */ #define store_ldt(ldt) asm("sldt %0" : "=m"(ldt)) static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate) { memcpy(&idt[entry], gate, sizeof(*gate)); } static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc) { memcpy(&ldt[entry], desc, 8); } static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int type) { unsigned int size; switch (type) { case DESC_TSS: size = sizeof(tss_desc); break; case DESC_LDT: size = sizeof(ldt_desc); break; default: size = sizeof(*gdt); break; } memcpy(&gdt[entry], desc, size); } static inline void set_tssldt_descriptor(void *d, unsigned long addr, unsigned type, unsigned size) { struct ldttss_desc *desc = d; memset(desc, 0, sizeof(*desc)); desc->limit0 = (u16) size; desc->base0 = (u16) addr; desc->base1 = (addr >> 16) & 0xFF; desc->type = type; desc->p = 1; desc->limit1 = (size >> 16) & 0xF; desc->base2 = (addr >> 24) & 0xFF; #ifdef CONFIG_X86_64 desc->base3 = (u32) (addr >> 32); #endif } static inline void __set_tss_desc(unsigned cpu, 
unsigned int entry, struct x86_hw_tss *addr) { struct desc_struct *d = get_cpu_gdt_rw(cpu); tss_desc tss; set_tssldt_descriptor(&tss, (unsigned long)addr, DESC_TSS, __KERNEL_TSS_LIMIT); write_gdt_entry(d, entry, &tss, DESC_TSS); } #define set_tss_desc(cpu, addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr) static inline void native_set_ldt(const void *addr, unsigned int entries) { if (likely(entries == 0)) asm volatile("lldt %w0"::"q" (0)); else { unsigned cpu = smp_processor_id(); ldt_desc ldt; set_tssldt_descriptor(&ldt, (unsigned long)addr, DESC_LDT, entries * LDT_ENTRY_SIZE - 1); write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_LDT, &ldt, DESC_LDT); asm volatile("lldt %w0"::"q" (GDT_ENTRY_LDT*8)); } } static inline void native_load_gdt(const struct desc_ptr *dtr) { asm volatile("lgdt %0"::"m" (*dtr)); } static __always_inline void native_load_idt(const struct desc_ptr *dtr) { asm volatile("lidt %0"::"m" (*dtr)); } static inline void native_store_gdt(struct desc_ptr *dtr) { asm volatile("sgdt %0":"=m" (*dtr)); } static inline void store_idt(struct desc_ptr *dtr) { asm volatile("sidt %0":"=m" (*dtr)); } static inline void native_gdt_invalidate(void) { const struct desc_ptr invalid_gdt = { .address = 0, .size = 0 }; native_load_gdt(&invalid_gdt); } static inline void native_idt_invalidate(void) { const struct desc_ptr invalid_idt = { .address = 0, .size = 0 }; native_load_idt(&invalid_idt); } /* * The LTR instruction marks the TSS GDT entry as busy. On 64-bit, the GDT is * a read-only remapping. To prevent a page fault, the GDT is switched to the * original writeable version when needed. */ #ifdef CONFIG_X86_64 static inline void native_load_tr_desc(void) { struct desc_ptr gdt; int cpu = raw_smp_processor_id(); bool restore = 0; struct desc_struct *fixmap_gdt; native_store_gdt(&gdt); fixmap_gdt = get_cpu_gdt_ro(cpu); /* * If the current GDT is the read-only fixmap, swap to the original * writeable version. Swap back at the end. */ if (gdt.address == (unsigned long)fixmap_gdt) { load_direct_gdt(cpu); restore = 1; } asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8)); if (restore) load_fixmap_gdt(cpu); } #else static inline void native_load_tr_desc(void) { asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8)); } #endif static inline unsigned long native_store_tr(void) { unsigned long tr; asm volatile("str %0":"=r" (tr)); return tr; } static inline void native_load_tls(struct thread_struct *t, unsigned int cpu) { struct desc_struct *gdt = get_cpu_gdt_rw(cpu); unsigned int i; for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++) gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i]; } DECLARE_PER_CPU(bool, __tss_limit_invalid); static inline void force_reload_TR(void) { struct desc_struct *d = get_current_gdt_rw(); tss_desc tss; memcpy(&tss, &d[GDT_ENTRY_TSS], sizeof(tss_desc)); /* * LTR requires an available TSS, and the TSS is currently * busy. Make it be available so that LTR will work. */ tss.type = DESC_TSS; write_gdt_entry(d, GDT_ENTRY_TSS, &tss, DESC_TSS); load_TR_desc(); this_cpu_write(__tss_limit_invalid, false); } /* * Call this if you need the TSS limit to be correct, which should be the case * if and only if you have TIF_IO_BITMAP set or you're switching to a task * with TIF_IO_BITMAP set. */ static inline void refresh_tss_limit(void) { DEBUG_LOCKS_WARN_ON(preemptible()); if (unlikely(this_cpu_read(__tss_limit_invalid))) force_reload_TR(); } /* * If you do something evil that corrupts the cached TSS limit (I'm looking * at you, VMX exits), call this function. 
* * The optimization here is that the TSS limit only matters for Linux if the * IO bitmap is in use. If the TSS limit gets forced to its minimum value, * everything works except that IO bitmap will be ignored and all CPL 3 IO * instructions will #GP, which is exactly what we want for normal tasks. */ static inline void invalidate_tss_limit(void) { DEBUG_LOCKS_WARN_ON(preemptible()); if (unlikely(test_thread_flag(TIF_IO_BITMAP))) force_reload_TR(); else this_cpu_write(__tss_limit_invalid, true); } /* This intentionally ignores lm, since 32-bit apps don't have that field. */ #define LDT_empty(info) \ ((info)->base_addr == 0 && \ (info)->limit == 0 && \ (info)->contents == 0 && \ (info)->read_exec_only == 1 && \ (info)->seg_32bit == 0 && \ (info)->limit_in_pages == 0 && \ (info)->seg_not_present == 1 && \ (info)->useable == 0) /* Lots of programs expect an all-zero user_desc to mean "no segment at all". */ static inline bool LDT_zero(const struct user_desc *info) { return (info->base_addr == 0 && info->limit == 0 && info->contents == 0 && info->read_exec_only == 0 && info->seg_32bit == 0 && info->limit_in_pages == 0 && info->seg_not_present == 0 && info->useable == 0); } static inline void clear_LDT(void) { set_ldt(NULL, 0); } static inline unsigned long get_desc_base(const struct desc_struct *desc) { return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24)); } static inline void set_desc_base(struct desc_struct *desc, unsigned long base) { desc->base0 = base & 0xffff; desc->base1 = (base >> 16) & 0xff; desc->base2 = (base >> 24) & 0xff; } static inline unsigned long get_desc_limit(const struct desc_struct *desc) { return desc->limit0 | (desc->limit1 << 16); } static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit) { desc->limit0 = limit & 0xffff; desc->limit1 = (limit >> 16) & 0xf; } static inline void init_idt_data(struct idt_data *data, unsigned int n, const void *addr) { BUG_ON(n > 0xFF); memset(data, 0, sizeof(*data)); data->vector = n; data->addr = addr; data->segment = __KERNEL_CS; data->bits.type = GATE_INTERRUPT; data->bits.p = 1; } static inline void idt_init_desc(gate_desc *gate, const struct idt_data *d) { unsigned long addr = (unsigned long) d->addr; gate->offset_low = (u16) addr; gate->segment = (u16) d->segment; gate->bits = d->bits; gate->offset_middle = (u16) (addr >> 16); #ifdef CONFIG_X86_64 gate->offset_high = (u32) (addr >> 32); gate->reserved = 0; #endif } extern unsigned long system_vectors[]; extern void load_current_idt(void); extern void idt_setup_early_handler(void); extern void idt_setup_early_traps(void); extern void idt_setup_traps(void); extern void idt_setup_apic_and_irq_gates(void); extern bool idt_is_f00f_address(unsigned long address); #ifdef CONFIG_X86_64 extern void idt_setup_early_pf(void); #else static inline void idt_setup_early_pf(void) { } #endif extern void idt_invalidate(void); #endif /* _ASM_X86_DESC_H */
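/*
 * Editor's note: a self-contained user-space sketch of the base-address
 * packing performed by set_desc_base()/get_desc_base() above. The 32-bit
 * base is split 16 + 8 + 8 across base0/base1/base2; the mirror struct
 * below is illustrative only, not the kernel's desc_struct.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct demo_desc {
	uint16_t base0;		/* base bits 15..0 */
	uint8_t base1;		/* base bits 23..16 */
	uint8_t base2;		/* base bits 31..24 */
};

static void demo_set_base(struct demo_desc *d, uint32_t base)
{
	d->base0 = base & 0xffff;
	d->base1 = (base >> 16) & 0xff;
	d->base2 = (base >> 24) & 0xff;
}

static uint32_t demo_get_base(const struct demo_desc *d)
{
	return d->base0 | ((uint32_t)d->base1 << 16) |
	       ((uint32_t)d->base2 << 24);
}

int main(void)
{
	struct demo_desc d;

	demo_set_base(&d, 0x12345678);
	assert(demo_get_base(&d) == 0x12345678);
	printf("base0=%04x base1=%02x base2=%02x\n", d.base0, d.base1, d.base2);
	return 0;
}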
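/*
 * Editor's note: the same split-field idea applies to IDT gates. On
 * 64-bit, idt_init_desc() above scatters the handler address across
 * offset_low/offset_middle/offset_high; a hedged user-space sketch of
 * the split and its reassembly (demo_gate is illustrative only).
 */
#include <assert.h>
#include <stdint.h>

struct demo_gate {
	uint16_t offset_low;	/* addr bits 15..0 */
	uint16_t offset_middle;	/* addr bits 31..16 */
	uint32_t offset_high;	/* addr bits 63..32 */
};

int main(void)
{
	uint64_t addr = 0xffffffff81000010ULL;	/* made-up handler address */
	struct demo_gate g = {
		.offset_low = (uint16_t)addr,
		.offset_middle = (uint16_t)(addr >> 16),
		.offset_high = (uint32_t)(addr >> 32),
	};
	uint64_t back = g.offset_low |
			((uint64_t)g.offset_middle << 16) |
			((uint64_t)g.offset_high << 32);

	assert(back == addr);
	return 0;
}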
// SPDX-License-Identifier: GPL-2.0-only /* * 6pack.c This module implements the 6pack protocol for kernel-based * devices like TTY. It interfaces between a raw TTY and the * kernel's AX.25 protocol layers. * * Authors: Andreas Könsgen <ajk@comnets.uni-bremen.de> * Ralf Baechle DL5RB <ralf@linux-mips.org> * * Quite a lot of stuff "stolen" by Joerg Reuter from slip.c, written by * * Laurence Culhane, <loz@holmes.demon.co.uk> * Fred N. van Kempen, <waltje@uwalt.nl.mugnet.org> */ #include <linux/module.h> #include <linux/uaccess.h> #include <linux/bitops.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/in.h> #include <linux/tty.h> #include <linux/errno.h> #include <linux/netdevice.h> #include <linux/timer.h> #include <linux/slab.h> #include <net/ax25.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/rtnetlink.h> #include <linux/spinlock.h> #include <linux/if_arp.h> #include <linux/init.h> #include <linux/ip.h> #include <linux/tcp.h> #include <linux/semaphore.h> #include <linux/refcount.h> /* sixpack priority commands */ #define SIXP_SEOF 0x40 /* start and end of a 6pack frame */ #define SIXP_TX_URUN 0x48 /* transmit overrun */ #define SIXP_RX_ORUN 0x50 /* receive overrun */ #define SIXP_RX_BUF_OVL 0x58 /* receive buffer overflow */ #define SIXP_CHKSUM 0xFF /* valid checksum of a 6pack frame */ /* masks to get certain bits out of the status bytes sent by the TNC */ #define SIXP_CMD_MASK 0xC0 #define SIXP_CHN_MASK 0x07 #define SIXP_PRIO_CMD_MASK 0x80 #define SIXP_STD_CMD_MASK 0x40 #define SIXP_PRIO_DATA_MASK 0x38 #define SIXP_TX_MASK 0x20 #define SIXP_RX_MASK 0x10 #define SIXP_RX_DCD_MASK 0x18 #define SIXP_LEDS_ON 0x78 #define SIXP_LEDS_OFF 0x60 #define SIXP_CON 0x08 #define SIXP_STA 0x10 #define SIXP_FOUND_TNC 0xe9 #define SIXP_CON_ON 0x68 #define SIXP_DCD_MASK 0x08 #define SIXP_DAMA_OFF 0 /* default level 2 parameters */ #define SIXP_TXDELAY 25 /* 250 ms */ #define SIXP_PERSIST 50 /* in 256ths */ #define SIXP_SLOTTIME 10 /* 100 ms */ #define SIXP_INIT_RESYNC_TIMEOUT (3*HZ/2) /* 1.5 s */ #define SIXP_RESYNC_TIMEOUT (5*HZ) /* 5 s */ /* 6pack configuration. */ #define SIXP_NRUNIT 31 /* MAX number of 6pack channels */ #define SIXP_MTU 256 /* Default MTU */ enum sixpack_flags { SIXPF_ERROR, /* Parity, etc. error */ }; struct sixpack { /* Various fields. */ struct tty_struct *tty; /* ptr to TTY structure */ struct net_device *dev; /* easy for intr handling */ /* These are pointers to the malloc()ed frame buffers.
*/ int rcount; /* received chars counter */ unsigned char *xbuff; /* transmitter buffer */ unsigned char *xhead; /* next byte to XMIT */ int xleft; /* bytes left in XMIT queue */ u8 raw_buf[4]; u8 cooked_buf[400]; unsigned int rx_count; unsigned int rx_count_cooked; spinlock_t rxlock; unsigned long flags; /* Flag values/ mode etc */ unsigned char mode; /* 6pack mode */ /* 6pack stuff */ unsigned char tx_delay; unsigned char persistence; unsigned char slottime; unsigned char duplex; unsigned char led_state; u8 status; u8 status1; unsigned char status2; unsigned char tx_enable; unsigned char tnc_state; struct timer_list tx_t; struct timer_list resync_t; refcount_t refcnt; struct completion dead; spinlock_t lock; }; #define AX25_6PACK_HEADER_LEN 0 static void sixpack_decode(struct sixpack *, const u8 *, size_t); static int encode_sixpack(unsigned char *, unsigned char *, int, unsigned char); /* * Perform the persistence/slottime algorithm for CSMA access. If the * persistence check was successful, write the data to the serial driver. * Note that in case of DAMA operation, the data is not sent here. */ static void sp_xmit_on_air(struct timer_list *t) { struct sixpack *sp = from_timer(sp, t, tx_t); int actual, when = sp->slottime; static unsigned char random; random = random * 17 + 41; if (((sp->status1 & SIXP_DCD_MASK) == 0) && (random < sp->persistence)) { sp->led_state = 0x70; sp->tty->ops->write(sp->tty, &sp->led_state, 1); sp->tx_enable = 1; actual = sp->tty->ops->write(sp->tty, sp->xbuff, sp->status2); sp->xleft -= actual; sp->xhead += actual; sp->led_state = 0x60; sp->tty->ops->write(sp->tty, &sp->led_state, 1); sp->status2 = 0; } else mod_timer(&sp->tx_t, jiffies + ((when + 1) * HZ) / 100); } /* ----> 6pack timer interrupt handler and friends. <---- */ /* Encapsulate one AX.25 frame and stuff into a TTY queue. */ static void sp_encaps(struct sixpack *sp, unsigned char *icp, int len) { unsigned char *msg, *p = icp; int actual, count; if (len > AX25_MTU + 73) { msg = "oversized transmit packet!"; goto out_drop; } if (p[0] > 5) { msg = "invalid KISS command"; goto out_drop; } if ((p[0] != 0) && (len > 2)) { msg = "KISS control packet too long"; goto out_drop; } if ((p[0] == 0) && (len < 15)) { msg = "bad AX.25 packet to transmit"; goto out_drop; } count = encode_sixpack(p, sp->xbuff, len, sp->tx_delay); set_bit(TTY_DO_WRITE_WAKEUP, &sp->tty->flags); switch (p[0]) { case 1: sp->tx_delay = p[1]; return; case 2: sp->persistence = p[1]; return; case 3: sp->slottime = p[1]; return; case 4: /* ignored */ return; case 5: sp->duplex = p[1]; return; } if (p[0] != 0) return; /* * In case of fullduplex or DAMA operation, we don't take care about the * state of the DCD or of any timers, as the determination of the * correct time to send is the job of the AX.25 layer. We send * immediately after data has arrived. */ if (sp->duplex == 1) { sp->led_state = 0x70; sp->tty->ops->write(sp->tty, &sp->led_state, 1); sp->tx_enable = 1; actual = sp->tty->ops->write(sp->tty, sp->xbuff, count); sp->xleft = count - actual; sp->xhead = sp->xbuff + actual; sp->led_state = 0x60; sp->tty->ops->write(sp->tty, &sp->led_state, 1); } else { sp->xleft = count; sp->xhead = sp->xbuff; sp->status2 = count; sp_xmit_on_air(&sp->tx_t); } return; out_drop: sp->dev->stats.tx_dropped++; netif_start_queue(sp->dev); if (net_ratelimit()) printk(KERN_DEBUG "%s: %s - dropped.\n", sp->dev->name, msg); } /* Encapsulate an IP datagram and kick it into a TTY queue. 
*/ static netdev_tx_t sp_xmit(struct sk_buff *skb, struct net_device *dev) { struct sixpack *sp = netdev_priv(dev); if (skb->protocol == htons(ETH_P_IP)) return ax25_ip_xmit(skb); spin_lock_bh(&sp->lock); /* We were not busy, so we are now... :-) */ netif_stop_queue(dev); dev->stats.tx_bytes += skb->len; sp_encaps(sp, skb->data, skb->len); spin_unlock_bh(&sp->lock); dev_kfree_skb(skb); return NETDEV_TX_OK; } static int sp_open_dev(struct net_device *dev) { struct sixpack *sp = netdev_priv(dev); if (sp->tty == NULL) return -ENODEV; return 0; } /* Close the low-level part of the 6pack channel. */ static int sp_close(struct net_device *dev) { struct sixpack *sp = netdev_priv(dev); spin_lock_bh(&sp->lock); if (sp->tty) { /* TTY discipline is running. */ clear_bit(TTY_DO_WRITE_WAKEUP, &sp->tty->flags); } netif_stop_queue(dev); spin_unlock_bh(&sp->lock); return 0; } static int sp_set_mac_address(struct net_device *dev, void *addr) { struct sockaddr_ax25 *sa = addr; netif_tx_lock_bh(dev); netif_addr_lock(dev); __dev_addr_set(dev, &sa->sax25_call, AX25_ADDR_LEN); netif_addr_unlock(dev); netif_tx_unlock_bh(dev); return 0; } static const struct net_device_ops sp_netdev_ops = { .ndo_open = sp_open_dev, .ndo_stop = sp_close, .ndo_start_xmit = sp_xmit, .ndo_set_mac_address = sp_set_mac_address, }; static void sp_setup(struct net_device *dev) { /* Finish setting up the DEVICE info. */ dev->netdev_ops = &sp_netdev_ops; dev->mtu = SIXP_MTU; dev->hard_header_len = AX25_MAX_HEADER_LEN; dev->header_ops = &ax25_header_ops; dev->addr_len = AX25_ADDR_LEN; dev->type = ARPHRD_AX25; dev->tx_queue_len = 10; /* Only activated in AX.25 mode */ memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN); dev_addr_set(dev, (u8 *)&ax25_defaddr); dev->flags = 0; } /* Send one completely decapsulated IP datagram to the IP layer. */ /* * This is the routine that sends the received data to the kernel AX.25. * 'cmd' is the KISS command. For AX.25 data, it is zero. */ static void sp_bump(struct sixpack *sp, char cmd) { struct sk_buff *skb; int count; u8 *ptr; count = sp->rcount + 1; sp->dev->stats.rx_bytes += count; if ((skb = dev_alloc_skb(count + 1)) == NULL) goto out_mem; ptr = skb_put(skb, count + 1); *ptr++ = cmd; /* KISS command */ memcpy(ptr, sp->cooked_buf + 1, count); skb->protocol = ax25_type_trans(skb, sp->dev); netif_rx(skb); sp->dev->stats.rx_packets++; return; out_mem: sp->dev->stats.rx_dropped++; } /* ----------------------------------------------------------------------- */ /* * We have a potential race on dereferencing tty->disc_data, because the tty * layer provides no locking at all - thus one cpu could be running * sixpack_receive_buf while another calls sixpack_close, which zeroes * tty->disc_data and frees the memory that sixpack_receive_buf is using. The * best way to fix this is to use a rwlock in the tty struct, but for now we * use a single global rwlock for all ttys in ppp line discipline. */ static DEFINE_RWLOCK(disc_data_lock); static struct sixpack *sp_get(struct tty_struct *tty) { struct sixpack *sp; read_lock(&disc_data_lock); sp = tty->disc_data; if (sp) refcount_inc(&sp->refcnt); read_unlock(&disc_data_lock); return sp; } static void sp_put(struct sixpack *sp) { if (refcount_dec_and_test(&sp->refcnt)) complete(&sp->dead); } /* * Called by the TTY driver when there's room for more data. If we have * more packets to send, we send them here. 
*/ static void sixpack_write_wakeup(struct tty_struct *tty) { struct sixpack *sp = sp_get(tty); int actual; if (!sp) return; if (sp->xleft <= 0) { /* Now serial buffer is almost free & we can start * transmission of another packet */ sp->dev->stats.tx_packets++; clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); sp->tx_enable = 0; netif_wake_queue(sp->dev); goto out; } if (sp->tx_enable) { actual = tty->ops->write(tty, sp->xhead, sp->xleft); sp->xleft -= actual; sp->xhead += actual; } out: sp_put(sp); } /* ----------------------------------------------------------------------- */ /* * Handle the 'receiver data ready' interrupt. * This function is called by the tty module in the kernel when * a block of 6pack data has been received, which can now be decapsulated * and sent on to some IP layer for further processing. */ static void sixpack_receive_buf(struct tty_struct *tty, const u8 *cp, const u8 *fp, size_t count) { struct sixpack *sp; size_t count1; if (!count) return; sp = sp_get(tty); if (!sp) return; /* Read the characters out of the buffer */ count1 = count; while (count) { count--; if (fp && *fp++) { if (!test_and_set_bit(SIXPF_ERROR, &sp->flags)) sp->dev->stats.rx_errors++; continue; } } sixpack_decode(sp, cp, count1); sp_put(sp); tty_unthrottle(tty); } /* * Try to resync the TNC. Called by the resync timer defined in * decode_prio_command */ #define TNC_UNINITIALIZED 0 #define TNC_UNSYNC_STARTUP 1 #define TNC_UNSYNCED 2 #define TNC_IN_SYNC 3 static void __tnc_set_sync_state(struct sixpack *sp, int new_tnc_state) { char *msg; switch (new_tnc_state) { default: /* gcc oh piece-o-crap ... */ case TNC_UNSYNC_STARTUP: msg = "Synchronizing with TNC"; break; case TNC_UNSYNCED: msg = "Lost synchronization with TNC"; break; case TNC_IN_SYNC: msg = "Found TNC"; break; } sp->tnc_state = new_tnc_state; printk(KERN_INFO "%s: %s\n", sp->dev->name, msg); } static inline void tnc_set_sync_state(struct sixpack *sp, int new_tnc_state) { int old_tnc_state = sp->tnc_state; if (old_tnc_state != new_tnc_state) __tnc_set_sync_state(sp, new_tnc_state); } static void resync_tnc(struct timer_list *t) { struct sixpack *sp = from_timer(sp, t, resync_t); static char resync_cmd = 0xe8; /* clear any data that might have been received */ sp->rx_count = 0; sp->rx_count_cooked = 0; /* reset state machine */ sp->status = 1; sp->status1 = 1; sp->status2 = 0; /* resync the TNC */ sp->led_state = 0x60; sp->tty->ops->write(sp->tty, &sp->led_state, 1); sp->tty->ops->write(sp->tty, &resync_cmd, 1); /* Start resync timer again -- the TNC might be still absent */ mod_timer(&sp->resync_t, jiffies + SIXP_RESYNC_TIMEOUT); } static inline int tnc_init(struct sixpack *sp) { unsigned char inbyte = 0xe8; tnc_set_sync_state(sp, TNC_UNSYNC_STARTUP); sp->tty->ops->write(sp->tty, &inbyte, 1); mod_timer(&sp->resync_t, jiffies + SIXP_RESYNC_TIMEOUT); return 0; } /* * Open the high-level part of the 6pack channel. * This function is called by the TTY module when the * 6pack line discipline is called for. Because we are * sure the tty line exists, we only have to link it to * a free 6pack channel...
*/ static int sixpack_open(struct tty_struct *tty) { char *xbuff = NULL; struct net_device *dev; struct sixpack *sp; unsigned long len; int err = 0; if (!capable(CAP_NET_ADMIN)) return -EPERM; if (tty->ops->write == NULL) return -EOPNOTSUPP; dev = alloc_netdev(sizeof(struct sixpack), "sp%d", NET_NAME_UNKNOWN, sp_setup); if (!dev) { err = -ENOMEM; goto out; } sp = netdev_priv(dev); sp->dev = dev; spin_lock_init(&sp->lock); spin_lock_init(&sp->rxlock); refcount_set(&sp->refcnt, 1); init_completion(&sp->dead); /* !!! length of the buffers. MTU is IP MTU, not PACLEN! */ len = dev->mtu * 2; xbuff = kmalloc(len + 4, GFP_KERNEL); if (xbuff == NULL) { err = -ENOBUFS; goto out_free; } spin_lock_bh(&sp->lock); sp->tty = tty; sp->xbuff = xbuff; sp->rcount = 0; sp->rx_count = 0; sp->rx_count_cooked = 0; sp->xleft = 0; sp->flags = 0; /* Clear ESCAPE & ERROR flags */ sp->duplex = 0; sp->tx_delay = SIXP_TXDELAY; sp->persistence = SIXP_PERSIST; sp->slottime = SIXP_SLOTTIME; sp->led_state = 0x60; sp->status = 1; sp->status1 = 1; sp->status2 = 0; sp->tx_enable = 0; netif_start_queue(dev); timer_setup(&sp->tx_t, sp_xmit_on_air, 0); timer_setup(&sp->resync_t, resync_tnc, 0); spin_unlock_bh(&sp->lock); /* Done. We have linked the TTY line to a channel. */ tty->disc_data = sp; tty->receive_room = 65536; /* Now we're ready to register. */ err = register_netdev(dev); if (err) goto out_free; tnc_init(sp); return 0; out_free: kfree(xbuff); free_netdev(dev); out: return err; } /* * Close down a 6pack channel. * This means flushing out any pending queues, and then restoring the * TTY line discipline to what it was before it got hooked to 6pack * (which usually is TTY again). */ static void sixpack_close(struct tty_struct *tty) { struct sixpack *sp; write_lock_irq(&disc_data_lock); sp = tty->disc_data; tty->disc_data = NULL; write_unlock_irq(&disc_data_lock); if (!sp) return; /* * We have now ensured that nobody can start using ap from now on, but * we have to wait for all existing users to finish. */ if (!refcount_dec_and_test(&sp->refcnt)) wait_for_completion(&sp->dead); /* We must stop the queue to avoid potentially scribbling * on the free buffers. The sp->dead completion is not sufficient * to protect us from sp->xbuff access. */ netif_stop_queue(sp->dev); unregister_netdev(sp->dev); del_timer_sync(&sp->tx_t); del_timer_sync(&sp->resync_t); /* Free all 6pack frame buffers after unreg. */ kfree(sp->xbuff); free_netdev(sp->dev); } /* Perform I/O control on an active 6pack channel. */ static int sixpack_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) { struct sixpack *sp = sp_get(tty); struct net_device *dev; unsigned int tmp, err; if (!sp) return -ENXIO; dev = sp->dev; switch(cmd) { case SIOCGIFNAME: err = copy_to_user((void __user *) arg, dev->name, strlen(dev->name) + 1) ? 
-EFAULT : 0; break; case SIOCGIFENCAP: err = put_user(0, (int __user *) arg); break; case SIOCSIFENCAP: if (get_user(tmp, (int __user *) arg)) { err = -EFAULT; break; } sp->mode = tmp; dev->addr_len = AX25_ADDR_LEN; dev->hard_header_len = AX25_KISS_HEADER_LEN + AX25_MAX_HEADER_LEN + 3; dev->type = ARPHRD_AX25; err = 0; break; case SIOCSIFHWADDR: { char addr[AX25_ADDR_LEN]; if (copy_from_user(&addr, (void __user *)arg, AX25_ADDR_LEN)) { err = -EFAULT; break; } netif_tx_lock_bh(dev); __dev_addr_set(dev, &addr, AX25_ADDR_LEN); netif_tx_unlock_bh(dev); err = 0; break; } default: err = tty_mode_ioctl(tty, cmd, arg); } sp_put(sp); return err; } static struct tty_ldisc_ops sp_ldisc = { .owner = THIS_MODULE, .num = N_6PACK, .name = "6pack", .open = sixpack_open, .close = sixpack_close, .ioctl = sixpack_ioctl, .receive_buf = sixpack_receive_buf, .write_wakeup = sixpack_write_wakeup, }; /* Initialize 6pack control device -- register 6pack line discipline */ static int __init sixpack_init_driver(void) { int status; /* Register the provided line protocol discipline */ status = tty_register_ldisc(&sp_ldisc); if (status) pr_err("6pack: can't register line discipline (err = %d)\n", status); return status; } static void __exit sixpack_exit_driver(void) { tty_unregister_ldisc(&sp_ldisc); } /* encode an AX.25 packet into 6pack */ static int encode_sixpack(unsigned char *tx_buf, unsigned char *tx_buf_raw, int length, unsigned char tx_delay) { int count = 0; unsigned char checksum = 0, buf[400]; int raw_count = 0; tx_buf_raw[raw_count++] = SIXP_PRIO_CMD_MASK | SIXP_TX_MASK; tx_buf_raw[raw_count++] = SIXP_SEOF; buf[0] = tx_delay; for (count = 1; count < length; count++) buf[count] = tx_buf[count]; for (count = 0; count < length; count++) checksum += buf[count]; buf[length] = (unsigned char) 0xff - checksum; for (count = 0; count <= length; count++) { if ((count % 3) == 0) { tx_buf_raw[raw_count++] = (buf[count] & 0x3f); tx_buf_raw[raw_count] = ((buf[count] >> 2) & 0x30); } else if ((count % 3) == 1) { tx_buf_raw[raw_count++] |= (buf[count] & 0x0f); tx_buf_raw[raw_count] = ((buf[count] >> 2) & 0x3c); } else { tx_buf_raw[raw_count++] |= (buf[count] & 0x03); tx_buf_raw[raw_count++] = (buf[count] >> 2); } } if ((length % 3) != 2) raw_count++; tx_buf_raw[raw_count++] = SIXP_SEOF; return raw_count; } /* decode 4 sixpack-encoded bytes into 3 data bytes */ static void decode_data(struct sixpack *sp, u8 inbyte) { u8 *buf; if (sp->rx_count != 3) { sp->raw_buf[sp->rx_count++] = inbyte; return; } if (sp->rx_count_cooked + 2 >= sizeof(sp->cooked_buf)) { pr_err("6pack: cooked buffer overrun, data loss\n"); sp->rx_count = 0; return; } buf = sp->raw_buf; sp->cooked_buf[sp->rx_count_cooked++] = buf[0] | ((buf[1] << 2) & 0xc0); sp->cooked_buf[sp->rx_count_cooked++] = (buf[1] & 0x0f) | ((buf[2] << 2) & 0xf0); sp->cooked_buf[sp->rx_count_cooked++] = (buf[2] & 0x03) | (inbyte << 2); sp->rx_count = 0; } /* identify and execute a 6pack priority command byte */ static void decode_prio_command(struct sixpack *sp, u8 cmd) { ssize_t actual; if ((cmd & SIXP_PRIO_DATA_MASK) != 0) { /* idle ? */ /* RX and DCD flags can only be set in the same prio command, if the DCD flag has been set without the RX flag in the previous prio command. If DCD has not been set before, something in the transmission has gone wrong. In this case, RX and DCD are cleared in order to prevent the decode_data routine from reading further data that might be corrupt. 
*/ if (((sp->status & SIXP_DCD_MASK) == 0) && ((cmd & SIXP_RX_DCD_MASK) == SIXP_RX_DCD_MASK)) { if (sp->status != 1) printk(KERN_DEBUG "6pack: protocol violation\n"); else sp->status = 0; cmd &= ~SIXP_RX_DCD_MASK; } sp->status = cmd & SIXP_PRIO_DATA_MASK; } else { /* output watchdog char if idle */ if ((sp->status2 != 0) && (sp->duplex == 1)) { sp->led_state = 0x70; sp->tty->ops->write(sp->tty, &sp->led_state, 1); sp->tx_enable = 1; actual = sp->tty->ops->write(sp->tty, sp->xbuff, sp->status2); sp->xleft -= actual; sp->xhead += actual; sp->led_state = 0x60; sp->status2 = 0; } } /* needed to trigger the TNC watchdog */ sp->tty->ops->write(sp->tty, &sp->led_state, 1); /* if the state byte has been received, the TNC is present, so the resync timer can be reset. */ if (sp->tnc_state == TNC_IN_SYNC) mod_timer(&sp->resync_t, jiffies + SIXP_INIT_RESYNC_TIMEOUT); sp->status1 = cmd & SIXP_PRIO_DATA_MASK; } /* identify and execute a standard 6pack command byte */ static void decode_std_command(struct sixpack *sp, u8 cmd) { u8 checksum = 0, rest = 0; short i; switch (cmd & SIXP_CMD_MASK) { /* normal command */ case SIXP_SEOF: if ((sp->rx_count == 0) && (sp->rx_count_cooked == 0)) { if ((sp->status & SIXP_RX_DCD_MASK) == SIXP_RX_DCD_MASK) { sp->led_state = 0x68; sp->tty->ops->write(sp->tty, &sp->led_state, 1); } } else { sp->led_state = 0x60; /* fill trailing bytes with zeroes */ sp->tty->ops->write(sp->tty, &sp->led_state, 1); spin_lock_bh(&sp->rxlock); rest = sp->rx_count; if (rest != 0) for (i = rest; i <= 3; i++) decode_data(sp, 0); if (rest == 2) sp->rx_count_cooked -= 2; else if (rest == 3) sp->rx_count_cooked -= 1; for (i = 0; i < sp->rx_count_cooked; i++) checksum += sp->cooked_buf[i]; if (checksum != SIXP_CHKSUM) { printk(KERN_DEBUG "6pack: bad checksum %2.2x\n", checksum); } else { sp->rcount = sp->rx_count_cooked-2; sp_bump(sp, 0); } sp->rx_count_cooked = 0; spin_unlock_bh(&sp->rxlock); } break; case SIXP_TX_URUN: printk(KERN_DEBUG "6pack: TX underrun\n"); break; case SIXP_RX_ORUN: printk(KERN_DEBUG "6pack: RX overrun\n"); break; case SIXP_RX_BUF_OVL: printk(KERN_DEBUG "6pack: RX buffer overflow\n"); } } /* decode a 6pack packet */ static void sixpack_decode(struct sixpack *sp, const u8 *pre_rbuff, size_t count) { size_t count1; u8 inbyte; for (count1 = 0; count1 < count; count1++) { inbyte = pre_rbuff[count1]; if (inbyte == SIXP_FOUND_TNC) { tnc_set_sync_state(sp, TNC_IN_SYNC); del_timer(&sp->resync_t); } if ((inbyte & SIXP_PRIO_CMD_MASK) != 0) decode_prio_command(sp, inbyte); else if ((inbyte & SIXP_STD_CMD_MASK) != 0) decode_std_command(sp, inbyte); else if ((sp->status & SIXP_RX_DCD_MASK) == SIXP_RX_DCD_MASK) { spin_lock_bh(&sp->rxlock); decode_data(sp, inbyte); spin_unlock_bh(&sp->rxlock); } } } MODULE_AUTHOR("Ralf Baechle DO1GRB <ralf@linux-mips.org>"); MODULE_DESCRIPTION("6pack driver for AX.25"); MODULE_LICENSE("GPL"); MODULE_ALIAS_LDISC(N_6PACK); module_init(sixpack_init_driver); module_exit(sixpack_exit_driver);
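/*
 * Editor's note: a self-contained round-trip sketch of the payload
 * recoding done by encode_sixpack() and decode_data() above -- every
 * three raw bytes become four six-bit bytes on the wire. The helper
 * names are illustrative, not part of the driver.
 */
#include <assert.h>
#include <stdint.h>

/* pack d[0..2] into four 6-bit bytes, mirroring encode_sixpack() */
static void demo_encode3(const uint8_t d[3], uint8_t out[4])
{
	out[0] = d[0] & 0x3f;
	out[1] = ((d[0] >> 2) & 0x30) | (d[1] & 0x0f);
	out[2] = ((d[1] >> 2) & 0x3c) | (d[2] & 0x03);
	out[3] = d[2] >> 2;
}

/* reassemble three bytes, mirroring decode_data() */
static void demo_decode4(const uint8_t r[4], uint8_t d[3])
{
	d[0] = r[0] | ((r[1] << 2) & 0xc0);
	d[1] = (r[1] & 0x0f) | ((r[2] << 2) & 0xf0);
	d[2] = (r[2] & 0x03) | (r[3] << 2);
}

int main(void)
{
	uint8_t in[3] = { 0xde, 0xad, 0xbf }, wire[4], out[3];

	demo_encode3(in, wire);
	demo_decode4(wire, out);
	assert(out[0] == in[0] && out[1] == in[1] && out[2] == in[2]);
	return 0;
}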
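/*
 * Editor's note: encode_sixpack() stores 0xff minus the running byte sum
 * as the trailing checksum, and decode_std_command() accepts a frame only
 * when the byte sum over payload plus checksum equals SIXP_CHKSUM (0xff).
 * A tiny sketch of that invariant; the payload bytes are arbitrary.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

int main(void)
{
	uint8_t frame[5] = { 0x25, 0x00, 0x41, 0x58 };	/* 4 payload bytes */
	uint8_t sum = 0;
	size_t i;

	for (i = 0; i < 4; i++)		/* sender: sum the payload */
		sum += frame[i];
	frame[4] = 0xff - sum;		/* as encode_sixpack() does */

	sum = 0;			/* receiver: sum payload + checksum */
	for (i = 0; i < 5; i++)
		sum += frame[i];
	assert(sum == 0xff);		/* SIXP_CHKSUM */
	return 0;
}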
// SPDX-License-Identifier: GPL-2.0+ /* * Transport & Protocol Driver for In-System Design, Inc.
ISD200 ASIC * * Current development and maintenance: * (C) 2001-2002 Björn Stenberg (bjorn@haxx.se) * * Developed with the assistance of: * (C) 2002 Alan Stern <stern@rowland.org> * * Initial work: * (C) 2000 In-System Design, Inc. (support@in-system.com) * * The ISD200 ASIC does not natively support ATA devices. The chip * does implement an interface, the ATA Command Block (ATACB) which provides * a means of passing ATA commands and ATA register accesses to a device. * * History: * * 2002-10-19: Removed the specialized transfer routines. * (Alan Stern <stern@rowland.harvard.edu>) * 2001-02-24: Removed lots of duplicate code and simplified the structure. * (bjorn@haxx.se) * 2002-01-16: Fixed endianness bug so it works on the ppc arch. * (Luc Saillard <luc@saillard.org>) * 2002-01-17: All bitfields removed. * (bjorn@haxx.se) */ /* Include files */ #include <linux/jiffies.h> #include <linux/errno.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/ata.h> #include <linux/hdreg.h> #include <linux/scatterlist.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include "usb.h" #include "transport.h" #include "protocol.h" #include "debug.h" #include "scsiglue.h" #define DRV_NAME "ums-isd200" MODULE_DESCRIPTION("Driver for In-System Design, Inc. ISD200 ASIC"); MODULE_AUTHOR("Björn Stenberg <bjorn@haxx.se>"); MODULE_LICENSE("GPL"); MODULE_IMPORT_NS("USB_STORAGE"); static int isd200_Initialization(struct us_data *us); /* * The table of devices */ #define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \ vendorName, productName, useProtocol, useTransport, \ initFunction, flags) \ { USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \ .driver_info = (flags) } static const struct usb_device_id isd200_usb_ids[] = { # include "unusual_isd200.h" { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, isd200_usb_ids); #undef UNUSUAL_DEV /* * The flags table */ #define UNUSUAL_DEV(idVendor, idProduct, bcdDeviceMin, bcdDeviceMax, \ vendor_name, product_name, use_protocol, use_transport, \ init_function, Flags) \ { \ .vendorName = vendor_name, \ .productName = product_name, \ .useProtocol = use_protocol, \ .useTransport = use_transport, \ .initFunction = init_function, \ } static const struct us_unusual_dev isd200_unusual_dev_list[] = { # include "unusual_isd200.h" { } /* Terminating entry */ }; #undef UNUSUAL_DEV /* Timeout defines (in Seconds) */ #define ISD200_ENUM_BSY_TIMEOUT 35 #define ISD200_ENUM_DETECT_TIMEOUT 30 #define ISD200_DEFAULT_TIMEOUT 30 /* device flags */ #define DF_ATA_DEVICE 0x0001 #define DF_MEDIA_STATUS_ENABLED 0x0002 #define DF_REMOVABLE_MEDIA 0x0004 /* capability bit definitions */ #define CAPABILITY_DMA 0x01 #define CAPABILITY_LBA 0x02 /* command_setX bit definitions */ #define COMMANDSET_REMOVABLE 0x02 #define COMMANDSET_MEDIA_STATUS 0x10 /* ATA Vendor Specific defines */ #define ATA_ADDRESS_DEVHEAD_STD 0xa0 #define ATA_ADDRESS_DEVHEAD_LBA_MODE 0x40 #define ATA_ADDRESS_DEVHEAD_SLAVE 0x10 /* Action Select bits */ #define ACTION_SELECT_0 0x01 #define ACTION_SELECT_1 0x02 #define ACTION_SELECT_2 0x04 #define ACTION_SELECT_3 0x08 #define ACTION_SELECT_4 0x10 #define ACTION_SELECT_5 0x20 #define ACTION_SELECT_6 0x40 #define ACTION_SELECT_7 0x80 /* Register Select bits */ #define REG_ALTERNATE_STATUS 0x01 #define REG_DEVICE_CONTROL 0x01 #define REG_ERROR 0x02 #define REG_FEATURES 0x02 #define REG_SECTOR_COUNT 0x04 #define REG_SECTOR_NUMBER 0x08 #define REG_CYLINDER_LOW 0x10 #define REG_CYLINDER_HIGH 0x20 
#define REG_DEVICE_HEAD 0x40 #define REG_STATUS 0x80 #define REG_COMMAND 0x80 /* ATA registers offset definitions */ #define ATA_REG_ERROR_OFFSET 1 #define ATA_REG_LCYL_OFFSET 4 #define ATA_REG_HCYL_OFFSET 5 #define ATA_REG_STATUS_OFFSET 7 /* ATA error definitions not in <linux/hdreg.h> */ #define ATA_ERROR_MEDIA_CHANGE 0x20 /* ATA command definitions not in <linux/hdreg.h> */ #define ATA_COMMAND_GET_MEDIA_STATUS 0xDA #define ATA_COMMAND_MEDIA_EJECT 0xED /* ATA drive control definitions */ #define ATA_DC_DISABLE_INTERRUPTS 0x02 #define ATA_DC_RESET_CONTROLLER 0x04 #define ATA_DC_REENABLE_CONTROLLER 0x00 /* * General purpose return codes */ #define ISD200_ERROR -1 #define ISD200_GOOD 0 /* * Transport return codes */ #define ISD200_TRANSPORT_GOOD 0 /* Transport good, command good */ #define ISD200_TRANSPORT_FAILED 1 /* Transport good, command failed */ #define ISD200_TRANSPORT_ERROR 2 /* Transport bad (i.e. device dead) */ /* driver action codes */ #define ACTION_READ_STATUS 0 #define ACTION_RESET 1 #define ACTION_REENABLE 2 #define ACTION_SOFT_RESET 3 #define ACTION_ENUM 4 #define ACTION_IDENTIFY 5 /* * ata_cdb struct */ union ata_cdb { struct { unsigned char SignatureByte0; unsigned char SignatureByte1; unsigned char ActionSelect; unsigned char RegisterSelect; unsigned char TransferBlockSize; unsigned char WriteData3F6; unsigned char WriteData1F1; unsigned char WriteData1F2; unsigned char WriteData1F3; unsigned char WriteData1F4; unsigned char WriteData1F5; unsigned char WriteData1F6; unsigned char WriteData1F7; unsigned char Reserved[3]; } generic; struct { unsigned char SignatureByte0; unsigned char SignatureByte1; unsigned char ActionSelect; unsigned char RegisterSelect; unsigned char TransferBlockSize; unsigned char AlternateStatusByte; unsigned char ErrorByte; unsigned char SectorCountByte; unsigned char SectorNumberByte; unsigned char CylinderLowByte; unsigned char CylinderHighByte; unsigned char DeviceHeadByte; unsigned char StatusByte; unsigned char Reserved[3]; } read; struct { unsigned char SignatureByte0; unsigned char SignatureByte1; unsigned char ActionSelect; unsigned char RegisterSelect; unsigned char TransferBlockSize; unsigned char DeviceControlByte; unsigned char FeaturesByte; unsigned char SectorCountByte; unsigned char SectorNumberByte; unsigned char CylinderLowByte; unsigned char CylinderHighByte; unsigned char DeviceHeadByte; unsigned char CommandByte; unsigned char Reserved[3]; } write; }; /* * Inquiry data structure. This is the data returned from the target * after it receives an inquiry. * * This structure may be extended by the number of bytes specified * in the field AdditionalLength. The defined size constant only * includes fields through ProductRevisionLevel. 
*/ /* * DeviceType field */ #define DIRECT_ACCESS_DEVICE 0x00 /* disks */ #define DEVICE_REMOVABLE 0x80 struct inquiry_data { unsigned char DeviceType; unsigned char DeviceTypeModifier; unsigned char Versions; unsigned char Format; unsigned char AdditionalLength; unsigned char Reserved[2]; unsigned char Capability; unsigned char VendorId[8]; unsigned char ProductId[16]; unsigned char ProductRevisionLevel[4]; unsigned char VendorSpecific[20]; unsigned char Reserved3[40]; } __attribute__ ((packed)); /* * INQUIRY data buffer size */ #define INQUIRYDATABUFFERSIZE 36 /* * ISD200 CONFIG data struct */ #define ATACFG_TIMING 0x0f #define ATACFG_ATAPI_RESET 0x10 #define ATACFG_MASTER 0x20 #define ATACFG_BLOCKSIZE 0xa0 #define ATACFGE_LAST_LUN 0x07 #define ATACFGE_DESC_OVERRIDE 0x08 #define ATACFGE_STATE_SUSPEND 0x10 #define ATACFGE_SKIP_BOOT 0x20 #define ATACFGE_CONF_DESC2 0x40 #define ATACFGE_INIT_STATUS 0x80 #define CFG_CAPABILITY_SRST 0x01 struct isd200_config { unsigned char EventNotification; unsigned char ExternalClock; unsigned char ATAInitTimeout; unsigned char ATAConfig; unsigned char ATAMajorCommand; unsigned char ATAMinorCommand; unsigned char ATAExtraConfig; unsigned char Capability; }__attribute__ ((packed)); /* * ISD200 driver information struct */ struct isd200_info { struct inquiry_data InquiryData; u16 *id; struct isd200_config ConfigData; unsigned char *RegsBuf; unsigned char ATARegs[8]; unsigned char DeviceHead; unsigned char DeviceFlags; /* maximum number of LUNs supported */ unsigned char MaxLUNs; unsigned char cmnd[MAX_COMMAND_SIZE]; struct scsi_cmnd srb; struct scatterlist sg; }; /* * Read Capacity Data - returned in Big Endian format */ struct read_capacity_data { __be32 LogicalBlockAddress; __be32 BytesPerBlock; }; /* * Read Block Limits Data - returned in Big Endian format * This structure returns the maximum and minimum block * size for a TAPE device. */ struct read_block_limits { unsigned char Reserved; unsigned char BlockMaximumSize[3]; unsigned char BlockMinimumSize[2]; }; /* * Sense Data Format */ #define SENSE_ERRCODE 0x7f #define SENSE_ERRCODE_VALID 0x80 #define SENSE_FLAG_SENSE_KEY 0x0f #define SENSE_FLAG_BAD_LENGTH 0x20 #define SENSE_FLAG_END_OF_MEDIA 0x40 #define SENSE_FLAG_FILE_MARK 0x80 struct sense_data { unsigned char ErrorCode; unsigned char SegmentNumber; unsigned char Flags; unsigned char Information[4]; unsigned char AdditionalSenseLength; unsigned char CommandSpecificInformation[4]; unsigned char AdditionalSenseCode; unsigned char AdditionalSenseCodeQualifier; unsigned char FieldReplaceableUnitCode; unsigned char SenseKeySpecific[3]; } __attribute__ ((packed)); /* * Default request sense buffer size */ #define SENSE_BUFFER_SIZE 18 /*********************************************************************** * Helper routines ***********************************************************************/ /************************************************************************** * isd200_build_sense * * Builds an artificial sense buffer to report the results of a * failed command. 
* * RETURNS: * void */ static void isd200_build_sense(struct us_data *us, struct scsi_cmnd *srb) { struct isd200_info *info = (struct isd200_info *)us->extra; struct sense_data *buf = (struct sense_data *) &srb->sense_buffer[0]; unsigned char error = info->ATARegs[ATA_REG_ERROR_OFFSET]; if(error & ATA_ERROR_MEDIA_CHANGE) { buf->ErrorCode = 0x70 | SENSE_ERRCODE_VALID; buf->AdditionalSenseLength = 0xb; buf->Flags = UNIT_ATTENTION; buf->AdditionalSenseCode = 0; buf->AdditionalSenseCodeQualifier = 0; } else if (error & ATA_MCR) { buf->ErrorCode = 0x70 | SENSE_ERRCODE_VALID; buf->AdditionalSenseLength = 0xb; buf->Flags = UNIT_ATTENTION; buf->AdditionalSenseCode = 0; buf->AdditionalSenseCodeQualifier = 0; } else if (error & ATA_TRK0NF) { buf->ErrorCode = 0x70 | SENSE_ERRCODE_VALID; buf->AdditionalSenseLength = 0xb; buf->Flags = NOT_READY; buf->AdditionalSenseCode = 0; buf->AdditionalSenseCodeQualifier = 0; } else if (error & ATA_UNC) { buf->ErrorCode = 0x70 | SENSE_ERRCODE_VALID; buf->AdditionalSenseLength = 0xb; buf->Flags = DATA_PROTECT; buf->AdditionalSenseCode = 0; buf->AdditionalSenseCodeQualifier = 0; } else { buf->ErrorCode = 0; buf->AdditionalSenseLength = 0; buf->Flags = 0; buf->AdditionalSenseCode = 0; buf->AdditionalSenseCodeQualifier = 0; } } /*********************************************************************** * Transport routines ***********************************************************************/ /************************************************************************** * isd200_set_srb(), isd200_srb_set_bufflen() * * Two helpers to facilitate in initialization of scsi_cmnd structure * Will need to change when struct scsi_cmnd changes */ static void isd200_set_srb(struct isd200_info *info, enum dma_data_direction dir, void* buff, unsigned bufflen) { struct scsi_cmnd *srb = &info->srb; if (buff) sg_init_one(&info->sg, buff, bufflen); srb->sc_data_direction = dir; srb->sdb.table.sgl = buff ? &info->sg : NULL; srb->sdb.length = bufflen; srb->sdb.table.nents = buff ? 
1 : 0; } static void isd200_srb_set_bufflen(struct scsi_cmnd *srb, unsigned bufflen) { srb->sdb.length = bufflen; } /************************************************************************** * isd200_action * * Routine for sending commands to the isd200 * * RETURNS: * ISD status code */ static int isd200_action( struct us_data *us, int action, void* pointer, int value ) { union ata_cdb ata; /* static to prevent this large struct being placed on the valuable stack */ static struct scsi_device srb_dev; struct isd200_info *info = (struct isd200_info *)us->extra; struct scsi_cmnd *srb = &info->srb; int status; memset(&ata, 0, sizeof(ata)); memcpy(srb->cmnd, info->cmnd, MAX_COMMAND_SIZE); srb->device = &srb_dev; ata.generic.SignatureByte0 = info->ConfigData.ATAMajorCommand; ata.generic.SignatureByte1 = info->ConfigData.ATAMinorCommand; ata.generic.TransferBlockSize = 1; switch ( action ) { case ACTION_READ_STATUS: usb_stor_dbg(us, " isd200_action(READ_STATUS)\n"); ata.generic.ActionSelect = ACTION_SELECT_0|ACTION_SELECT_2; ata.generic.RegisterSelect = REG_CYLINDER_LOW | REG_CYLINDER_HIGH | REG_STATUS | REG_ERROR; isd200_set_srb(info, DMA_FROM_DEVICE, pointer, value); break; case ACTION_ENUM: usb_stor_dbg(us, " isd200_action(ENUM,0x%02x)\n", value); ata.generic.ActionSelect = ACTION_SELECT_1|ACTION_SELECT_2| ACTION_SELECT_3|ACTION_SELECT_4| ACTION_SELECT_5; ata.generic.RegisterSelect = REG_DEVICE_HEAD; ata.write.DeviceHeadByte = value; isd200_set_srb(info, DMA_NONE, NULL, 0); break; case ACTION_RESET: usb_stor_dbg(us, " isd200_action(RESET)\n"); ata.generic.ActionSelect = ACTION_SELECT_1|ACTION_SELECT_2| ACTION_SELECT_3|ACTION_SELECT_4; ata.generic.RegisterSelect = REG_DEVICE_CONTROL; ata.write.DeviceControlByte = ATA_DC_RESET_CONTROLLER; isd200_set_srb(info, DMA_NONE, NULL, 0); break; case ACTION_REENABLE: usb_stor_dbg(us, " isd200_action(REENABLE)\n"); ata.generic.ActionSelect = ACTION_SELECT_1|ACTION_SELECT_2| ACTION_SELECT_3|ACTION_SELECT_4; ata.generic.RegisterSelect = REG_DEVICE_CONTROL; ata.write.DeviceControlByte = ATA_DC_REENABLE_CONTROLLER; isd200_set_srb(info, DMA_NONE, NULL, 0); break; case ACTION_SOFT_RESET: usb_stor_dbg(us, " isd200_action(SOFT_RESET)\n"); ata.generic.ActionSelect = ACTION_SELECT_1|ACTION_SELECT_5; ata.generic.RegisterSelect = REG_DEVICE_HEAD | REG_COMMAND; ata.write.DeviceHeadByte = info->DeviceHead; ata.write.CommandByte = ATA_CMD_DEV_RESET; isd200_set_srb(info, DMA_NONE, NULL, 0); break; case ACTION_IDENTIFY: usb_stor_dbg(us, " isd200_action(IDENTIFY)\n"); ata.generic.RegisterSelect = REG_COMMAND; ata.write.CommandByte = ATA_CMD_ID_ATA; isd200_set_srb(info, DMA_FROM_DEVICE, info->id, ATA_ID_WORDS * 2); break; default: usb_stor_dbg(us, "Error: Undefined action %d\n", action); return ISD200_ERROR; } memcpy(srb->cmnd, &ata, sizeof(ata.generic)); srb->cmd_len = sizeof(ata.generic); status = usb_stor_Bulk_transport(srb, us); if (status == USB_STOR_TRANSPORT_GOOD) status = ISD200_GOOD; else { usb_stor_dbg(us, " isd200_action(0x%02x) error: %d\n", action, status); status = ISD200_ERROR; /* need to reset device here */ } return status; } /************************************************************************** * isd200_read_regs * * Read ATA Registers * * RETURNS: * ISD status code */ static int isd200_read_regs( struct us_data *us ) { struct isd200_info *info = (struct isd200_info *)us->extra; int retStatus = ISD200_GOOD; int transferStatus; usb_stor_dbg(us, "Entering isd200_IssueATAReadRegs\n"); transferStatus = isd200_action( us, ACTION_READ_STATUS, info->RegsBuf, 
sizeof(info->ATARegs) ); if (transferStatus != ISD200_TRANSPORT_GOOD) { usb_stor_dbg(us, " Error reading ATA registers\n"); retStatus = ISD200_ERROR; } else { memcpy(info->ATARegs, info->RegsBuf, sizeof(info->ATARegs)); usb_stor_dbg(us, " Got ATA Register[ATA_REG_ERROR_OFFSET] = 0x%x\n", info->ATARegs[ATA_REG_ERROR_OFFSET]); } return retStatus; } /************************************************************************** * Invoke the transport and basic error-handling/recovery methods * * This is used by the protocol layers to actually send the message to * the device and receive the response. */ static void isd200_invoke_transport( struct us_data *us, struct scsi_cmnd *srb, union ata_cdb *ataCdb ) { int need_auto_sense = 0; int transferStatus; int result; /* send the command to the transport layer */ memcpy(srb->cmnd, ataCdb, sizeof(ataCdb->generic)); srb->cmd_len = sizeof(ataCdb->generic); transferStatus = usb_stor_Bulk_transport(srb, us); /* * if the command gets aborted by the higher layers, we need to * short-circuit all other processing */ if (test_bit(US_FLIDX_TIMED_OUT, &us->dflags)) { usb_stor_dbg(us, "-- command was aborted\n"); goto Handle_Abort; } switch (transferStatus) { case USB_STOR_TRANSPORT_GOOD: /* Indicate a good result */ srb->result = SAM_STAT_GOOD; break; case USB_STOR_TRANSPORT_NO_SENSE: usb_stor_dbg(us, "-- transport indicates protocol failure\n"); srb->result = SAM_STAT_CHECK_CONDITION; return; case USB_STOR_TRANSPORT_FAILED: usb_stor_dbg(us, "-- transport indicates command failure\n"); need_auto_sense = 1; break; case USB_STOR_TRANSPORT_ERROR: usb_stor_dbg(us, "-- transport indicates transport error\n"); srb->result = DID_ERROR << 16; /* Need reset here */ return; default: usb_stor_dbg(us, "-- transport indicates unknown error\n"); srb->result = DID_ERROR << 16; /* Need reset here */ return; } if ((scsi_get_resid(srb) > 0) && !((srb->cmnd[0] == REQUEST_SENSE) || (srb->cmnd[0] == INQUIRY) || (srb->cmnd[0] == MODE_SENSE) || (srb->cmnd[0] == LOG_SENSE) || (srb->cmnd[0] == MODE_SENSE_10))) { usb_stor_dbg(us, "-- unexpectedly short transfer\n"); need_auto_sense = 1; } if (need_auto_sense) { result = isd200_read_regs(us); if (test_bit(US_FLIDX_TIMED_OUT, &us->dflags)) { usb_stor_dbg(us, "-- auto-sense aborted\n"); goto Handle_Abort; } if (result == ISD200_GOOD) { isd200_build_sense(us, srb); srb->result = SAM_STAT_CHECK_CONDITION; /* If things are really okay, then let's show that */ if ((srb->sense_buffer[2] & 0xf) == 0x0) srb->result = SAM_STAT_GOOD; } else { srb->result = DID_ERROR << 16; /* Need reset here */ } } /* * Regardless of auto-sense, if we _know_ we have an error * condition, show that in the result code */ if (transferStatus == USB_STOR_TRANSPORT_FAILED) srb->result = SAM_STAT_CHECK_CONDITION; return; /* * abort processing: the bulk-only transport requires a reset * following an abort */ Handle_Abort: srb->result = DID_ABORT << 16; /* permit the reset transfer to take place */ clear_bit(US_FLIDX_ABORTING, &us->dflags); /* Need reset here */ } #ifdef CONFIG_USB_STORAGE_DEBUG static void isd200_log_config(struct us_data *us, struct isd200_info *info) { usb_stor_dbg(us, " Event Notification: 0x%x\n", info->ConfigData.EventNotification); usb_stor_dbg(us, " External Clock: 0x%x\n", info->ConfigData.ExternalClock); usb_stor_dbg(us, " ATA Init Timeout: 0x%x\n", info->ConfigData.ATAInitTimeout); usb_stor_dbg(us, " ATAPI Command Block Size: 0x%x\n", (info->ConfigData.ATAConfig & ATACFG_BLOCKSIZE) >> 6); usb_stor_dbg(us, " Master/Slave Selection: 0x%x\n", 
info->ConfigData.ATAConfig & ATACFG_MASTER); usb_stor_dbg(us, " ATAPI Reset: 0x%x\n", info->ConfigData.ATAConfig & ATACFG_ATAPI_RESET); usb_stor_dbg(us, " ATA Timing: 0x%x\n", info->ConfigData.ATAConfig & ATACFG_TIMING); usb_stor_dbg(us, " ATA Major Command: 0x%x\n", info->ConfigData.ATAMajorCommand); usb_stor_dbg(us, " ATA Minor Command: 0x%x\n", info->ConfigData.ATAMinorCommand); usb_stor_dbg(us, " Init Status: 0x%x\n", info->ConfigData.ATAExtraConfig & ATACFGE_INIT_STATUS); usb_stor_dbg(us, " Config Descriptor 2: 0x%x\n", info->ConfigData.ATAExtraConfig & ATACFGE_CONF_DESC2); usb_stor_dbg(us, " Skip Device Boot: 0x%x\n", info->ConfigData.ATAExtraConfig & ATACFGE_SKIP_BOOT); usb_stor_dbg(us, " ATA 3 State Suspend: 0x%x\n", info->ConfigData.ATAExtraConfig & ATACFGE_STATE_SUSPEND); usb_stor_dbg(us, " Descriptor Override: 0x%x\n", info->ConfigData.ATAExtraConfig & ATACFGE_DESC_OVERRIDE); usb_stor_dbg(us, " Last LUN Identifier: 0x%x\n", info->ConfigData.ATAExtraConfig & ATACFGE_LAST_LUN); usb_stor_dbg(us, " SRST Enable: 0x%x\n", info->ConfigData.ATAExtraConfig & CFG_CAPABILITY_SRST); } #endif /************************************************************************** * isd200_write_config * * Write the ISD200 Configuration data * * RETURNS: * ISD status code */ static int isd200_write_config( struct us_data *us ) { struct isd200_info *info = (struct isd200_info *)us->extra; int retStatus = ISD200_GOOD; int result; #ifdef CONFIG_USB_STORAGE_DEBUG usb_stor_dbg(us, "Entering isd200_write_config\n"); usb_stor_dbg(us, " Writing the following ISD200 Config Data:\n"); isd200_log_config(us, info); #endif /* let's send the command via the control pipe */ result = usb_stor_ctrl_transfer( us, us->send_ctrl_pipe, 0x01, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT, 0x0000, 0x0002, (void *) &info->ConfigData, sizeof(info->ConfigData)); if (result >= 0) { usb_stor_dbg(us, " ISD200 Config Data was written successfully\n"); } else { usb_stor_dbg(us, " Request to write ISD200 Config Data failed!\n"); retStatus = ISD200_ERROR; } usb_stor_dbg(us, "Leaving isd200_write_config %08X\n", retStatus); return retStatus; } /************************************************************************** * isd200_read_config * * Reads the ISD200 Configuration data * * RETURNS: * ISD status code */ static int isd200_read_config( struct us_data *us ) { struct isd200_info *info = (struct isd200_info *)us->extra; int retStatus = ISD200_GOOD; int result; usb_stor_dbg(us, "Entering isd200_read_config\n"); /* read the configuration information from ISD200. Use this to */ /* determine what the special ATA CDB bytes are. 
*/ result = usb_stor_ctrl_transfer( us, us->recv_ctrl_pipe, 0x02, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, 0x0000, 0x0002, (void *) &info->ConfigData, sizeof(info->ConfigData)); if (result >= 0) { usb_stor_dbg(us, " Retrieved the following ISD200 Config Data:\n"); #ifdef CONFIG_USB_STORAGE_DEBUG isd200_log_config(us, info); #endif } else { usb_stor_dbg(us, " Request to get ISD200 Config Data failed!\n"); retStatus = ISD200_ERROR; } usb_stor_dbg(us, "Leaving isd200_read_config %08X\n", retStatus); return retStatus; } /************************************************************************** * isd200_atapi_soft_reset * * Perform an Atapi Soft Reset on the device * * RETURNS: * NT status code */ static int isd200_atapi_soft_reset( struct us_data *us ) { int retStatus = ISD200_GOOD; int transferStatus; usb_stor_dbg(us, "Entering isd200_atapi_soft_reset\n"); transferStatus = isd200_action( us, ACTION_SOFT_RESET, NULL, 0 ); if (transferStatus != ISD200_TRANSPORT_GOOD) { usb_stor_dbg(us, " Error issuing Atapi Soft Reset\n"); retStatus = ISD200_ERROR; } usb_stor_dbg(us, "Leaving isd200_atapi_soft_reset %08X\n", retStatus); return retStatus; } /************************************************************************** * isd200_srst * * Perform an SRST on the device * * RETURNS: * ISD status code */ static int isd200_srst( struct us_data *us ) { int retStatus = ISD200_GOOD; int transferStatus; usb_stor_dbg(us, "Entering isd200_SRST\n"); transferStatus = isd200_action( us, ACTION_RESET, NULL, 0 ); /* check to see if this request failed */ if (transferStatus != ISD200_TRANSPORT_GOOD) { usb_stor_dbg(us, " Error issuing SRST\n"); retStatus = ISD200_ERROR; } else { /* delay 10ms to give the drive a chance to see it */ msleep(10); transferStatus = isd200_action( us, ACTION_REENABLE, NULL, 0 ); if (transferStatus != ISD200_TRANSPORT_GOOD) { usb_stor_dbg(us, " Error taking drive out of reset\n"); retStatus = ISD200_ERROR; } else { /* delay 50ms to give the drive a chance to recover after SRST */ msleep(50); } } usb_stor_dbg(us, "Leaving isd200_srst %08X\n", retStatus); return retStatus; } /************************************************************************** * isd200_try_enum * * Helper function for isd200_manual_enum(). Does ENUM and READ_STATUS * and tries to analyze the status registers * * RETURNS: * ISD status code */ static int isd200_try_enum(struct us_data *us, unsigned char master_slave, int detect ) { int status = ISD200_GOOD; unsigned long endTime; struct isd200_info *info = (struct isd200_info *)us->extra; unsigned char *regs = info->RegsBuf; int recheckAsMaster = 0; if ( detect ) endTime = jiffies + ISD200_ENUM_DETECT_TIMEOUT * HZ; else endTime = jiffies + ISD200_ENUM_BSY_TIMEOUT * HZ; /* loop until we detect !BSY or timeout */ while(1) { status = isd200_action( us, ACTION_ENUM, NULL, master_slave ); if ( status != ISD200_GOOD ) break; status = isd200_action( us, ACTION_READ_STATUS, regs, 8 ); if ( status != ISD200_GOOD ) break; if (!detect) { if (regs[ATA_REG_STATUS_OFFSET] & ATA_BUSY) { usb_stor_dbg(us, " %s status is still BSY, try again...\n", master_slave == ATA_ADDRESS_DEVHEAD_STD ? "Master" : "Slave"); } else { usb_stor_dbg(us, " %s status !BSY, continue with next operation\n", master_slave == ATA_ADDRESS_DEVHEAD_STD ? 
"Master" : "Slave"); break; } } /* check for ATA_BUSY and */ /* ATA_DF (workaround ATA Zip drive) and */ /* ATA_ERR (workaround for Archos CD-ROM) */ else if (regs[ATA_REG_STATUS_OFFSET] & (ATA_BUSY | ATA_DF | ATA_ERR)) { usb_stor_dbg(us, " Status indicates it is not ready, try again...\n"); } /* check for DRDY, ATA devices set DRDY after SRST */ else if (regs[ATA_REG_STATUS_OFFSET] & ATA_DRDY) { usb_stor_dbg(us, " Identified ATA device\n"); info->DeviceFlags |= DF_ATA_DEVICE; info->DeviceHead = master_slave; break; } /* * check Cylinder High/Low to * determine if it is an ATAPI device */ else if (regs[ATA_REG_HCYL_OFFSET] == 0xEB && regs[ATA_REG_LCYL_OFFSET] == 0x14) { /* * It seems that the RICOH * MP6200A CD/RW drive will * report itself okay as a * slave when it is really a * master. So this check again * as a master device just to * make sure it doesn't report * itself okay as a master also */ if ((master_slave & ATA_ADDRESS_DEVHEAD_SLAVE) && !recheckAsMaster) { usb_stor_dbg(us, " Identified ATAPI device as slave. Rechecking again as master\n"); recheckAsMaster = 1; master_slave = ATA_ADDRESS_DEVHEAD_STD; } else { usb_stor_dbg(us, " Identified ATAPI device\n"); info->DeviceHead = master_slave; status = isd200_atapi_soft_reset(us); break; } } else { usb_stor_dbg(us, " Not ATA, not ATAPI - Weird\n"); break; } /* check for timeout on this request */ if (time_after_eq(jiffies, endTime)) { if (!detect) usb_stor_dbg(us, " BSY check timeout, just continue with next operation...\n"); else usb_stor_dbg(us, " Device detect timeout!\n"); break; } } return status; } /************************************************************************** * isd200_manual_enum * * Determines if the drive attached is an ATA or ATAPI and if it is a * master or slave. * * RETURNS: * ISD status code */ static int isd200_manual_enum(struct us_data *us) { struct isd200_info *info = (struct isd200_info *)us->extra; int retStatus = ISD200_GOOD; usb_stor_dbg(us, "Entering isd200_manual_enum\n"); retStatus = isd200_read_config(us); if (retStatus == ISD200_GOOD) { int isslave; /* master or slave? */ retStatus = isd200_try_enum( us, ATA_ADDRESS_DEVHEAD_STD, 0); if (retStatus == ISD200_GOOD) retStatus = isd200_try_enum( us, ATA_ADDRESS_DEVHEAD_SLAVE, 0); if (retStatus == ISD200_GOOD) { retStatus = isd200_srst(us); if (retStatus == ISD200_GOOD) /* ata or atapi? */ retStatus = isd200_try_enum( us, ATA_ADDRESS_DEVHEAD_STD, 1); } isslave = (info->DeviceHead & ATA_ADDRESS_DEVHEAD_SLAVE) ? 
1 : 0; if (!(info->ConfigData.ATAConfig & ATACFG_MASTER)) { usb_stor_dbg(us, " Setting Master/Slave selection to %d\n", isslave); info->ConfigData.ATAConfig &= 0x3f; info->ConfigData.ATAConfig |= (isslave<<6); retStatus = isd200_write_config(us); } } usb_stor_dbg(us, "Leaving isd200_manual_enum %08X\n", retStatus); return(retStatus); } static void isd200_fix_driveid(u16 *id) { #ifndef __LITTLE_ENDIAN # ifdef __BIG_ENDIAN int i; for (i = 0; i < ATA_ID_WORDS; i++) id[i] = __le16_to_cpu(id[i]); # else # error "Please fix <asm/byteorder.h>" # endif #endif } static void isd200_dump_driveid(struct us_data *us, u16 *id) { usb_stor_dbg(us, " Identify Data Structure:\n"); usb_stor_dbg(us, " config = 0x%x\n", id[ATA_ID_CONFIG]); usb_stor_dbg(us, " cyls = 0x%x\n", id[ATA_ID_CYLS]); usb_stor_dbg(us, " heads = 0x%x\n", id[ATA_ID_HEADS]); usb_stor_dbg(us, " track_bytes = 0x%x\n", id[4]); usb_stor_dbg(us, " sector_bytes = 0x%x\n", id[5]); usb_stor_dbg(us, " sectors = 0x%x\n", id[ATA_ID_SECTORS]); usb_stor_dbg(us, " serial_no[0] = 0x%x\n", *(char *)&id[ATA_ID_SERNO]); usb_stor_dbg(us, " buf_type = 0x%x\n", id[20]); usb_stor_dbg(us, " buf_size = 0x%x\n", id[ATA_ID_BUF_SIZE]); usb_stor_dbg(us, " ecc_bytes = 0x%x\n", id[22]); usb_stor_dbg(us, " fw_rev[0] = 0x%x\n", *(char *)&id[ATA_ID_FW_REV]); usb_stor_dbg(us, " model[0] = 0x%x\n", *(char *)&id[ATA_ID_PROD]); usb_stor_dbg(us, " max_multsect = 0x%x\n", id[ATA_ID_MAX_MULTSECT] & 0xff); usb_stor_dbg(us, " dword_io = 0x%x\n", id[ATA_ID_DWORD_IO]); usb_stor_dbg(us, " capability = 0x%x\n", id[ATA_ID_CAPABILITY] >> 8); usb_stor_dbg(us, " tPIO = 0x%x\n", id[ATA_ID_OLD_PIO_MODES] >> 8); usb_stor_dbg(us, " tDMA = 0x%x\n", id[ATA_ID_OLD_DMA_MODES] >> 8); usb_stor_dbg(us, " field_valid = 0x%x\n", id[ATA_ID_FIELD_VALID]); usb_stor_dbg(us, " cur_cyls = 0x%x\n", id[ATA_ID_CUR_CYLS]); usb_stor_dbg(us, " cur_heads = 0x%x\n", id[ATA_ID_CUR_HEADS]); usb_stor_dbg(us, " cur_sectors = 0x%x\n", id[ATA_ID_CUR_SECTORS]); usb_stor_dbg(us, " cur_capacity = 0x%x\n", ata_id_u32(id, 57)); usb_stor_dbg(us, " multsect = 0x%x\n", id[ATA_ID_MULTSECT] & 0xff); usb_stor_dbg(us, " lba_capacity = 0x%x\n", ata_id_u32(id, ATA_ID_LBA_CAPACITY)); usb_stor_dbg(us, " command_set_1 = 0x%x\n", id[ATA_ID_COMMAND_SET_1]); usb_stor_dbg(us, " command_set_2 = 0x%x\n", id[ATA_ID_COMMAND_SET_2]); } /************************************************************************** * isd200_get_inquiry_data * * Get inquiry data * * RETURNS: * ISD status code */ static int isd200_get_inquiry_data( struct us_data *us ) { struct isd200_info *info = (struct isd200_info *)us->extra; int retStatus; u16 *id = info->id; usb_stor_dbg(us, "Entering isd200_get_inquiry_data\n"); /* set default to Master */ info->DeviceHead = ATA_ADDRESS_DEVHEAD_STD; /* attempt to manually enumerate this device */ retStatus = isd200_manual_enum(us); if (retStatus == ISD200_GOOD) { int transferStatus; /* check for an ATA device */ if (info->DeviceFlags & DF_ATA_DEVICE) { /* this must be an ATA device */ /* perform an ATA Command Identify */ transferStatus = isd200_action( us, ACTION_IDENTIFY, id, ATA_ID_WORDS * 2); if (transferStatus != ISD200_TRANSPORT_GOOD) { /* Error issuing ATA Command Identify */ usb_stor_dbg(us, " Error issuing ATA Command Identify\n"); retStatus = ISD200_ERROR; } else { /* ATA Command Identify successful */ int i; __be16 *src; __u16 *dest; isd200_fix_driveid(id); isd200_dump_driveid(us, id); /* Prevent division by 0 in isd200_scsi_to_ata() */ if (id[ATA_ID_HEADS] == 0 || id[ATA_ID_SECTORS] == 0) { usb_stor_dbg(us, " Invalid 
ATA Identify data\n"); retStatus = ISD200_ERROR; goto Done; } memset(&info->InquiryData, 0, sizeof(info->InquiryData)); /* Standard IDE interface only supports disks */ info->InquiryData.DeviceType = DIRECT_ACCESS_DEVICE; /* The length must be at least 36 (5 + 31) */ info->InquiryData.AdditionalLength = 0x1F; if (id[ATA_ID_COMMAND_SET_1] & COMMANDSET_MEDIA_STATUS) { /* set the removable bit */ info->InquiryData.DeviceTypeModifier = DEVICE_REMOVABLE; info->DeviceFlags |= DF_REMOVABLE_MEDIA; } /* Fill in vendor identification fields */ src = (__be16 *)&id[ATA_ID_PROD]; dest = (__u16*)info->InquiryData.VendorId; for (i = 0; i < 4; i++) dest[i] = be16_to_cpu(src[i]); src = (__be16 *)&id[ATA_ID_PROD + 8/2]; dest = (__u16*)info->InquiryData.ProductId; for (i=0;i<8;i++) dest[i] = be16_to_cpu(src[i]); src = (__be16 *)&id[ATA_ID_FW_REV]; dest = (__u16*)info->InquiryData.ProductRevisionLevel; for (i=0;i<2;i++) dest[i] = be16_to_cpu(src[i]); /* determine if it supports Media Status Notification */ if (id[ATA_ID_COMMAND_SET_2] & COMMANDSET_MEDIA_STATUS) { usb_stor_dbg(us, " Device supports Media Status Notification\n"); /* * Indicate that it is enabled, even * though it is not. * This allows the lock/unlock of the * media to work correctly. */ info->DeviceFlags |= DF_MEDIA_STATUS_ENABLED; } else info->DeviceFlags &= ~DF_MEDIA_STATUS_ENABLED; } } else { /* * this must be an ATAPI device * use an ATAPI protocol (Transparent SCSI) */ us->protocol_name = "Transparent SCSI"; us->proto_handler = usb_stor_transparent_scsi_command; usb_stor_dbg(us, "Protocol changed to: %s\n", us->protocol_name); /* Free driver structure */ us->extra_destructor(info); kfree(info); us->extra = NULL; us->extra_destructor = NULL; } } Done: usb_stor_dbg(us, "Leaving isd200_get_inquiry_data %08X\n", retStatus); return(retStatus); } /************************************************************************** * isd200_scsi_to_ata * * Translate SCSI commands to ATA commands. 
* * RETURNS: * 1 if the command needs to be sent to the transport layer * 0 otherwise */ static int isd200_scsi_to_ata(struct scsi_cmnd *srb, struct us_data *us, union ata_cdb * ataCdb) { struct isd200_info *info = (struct isd200_info *)us->extra; u16 *id = info->id; int sendToTransport = 1; unsigned char sectnum, head; unsigned short cylinder; unsigned long lba; unsigned long blockCount; unsigned char senseData[8] = { 0, 0, 0, 0, 0, 0, 0, 0 }; memset(ataCdb, 0, sizeof(union ata_cdb)); /* SCSI Command */ switch (srb->cmnd[0]) { case INQUIRY: usb_stor_dbg(us, " ATA OUT - INQUIRY\n"); /* copy InquiryData */ usb_stor_set_xfer_buf((unsigned char *) &info->InquiryData, sizeof(info->InquiryData), srb); srb->result = SAM_STAT_GOOD; sendToTransport = 0; break; case MODE_SENSE: usb_stor_dbg(us, " ATA OUT - SCSIOP_MODE_SENSE\n"); /* Initialize the return buffer */ usb_stor_set_xfer_buf(senseData, sizeof(senseData), srb); if (info->DeviceFlags & DF_MEDIA_STATUS_ENABLED) { ataCdb->generic.SignatureByte0 = info->ConfigData.ATAMajorCommand; ataCdb->generic.SignatureByte1 = info->ConfigData.ATAMinorCommand; ataCdb->generic.TransferBlockSize = 1; ataCdb->generic.RegisterSelect = REG_COMMAND; ataCdb->write.CommandByte = ATA_COMMAND_GET_MEDIA_STATUS; isd200_srb_set_bufflen(srb, 0); } else { usb_stor_dbg(us, " Media Status not supported, just report okay\n"); srb->result = SAM_STAT_GOOD; sendToTransport = 0; } break; case TEST_UNIT_READY: usb_stor_dbg(us, " ATA OUT - SCSIOP_TEST_UNIT_READY\n"); if (info->DeviceFlags & DF_MEDIA_STATUS_ENABLED) { ataCdb->generic.SignatureByte0 = info->ConfigData.ATAMajorCommand; ataCdb->generic.SignatureByte1 = info->ConfigData.ATAMinorCommand; ataCdb->generic.TransferBlockSize = 1; ataCdb->generic.RegisterSelect = REG_COMMAND; ataCdb->write.CommandByte = ATA_COMMAND_GET_MEDIA_STATUS; isd200_srb_set_bufflen(srb, 0); } else { usb_stor_dbg(us, " Media Status not supported, just report okay\n"); srb->result = SAM_STAT_GOOD; sendToTransport = 0; } break; case READ_CAPACITY: { unsigned long capacity; struct read_capacity_data readCapacityData; usb_stor_dbg(us, " ATA OUT - SCSIOP_READ_CAPACITY\n"); if (ata_id_has_lba(id)) capacity = ata_id_u32(id, ATA_ID_LBA_CAPACITY) - 1; else capacity = (id[ATA_ID_HEADS] * id[ATA_ID_CYLS] * id[ATA_ID_SECTORS]) - 1; readCapacityData.LogicalBlockAddress = cpu_to_be32(capacity); readCapacityData.BytesPerBlock = cpu_to_be32(0x200); usb_stor_set_xfer_buf((unsigned char *) &readCapacityData, sizeof(readCapacityData), srb); srb->result = SAM_STAT_GOOD; sendToTransport = 0; } break; case READ_10: usb_stor_dbg(us, " ATA OUT - SCSIOP_READ\n"); lba = be32_to_cpu(*(__be32 *)&srb->cmnd[2]); blockCount = (unsigned long)srb->cmnd[7]<<8 | (unsigned long)srb->cmnd[8]; if (ata_id_has_lba(id)) { sectnum = (unsigned char)(lba); cylinder = (unsigned short)(lba>>8); head = ATA_ADDRESS_DEVHEAD_LBA_MODE | (unsigned char)(lba>>24 & 0x0F); } else { sectnum = (u8)((lba % id[ATA_ID_SECTORS]) + 1); cylinder = (u16)(lba / (id[ATA_ID_SECTORS] * id[ATA_ID_HEADS])); head = (u8)((lba / id[ATA_ID_SECTORS]) % id[ATA_ID_HEADS]); } ataCdb->generic.SignatureByte0 = info->ConfigData.ATAMajorCommand; ataCdb->generic.SignatureByte1 = info->ConfigData.ATAMinorCommand; ataCdb->generic.TransferBlockSize = 1; ataCdb->generic.RegisterSelect = REG_SECTOR_COUNT | REG_SECTOR_NUMBER | REG_CYLINDER_LOW | REG_CYLINDER_HIGH | REG_DEVICE_HEAD | REG_COMMAND; ataCdb->write.SectorCountByte = (unsigned char)blockCount; ataCdb->write.SectorNumberByte = sectnum; ataCdb->write.CylinderHighByte = (unsigned 
char)(cylinder>>8); ataCdb->write.CylinderLowByte = (unsigned char)cylinder; ataCdb->write.DeviceHeadByte = (head | ATA_ADDRESS_DEVHEAD_STD); ataCdb->write.CommandByte = ATA_CMD_PIO_READ; break; case WRITE_10: usb_stor_dbg(us, " ATA OUT - SCSIOP_WRITE\n"); lba = be32_to_cpu(*(__be32 *)&srb->cmnd[2]); blockCount = (unsigned long)srb->cmnd[7]<<8 | (unsigned long)srb->cmnd[8]; if (ata_id_has_lba(id)) { sectnum = (unsigned char)(lba); cylinder = (unsigned short)(lba>>8); head = ATA_ADDRESS_DEVHEAD_LBA_MODE | (unsigned char)(lba>>24 & 0x0F); } else { sectnum = (u8)((lba % id[ATA_ID_SECTORS]) + 1); cylinder = (u16)(lba / (id[ATA_ID_SECTORS] * id[ATA_ID_HEADS])); head = (u8)((lba / id[ATA_ID_SECTORS]) % id[ATA_ID_HEADS]); } ataCdb->generic.SignatureByte0 = info->ConfigData.ATAMajorCommand; ataCdb->generic.SignatureByte1 = info->ConfigData.ATAMinorCommand; ataCdb->generic.TransferBlockSize = 1; ataCdb->generic.RegisterSelect = REG_SECTOR_COUNT | REG_SECTOR_NUMBER | REG_CYLINDER_LOW | REG_CYLINDER_HIGH | REG_DEVICE_HEAD | REG_COMMAND; ataCdb->write.SectorCountByte = (unsigned char)blockCount; ataCdb->write.SectorNumberByte = sectnum; ataCdb->write.CylinderHighByte = (unsigned char)(cylinder>>8); ataCdb->write.CylinderLowByte = (unsigned char)cylinder; ataCdb->write.DeviceHeadByte = (head | ATA_ADDRESS_DEVHEAD_STD); ataCdb->write.CommandByte = ATA_CMD_PIO_WRITE; break; case ALLOW_MEDIUM_REMOVAL: usb_stor_dbg(us, " ATA OUT - SCSIOP_MEDIUM_REMOVAL\n"); if (info->DeviceFlags & DF_REMOVABLE_MEDIA) { usb_stor_dbg(us, " srb->cmnd[4] = 0x%X\n", srb->cmnd[4]); ataCdb->generic.SignatureByte0 = info->ConfigData.ATAMajorCommand; ataCdb->generic.SignatureByte1 = info->ConfigData.ATAMinorCommand; ataCdb->generic.TransferBlockSize = 1; ataCdb->generic.RegisterSelect = REG_COMMAND; ataCdb->write.CommandByte = (srb->cmnd[4] & 0x1) ? ATA_CMD_MEDIA_LOCK : ATA_CMD_MEDIA_UNLOCK; isd200_srb_set_bufflen(srb, 0); } else { usb_stor_dbg(us, " Not removable media, just report okay\n"); srb->result = SAM_STAT_GOOD; sendToTransport = 0; } break; case START_STOP: usb_stor_dbg(us, " ATA OUT - SCSIOP_START_STOP_UNIT\n"); usb_stor_dbg(us, " srb->cmnd[4] = 0x%X\n", srb->cmnd[4]); if ((srb->cmnd[4] & 0x3) == 0x2) { usb_stor_dbg(us, " Media Eject\n"); ataCdb->generic.SignatureByte0 = info->ConfigData.ATAMajorCommand; ataCdb->generic.SignatureByte1 = info->ConfigData.ATAMinorCommand; ataCdb->generic.TransferBlockSize = 0; ataCdb->generic.RegisterSelect = REG_COMMAND; ataCdb->write.CommandByte = ATA_COMMAND_MEDIA_EJECT; } else if ((srb->cmnd[4] & 0x3) == 0x1) { usb_stor_dbg(us, " Get Media Status\n"); ataCdb->generic.SignatureByte0 = info->ConfigData.ATAMajorCommand; ataCdb->generic.SignatureByte1 = info->ConfigData.ATAMinorCommand; ataCdb->generic.TransferBlockSize = 1; ataCdb->generic.RegisterSelect = REG_COMMAND; ataCdb->write.CommandByte = ATA_COMMAND_GET_MEDIA_STATUS; isd200_srb_set_bufflen(srb, 0); } else { usb_stor_dbg(us, " Nothing to do, just report okay\n"); srb->result = SAM_STAT_GOOD; sendToTransport = 0; } break; default: usb_stor_dbg(us, "Unsupported SCSI command - 0x%X\n", srb->cmnd[0]); srb->result = DID_ERROR << 16; sendToTransport = 0; break; } return(sendToTransport); } /************************************************************************** * isd200_free_info * * Frees the driver structure. 
*/ static void isd200_free_info_ptrs(void *info_) { struct isd200_info *info = (struct isd200_info *) info_; if (info) { kfree(info->id); kfree(info->RegsBuf); kfree(info->srb.sense_buffer); } } /************************************************************************** * isd200_init_info * * Allocates (if necessary) and initializes the driver structure. * * RETURNS: * error status code */ static int isd200_init_info(struct us_data *us) { struct isd200_info *info; info = kzalloc(sizeof(struct isd200_info), GFP_KERNEL); if (!info) return -ENOMEM; info->id = kzalloc(ATA_ID_WORDS * 2, GFP_KERNEL); info->RegsBuf = kmalloc(sizeof(info->ATARegs), GFP_KERNEL); info->srb.sense_buffer = kmalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL); if (!info->id || !info->RegsBuf || !info->srb.sense_buffer) { isd200_free_info_ptrs(info); kfree(info); return -ENOMEM; } us->extra = info; us->extra_destructor = isd200_free_info_ptrs; return 0; } /************************************************************************** * Initialization for the ISD200 */ static int isd200_Initialization(struct us_data *us) { int rc = 0; usb_stor_dbg(us, "ISD200 Initialization...\n"); /* Initialize ISD200 info struct */ if (isd200_init_info(us) < 0) { usb_stor_dbg(us, "ERROR Initializing ISD200 Info struct\n"); rc = -ENOMEM; } else { /* Get device specific data */ if (isd200_get_inquiry_data(us) != ISD200_GOOD) { usb_stor_dbg(us, "ISD200 Initialization Failure\n"); rc = -EINVAL; } else { usb_stor_dbg(us, "ISD200 Initialization complete\n"); } } return rc; } /************************************************************************** * Protocol and Transport for the ISD200 ASIC * * This protocol and transport are for ATA devices connected to an ISD200 * ASIC. An ATAPI device that is connected as a slave device will be * detected in the driver initialization function and the protocol will * be changed to an ATAPI protocol (Transparent SCSI). * */ static void isd200_ata_command(struct scsi_cmnd *srb, struct us_data *us) { int sendToTransport, orig_bufflen; union ata_cdb ataCdb; /* Make sure driver was initialized */ if (us->extra == NULL) { usb_stor_dbg(us, "ERROR Driver not initialized\n"); srb->result = DID_ERROR << 16; return; } scsi_set_resid(srb, 0); /* scsi_bufflen might change in protocol translation to ata */ orig_bufflen = scsi_bufflen(srb); sendToTransport = isd200_scsi_to_ata(srb, us, &ataCdb); /* send the command to the transport layer */ if (sendToTransport) isd200_invoke_transport(us, srb, &ataCdb); isd200_srb_set_bufflen(srb, orig_bufflen); } static struct scsi_host_template isd200_host_template; static int isd200_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct us_data *us; int result; result = usb_stor_probe1(&us, intf, id, (id - isd200_usb_ids) + isd200_unusual_dev_list, &isd200_host_template); if (result) return result; us->protocol_name = "ISD200 ATA/ATAPI"; us->proto_handler = isd200_ata_command; result = usb_stor_probe2(us); return result; } static struct usb_driver isd200_driver = { .name = DRV_NAME, .probe = isd200_probe, .disconnect = usb_stor_disconnect, .suspend = usb_stor_suspend, .resume = usb_stor_resume, .reset_resume = usb_stor_reset_resume, .pre_reset = usb_stor_pre_reset, .post_reset = usb_stor_post_reset, .id_table = isd200_usb_ids, .soft_unbind = 1, .no_dynamic_id = 1, }; module_usb_stor_driver(isd200_driver, isd200_host_template, DRV_NAME); |
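/*
 * For reference, the LBA and CHS register packing performed by
 * isd200_scsi_to_ata() above for READ_10/WRITE_10 can be checked in
 * isolation. The following is a minimal, hypothetical userspace sketch that
 * mirrors that arithmetic only; the helper names and the 16-head/63-sector
 * geometry are illustrative and are not part of the driver.
 */
#include <stdio.h>
#include <stdint.h>

#define ATA_ADDRESS_DEVHEAD_LBA_MODE	0x40

/* Same arithmetic as the driver's non-LBA (CHS) branch. */
static void pack_chs(uint32_t lba, unsigned int heads, unsigned int sectors)
{
	unsigned int sectnum  = (unsigned int)(lba % sectors) + 1;
	unsigned int cylinder = (unsigned int)(lba / (sectors * heads));
	unsigned int head     = (unsigned int)((lba / sectors) % heads);

	printf("LBA %u -> cyl %u, head %u, sector %u\n",
	       (unsigned int)lba, cylinder, head, sectnum);
}

/* Same packing as the driver's LBA branch: 28 bits spread over the task file. */
static void pack_lba28(uint32_t lba)
{
	unsigned int sectnum  = lba & 0xff;
	unsigned int cylinder = (lba >> 8) & 0xffff;
	unsigned int head     = ATA_ADDRESS_DEVHEAD_LBA_MODE | ((lba >> 24) & 0x0f);

	printf("LBA %u -> sector 0x%02x, cyl 0x%04x, devhead 0x%02x\n",
	       (unsigned int)lba, sectnum, cylinder, head);
}

int main(void)
{
	pack_chs(10000, 16, 63);	/* hypothetical 16-head, 63-sector geometry */
	pack_lba28(10000);
	return 0;
}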
// SPDX-License-Identifier: GPL-2.0-only
/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
 */

#include <linux/types.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/netfilter.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_timeout.h>

static const unsigned int nf_ct_generic_timeout = 600*HZ;

#ifdef CONFIG_NF_CONNTRACK_TIMEOUT

#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_cttimeout.h>

static int generic_timeout_nlattr_to_obj(struct nlattr *tb[],
					 struct net *net, void *data)
{
	struct nf_generic_net *gn = nf_generic_pernet(net);
	unsigned int *timeout = data;

	if (!timeout)
		timeout = &gn->timeout;

	if (tb[CTA_TIMEOUT_GENERIC_TIMEOUT])
		*timeout =
		    ntohl(nla_get_be32(tb[CTA_TIMEOUT_GENERIC_TIMEOUT])) * HZ;
	else {
		/* Set default generic timeout. */
		*timeout = gn->timeout;
	}

	return 0;
}

static int
generic_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
{
	const unsigned int *timeout = data;

	if (nla_put_be32(skb, CTA_TIMEOUT_GENERIC_TIMEOUT, htonl(*timeout / HZ)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -ENOSPC;
}

static const struct nla_policy
generic_timeout_nla_policy[CTA_TIMEOUT_GENERIC_MAX+1] = {
	[CTA_TIMEOUT_GENERIC_TIMEOUT]	= { .type = NLA_U32 },
};
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */

void nf_conntrack_generic_init_net(struct net *net)
{
	struct nf_generic_net *gn = nf_generic_pernet(net);

	gn->timeout = nf_ct_generic_timeout;
}

const struct nf_conntrack_l4proto nf_conntrack_l4proto_generic = {
	.l4proto		= 255,
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
	.ctnl_timeout		= {
		.nlattr_to_obj	= generic_timeout_nlattr_to_obj,
		.obj_to_nlattr	= generic_timeout_obj_to_nlattr,
		.nlattr_max	= CTA_TIMEOUT_GENERIC_MAX,
		.obj_size	= sizeof(unsigned int),
		.nla_policy	= generic_timeout_nla_policy,
	},
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
};
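/*
 * The timeout attribute handlers above convert between a big-endian u32
 * count of seconds on the wire and jiffies in the kernel. The following is a
 * minimal userspace sketch of that round-trip, assuming a hypothetical HZ of
 * 250; it only illustrates the arithmetic and is not an nfnetlink client.
 */
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

#define HZ 250				/* hypothetical CONFIG_HZ value */

int main(void)
{
	uint32_t wire = htonl(600);		   /* attribute payload: 600 s   */
	unsigned int jiffies = ntohl(wire) * HZ;   /* nlattr_to_obj direction    */
	uint32_t dumped = htonl(jiffies / HZ);	   /* obj_to_nlattr direction    */

	printf("timeout: %u s -> %u jiffies -> %u s\n",
	       600u, jiffies, (unsigned int)ntohl(dumped));
	return 0;
}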
// SPDX-License-Identifier: GPL-2.0-or-later
/* SCTP kernel implementation
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001-2002 International Business Machines, Corp.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 Nokia, Inc.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This file is part of the SCTP kernel implementation
 *
 * This abstraction represents an SCTP endpoint.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson <karl@athena.chicago.il.us>
 *    Jon Grimm <jgrimm@austin.ibm.com>
 *    Daisy Chang <daisyc@us.ibm.com>
 *    Dajiang Zhang <dajiang.zhang@nokia.com>
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/in.h>
#include <linux/random.h>	/* get_random_bytes() */
#include <net/sock.h>
#include <net/ipv6.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* Forward declarations for internal helpers. */
static void sctp_endpoint_bh_rcv(struct work_struct *work);

/*
 * Initialize the base fields of the endpoint structure.
 */
static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
						struct sock *sk, gfp_t gfp)
{
	struct net *net = sock_net(sk);
	struct sctp_shared_key *null_key;

	ep->digest = kzalloc(SCTP_SIGNATURE_SIZE, gfp);
	if (!ep->digest)
		return NULL;

	ep->asconf_enable = net->sctp.addip_enable;
	ep->auth_enable = net->sctp.auth_enable;
	if (ep->auth_enable) {
		if (sctp_auth_init(ep, gfp))
			goto nomem;
		if (ep->asconf_enable) {
			sctp_auth_ep_add_chunkid(ep, SCTP_CID_ASCONF);
			sctp_auth_ep_add_chunkid(ep, SCTP_CID_ASCONF_ACK);
		}
	}

	/* Initialize the base structure. */
	/* What type of endpoint are we?
*/ ep->base.type = SCTP_EP_TYPE_SOCKET; /* Initialize the basic object fields. */ refcount_set(&ep->base.refcnt, 1); ep->base.dead = false; /* Create an input queue. */ sctp_inq_init(&ep->base.inqueue); /* Set its top-half handler */ sctp_inq_set_th_handler(&ep->base.inqueue, sctp_endpoint_bh_rcv); /* Initialize the bind addr area */ sctp_bind_addr_init(&ep->base.bind_addr, 0); /* Create the lists of associations. */ INIT_LIST_HEAD(&ep->asocs); /* Use SCTP specific send buffer space queues. */ ep->sndbuf_policy = net->sctp.sndbuf_policy; sk->sk_data_ready = sctp_data_ready; sk->sk_write_space = sctp_write_space; sock_set_flag(sk, SOCK_USE_WRITE_QUEUE); /* Get the receive buffer policy for this endpoint */ ep->rcvbuf_policy = net->sctp.rcvbuf_policy; /* Initialize the secret key used with cookie. */ get_random_bytes(ep->secret_key, sizeof(ep->secret_key)); /* SCTP-AUTH extensions*/ INIT_LIST_HEAD(&ep->endpoint_shared_keys); null_key = sctp_auth_shkey_create(0, gfp); if (!null_key) goto nomem_shkey; list_add(&null_key->key_list, &ep->endpoint_shared_keys); /* Add the null key to the endpoint shared keys list and * set the hmcas and chunks pointers. */ ep->prsctp_enable = net->sctp.prsctp_enable; ep->reconf_enable = net->sctp.reconf_enable; ep->ecn_enable = net->sctp.ecn_enable; /* Remember who we are attached to. */ ep->base.sk = sk; ep->base.net = sock_net(sk); sock_hold(ep->base.sk); return ep; nomem_shkey: sctp_auth_free(ep); nomem: kfree(ep->digest); return NULL; } /* Create a sctp_endpoint with all that boring stuff initialized. * Returns NULL if there isn't enough memory. */ struct sctp_endpoint *sctp_endpoint_new(struct sock *sk, gfp_t gfp) { struct sctp_endpoint *ep; /* Build a local endpoint. */ ep = kzalloc(sizeof(*ep), gfp); if (!ep) goto fail; if (!sctp_endpoint_init(ep, sk, gfp)) goto fail_init; SCTP_DBG_OBJCNT_INC(ep); return ep; fail_init: kfree(ep); fail: return NULL; } /* Add an association to an endpoint. */ void sctp_endpoint_add_asoc(struct sctp_endpoint *ep, struct sctp_association *asoc) { struct sock *sk = ep->base.sk; /* If this is a temporary association, don't bother * since we'll be removing it shortly and don't * want anyone to find it anyway. */ if (asoc->temp) return; /* Now just add it to our list of asocs */ list_add_tail(&asoc->asocs, &ep->asocs); /* Increment the backlog value for a TCP-style listening socket. */ if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) sk_acceptq_added(sk); } /* Free the endpoint structure. Delay cleanup until * all users have released their reference count on this structure. */ void sctp_endpoint_free(struct sctp_endpoint *ep) { ep->base.dead = true; inet_sk_set_state(ep->base.sk, SCTP_SS_CLOSED); /* Unlink this endpoint, so we can't find it again! */ sctp_unhash_endpoint(ep); sctp_endpoint_put(ep); } /* Final destructor for endpoint. */ static void sctp_endpoint_destroy_rcu(struct rcu_head *head) { struct sctp_endpoint *ep = container_of(head, struct sctp_endpoint, rcu); struct sock *sk = ep->base.sk; sctp_sk(sk)->ep = NULL; sock_put(sk); kfree(ep); SCTP_DBG_OBJCNT_DEC(ep); } static void sctp_endpoint_destroy(struct sctp_endpoint *ep) { struct sock *sk; if (unlikely(!ep->base.dead)) { WARN(1, "Attempt to destroy undead endpoint %p!\n", ep); return; } /* Free the digest buffer */ kfree(ep->digest); /* SCTP-AUTH: Free up AUTH releated data such as shared keys * chunks and hmacs arrays that were allocated */ sctp_auth_destroy_keys(&ep->endpoint_shared_keys); sctp_auth_free(ep); /* Cleanup. 
*/ sctp_inq_free(&ep->base.inqueue); sctp_bind_addr_free(&ep->base.bind_addr); memset(ep->secret_key, 0, sizeof(ep->secret_key)); sk = ep->base.sk; /* Remove and free the port */ if (sctp_sk(sk)->bind_hash) sctp_put_port(sk); call_rcu(&ep->rcu, sctp_endpoint_destroy_rcu); } /* Hold a reference to an endpoint. */ int sctp_endpoint_hold(struct sctp_endpoint *ep) { return refcount_inc_not_zero(&ep->base.refcnt); } /* Release a reference to an endpoint and clean up if there are * no more references. */ void sctp_endpoint_put(struct sctp_endpoint *ep) { if (refcount_dec_and_test(&ep->base.refcnt)) sctp_endpoint_destroy(ep); } /* Is this the endpoint we are looking for? */ struct sctp_endpoint *sctp_endpoint_is_match(struct sctp_endpoint *ep, struct net *net, const union sctp_addr *laddr, int dif, int sdif) { int bound_dev_if = READ_ONCE(ep->base.sk->sk_bound_dev_if); struct sctp_endpoint *retval = NULL; if (net_eq(ep->base.net, net) && sctp_sk_bound_dev_eq(net, bound_dev_if, dif, sdif) && (htons(ep->base.bind_addr.port) == laddr->v4.sin_port)) { if (sctp_bind_addr_match(&ep->base.bind_addr, laddr, sctp_sk(ep->base.sk))) retval = ep; } return retval; } /* Find the association that goes with this chunk. * We lookup the transport from hashtable at first, then get association * through t->assoc. */ struct sctp_association *sctp_endpoint_lookup_assoc( const struct sctp_endpoint *ep, const union sctp_addr *paddr, struct sctp_transport **transport) { struct sctp_association *asoc = NULL; struct sctp_transport *t; *transport = NULL; /* If the local port is not set, there can't be any associations * on this endpoint. */ if (!ep->base.bind_addr.port) return NULL; rcu_read_lock(); t = sctp_epaddr_lookup_transport(ep, paddr); if (!t) goto out; *transport = t; asoc = t->asoc; out: rcu_read_unlock(); return asoc; } /* Look for any peeled off association from the endpoint that matches the * given peer address. */ bool sctp_endpoint_is_peeled_off(struct sctp_endpoint *ep, const union sctp_addr *paddr) { int bound_dev_if = READ_ONCE(ep->base.sk->sk_bound_dev_if); struct sctp_sockaddr_entry *addr; struct net *net = ep->base.net; struct sctp_bind_addr *bp; bp = &ep->base.bind_addr; /* This function is called with the socket lock held, * so the address_list can not change. */ list_for_each_entry(addr, &bp->address_list, list) { if (sctp_has_association(net, &addr->a, paddr, bound_dev_if, bound_dev_if)) return true; } return false; } /* Do delayed input processing. This is scheduled by sctp_rcv(). * This may be called on BH or task time. 
*/ static void sctp_endpoint_bh_rcv(struct work_struct *work) { struct sctp_endpoint *ep = container_of(work, struct sctp_endpoint, base.inqueue.immediate); struct sctp_association *asoc; struct sock *sk; struct net *net; struct sctp_transport *transport; struct sctp_chunk *chunk; struct sctp_inq *inqueue; union sctp_subtype subtype; enum sctp_state state; int error = 0; int first_time = 1; /* is this the first time through the loop */ if (ep->base.dead) return; asoc = NULL; inqueue = &ep->base.inqueue; sk = ep->base.sk; net = sock_net(sk); while (NULL != (chunk = sctp_inq_pop(inqueue))) { subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type); /* If the first chunk in the packet is AUTH, do special * processing specified in Section 6.3 of SCTP-AUTH spec */ if (first_time && (subtype.chunk == SCTP_CID_AUTH)) { struct sctp_chunkhdr *next_hdr; next_hdr = sctp_inq_peek(inqueue); if (!next_hdr) goto normal; /* If the next chunk is COOKIE-ECHO, skip the AUTH * chunk while saving a pointer to it so we can do * Authentication later (during cookie-echo * processing). */ if (next_hdr->type == SCTP_CID_COOKIE_ECHO) { chunk->auth_chunk = skb_clone(chunk->skb, GFP_ATOMIC); chunk->auth = 1; continue; } } normal: /* We might have grown an association since last we * looked, so try again. * * This happens when we've just processed our * COOKIE-ECHO chunk. */ if (NULL == chunk->asoc) { asoc = sctp_endpoint_lookup_assoc(ep, sctp_source(chunk), &transport); chunk->asoc = asoc; chunk->transport = transport; } state = asoc ? asoc->state : SCTP_STATE_CLOSED; if (sctp_auth_recv_cid(subtype.chunk, asoc) && !chunk->auth) continue; /* Remember where the last DATA chunk came from so we * know where to send the SACK. */ if (asoc && sctp_chunk_is_data(chunk)) asoc->peer.last_data_from = chunk->transport; else { SCTP_INC_STATS(ep->base.net, SCTP_MIB_INCTRLCHUNKS); if (asoc) asoc->stats.ictrlchunks++; } if (chunk->transport) chunk->transport->last_time_heard = ktime_get(); error = sctp_do_sm(net, SCTP_EVENT_T_CHUNK, subtype, state, ep, asoc, chunk, GFP_ATOMIC); if (error && chunk) chunk->pdiscard = 1; /* Check to see if the endpoint is freed in response to * the incoming chunk. If so, get out of the while loop. */ if (!sctp_sk(sk)->ep) break; if (first_time) first_time = 0; } } |
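/*
 * sctp_endpoint_hold() above only succeeds while the endpoint still has a
 * non-zero reference count (refcount_inc_not_zero), so a lookup cannot
 * resurrect an endpoint that sctp_endpoint_put() has already started tearing
 * down. The following is a minimal userspace sketch of that inc-not-zero
 * idea using C11 atomics; the names are hypothetical and this is not the
 * kernel's refcount_t implementation.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct obj {
	atomic_uint refcnt;
};

/* Succeeds only while at least one reference already exists. */
static bool obj_hold(struct obj *o)
{
	unsigned int old = atomic_load(&o->refcnt);

	while (old != 0) {
		if (atomic_compare_exchange_weak(&o->refcnt, &old, old + 1))
			return true;	/* got a reference */
	}
	return false;			/* object already dying; do not touch */
}

static void obj_put(struct obj *o)
{
	if (atomic_fetch_sub(&o->refcnt, 1) == 1)
		printf("last reference dropped: destroy object\n");
}

int main(void)
{
	struct obj o = { .refcnt = 1 };

	printf("hold: %s\n", obj_hold(&o) ? "ok" : "failed");
	obj_put(&o);	/* drops back to the original single reference */
	obj_put(&o);	/* count hits zero: destroy */
	printf("hold after destroy: %s\n", obj_hold(&o) ? "ok" : "failed");
	return 0;
}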
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * cec - HDMI Consumer Electronics Control support header
 *
 * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
 */

#ifndef _MEDIA_CEC_H
#define _MEDIA_CEC_H

#include <linux/poll.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/cdev.h>
#include <linux/kthread.h>
#include <linux/timer.h>
#include <linux/cec-funcs.h>
#include <media/rc-core.h>

#define CEC_CAP_DEFAULTS (CEC_CAP_LOG_ADDRS | CEC_CAP_TRANSMIT | \
			  CEC_CAP_PASSTHROUGH | CEC_CAP_RC)

/**
 * struct cec_devnode - cec device node
 * @dev:	cec device
 * @cdev:	cec character device
 * @minor:	device node minor number
 * @lock:	lock to serialize open/release and registration
 * @registered:	the device was correctly registered
 * @unregistered: the device was unregistered
 * @lock_fhs:	lock to control access to @fhs
 * @fhs:	the list of open filehandles (cec_fh)
 *
 * This structure represents a cec-related device node.
 *
 * To add or remove filehandles from @fhs the @lock must be taken first,
 * followed by @lock_fhs. It is safe to access @fhs if either lock is held.
 *
 * The @parent is a physical device.
It must be set by core or device drivers * before registering the node. */ struct cec_devnode { /* sysfs */ struct device dev; struct cdev cdev; /* device info */ int minor; /* serialize open/release and registration */ struct mutex lock; bool registered; bool unregistered; /* protect access to fhs */ struct mutex lock_fhs; struct list_head fhs; }; struct cec_adapter; struct cec_data; struct cec_pin; struct cec_notifier; struct cec_data { struct list_head list; struct list_head xfer_list; struct cec_adapter *adap; struct cec_msg msg; u8 match_len; u8 match_reply[5]; struct cec_fh *fh; struct delayed_work work; struct completion c; u8 attempts; bool blocking; bool completed; }; struct cec_msg_entry { struct list_head list; struct cec_msg msg; }; struct cec_event_entry { struct list_head list; struct cec_event ev; }; #define CEC_NUM_CORE_EVENTS 2 #define CEC_NUM_EVENTS CEC_EVENT_PIN_5V_HIGH struct cec_fh { struct list_head list; struct list_head xfer_list; struct cec_adapter *adap; u8 mode_initiator; u8 mode_follower; /* Events */ wait_queue_head_t wait; struct mutex lock; struct list_head events[CEC_NUM_EVENTS]; /* queued events */ u16 queued_events[CEC_NUM_EVENTS]; unsigned int total_queued_events; struct cec_event_entry core_events[CEC_NUM_CORE_EVENTS]; struct list_head msgs; /* queued messages */ unsigned int queued_msgs; }; #define CEC_SIGNAL_FREE_TIME_RETRY 3 #define CEC_SIGNAL_FREE_TIME_NEW_INITIATOR 5 #define CEC_SIGNAL_FREE_TIME_NEXT_XFER 7 /* The nominal data bit period is 2.4 ms */ #define CEC_FREE_TIME_TO_USEC(ft) ((ft) * 2400) struct cec_adap_ops { /* Low-level callbacks, called with adap->lock held */ int (*adap_enable)(struct cec_adapter *adap, bool enable); int (*adap_monitor_all_enable)(struct cec_adapter *adap, bool enable); int (*adap_monitor_pin_enable)(struct cec_adapter *adap, bool enable); int (*adap_log_addr)(struct cec_adapter *adap, u8 logical_addr); void (*adap_unconfigured)(struct cec_adapter *adap); int (*adap_transmit)(struct cec_adapter *adap, u8 attempts, u32 signal_free_time, struct cec_msg *msg); void (*adap_nb_transmit_canceled)(struct cec_adapter *adap, const struct cec_msg *msg); void (*adap_status)(struct cec_adapter *adap, struct seq_file *file); void (*adap_free)(struct cec_adapter *adap); /* Error injection callbacks, called without adap->lock held */ int (*error_inj_show)(struct cec_adapter *adap, struct seq_file *sf); bool (*error_inj_parse_line)(struct cec_adapter *adap, char *line); /* High-level CEC message callback, called without adap->lock held */ void (*configured)(struct cec_adapter *adap); int (*received)(struct cec_adapter *adap, struct cec_msg *msg); }; /* * The minimum message length you can receive (excepting poll messages) is 2. * With a transfer rate of at most 36 bytes per second this makes 18 messages * per second worst case. * * We queue at most 3 seconds worth of received messages. The CEC specification * requires that messages are replied to within a second, so 3 seconds should * give more than enough margin. Since most messages are actually more than 2 * bytes, this is in practice a lot more than 3 seconds. */ #define CEC_MAX_MSG_RX_QUEUE_SZ (18 * 3) /* * The transmit queue is limited to 1 second worth of messages (worst case). * Messages can be transmitted by userspace and kernel space. But for both it * makes no sense to have a lot of messages queued up. One second seems * reasonable. 
*/ #define CEC_MAX_MSG_TX_QUEUE_SZ (18 * 1) /** * struct cec_adapter - cec adapter structure * @owner: module owner * @name: name of the CEC adapter * @devnode: device node for the /dev/cecX device * @lock: mutex controlling access to this structure * @rc: remote control device * @transmit_queue: queue of pending transmits * @transmit_queue_sz: number of pending transmits * @wait_queue: queue of transmits waiting for a reply * @transmitting: CEC messages currently being transmitted * @transmit_in_progress: true if a transmit is in progress * @transmit_in_progress_aborted: true if a transmit is in progress is to be * aborted. This happens if the logical address is * invalidated while the transmit is ongoing. In that * case the transmit will finish, but will not retransmit * and be marked as ABORTED. * @xfer_timeout_ms: the transfer timeout in ms. * If 0, then timeout after 2100 ms. * @kthread_config: kthread used to configure a CEC adapter * @config_completion: used to signal completion of the config kthread * @kthread: main CEC processing thread * @kthread_waitq: main CEC processing wait_queue * @ops: cec adapter ops * @priv: cec driver's private data * @capabilities: cec adapter capabilities * @available_log_addrs: maximum number of available logical addresses * @phys_addr: the current physical address * @needs_hpd: if true, then the HDMI HotPlug Detect pin must be high * in order to transmit or receive CEC messages. This is usually a HW * limitation. * @is_enabled: the CEC adapter is enabled * @is_claiming_log_addrs: true if cec_claim_log_addrs() is running * @is_configuring: the CEC adapter is configuring (i.e. claiming LAs) * @must_reconfigure: while configuring, the PA changed, so reclaim LAs * @is_configured: the CEC adapter is configured (i.e. has claimed LAs) * @cec_pin_is_high: if true then the CEC pin is high. Only used with the * CEC pin framework. * @adap_controls_phys_addr: if true, then the CEC adapter controls the * physical address, i.e. the CEC hardware can detect HPD changes and * read the EDID and is not dependent on an external HDMI driver. * Drivers that need this can set this field to true after the * cec_allocate_adapter() call. * @last_initiator: the initiator of the last transmitted message. * @monitor_all_cnt: number of filehandles monitoring all msgs * @monitor_pin_cnt: number of filehandles monitoring pin changes * @follower_cnt: number of filehandles in follower mode * @cec_follower: filehandle of the exclusive follower * @cec_initiator: filehandle of the exclusive initiator * @passthrough: if true, then the exclusive follower is in * passthrough mode. * @log_addrs: current logical addresses * @conn_info: current connector info * @tx_timeout_cnt: count the number of Timed Out transmits. * Reset to 0 when this is reported in cec_adap_status(). * @tx_low_drive_cnt: count the number of Low Drive transmits. * Reset to 0 when this is reported in cec_adap_status(). * @tx_error_cnt: count the number of Error transmits. * Reset to 0 when this is reported in cec_adap_status(). * @tx_arb_lost_cnt: count the number of Arb Lost transmits. * Reset to 0 when this is reported in cec_adap_status(). * @tx_low_drive_log_cnt: number of logged Low Drive transmits since the * adapter was enabled. Used to avoid flooding the kernel * log if this happens a lot. * @tx_error_log_cnt: number of logged Error transmits since the adapter was * enabled. Used to avoid flooding the kernel log if this * happens a lot. 
* @notifier: CEC notifier * @pin: CEC pin status struct * @cec_dir: debugfs cec directory * @sequence: transmit sequence counter * @input_phys: remote control input_phys name * * This structure represents a cec adapter. */ struct cec_adapter { struct module *owner; char name[32]; struct cec_devnode devnode; struct mutex lock; struct rc_dev *rc; struct list_head transmit_queue; unsigned int transmit_queue_sz; struct list_head wait_queue; struct cec_data *transmitting; bool transmit_in_progress; bool transmit_in_progress_aborted; unsigned int xfer_timeout_ms; struct task_struct *kthread_config; struct completion config_completion; struct task_struct *kthread; wait_queue_head_t kthread_waitq; const struct cec_adap_ops *ops; void *priv; u32 capabilities; u8 available_log_addrs; u16 phys_addr; bool needs_hpd; bool is_enabled; bool is_claiming_log_addrs; bool is_configuring; bool must_reconfigure; bool is_configured; bool cec_pin_is_high; bool adap_controls_phys_addr; u8 last_initiator; u32 monitor_all_cnt; u32 monitor_pin_cnt; u32 follower_cnt; struct cec_fh *cec_follower; struct cec_fh *cec_initiator; bool passthrough; struct cec_log_addrs log_addrs; struct cec_connector_info conn_info; u32 tx_timeout_cnt; u32 tx_low_drive_cnt; u32 tx_error_cnt; u32 tx_arb_lost_cnt; u32 tx_low_drive_log_cnt; u32 tx_error_log_cnt; #ifdef CONFIG_CEC_NOTIFIER struct cec_notifier *notifier; #endif #ifdef CONFIG_CEC_PIN struct cec_pin *pin; #endif struct dentry *cec_dir; u32 sequence; char input_phys[40]; }; static inline int cec_get_device(struct cec_adapter *adap) { struct cec_devnode *devnode = &adap->devnode; /* * Check if the cec device is available. This needs to be done with * the devnode->lock held to prevent an open/unregister race: * without the lock, the device could be unregistered and freed between * the devnode->registered check and get_device() calls, leading to * a crash. */ mutex_lock(&devnode->lock); /* * return ENODEV if the cec device has been removed * already or if it is not registered anymore. */ if (!devnode->registered) { mutex_unlock(&devnode->lock); return -ENODEV; } /* and increase the device refcount */ get_device(&devnode->dev); mutex_unlock(&devnode->lock); return 0; } static inline void cec_put_device(struct cec_adapter *adap) { put_device(&adap->devnode.dev); } static inline void *cec_get_drvdata(const struct cec_adapter *adap) { return adap->priv; } static inline bool cec_has_log_addr(const struct cec_adapter *adap, u8 log_addr) { return adap->log_addrs.log_addr_mask & (1 << log_addr); } static inline bool cec_is_sink(const struct cec_adapter *adap) { return adap->phys_addr == 0; } /** * cec_is_registered() - is the CEC adapter registered? * * @adap: the CEC adapter, may be NULL. * * Return: true if the adapter is registered, false otherwise. 
*/ static inline bool cec_is_registered(const struct cec_adapter *adap) { return adap && adap->devnode.registered; } #define cec_phys_addr_exp(pa) \ ((pa) >> 12), ((pa) >> 8) & 0xf, ((pa) >> 4) & 0xf, (pa) & 0xf struct edid; struct drm_connector; #if IS_REACHABLE(CONFIG_CEC_CORE) struct cec_adapter *cec_allocate_adapter(const struct cec_adap_ops *ops, void *priv, const char *name, u32 caps, u8 available_las); int cec_register_adapter(struct cec_adapter *adap, struct device *parent); void cec_unregister_adapter(struct cec_adapter *adap); void cec_delete_adapter(struct cec_adapter *adap); int cec_s_log_addrs(struct cec_adapter *adap, struct cec_log_addrs *log_addrs, bool block); void cec_s_phys_addr(struct cec_adapter *adap, u16 phys_addr, bool block); void cec_s_phys_addr_from_edid(struct cec_adapter *adap, const struct edid *edid); void cec_s_conn_info(struct cec_adapter *adap, const struct cec_connector_info *conn_info); int cec_transmit_msg(struct cec_adapter *adap, struct cec_msg *msg, bool block); /* Called by the adapter */ void cec_transmit_done_ts(struct cec_adapter *adap, u8 status, u8 arb_lost_cnt, u8 nack_cnt, u8 low_drive_cnt, u8 error_cnt, ktime_t ts); static inline void cec_transmit_done(struct cec_adapter *adap, u8 status, u8 arb_lost_cnt, u8 nack_cnt, u8 low_drive_cnt, u8 error_cnt) { cec_transmit_done_ts(adap, status, arb_lost_cnt, nack_cnt, low_drive_cnt, error_cnt, ktime_get()); } /* * Simplified version of cec_transmit_done for hardware that doesn't retry * failed transmits. So this is always just one attempt in which case * the status is sufficient. */ void cec_transmit_attempt_done_ts(struct cec_adapter *adap, u8 status, ktime_t ts); static inline void cec_transmit_attempt_done(struct cec_adapter *adap, u8 status) { cec_transmit_attempt_done_ts(adap, status, ktime_get()); } void cec_received_msg_ts(struct cec_adapter *adap, struct cec_msg *msg, ktime_t ts); static inline void cec_received_msg(struct cec_adapter *adap, struct cec_msg *msg) { cec_received_msg_ts(adap, msg, ktime_get()); } /** * cec_queue_pin_cec_event() - queue a CEC pin event with a given timestamp. * * @adap: pointer to the cec adapter * @is_high: when true the CEC pin is high, otherwise it is low * @dropped_events: when true some events were dropped * @ts: the timestamp for this event * */ void cec_queue_pin_cec_event(struct cec_adapter *adap, bool is_high, bool dropped_events, ktime_t ts); /** * cec_queue_pin_hpd_event() - queue a pin event with a given timestamp. * * @adap: pointer to the cec adapter * @is_high: when true the HPD pin is high, otherwise it is low * @ts: the timestamp for this event * */ void cec_queue_pin_hpd_event(struct cec_adapter *adap, bool is_high, ktime_t ts); /** * cec_queue_pin_5v_event() - queue a pin event with a given timestamp. * * @adap: pointer to the cec adapter * @is_high: when true the 5V pin is high, otherwise it is low * @ts: the timestamp for this event * */ void cec_queue_pin_5v_event(struct cec_adapter *adap, bool is_high, ktime_t ts); /** * cec_get_edid_phys_addr() - find and return the physical address * * @edid: pointer to the EDID data * @size: size in bytes of the EDID data * @offset: If not %NULL then the location of the physical address * bytes in the EDID will be returned here. This is set to 0 * if there is no physical address found. * * Return: the physical address or CEC_PHYS_ADDR_INVALID if there is none. 
*/ u16 cec_get_edid_phys_addr(const u8 *edid, unsigned int size, unsigned int *offset); void cec_fill_conn_info_from_drm(struct cec_connector_info *conn_info, const struct drm_connector *connector); #else static inline int cec_register_adapter(struct cec_adapter *adap, struct device *parent) { return 0; } static inline void cec_unregister_adapter(struct cec_adapter *adap) { } static inline void cec_delete_adapter(struct cec_adapter *adap) { } static inline void cec_s_phys_addr(struct cec_adapter *adap, u16 phys_addr, bool block) { } static inline void cec_s_phys_addr_from_edid(struct cec_adapter *adap, const struct edid *edid) { } static inline u16 cec_get_edid_phys_addr(const u8 *edid, unsigned int size, unsigned int *offset) { if (offset) *offset = 0; return CEC_PHYS_ADDR_INVALID; } static inline void cec_s_conn_info(struct cec_adapter *adap, const struct cec_connector_info *conn_info) { } static inline void cec_fill_conn_info_from_drm(struct cec_connector_info *conn_info, const struct drm_connector *connector) { memset(conn_info, 0, sizeof(*conn_info)); } #endif /** * cec_phys_addr_invalidate() - set the physical address to INVALID * * @adap: the CEC adapter * * This is a simple helper function to invalidate the physical * address. */ static inline void cec_phys_addr_invalidate(struct cec_adapter *adap) { cec_s_phys_addr(adap, CEC_PHYS_ADDR_INVALID, false); } /** * cec_get_edid_spa_location() - find location of the Source Physical Address * * @edid: the EDID * @size: the size of the EDID * * This EDID is expected to be a CEA-861 compliant, which means that there are * at least two blocks and one or more of the extensions blocks are CEA-861 * blocks. * * The returned location is guaranteed to be <= size-2. * * This is an inline function since it is used by both CEC and V4L2. * Ideally this would go in a module shared by both, but it is overkill to do * that for just a single function. */ static inline unsigned int cec_get_edid_spa_location(const u8 *edid, unsigned int size) { unsigned int blocks = size / 128; unsigned int block; u8 d; /* Sanity check: at least 2 blocks and a multiple of the block size */ if (blocks < 2 || size % 128) return 0; /* * If there are fewer extension blocks than the size, then update * 'blocks'. It is allowed to have more extension blocks than the size, * since some hardware can only read e.g. 256 bytes of the EDID, even * though more blocks are present. The first CEA-861 extension block * should normally be in block 1 anyway. */ if (edid[0x7e] + 1 < blocks) blocks = edid[0x7e] + 1; for (block = 1; block < blocks; block++) { unsigned int offset = block * 128; /* Skip any non-CEA-861 extension blocks */ if (edid[offset] != 0x02 || edid[offset + 1] != 0x03) continue; /* search Vendor Specific Data Block (tag 3) */ d = edid[offset + 2] & 0x7f; /* Check if there are Data Blocks */ if (d <= 4) continue; if (d > 4) { unsigned int i = offset + 4; unsigned int end = offset + d; /* Note: 'end' is always < 'size' */ do { u8 tag = edid[i] >> 5; u8 len = edid[i] & 0x1f; if (tag == 3 && len >= 5 && i + len <= end && edid[i + 1] == 0x03 && edid[i + 2] == 0x0c && edid[i + 3] == 0x00) return i + 4; i += len + 1; } while (i < end); } } return 0; } #endif /* _MEDIA_CEC_H */ |
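/*
 * Illustrative sketch (not part of the original header above): roughly how a
 * driver might use the cec_adap_ops / cec_allocate_adapter() /
 * cec_register_adapter() API declared in this header. All my_cec_* names are
 * hypothetical and the hardware access is reduced to comments; treat this as
 * a minimal sketch, not a reference implementation.
 */
#include <linux/err.h>
#include <media/cec.h>

static int my_cec_adap_enable(struct cec_adapter *adap, bool enable)
{
	/* power the CEC block up or down here */
	return 0;
}

static int my_cec_adap_log_addr(struct cec_adapter *adap, u8 logical_addr)
{
	/* tell the hardware which logical address to ACK */
	return 0;
}

static int my_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
				u32 signal_free_time, struct cec_msg *msg)
{
	/*
	 * Queue msg->msg[0..msg->len-1] to the hardware; report the outcome
	 * later via cec_transmit_attempt_done() or cec_transmit_done_ts().
	 */
	return 0;
}

static const struct cec_adap_ops my_cec_adap_ops = {
	.adap_enable	= my_cec_adap_enable,
	.adap_log_addr	= my_cec_adap_log_addr,
	.adap_transmit	= my_cec_adap_transmit,
};

static int my_cec_probe(struct device *parent, void *priv)
{
	struct cec_adapter *adap;
	int ret;

	adap = cec_allocate_adapter(&my_cec_adap_ops, priv, "my-cec",
				    CEC_CAP_DEFAULTS, 1);
	if (IS_ERR(adap))
		return PTR_ERR(adap);

	ret = cec_register_adapter(adap, parent);
	if (ret) {
		cec_delete_adapter(adap);
		return ret;
	}

	/* incoming messages are reported from the IRQ path via
	 * cec_received_msg_ts()
	 */
	return 0;
}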
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Definitions for LLC (link layer control) message handling
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s):  Klaus Wacker <Klaus.Wacker@de.ibm.com>
 *             Ursula Braun <ubraun@linux.vnet.ibm.com>
 */

#ifndef SMC_LLC_H
#define SMC_LLC_H

#include "smc_wr.h"

#define SMC_LLC_FLAG_RESP		0x80

#define SMC_LLC_WAIT_FIRST_TIME		(5 * HZ)
#define SMC_LLC_WAIT_TIME		(2 * HZ)
#define SMC_LLC_TESTLINK_DEFAULT_TIME	(30 * HZ)

enum smc_llc_reqresp {
	SMC_LLC_REQ,
	SMC_LLC_RESP
};

enum smc_llc_msg_type {
	SMC_LLC_CONFIRM_LINK		= 0x01,
	SMC_LLC_ADD_LINK		= 0x02,
	SMC_LLC_ADD_LINK_CONT		= 0x03,
	SMC_LLC_DELETE_LINK		= 0x04,
	SMC_LLC_REQ_ADD_LINK		= 0x05,
	SMC_LLC_CONFIRM_RKEY		= 0x06,
	SMC_LLC_TEST_LINK		= 0x07,
	SMC_LLC_CONFIRM_RKEY_CONT	= 0x08,
	SMC_LLC_DELETE_RKEY		= 0x09,
	/* V2 types */
	SMC_LLC_CONFIRM_LINK_V2		= 0x21,
	SMC_LLC_ADD_LINK_V2		= 0x22,
	SMC_LLC_DELETE_LINK_V2		= 0x24,
	SMC_LLC_REQ_ADD_LINK_V2		= 0x25,
	SMC_LLC_CONFIRM_RKEY_V2		= 0x26,
	SMC_LLC_TEST_LINK_V2		= 0x27,
	SMC_LLC_DELETE_RKEY_V2		= 0x29,
};

#define smc_link_downing(state) \
	(cmpxchg(state, SMC_LNK_ACTIVE, SMC_LNK_INACTIVE) == SMC_LNK_ACTIVE)

/* LLC DELETE LINK Request Reason Codes */
#define SMC_LLC_DEL_LOST_PATH		0x00010000
#define SMC_LLC_DEL_OP_INIT_TERM	0x00020000
#define SMC_LLC_DEL_PROG_INIT_TERM	0x00030000
#define SMC_LLC_DEL_PROT_VIOL		0x00040000
#define SMC_LLC_DEL_NO_ASYM_NEEDED	0x00050000
/* LLC DELETE LINK Response Reason Codes */
#define SMC_LLC_DEL_NOLNK	0x00100000  /* Unknown Link ID (no link) */
#define SMC_LLC_DEL_NOLGR	0x00200000  /* Unknown Link Group */

/* returns a usable link of the link group, or NULL */
static inline struct smc_link *smc_llc_usable_link(struct smc_link_group *lgr)
{
	int i;

	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++)
		if (smc_link_usable(&lgr->lnk[i]))
			return &lgr->lnk[i];
	return NULL;
}

/* set the termination reason code for the link group */
static inline void smc_llc_set_termination_rsn(struct smc_link_group *lgr,
					       u32 rsn)
{
	if (!lgr->llc_termination_rsn)
		lgr->llc_termination_rsn = rsn;
}

/* transmit */
int smc_llc_send_confirm_link(struct smc_link *lnk,
			      enum smc_llc_reqresp reqresp);
int smc_llc_send_add_link(struct smc_link *link, u8 mac[], u8 gid[],
			  struct smc_link *link_new,
			  enum smc_llc_reqresp reqresp);
int smc_llc_send_delete_link(struct smc_link *link, u8 link_del_id,
			     enum smc_llc_reqresp reqresp, bool orderly,
			     u32 reason);
void smc_llc_srv_delete_link_local(struct smc_link *link, u8 del_link_id);
void smc_llc_lgr_init(struct smc_link_group *lgr, struct smc_sock *smc);
void smc_llc_lgr_clear(struct smc_link_group *lgr);
int smc_llc_link_init(struct smc_link *link);
void smc_llc_link_active(struct smc_link *link);
void smc_llc_link_clear(struct smc_link *link, bool log);
int smc_llc_do_confirm_rkey(struct smc_link *send_link,
			    struct smc_buf_desc *rmb_desc);
int smc_llc_do_delete_rkey(struct smc_link_group *lgr,
			   struct smc_buf_desc *rmb_desc);
int smc_llc_flow_initiate(struct smc_link_group *lgr,
			  enum smc_llc_flowtype type);
void smc_llc_flow_stop(struct smc_link_group *lgr, struct smc_llc_flow *flow);
int smc_llc_eval_conf_link(struct smc_llc_qentry *qentry, enum
			   smc_llc_reqresp type);
void smc_llc_link_set_uid(struct smc_link *link);
void smc_llc_save_peer_uid(struct smc_llc_qentry *qentry);
struct smc_llc_qentry *smc_llc_wait(struct smc_link_group *lgr,
				    struct smc_link *lnk,
				    int time_out, u8 exp_msg);
struct smc_llc_qentry *smc_llc_flow_qentry_clr(struct smc_llc_flow *flow);
void smc_llc_flow_qentry_del(struct smc_llc_flow *flow);
void smc_llc_send_link_delete_all(struct smc_link_group *lgr, bool ord,
				  u32 rsn);
int smc_llc_cli_add_link(struct smc_link *link, struct smc_llc_qentry *qentry);
int smc_llc_srv_add_link(struct smc_link *link,
			 struct smc_llc_qentry *req_qentry);
void smc_llc_add_link_local(struct smc_link *link);
int smc_llc_init(void) __init;

#endif /* SMC_LLC_H */
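/*
 * Illustrative sketch (not part of the original header above): the
 * smc_link_downing() macro relies on cmpxchg() so that exactly one caller
 * wins the ACTIVE -> INACTIVE transition. It assumes the link state field
 * (lnk->state, defined elsewhere in smc_core.h) is passed by address; the
 * function name below is made up for the example.
 */
static inline void my_smc_link_down_example(struct smc_link *lnk)
{
	/*
	 * Only the first caller that observes SMC_LNK_ACTIVE gets true here;
	 * concurrent callers see the already-updated state and skip the
	 * one-time teardown work.
	 */
	if (smc_link_downing(&lnk->state)) {
		/* one-time teardown: stop LLC flows, clear the link, ... */
	}
}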
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Roccat Kova[+] driver for Linux
 *
 * Copyright (c) 2011 Stefan Achatz <erazor_de@users.sourceforge.net>
 */

/*
 */

/*
 * Roccat Kova[+] is a bigger version of the Pyra with two more side buttons.
 */

#include <linux/device.h>
#include <linux/input.h>
#include <linux/hid.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/hid-roccat.h>
#include "hid-ids.h"
#include "hid-roccat-common.h"
#include "hid-roccat-kovaplus.h"

static uint profile_numbers[5] = {0, 1, 2, 3, 4};

static uint kovaplus_convert_event_cpi(uint value)
{
	return (value == 7 ? 4 : (value == 4 ?
3 : value)); } static void kovaplus_profile_activated(struct kovaplus_device *kovaplus, uint new_profile_index) { if (new_profile_index >= ARRAY_SIZE(kovaplus->profile_settings)) return; kovaplus->actual_profile = new_profile_index; kovaplus->actual_cpi = kovaplus->profile_settings[new_profile_index].cpi_startup_level; kovaplus->actual_x_sensitivity = kovaplus->profile_settings[new_profile_index].sensitivity_x; kovaplus->actual_y_sensitivity = kovaplus->profile_settings[new_profile_index].sensitivity_y; } static int kovaplus_send_control(struct usb_device *usb_dev, uint value, enum kovaplus_control_requests request) { int retval; struct roccat_common2_control control; if ((request == KOVAPLUS_CONTROL_REQUEST_PROFILE_SETTINGS || request == KOVAPLUS_CONTROL_REQUEST_PROFILE_BUTTONS) && value > 4) return -EINVAL; control.command = ROCCAT_COMMON_COMMAND_CONTROL; control.value = value; control.request = request; retval = roccat_common2_send(usb_dev, ROCCAT_COMMON_COMMAND_CONTROL, &control, sizeof(struct roccat_common2_control)); return retval; } static int kovaplus_select_profile(struct usb_device *usb_dev, uint number, enum kovaplus_control_requests request) { return kovaplus_send_control(usb_dev, number, request); } static int kovaplus_get_profile_settings(struct usb_device *usb_dev, struct kovaplus_profile_settings *buf, uint number) { int retval; retval = kovaplus_select_profile(usb_dev, number, KOVAPLUS_CONTROL_REQUEST_PROFILE_SETTINGS); if (retval) return retval; return roccat_common2_receive(usb_dev, KOVAPLUS_COMMAND_PROFILE_SETTINGS, buf, KOVAPLUS_SIZE_PROFILE_SETTINGS); } static int kovaplus_get_profile_buttons(struct usb_device *usb_dev, struct kovaplus_profile_buttons *buf, int number) { int retval; retval = kovaplus_select_profile(usb_dev, number, KOVAPLUS_CONTROL_REQUEST_PROFILE_BUTTONS); if (retval) return retval; return roccat_common2_receive(usb_dev, KOVAPLUS_COMMAND_PROFILE_BUTTONS, buf, KOVAPLUS_SIZE_PROFILE_BUTTONS); } /* retval is 0-4 on success, < 0 on error */ static int kovaplus_get_actual_profile(struct usb_device *usb_dev) { struct kovaplus_actual_profile buf; int retval; retval = roccat_common2_receive(usb_dev, KOVAPLUS_COMMAND_ACTUAL_PROFILE, &buf, sizeof(struct kovaplus_actual_profile)); return retval ? 
retval : buf.actual_profile; } static int kovaplus_set_actual_profile(struct usb_device *usb_dev, int new_profile) { struct kovaplus_actual_profile buf; buf.command = KOVAPLUS_COMMAND_ACTUAL_PROFILE; buf.size = sizeof(struct kovaplus_actual_profile); buf.actual_profile = new_profile; return roccat_common2_send_with_status(usb_dev, KOVAPLUS_COMMAND_ACTUAL_PROFILE, &buf, sizeof(struct kovaplus_actual_profile)); } static ssize_t kovaplus_sysfs_read(struct file *fp, struct kobject *kobj, char *buf, loff_t off, size_t count, size_t real_size, uint command) { struct device *dev = kobj_to_dev(kobj)->parent->parent; struct kovaplus_device *kovaplus = hid_get_drvdata(dev_get_drvdata(dev)); struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev)); int retval; if (off >= real_size) return 0; if (off != 0 || count != real_size) return -EINVAL; mutex_lock(&kovaplus->kovaplus_lock); retval = roccat_common2_receive(usb_dev, command, buf, real_size); mutex_unlock(&kovaplus->kovaplus_lock); if (retval) return retval; return real_size; } static ssize_t kovaplus_sysfs_write(struct file *fp, struct kobject *kobj, void const *buf, loff_t off, size_t count, size_t real_size, uint command) { struct device *dev = kobj_to_dev(kobj)->parent->parent; struct kovaplus_device *kovaplus = hid_get_drvdata(dev_get_drvdata(dev)); struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev)); int retval; if (off != 0 || count != real_size) return -EINVAL; mutex_lock(&kovaplus->kovaplus_lock); retval = roccat_common2_send_with_status(usb_dev, command, buf, real_size); mutex_unlock(&kovaplus->kovaplus_lock); if (retval) return retval; return real_size; } #define KOVAPLUS_SYSFS_W(thingy, THINGY) \ static ssize_t kovaplus_sysfs_write_ ## thingy(struct file *fp, \ struct kobject *kobj, struct bin_attribute *attr, char *buf, \ loff_t off, size_t count) \ { \ return kovaplus_sysfs_write(fp, kobj, buf, off, count, \ KOVAPLUS_SIZE_ ## THINGY, KOVAPLUS_COMMAND_ ## THINGY); \ } #define KOVAPLUS_SYSFS_R(thingy, THINGY) \ static ssize_t kovaplus_sysfs_read_ ## thingy(struct file *fp, \ struct kobject *kobj, struct bin_attribute *attr, char *buf, \ loff_t off, size_t count) \ { \ return kovaplus_sysfs_read(fp, kobj, buf, off, count, \ KOVAPLUS_SIZE_ ## THINGY, KOVAPLUS_COMMAND_ ## THINGY); \ } #define KOVAPLUS_SYSFS_RW(thingy, THINGY) \ KOVAPLUS_SYSFS_W(thingy, THINGY) \ KOVAPLUS_SYSFS_R(thingy, THINGY) #define KOVAPLUS_BIN_ATTRIBUTE_RW(thingy, THINGY) \ KOVAPLUS_SYSFS_RW(thingy, THINGY); \ static struct bin_attribute bin_attr_##thingy = { \ .attr = { .name = #thingy, .mode = 0660 }, \ .size = KOVAPLUS_SIZE_ ## THINGY, \ .read = kovaplus_sysfs_read_ ## thingy, \ .write = kovaplus_sysfs_write_ ## thingy \ } #define KOVAPLUS_BIN_ATTRIBUTE_W(thingy, THINGY) \ KOVAPLUS_SYSFS_W(thingy, THINGY); \ static struct bin_attribute bin_attr_##thingy = { \ .attr = { .name = #thingy, .mode = 0220 }, \ .size = KOVAPLUS_SIZE_ ## THINGY, \ .write = kovaplus_sysfs_write_ ## thingy \ } KOVAPLUS_BIN_ATTRIBUTE_W(control, CONTROL); KOVAPLUS_BIN_ATTRIBUTE_RW(info, INFO); KOVAPLUS_BIN_ATTRIBUTE_RW(profile_settings, PROFILE_SETTINGS); KOVAPLUS_BIN_ATTRIBUTE_RW(profile_buttons, PROFILE_BUTTONS); static ssize_t kovaplus_sysfs_read_profilex_settings(struct file *fp, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t off, size_t count) { struct device *dev = kobj_to_dev(kobj)->parent->parent; struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev)); ssize_t retval; retval = kovaplus_select_profile(usb_dev, 
*(uint *)(attr->private), KOVAPLUS_CONTROL_REQUEST_PROFILE_SETTINGS); if (retval) return retval; return kovaplus_sysfs_read(fp, kobj, buf, off, count, KOVAPLUS_SIZE_PROFILE_SETTINGS, KOVAPLUS_COMMAND_PROFILE_SETTINGS); } static ssize_t kovaplus_sysfs_read_profilex_buttons(struct file *fp, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t off, size_t count) { struct device *dev = kobj_to_dev(kobj)->parent->parent; struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev)); ssize_t retval; retval = kovaplus_select_profile(usb_dev, *(uint *)(attr->private), KOVAPLUS_CONTROL_REQUEST_PROFILE_BUTTONS); if (retval) return retval; return kovaplus_sysfs_read(fp, kobj, buf, off, count, KOVAPLUS_SIZE_PROFILE_BUTTONS, KOVAPLUS_COMMAND_PROFILE_BUTTONS); } #define PROFILE_ATTR(number) \ static struct bin_attribute bin_attr_profile##number##_settings = { \ .attr = { .name = "profile" #number "_settings", .mode = 0440 }, \ .size = KOVAPLUS_SIZE_PROFILE_SETTINGS, \ .read = kovaplus_sysfs_read_profilex_settings, \ .private = &profile_numbers[number-1], \ }; \ static struct bin_attribute bin_attr_profile##number##_buttons = { \ .attr = { .name = "profile" #number "_buttons", .mode = 0440 }, \ .size = KOVAPLUS_SIZE_PROFILE_BUTTONS, \ .read = kovaplus_sysfs_read_profilex_buttons, \ .private = &profile_numbers[number-1], \ }; PROFILE_ATTR(1); PROFILE_ATTR(2); PROFILE_ATTR(3); PROFILE_ATTR(4); PROFILE_ATTR(5); static ssize_t kovaplus_sysfs_show_actual_profile(struct device *dev, struct device_attribute *attr, char *buf) { struct kovaplus_device *kovaplus = hid_get_drvdata(dev_get_drvdata(dev->parent->parent)); return sysfs_emit(buf, "%d\n", kovaplus->actual_profile); } static ssize_t kovaplus_sysfs_set_actual_profile(struct device *dev, struct device_attribute *attr, char const *buf, size_t size) { struct kovaplus_device *kovaplus; struct usb_device *usb_dev; unsigned long profile; int retval; struct kovaplus_roccat_report roccat_report; dev = dev->parent->parent; kovaplus = hid_get_drvdata(dev_get_drvdata(dev)); usb_dev = interface_to_usbdev(to_usb_interface(dev)); retval = kstrtoul(buf, 10, &profile); if (retval) return retval; if (profile >= 5) return -EINVAL; mutex_lock(&kovaplus->kovaplus_lock); retval = kovaplus_set_actual_profile(usb_dev, profile); if (retval) { mutex_unlock(&kovaplus->kovaplus_lock); return retval; } kovaplus_profile_activated(kovaplus, profile); roccat_report.type = KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_PROFILE_1; roccat_report.profile = profile + 1; roccat_report.button = 0; roccat_report.data1 = profile + 1; roccat_report.data2 = 0; roccat_report_event(kovaplus->chrdev_minor, (uint8_t const *)&roccat_report); mutex_unlock(&kovaplus->kovaplus_lock); return size; } static DEVICE_ATTR(actual_profile, 0660, kovaplus_sysfs_show_actual_profile, kovaplus_sysfs_set_actual_profile); static ssize_t kovaplus_sysfs_show_actual_cpi(struct device *dev, struct device_attribute *attr, char *buf) { struct kovaplus_device *kovaplus = hid_get_drvdata(dev_get_drvdata(dev->parent->parent)); return sysfs_emit(buf, "%d\n", kovaplus->actual_cpi); } static DEVICE_ATTR(actual_cpi, 0440, kovaplus_sysfs_show_actual_cpi, NULL); static ssize_t kovaplus_sysfs_show_actual_sensitivity_x(struct device *dev, struct device_attribute *attr, char *buf) { struct kovaplus_device *kovaplus = hid_get_drvdata(dev_get_drvdata(dev->parent->parent)); return sysfs_emit(buf, "%d\n", kovaplus->actual_x_sensitivity); } static DEVICE_ATTR(actual_sensitivity_x, 0440, kovaplus_sysfs_show_actual_sensitivity_x, 
NULL); static ssize_t kovaplus_sysfs_show_actual_sensitivity_y(struct device *dev, struct device_attribute *attr, char *buf) { struct kovaplus_device *kovaplus = hid_get_drvdata(dev_get_drvdata(dev->parent->parent)); return sysfs_emit(buf, "%d\n", kovaplus->actual_y_sensitivity); } static DEVICE_ATTR(actual_sensitivity_y, 0440, kovaplus_sysfs_show_actual_sensitivity_y, NULL); static ssize_t kovaplus_sysfs_show_firmware_version(struct device *dev, struct device_attribute *attr, char *buf) { struct kovaplus_device *kovaplus; struct usb_device *usb_dev; struct kovaplus_info info; dev = dev->parent->parent; kovaplus = hid_get_drvdata(dev_get_drvdata(dev)); usb_dev = interface_to_usbdev(to_usb_interface(dev)); mutex_lock(&kovaplus->kovaplus_lock); roccat_common2_receive(usb_dev, KOVAPLUS_COMMAND_INFO, &info, KOVAPLUS_SIZE_INFO); mutex_unlock(&kovaplus->kovaplus_lock); return sysfs_emit(buf, "%d\n", info.firmware_version); } static DEVICE_ATTR(firmware_version, 0440, kovaplus_sysfs_show_firmware_version, NULL); static struct attribute *kovaplus_attrs[] = { &dev_attr_actual_cpi.attr, &dev_attr_firmware_version.attr, &dev_attr_actual_profile.attr, &dev_attr_actual_sensitivity_x.attr, &dev_attr_actual_sensitivity_y.attr, NULL, }; static struct bin_attribute *kovaplus_bin_attributes[] = { &bin_attr_control, &bin_attr_info, &bin_attr_profile_settings, &bin_attr_profile_buttons, &bin_attr_profile1_settings, &bin_attr_profile2_settings, &bin_attr_profile3_settings, &bin_attr_profile4_settings, &bin_attr_profile5_settings, &bin_attr_profile1_buttons, &bin_attr_profile2_buttons, &bin_attr_profile3_buttons, &bin_attr_profile4_buttons, &bin_attr_profile5_buttons, NULL, }; static const struct attribute_group kovaplus_group = { .attrs = kovaplus_attrs, .bin_attrs = kovaplus_bin_attributes, }; static const struct attribute_group *kovaplus_groups[] = { &kovaplus_group, NULL, }; static const struct class kovaplus_class = { .name = "kovaplus", .dev_groups = kovaplus_groups, }; static int kovaplus_init_kovaplus_device_struct(struct usb_device *usb_dev, struct kovaplus_device *kovaplus) { int retval, i; static uint wait = 70; /* device will freeze with just 60 */ mutex_init(&kovaplus->kovaplus_lock); for (i = 0; i < 5; ++i) { msleep(wait); retval = kovaplus_get_profile_settings(usb_dev, &kovaplus->profile_settings[i], i); if (retval) return retval; msleep(wait); retval = kovaplus_get_profile_buttons(usb_dev, &kovaplus->profile_buttons[i], i); if (retval) return retval; } msleep(wait); retval = kovaplus_get_actual_profile(usb_dev); if (retval < 0) return retval; kovaplus_profile_activated(kovaplus, retval); return 0; } static int kovaplus_init_specials(struct hid_device *hdev) { struct usb_interface *intf = to_usb_interface(hdev->dev.parent); struct usb_device *usb_dev = interface_to_usbdev(intf); struct kovaplus_device *kovaplus; int retval; if (intf->cur_altsetting->desc.bInterfaceProtocol == USB_INTERFACE_PROTOCOL_MOUSE) { kovaplus = kzalloc(sizeof(*kovaplus), GFP_KERNEL); if (!kovaplus) { hid_err(hdev, "can't alloc device descriptor\n"); return -ENOMEM; } hid_set_drvdata(hdev, kovaplus); retval = kovaplus_init_kovaplus_device_struct(usb_dev, kovaplus); if (retval) { hid_err(hdev, "couldn't init struct kovaplus_device\n"); goto exit_free; } retval = roccat_connect(&kovaplus_class, hdev, sizeof(struct kovaplus_roccat_report)); if (retval < 0) { hid_err(hdev, "couldn't init char dev\n"); } else { kovaplus->chrdev_minor = retval; kovaplus->roccat_claimed = 1; } } else { hid_set_drvdata(hdev, NULL); } return 0; 
exit_free: kfree(kovaplus); return retval; } static void kovaplus_remove_specials(struct hid_device *hdev) { struct usb_interface *intf = to_usb_interface(hdev->dev.parent); struct kovaplus_device *kovaplus; if (intf->cur_altsetting->desc.bInterfaceProtocol == USB_INTERFACE_PROTOCOL_MOUSE) { kovaplus = hid_get_drvdata(hdev); if (kovaplus->roccat_claimed) roccat_disconnect(kovaplus->chrdev_minor); kfree(kovaplus); } } static int kovaplus_probe(struct hid_device *hdev, const struct hid_device_id *id) { int retval; if (!hid_is_usb(hdev)) return -EINVAL; retval = hid_parse(hdev); if (retval) { hid_err(hdev, "parse failed\n"); goto exit; } retval = hid_hw_start(hdev, HID_CONNECT_DEFAULT); if (retval) { hid_err(hdev, "hw start failed\n"); goto exit; } retval = kovaplus_init_specials(hdev); if (retval) { hid_err(hdev, "couldn't install mouse\n"); goto exit_stop; } return 0; exit_stop: hid_hw_stop(hdev); exit: return retval; } static void kovaplus_remove(struct hid_device *hdev) { kovaplus_remove_specials(hdev); hid_hw_stop(hdev); } static void kovaplus_keep_values_up_to_date(struct kovaplus_device *kovaplus, u8 const *data) { struct kovaplus_mouse_report_button const *button_report; if (data[0] != KOVAPLUS_MOUSE_REPORT_NUMBER_BUTTON) return; button_report = (struct kovaplus_mouse_report_button const *)data; switch (button_report->type) { case KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_PROFILE_1: kovaplus_profile_activated(kovaplus, button_report->data1 - 1); break; case KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_CPI: kovaplus->actual_cpi = kovaplus_convert_event_cpi(button_report->data1); break; case KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_SENSITIVITY: kovaplus->actual_x_sensitivity = button_report->data1; kovaplus->actual_y_sensitivity = button_report->data2; break; default: break; } } static void kovaplus_report_to_chrdev(struct kovaplus_device const *kovaplus, u8 const *data) { struct kovaplus_roccat_report roccat_report; struct kovaplus_mouse_report_button const *button_report; if (data[0] != KOVAPLUS_MOUSE_REPORT_NUMBER_BUTTON) return; button_report = (struct kovaplus_mouse_report_button const *)data; if (button_report->type == KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_PROFILE_2) return; roccat_report.type = button_report->type; roccat_report.profile = kovaplus->actual_profile + 1; if (roccat_report.type == KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_MACRO || roccat_report.type == KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_SHORTCUT || roccat_report.type == KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_QUICKLAUNCH || roccat_report.type == KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_TIMER) roccat_report.button = button_report->data1; else roccat_report.button = 0; if (roccat_report.type == KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_CPI) roccat_report.data1 = kovaplus_convert_event_cpi(button_report->data1); else roccat_report.data1 = button_report->data1; roccat_report.data2 = button_report->data2; roccat_report_event(kovaplus->chrdev_minor, (uint8_t const *)&roccat_report); } static int kovaplus_raw_event(struct hid_device *hdev, struct hid_report *report, u8 *data, int size) { struct usb_interface *intf = to_usb_interface(hdev->dev.parent); struct kovaplus_device *kovaplus = hid_get_drvdata(hdev); if (intf->cur_altsetting->desc.bInterfaceProtocol != USB_INTERFACE_PROTOCOL_MOUSE) return 0; if (kovaplus == NULL) return 0; kovaplus_keep_values_up_to_date(kovaplus, data); if (kovaplus->roccat_claimed) kovaplus_report_to_chrdev(kovaplus, data); return 0; } static const struct hid_device_id kovaplus_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, 
	USB_DEVICE_ID_ROCCAT_KOVAPLUS) },
	{ }
};

MODULE_DEVICE_TABLE(hid, kovaplus_devices);

static struct hid_driver kovaplus_driver = {
	.name = "kovaplus",
	.id_table = kovaplus_devices,
	.probe = kovaplus_probe,
	.remove = kovaplus_remove,
	.raw_event = kovaplus_raw_event
};

static int __init kovaplus_init(void)
{
	int retval;

	retval = class_register(&kovaplus_class);
	if (retval)
		return retval;
	retval = hid_register_driver(&kovaplus_driver);
	if (retval)
		class_unregister(&kovaplus_class);
	return retval;
}

static void __exit kovaplus_exit(void)
{
	hid_unregister_driver(&kovaplus_driver);
	class_unregister(&kovaplus_class);
}

module_init(kovaplus_init);
module_exit(kovaplus_exit);

MODULE_AUTHOR("Stefan Achatz");
MODULE_DESCRIPTION("USB Roccat Kova[+] driver");
MODULE_LICENSE("GPL v2");
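/*
 * Illustrative userspace sketch (not part of the driver above): the driver
 * exposes actual_profile (valid values 0..4) as a read/write sysfs
 * attribute (mode 0660). The exact sysfs path depends on the USB topology,
 * so this example takes it as a command-line argument instead of guessing;
 * everything else mirrors the show/store handlers above.
 */
#include <stdio.h>

int main(int argc, char **argv)
{
	FILE *f;
	int profile = -1;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <path-to-actual_profile-attr>\n",
			argv[0]);
		return 1;
	}

	/* read the currently active profile (driver prints "%d\n") */
	f = fopen(argv[1], "r");
	if (!f || fscanf(f, "%d", &profile) != 1) {
		fprintf(stderr, "cannot read %s\n", argv[1]);
		return 1;
	}
	fclose(f);
	printf("active profile: %d\n", profile);

	/* switch to profile 2; values >= 5 are rejected with -EINVAL */
	f = fopen(argv[1], "w");
	if (!f)
		return 1;
	fprintf(f, "2\n");
	fclose(f);
	return 0;
}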
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PKRU_H
#define _ASM_X86_PKRU_H

#include <asm/cpufeature.h>

#define PKRU_AD_BIT 0x1u
#define PKRU_WD_BIT 0x2u
#define PKRU_BITS_PER_PKEY 2

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
extern u32 init_pkru_value;
#define pkru_get_init_value()	READ_ONCE(init_pkru_value)
#else
#define init_pkru_value	0
#define pkru_get_init_value()	0
#endif

static inline bool __pkru_allows_read(u32 pkru, u16 pkey)
{
	int pkru_pkey_bits = pkey * PKRU_BITS_PER_PKEY;

	return !(pkru & (PKRU_AD_BIT << pkru_pkey_bits));
}

static inline bool __pkru_allows_write(u32 pkru, u16 pkey)
{
	int pkru_pkey_bits = pkey * PKRU_BITS_PER_PKEY;

	/*
	 * Access-disable disables writes too so we need to check
	 * both bits here.
	 */
	return !(pkru & ((PKRU_AD_BIT|PKRU_WD_BIT) << pkru_pkey_bits));
}

static inline u32 read_pkru(void)
{
	if (cpu_feature_enabled(X86_FEATURE_OSPKE))
		return rdpkru();
	return 0;
}

static inline void write_pkru(u32 pkru)
{
	if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
		return;
	/*
	 * WRPKRU is relatively expensive compared to RDPKRU.
	 * Avoid WRPKRU when it would not change the value.
	 */
	if (pkru != rdpkru())
		wrpkru(pkru);
}

static inline void pkru_write_default(void)
{
	if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
		return;

	wrpkru(pkru_get_init_value());
}

#endif
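/*
 * Illustrative sketch (not part of the header above): each protection key
 * owns two bits in PKRU, access-disable at bit 2*pkey and write-disable at
 * bit 2*pkey + 1. The standalone helper below mirrors the logic of
 * __pkru_allows_write() for a value obtained with RDPKRU; all EX_* names
 * and the sample value are made up for the example.
 */
#include <stdbool.h>
#include <stdio.h>

#define EX_PKRU_AD_BIT		0x1u
#define EX_PKRU_WD_BIT		0x2u
#define EX_PKRU_BITS_PER_PKEY	2

static bool ex_pkey_allows_write(unsigned int pkru, unsigned int pkey)
{
	unsigned int shift = pkey * EX_PKRU_BITS_PER_PKEY;

	/* writes are denied if either AD or WD is set for this pkey */
	return !(pkru & ((EX_PKRU_AD_BIT | EX_PKRU_WD_BIT) << shift));
}

int main(void)
{
	/* pkey 1: AD clear, WD set -> readable but not writable */
	unsigned int pkru = EX_PKRU_WD_BIT << (1 * EX_PKRU_BITS_PER_PKEY);

	printf("pkey 1 writable: %d\n", ex_pkey_allows_write(pkru, 1));
	return 0;
}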
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * USB Synaptics device driver
 *
 * Copyright (c) 2002 Rob Miller (rob@inpharmatica . co . uk)
 * Copyright (c) 2003 Ron Lee (ron@debian.org)
 *	cPad driver for kernel 2.4
 *
 * Copyright (c) 2004 Jan Steinhoff (cpad@jan-steinhoff . de)
 * Copyright (c) 2004 Ron Lee (ron@debian.org)
 *	rewritten for kernel 2.6
 *
 * cPad display character device part is not included. It can be found at
 * http://jan-steinhoff.de/linux/synaptics-usb.html
 *
 * Based on:	usb_skeleton.c v2.2 by Greg Kroah-Hartman
 *		drivers/hid/usbhid/usbmouse.c by Vojtech Pavlik
 *		drivers/input/mouse/synaptics.c by Peter Osterlund
 *
 * Trademarks are the property of their respective owners.
 */

/*
 * There are three different types of Synaptics USB devices: Touchpads,
 * touchsticks (or trackpoints), and touchscreens. Touchpads are well supported
 * by this driver, touchstick support has not been tested much yet, and
 * touchscreens have not been tested at all.
 *
 * Up to three alternate settings are possible:
 *	setting 0: one int endpoint for relative movement (used by usbhid.ko)
 *	setting 1: one int endpoint for absolute finger position
 *	setting 2 (cPad only): one int endpoint for absolute finger position and
 *		   two bulk endpoints for the display (in/out)
 * This driver uses setting 1.
*/ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/usb.h> #include <linux/input.h> #include <linux/usb/input.h> #define USB_VENDOR_ID_SYNAPTICS 0x06cb #define USB_DEVICE_ID_SYNAPTICS_TP 0x0001 /* Synaptics USB TouchPad */ #define USB_DEVICE_ID_SYNAPTICS_INT_TP 0x0002 /* Integrated USB TouchPad */ #define USB_DEVICE_ID_SYNAPTICS_CPAD 0x0003 /* Synaptics cPad */ #define USB_DEVICE_ID_SYNAPTICS_TS 0x0006 /* Synaptics TouchScreen */ #define USB_DEVICE_ID_SYNAPTICS_STICK 0x0007 /* Synaptics USB Styk */ #define USB_DEVICE_ID_SYNAPTICS_WP 0x0008 /* Synaptics USB WheelPad */ #define USB_DEVICE_ID_SYNAPTICS_COMP_TP 0x0009 /* Composite USB TouchPad */ #define USB_DEVICE_ID_SYNAPTICS_WTP 0x0010 /* Wireless TouchPad */ #define USB_DEVICE_ID_SYNAPTICS_DPAD 0x0013 /* DisplayPad */ #define SYNUSB_TOUCHPAD (1 << 0) #define SYNUSB_STICK (1 << 1) #define SYNUSB_TOUCHSCREEN (1 << 2) #define SYNUSB_AUXDISPLAY (1 << 3) /* For cPad */ #define SYNUSB_COMBO (1 << 4) /* Composite device (TP + stick) */ #define SYNUSB_IO_ALWAYS (1 << 5) #define USB_DEVICE_SYNAPTICS(prod, kind) \ USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, \ USB_DEVICE_ID_SYNAPTICS_##prod), \ .driver_info = (kind), #define SYNUSB_RECV_SIZE 8 #define XMIN_NOMINAL 1472 #define XMAX_NOMINAL 5472 #define YMIN_NOMINAL 1408 #define YMAX_NOMINAL 4448 struct synusb { struct usb_device *udev; struct usb_interface *intf; struct urb *urb; unsigned char *data; /* serialize access to open/suspend */ struct mutex pm_mutex; bool is_open; /* input device related data structures */ struct input_dev *input; char name[128]; char phys[64]; /* characteristics of the device */ unsigned long flags; }; static void synusb_report_buttons(struct synusb *synusb) { struct input_dev *input_dev = synusb->input; input_report_key(input_dev, BTN_LEFT, synusb->data[1] & 0x04); input_report_key(input_dev, BTN_RIGHT, synusb->data[1] & 0x01); input_report_key(input_dev, BTN_MIDDLE, synusb->data[1] & 0x02); } static void synusb_report_stick(struct synusb *synusb) { struct input_dev *input_dev = synusb->input; int x, y; unsigned int pressure; pressure = synusb->data[6]; x = (s16)(be16_to_cpup((__be16 *)&synusb->data[2]) << 3) >> 7; y = (s16)(be16_to_cpup((__be16 *)&synusb->data[4]) << 3) >> 7; if (pressure > 0) { input_report_rel(input_dev, REL_X, x); input_report_rel(input_dev, REL_Y, -y); } input_report_abs(input_dev, ABS_PRESSURE, pressure); synusb_report_buttons(synusb); input_sync(input_dev); } static void synusb_report_touchpad(struct synusb *synusb) { struct input_dev *input_dev = synusb->input; unsigned int num_fingers, tool_width; unsigned int x, y; unsigned int pressure, w; pressure = synusb->data[6]; x = be16_to_cpup((__be16 *)&synusb->data[2]); y = be16_to_cpup((__be16 *)&synusb->data[4]); w = synusb->data[0] & 0x0f; if (pressure > 0) { num_fingers = 1; tool_width = 5; switch (w) { case 0 ... 1: num_fingers = 2 + w; break; case 2: /* pen, pretend its a finger */ break; case 4 ... 
15: tool_width = w; break; } } else { num_fingers = 0; tool_width = 0; } /* * Post events * BTN_TOUCH has to be first as mousedev relies on it when doing * absolute -> relative conversion */ if (pressure > 30) input_report_key(input_dev, BTN_TOUCH, 1); if (pressure < 25) input_report_key(input_dev, BTN_TOUCH, 0); if (num_fingers > 0) { input_report_abs(input_dev, ABS_X, x); input_report_abs(input_dev, ABS_Y, YMAX_NOMINAL + YMIN_NOMINAL - y); } input_report_abs(input_dev, ABS_PRESSURE, pressure); input_report_abs(input_dev, ABS_TOOL_WIDTH, tool_width); input_report_key(input_dev, BTN_TOOL_FINGER, num_fingers == 1); input_report_key(input_dev, BTN_TOOL_DOUBLETAP, num_fingers == 2); input_report_key(input_dev, BTN_TOOL_TRIPLETAP, num_fingers == 3); synusb_report_buttons(synusb); if (synusb->flags & SYNUSB_AUXDISPLAY) input_report_key(input_dev, BTN_MIDDLE, synusb->data[1] & 0x08); input_sync(input_dev); } static void synusb_irq(struct urb *urb) { struct synusb *synusb = urb->context; int error; /* Check our status in case we need to bail out early. */ switch (urb->status) { case 0: usb_mark_last_busy(synusb->udev); break; /* Device went away so don't keep trying to read from it. */ case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: return; default: goto resubmit; break; } if (synusb->flags & SYNUSB_STICK) synusb_report_stick(synusb); else synusb_report_touchpad(synusb); resubmit: error = usb_submit_urb(urb, GFP_ATOMIC); if (error && error != -EPERM) dev_err(&synusb->intf->dev, "%s - usb_submit_urb failed with result: %d", __func__, error); } static struct usb_endpoint_descriptor * synusb_get_in_endpoint(struct usb_host_interface *iface) { struct usb_endpoint_descriptor *endpoint; int i; for (i = 0; i < iface->desc.bNumEndpoints; ++i) { endpoint = &iface->endpoint[i].desc; if (usb_endpoint_is_int_in(endpoint)) { /* we found our interrupt in endpoint */ return endpoint; } } return NULL; } static int synusb_open(struct input_dev *dev) { struct synusb *synusb = input_get_drvdata(dev); int retval; retval = usb_autopm_get_interface(synusb->intf); if (retval) { dev_err(&synusb->intf->dev, "%s - usb_autopm_get_interface failed, error: %d\n", __func__, retval); return retval; } mutex_lock(&synusb->pm_mutex); retval = usb_submit_urb(synusb->urb, GFP_KERNEL); if (retval) { dev_err(&synusb->intf->dev, "%s - usb_submit_urb failed, error: %d\n", __func__, retval); retval = -EIO; goto out; } synusb->intf->needs_remote_wakeup = 1; synusb->is_open = true; out: mutex_unlock(&synusb->pm_mutex); usb_autopm_put_interface(synusb->intf); return retval; } static void synusb_close(struct input_dev *dev) { struct synusb *synusb = input_get_drvdata(dev); int autopm_error; autopm_error = usb_autopm_get_interface(synusb->intf); mutex_lock(&synusb->pm_mutex); usb_kill_urb(synusb->urb); synusb->intf->needs_remote_wakeup = 0; synusb->is_open = false; mutex_unlock(&synusb->pm_mutex); if (!autopm_error) usb_autopm_put_interface(synusb->intf); } static int synusb_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *udev = interface_to_usbdev(intf); struct usb_endpoint_descriptor *ep; struct synusb *synusb; struct input_dev *input_dev; unsigned int intf_num = intf->cur_altsetting->desc.bInterfaceNumber; unsigned int altsetting = min(intf->num_altsetting, 1U); int error; error = usb_set_interface(udev, intf_num, altsetting); if (error) { dev_err(&udev->dev, "Can not set alternate setting to %i, error: %i", altsetting, error); return error; } ep = synusb_get_in_endpoint(intf->cur_altsetting); if 
(!ep) return -ENODEV; synusb = kzalloc(sizeof(*synusb), GFP_KERNEL); input_dev = input_allocate_device(); if (!synusb || !input_dev) { error = -ENOMEM; goto err_free_mem; } synusb->udev = udev; synusb->intf = intf; synusb->input = input_dev; mutex_init(&synusb->pm_mutex); synusb->flags = id->driver_info; if (synusb->flags & SYNUSB_COMBO) { /* * This is a combo device, we need to set proper * capability, depending on the interface. */ synusb->flags |= intf_num == 1 ? SYNUSB_STICK : SYNUSB_TOUCHPAD; } synusb->urb = usb_alloc_urb(0, GFP_KERNEL); if (!synusb->urb) { error = -ENOMEM; goto err_free_mem; } synusb->data = usb_alloc_coherent(udev, SYNUSB_RECV_SIZE, GFP_KERNEL, &synusb->urb->transfer_dma); if (!synusb->data) { error = -ENOMEM; goto err_free_urb; } usb_fill_int_urb(synusb->urb, udev, usb_rcvintpipe(udev, ep->bEndpointAddress), synusb->data, SYNUSB_RECV_SIZE, synusb_irq, synusb, ep->bInterval); synusb->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; if (udev->manufacturer) strscpy(synusb->name, udev->manufacturer, sizeof(synusb->name)); if (udev->product) { if (udev->manufacturer) strlcat(synusb->name, " ", sizeof(synusb->name)); strlcat(synusb->name, udev->product, sizeof(synusb->name)); } if (!strlen(synusb->name)) snprintf(synusb->name, sizeof(synusb->name), "USB Synaptics Device %04x:%04x", le16_to_cpu(udev->descriptor.idVendor), le16_to_cpu(udev->descriptor.idProduct)); if (synusb->flags & SYNUSB_STICK) strlcat(synusb->name, " (Stick)", sizeof(synusb->name)); usb_make_path(udev, synusb->phys, sizeof(synusb->phys)); strlcat(synusb->phys, "/input0", sizeof(synusb->phys)); input_dev->name = synusb->name; input_dev->phys = synusb->phys; usb_to_input_id(udev, &input_dev->id); input_dev->dev.parent = &synusb->intf->dev; if (!(synusb->flags & SYNUSB_IO_ALWAYS)) { input_dev->open = synusb_open; input_dev->close = synusb_close; } input_set_drvdata(input_dev, synusb); __set_bit(EV_ABS, input_dev->evbit); __set_bit(EV_KEY, input_dev->evbit); if (synusb->flags & SYNUSB_STICK) { __set_bit(EV_REL, input_dev->evbit); __set_bit(REL_X, input_dev->relbit); __set_bit(REL_Y, input_dev->relbit); __set_bit(INPUT_PROP_POINTING_STICK, input_dev->propbit); input_set_abs_params(input_dev, ABS_PRESSURE, 0, 127, 0, 0); } else { input_set_abs_params(input_dev, ABS_X, XMIN_NOMINAL, XMAX_NOMINAL, 0, 0); input_set_abs_params(input_dev, ABS_Y, YMIN_NOMINAL, YMAX_NOMINAL, 0, 0); input_set_abs_params(input_dev, ABS_PRESSURE, 0, 255, 0, 0); input_set_abs_params(input_dev, ABS_TOOL_WIDTH, 0, 15, 0, 0); __set_bit(BTN_TOUCH, input_dev->keybit); __set_bit(BTN_TOOL_FINGER, input_dev->keybit); __set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit); __set_bit(BTN_TOOL_TRIPLETAP, input_dev->keybit); } if (synusb->flags & SYNUSB_TOUCHSCREEN) __set_bit(INPUT_PROP_DIRECT, input_dev->propbit); else __set_bit(INPUT_PROP_POINTER, input_dev->propbit); __set_bit(BTN_LEFT, input_dev->keybit); __set_bit(BTN_RIGHT, input_dev->keybit); __set_bit(BTN_MIDDLE, input_dev->keybit); usb_set_intfdata(intf, synusb); if (synusb->flags & SYNUSB_IO_ALWAYS) { error = synusb_open(input_dev); if (error) goto err_free_dma; } error = input_register_device(input_dev); if (error) { dev_err(&udev->dev, "Failed to register input device, error %d\n", error); goto err_stop_io; } return 0; err_stop_io: if (synusb->flags & SYNUSB_IO_ALWAYS) synusb_close(synusb->input); err_free_dma: usb_free_coherent(udev, SYNUSB_RECV_SIZE, synusb->data, synusb->urb->transfer_dma); err_free_urb: usb_free_urb(synusb->urb); err_free_mem: input_free_device(input_dev); kfree(synusb); 
usb_set_intfdata(intf, NULL); return error; } static void synusb_disconnect(struct usb_interface *intf) { struct synusb *synusb = usb_get_intfdata(intf); struct usb_device *udev = interface_to_usbdev(intf); if (synusb->flags & SYNUSB_IO_ALWAYS) synusb_close(synusb->input); input_unregister_device(synusb->input); usb_free_coherent(udev, SYNUSB_RECV_SIZE, synusb->data, synusb->urb->transfer_dma); usb_free_urb(synusb->urb); kfree(synusb); usb_set_intfdata(intf, NULL); } static int synusb_suspend(struct usb_interface *intf, pm_message_t message) { struct synusb *synusb = usb_get_intfdata(intf); mutex_lock(&synusb->pm_mutex); usb_kill_urb(synusb->urb); mutex_unlock(&synusb->pm_mutex); return 0; } static int synusb_resume(struct usb_interface *intf) { struct synusb *synusb = usb_get_intfdata(intf); int retval = 0; mutex_lock(&synusb->pm_mutex); if ((synusb->is_open || (synusb->flags & SYNUSB_IO_ALWAYS)) && usb_submit_urb(synusb->urb, GFP_NOIO) < 0) { retval = -EIO; } mutex_unlock(&synusb->pm_mutex); return retval; } static int synusb_pre_reset(struct usb_interface *intf) { struct synusb *synusb = usb_get_intfdata(intf); mutex_lock(&synusb->pm_mutex); usb_kill_urb(synusb->urb); return 0; } static int synusb_post_reset(struct usb_interface *intf) { struct synusb *synusb = usb_get_intfdata(intf); int retval = 0; if ((synusb->is_open || (synusb->flags & SYNUSB_IO_ALWAYS)) && usb_submit_urb(synusb->urb, GFP_NOIO) < 0) { retval = -EIO; } mutex_unlock(&synusb->pm_mutex); return retval; } static int synusb_reset_resume(struct usb_interface *intf) { return synusb_resume(intf); } static const struct usb_device_id synusb_idtable[] = { { USB_DEVICE_SYNAPTICS(TP, SYNUSB_TOUCHPAD) }, { USB_DEVICE_SYNAPTICS(INT_TP, SYNUSB_TOUCHPAD) }, { USB_DEVICE_SYNAPTICS(CPAD, SYNUSB_TOUCHPAD | SYNUSB_AUXDISPLAY | SYNUSB_IO_ALWAYS) }, { USB_DEVICE_SYNAPTICS(TS, SYNUSB_TOUCHSCREEN) }, { USB_DEVICE_SYNAPTICS(STICK, SYNUSB_STICK) }, { USB_DEVICE_SYNAPTICS(WP, SYNUSB_TOUCHPAD) }, { USB_DEVICE_SYNAPTICS(COMP_TP, SYNUSB_COMBO) }, { USB_DEVICE_SYNAPTICS(WTP, SYNUSB_TOUCHPAD) }, { USB_DEVICE_SYNAPTICS(DPAD, SYNUSB_TOUCHPAD) }, { } }; MODULE_DEVICE_TABLE(usb, synusb_idtable); static struct usb_driver synusb_driver = { .name = "synaptics_usb", .probe = synusb_probe, .disconnect = synusb_disconnect, .id_table = synusb_idtable, .suspend = synusb_suspend, .resume = synusb_resume, .pre_reset = synusb_pre_reset, .post_reset = synusb_post_reset, .reset_resume = synusb_reset_resume, .supports_autosuspend = 1, }; module_usb_driver(synusb_driver); MODULE_AUTHOR("Rob Miller <rob@inpharmatica.co.uk>, " "Ron Lee <ron@debian.org>, " "Jan Steinhoff <cpad@jan-steinhoff.de>"); MODULE_DESCRIPTION("Synaptics USB device driver"); MODULE_LICENSE("GPL"); |
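/*
 * Illustrative sketch (not part of the driver above): offline decoding of a
 * single 8-byte (SYNUSB_RECV_SIZE) touchpad packet, following the field
 * layout used by synusb_report_buttons() and synusb_report_touchpad():
 * buttons in byte 1, big-endian X in bytes 2-3, big-endian Y in bytes 4-5,
 * pressure in byte 6, finger-width code in the low nibble of byte 0. The
 * sample packet contents are made up; this is only meant for inspecting a
 * capture (e.g. from usbmon), not for use in the driver.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint8_t data[8] = { 0x01, 0x04, 0x0d, 0x2c, 0x0b, 0xb8, 0x42, 0x00 };
	unsigned int x = ((unsigned int)data[2] << 8) | data[3];
	unsigned int y = ((unsigned int)data[4] << 8) | data[5];
	unsigned int pressure = data[6];
	unsigned int w = data[0] & 0x0f;

	printf("x=%u y=%u pressure=%u w=%u left=%d right=%d middle=%d\n",
	       x, y, pressure, w,
	       !!(data[1] & 0x04), !!(data[1] & 0x01), !!(data[1] & 0x02));
	return 0;
}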
209 209 6 206 209 241 241 5 20 1 19 21 21 22 22 1 21 21 226 226 3 224 282 2 3 27 21 226 21 11 6 226 5 24 24 4 21 112 86 27 109 4 112 86 27 108 7 89 16 17 101 6 10 2 8 1 7 1 5 2 6 1 3 4 5 9 2 8 8 94 94 117 93 24 27 91 108 10 116 92 1 83 8 56 26 7 7 5 27 1 23 4 27 23 23 1 18 4 1 21 9 9 9 2 6 1 7 1 7 24 26 2 24 32 32 6 1 9 23 4 7 21 5 27 24 8 42 1 1 5 5 11 11 11 6 11 42 42 1 42 10 33 1 16 18 2 10 20 10 29 38 25 6 7 12 1 20 19 1 20 6 31 1 7 29 1 5 4 5 1 37 1 29 1 7 31 7 16 21 20 18 9 29 4 4 4 4 3 4 4 4 3 2 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 
// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (C) 2011 Novell Inc.
*/ #include <linux/fs.h> #include <linux/namei.h> #include <linux/xattr.h> #include <linux/security.h> #include <linux/cred.h> #include <linux/module.h> #include <linux/posix_acl.h> #include <linux/posix_acl_xattr.h> #include <linux/atomic.h> #include <linux/ratelimit.h> #include <linux/backing-file.h> #include "overlayfs.h" static unsigned short ovl_redirect_max = 256; module_param_named(redirect_max, ovl_redirect_max, ushort, 0644); MODULE_PARM_DESC(redirect_max, "Maximum length of absolute redirect xattr value"); static int ovl_set_redirect(struct dentry *dentry, bool samedir); int ovl_cleanup(struct ovl_fs *ofs, struct inode *wdir, struct dentry *wdentry) { int err; dget(wdentry); if (d_is_dir(wdentry)) err = ovl_do_rmdir(ofs, wdir, wdentry); else err = ovl_do_unlink(ofs, wdir, wdentry); dput(wdentry); if (err) { pr_err("cleanup of '%pd2' failed (%i)\n", wdentry, err); } return err; } struct dentry *ovl_lookup_temp(struct ovl_fs *ofs, struct dentry *workdir) { struct dentry *temp; char name[20]; static atomic_t temp_id = ATOMIC_INIT(0); /* counter is allowed to wrap, since temp dentries are ephemeral */ snprintf(name, sizeof(name), "#%x", atomic_inc_return(&temp_id)); temp = ovl_lookup_upper(ofs, name, workdir, strlen(name)); if (!IS_ERR(temp) && temp->d_inode) { pr_err("workdir/%s already exists\n", name); dput(temp); temp = ERR_PTR(-EIO); } return temp; } /* caller holds i_mutex on workdir */ static struct dentry *ovl_whiteout(struct ovl_fs *ofs) { int err; struct dentry *whiteout; struct dentry *workdir = ofs->workdir; struct inode *wdir = workdir->d_inode; if (!ofs->whiteout) { whiteout = ovl_lookup_temp(ofs, workdir); if (IS_ERR(whiteout)) goto out; err = ovl_do_whiteout(ofs, wdir, whiteout); if (err) { dput(whiteout); whiteout = ERR_PTR(err); goto out; } ofs->whiteout = whiteout; } if (!ofs->no_shared_whiteout) { whiteout = ovl_lookup_temp(ofs, workdir); if (IS_ERR(whiteout)) goto out; err = ovl_do_link(ofs, ofs->whiteout, wdir, whiteout); if (!err) goto out; if (err != -EMLINK) { pr_warn("Failed to link whiteout - disabling whiteout inode sharing(nlink=%u, err=%i)\n", ofs->whiteout->d_inode->i_nlink, err); ofs->no_shared_whiteout = true; } dput(whiteout); } whiteout = ofs->whiteout; ofs->whiteout = NULL; out: return whiteout; } /* Caller must hold i_mutex on both workdir and dir */ int ovl_cleanup_and_whiteout(struct ovl_fs *ofs, struct inode *dir, struct dentry *dentry) { struct inode *wdir = ofs->workdir->d_inode; struct dentry *whiteout; int err; int flags = 0; whiteout = ovl_whiteout(ofs); err = PTR_ERR(whiteout); if (IS_ERR(whiteout)) return err; if (d_is_dir(dentry)) flags = RENAME_EXCHANGE; err = ovl_do_rename(ofs, wdir, whiteout, dir, dentry, flags); if (err) goto kill_whiteout; if (flags) ovl_cleanup(ofs, wdir, dentry); out: dput(whiteout); return err; kill_whiteout: ovl_cleanup(ofs, wdir, whiteout); goto out; } int ovl_mkdir_real(struct ovl_fs *ofs, struct inode *dir, struct dentry **newdentry, umode_t mode) { int err; struct dentry *d, *dentry = *newdentry; err = ovl_do_mkdir(ofs, dir, dentry, mode); if (err) return err; if (likely(!d_unhashed(dentry))) return 0; /* * vfs_mkdir() may succeed and leave the dentry passed * to it unhashed and negative. If that happens, try to * lookup a new hashed and positive dentry. 
*/ d = ovl_lookup_upper(ofs, dentry->d_name.name, dentry->d_parent, dentry->d_name.len); if (IS_ERR(d)) { pr_warn("failed lookup after mkdir (%pd2, err=%i).\n", dentry, err); return PTR_ERR(d); } dput(dentry); *newdentry = d; return 0; } struct dentry *ovl_create_real(struct ovl_fs *ofs, struct inode *dir, struct dentry *newdentry, struct ovl_cattr *attr) { int err; if (IS_ERR(newdentry)) return newdentry; err = -ESTALE; if (newdentry->d_inode) goto out; if (attr->hardlink) { err = ovl_do_link(ofs, attr->hardlink, dir, newdentry); } else { switch (attr->mode & S_IFMT) { case S_IFREG: err = ovl_do_create(ofs, dir, newdentry, attr->mode); break; case S_IFDIR: /* mkdir is special... */ err = ovl_mkdir_real(ofs, dir, &newdentry, attr->mode); break; case S_IFCHR: case S_IFBLK: case S_IFIFO: case S_IFSOCK: err = ovl_do_mknod(ofs, dir, newdentry, attr->mode, attr->rdev); break; case S_IFLNK: err = ovl_do_symlink(ofs, dir, newdentry, attr->link); break; default: err = -EPERM; } } if (!err && WARN_ON(!newdentry->d_inode)) { /* * Not quite sure if non-instantiated dentry is legal or not. * VFS doesn't seem to care so check and warn here. */ err = -EIO; } out: if (err) { dput(newdentry); return ERR_PTR(err); } return newdentry; } struct dentry *ovl_create_temp(struct ovl_fs *ofs, struct dentry *workdir, struct ovl_cattr *attr) { return ovl_create_real(ofs, d_inode(workdir), ovl_lookup_temp(ofs, workdir), attr); } static int ovl_set_opaque_xerr(struct dentry *dentry, struct dentry *upper, int xerr) { struct ovl_fs *ofs = OVL_FS(dentry->d_sb); int err; err = ovl_check_setxattr(ofs, upper, OVL_XATTR_OPAQUE, "y", 1, xerr); if (!err) ovl_dentry_set_opaque(dentry); return err; } static int ovl_set_opaque(struct dentry *dentry, struct dentry *upperdentry) { /* * Fail with -EIO when trying to create opaque dir and upper doesn't * support xattrs. ovl_rename() calls ovl_set_opaque_xerr(-EXDEV) to * return a specific error for noxattr case. */ return ovl_set_opaque_xerr(dentry, upperdentry, -EIO); } /* * Common operations required to be done after creation of file on upper. * If @hardlink is false, then @inode is a pre-allocated inode, we may or * may not use to instantiate the new dentry. */ static int ovl_instantiate(struct dentry *dentry, struct inode *inode, struct dentry *newdentry, bool hardlink, struct file *tmpfile) { struct ovl_inode_params oip = { .upperdentry = newdentry, .newinode = inode, }; ovl_dentry_set_upper_alias(dentry); ovl_dentry_init_reval(dentry, newdentry, NULL); if (!hardlink) { /* * ovl_obtain_alias() can be called after ovl_create_real() * and before we get here, so we may get an inode from cache * with the same real upperdentry that is not the inode we * pre-allocated. In this case we will use the cached inode * to instantiate the new dentry. * * XXX: if we ever use ovl_obtain_alias() to decode directory * file handles, need to use ovl_get_inode_locked() and * d_instantiate_new() here to prevent from creating two * hashed directory inode aliases. 
*/ inode = ovl_get_inode(dentry->d_sb, &oip); if (IS_ERR(inode)) return PTR_ERR(inode); if (inode == oip.newinode) ovl_set_flag(OVL_UPPERDATA, inode); } else { WARN_ON(ovl_inode_real(inode) != d_inode(newdentry)); dput(newdentry); inc_nlink(inode); } if (tmpfile) d_mark_tmpfile(tmpfile, inode); d_instantiate(dentry, inode); if (inode != oip.newinode) { pr_warn_ratelimited("newly created inode found in cache (%pd2)\n", dentry); } /* Force lookup of new upper hardlink to find its lower */ if (hardlink) d_drop(dentry); return 0; } static bool ovl_type_merge(struct dentry *dentry) { return OVL_TYPE_MERGE(ovl_path_type(dentry)); } static bool ovl_type_origin(struct dentry *dentry) { return OVL_TYPE_ORIGIN(ovl_path_type(dentry)); } static int ovl_create_upper(struct dentry *dentry, struct inode *inode, struct ovl_cattr *attr) { struct ovl_fs *ofs = OVL_FS(dentry->d_sb); struct dentry *upperdir = ovl_dentry_upper(dentry->d_parent); struct inode *udir = upperdir->d_inode; struct dentry *newdentry; int err; inode_lock_nested(udir, I_MUTEX_PARENT); newdentry = ovl_create_real(ofs, udir, ovl_lookup_upper(ofs, dentry->d_name.name, upperdir, dentry->d_name.len), attr); err = PTR_ERR(newdentry); if (IS_ERR(newdentry)) goto out_unlock; if (ovl_type_merge(dentry->d_parent) && d_is_dir(newdentry) && !ovl_allow_offline_changes(ofs)) { /* Setting opaque here is just an optimization, allow to fail */ ovl_set_opaque(dentry, newdentry); } ovl_dir_modified(dentry->d_parent, false); err = ovl_instantiate(dentry, inode, newdentry, !!attr->hardlink, NULL); if (err) goto out_cleanup; out_unlock: inode_unlock(udir); return err; out_cleanup: ovl_cleanup(ofs, udir, newdentry); dput(newdentry); goto out_unlock; } static struct dentry *ovl_clear_empty(struct dentry *dentry, struct list_head *list) { struct ovl_fs *ofs = OVL_FS(dentry->d_sb); struct dentry *workdir = ovl_workdir(dentry); struct inode *wdir = workdir->d_inode; struct dentry *upperdir = ovl_dentry_upper(dentry->d_parent); struct inode *udir = upperdir->d_inode; struct path upperpath; struct dentry *upper; struct dentry *opaquedir; struct kstat stat; int err; if (WARN_ON(!workdir)) return ERR_PTR(-EROFS); err = ovl_lock_rename_workdir(workdir, upperdir); if (err) goto out; ovl_path_upper(dentry, &upperpath); err = vfs_getattr(&upperpath, &stat, STATX_BASIC_STATS, AT_STATX_SYNC_AS_STAT); if (err) goto out_unlock; err = -ESTALE; if (!S_ISDIR(stat.mode)) goto out_unlock; upper = upperpath.dentry; if (upper->d_parent->d_inode != udir) goto out_unlock; opaquedir = ovl_create_temp(ofs, workdir, OVL_CATTR(stat.mode)); err = PTR_ERR(opaquedir); if (IS_ERR(opaquedir)) goto out_unlock; err = ovl_copy_xattr(dentry->d_sb, &upperpath, opaquedir); if (err) goto out_cleanup; err = ovl_set_opaque(dentry, opaquedir); if (err) goto out_cleanup; inode_lock(opaquedir->d_inode); err = ovl_set_attr(ofs, opaquedir, &stat); inode_unlock(opaquedir->d_inode); if (err) goto out_cleanup; err = ovl_do_rename(ofs, wdir, opaquedir, udir, upper, RENAME_EXCHANGE); if (err) goto out_cleanup; ovl_cleanup_whiteouts(ofs, upper, list); ovl_cleanup(ofs, wdir, upper); unlock_rename(workdir, upperdir); /* dentry's upper doesn't match now, get rid of it */ d_drop(dentry); return opaquedir; out_cleanup: ovl_cleanup(ofs, wdir, opaquedir); dput(opaquedir); out_unlock: unlock_rename(workdir, upperdir); out: return ERR_PTR(err); } static int ovl_set_upper_acl(struct ovl_fs *ofs, struct dentry *upperdentry, const char *acl_name, struct posix_acl *acl) { if (!IS_ENABLED(CONFIG_FS_POSIX_ACL) || !acl) return 
0; return ovl_do_set_acl(ofs, upperdentry, acl_name, acl); } static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode, struct ovl_cattr *cattr) { struct ovl_fs *ofs = OVL_FS(dentry->d_sb); struct dentry *workdir = ovl_workdir(dentry); struct inode *wdir = workdir->d_inode; struct dentry *upperdir = ovl_dentry_upper(dentry->d_parent); struct inode *udir = upperdir->d_inode; struct dentry *upper; struct dentry *newdentry; int err; struct posix_acl *acl, *default_acl; bool hardlink = !!cattr->hardlink; if (WARN_ON(!workdir)) return -EROFS; if (!hardlink) { err = posix_acl_create(dentry->d_parent->d_inode, &cattr->mode, &default_acl, &acl); if (err) return err; } err = ovl_lock_rename_workdir(workdir, upperdir); if (err) goto out; upper = ovl_lookup_upper(ofs, dentry->d_name.name, upperdir, dentry->d_name.len); err = PTR_ERR(upper); if (IS_ERR(upper)) goto out_unlock; err = -ESTALE; if (d_is_negative(upper) || !ovl_upper_is_whiteout(ofs, upper)) goto out_dput; newdentry = ovl_create_temp(ofs, workdir, cattr); err = PTR_ERR(newdentry); if (IS_ERR(newdentry)) goto out_dput; /* * mode could have been mutilated due to umask (e.g. sgid directory) */ if (!hardlink && !S_ISLNK(cattr->mode) && newdentry->d_inode->i_mode != cattr->mode) { struct iattr attr = { .ia_valid = ATTR_MODE, .ia_mode = cattr->mode, }; inode_lock(newdentry->d_inode); err = ovl_do_notify_change(ofs, newdentry, &attr); inode_unlock(newdentry->d_inode); if (err) goto out_cleanup; } if (!hardlink) { err = ovl_set_upper_acl(ofs, newdentry, XATTR_NAME_POSIX_ACL_ACCESS, acl); if (err) goto out_cleanup; err = ovl_set_upper_acl(ofs, newdentry, XATTR_NAME_POSIX_ACL_DEFAULT, default_acl); if (err) goto out_cleanup; } if (!hardlink && S_ISDIR(cattr->mode)) { err = ovl_set_opaque(dentry, newdentry); if (err) goto out_cleanup; err = ovl_do_rename(ofs, wdir, newdentry, udir, upper, RENAME_EXCHANGE); if (err) goto out_cleanup; ovl_cleanup(ofs, wdir, upper); } else { err = ovl_do_rename(ofs, wdir, newdentry, udir, upper, 0); if (err) goto out_cleanup; } ovl_dir_modified(dentry->d_parent, false); err = ovl_instantiate(dentry, inode, newdentry, hardlink, NULL); if (err) { ovl_cleanup(ofs, udir, newdentry); dput(newdentry); } out_dput: dput(upper); out_unlock: unlock_rename(workdir, upperdir); out: if (!hardlink) { posix_acl_release(acl); posix_acl_release(default_acl); } return err; out_cleanup: ovl_cleanup(ofs, wdir, newdentry); dput(newdentry); goto out_dput; } static const struct cred *ovl_setup_cred_for_create(struct dentry *dentry, struct inode *inode, umode_t mode, const struct cred *old_cred) { int err; struct cred *override_cred; override_cred = prepare_creds(); if (!override_cred) return ERR_PTR(-ENOMEM); override_cred->fsuid = inode->i_uid; override_cred->fsgid = inode->i_gid; err = security_dentry_create_files_as(dentry, mode, &dentry->d_name, old_cred, override_cred); if (err) { put_cred(override_cred); return ERR_PTR(err); } /* * Caller is going to match this with revert_creds_light() and drop * referenec on the returned creds. * We must be called with creator creds already, otherwise we risk * leaking creds. 
*/ old_cred = override_creds_light(override_cred); WARN_ON_ONCE(old_cred != ovl_creds(dentry->d_sb)); return override_cred; } static int ovl_create_or_link(struct dentry *dentry, struct inode *inode, struct ovl_cattr *attr, bool origin) { int err; const struct cred *old_cred, *new_cred = NULL; struct dentry *parent = dentry->d_parent; old_cred = ovl_override_creds(dentry->d_sb); /* * When linking a file with copy up origin into a new parent, mark the * new parent dir "impure". */ if (origin) { err = ovl_set_impure(parent, ovl_dentry_upper(parent)); if (err) goto out_revert_creds; } if (!attr->hardlink) { /* * In the creation cases(create, mkdir, mknod, symlink), * ovl should transfer current's fs{u,g}id to underlying * fs. Because underlying fs want to initialize its new * inode owner using current's fs{u,g}id. And in this * case, the @inode is a new inode that is initialized * in inode_init_owner() to current's fs{u,g}id. So use * the inode's i_{u,g}id to override the cred's fs{u,g}id. * * But in the other hardlink case, ovl_link() does not * create a new inode, so just use the ovl mounter's * fs{u,g}id. */ new_cred = ovl_setup_cred_for_create(dentry, inode, attr->mode, old_cred); err = PTR_ERR(new_cred); if (IS_ERR(new_cred)) { new_cred = NULL; goto out_revert_creds; } } if (!ovl_dentry_is_whiteout(dentry)) err = ovl_create_upper(dentry, inode, attr); else err = ovl_create_over_whiteout(dentry, inode, attr); out_revert_creds: ovl_revert_creds(old_cred); put_cred(new_cred); return err; } static int ovl_create_object(struct dentry *dentry, int mode, dev_t rdev, const char *link) { int err; struct inode *inode; struct ovl_cattr attr = { .rdev = rdev, .link = link, }; err = ovl_copy_up(dentry->d_parent); if (err) return err; err = ovl_want_write(dentry); if (err) goto out; /* Preallocate inode to be used by ovl_get_inode() */ err = -ENOMEM; inode = ovl_new_inode(dentry->d_sb, mode, rdev); if (!inode) goto out_drop_write; spin_lock(&inode->i_lock); inode->i_state |= I_CREATING; spin_unlock(&inode->i_lock); inode_init_owner(&nop_mnt_idmap, inode, dentry->d_parent->d_inode, mode); attr.mode = inode->i_mode; err = ovl_create_or_link(dentry, inode, &attr, false); /* Did we end up using the preallocated inode? 
*/ if (inode != d_inode(dentry)) iput(inode); out_drop_write: ovl_drop_write(dentry); out: return err; } static int ovl_create(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, umode_t mode, bool excl) { return ovl_create_object(dentry, (mode & 07777) | S_IFREG, 0, NULL); } static int ovl_mkdir(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, umode_t mode) { return ovl_create_object(dentry, (mode & 07777) | S_IFDIR, 0, NULL); } static int ovl_mknod(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, umode_t mode, dev_t rdev) { /* Don't allow creation of "whiteout" on overlay */ if (S_ISCHR(mode) && rdev == WHITEOUT_DEV) return -EPERM; return ovl_create_object(dentry, mode, rdev, NULL); } static int ovl_symlink(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, const char *link) { return ovl_create_object(dentry, S_IFLNK, 0, link); } static int ovl_set_link_redirect(struct dentry *dentry) { const struct cred *old_cred; int err; old_cred = ovl_override_creds(dentry->d_sb); err = ovl_set_redirect(dentry, false); ovl_revert_creds(old_cred); return err; } static int ovl_link(struct dentry *old, struct inode *newdir, struct dentry *new) { int err; struct inode *inode; err = ovl_copy_up(old); if (err) goto out; err = ovl_copy_up(new->d_parent); if (err) goto out; err = ovl_nlink_start(old); if (err) goto out; if (ovl_is_metacopy_dentry(old)) { err = ovl_set_link_redirect(old); if (err) goto out_nlink_end; } inode = d_inode(old); ihold(inode); err = ovl_create_or_link(new, inode, &(struct ovl_cattr) {.hardlink = ovl_dentry_upper(old)}, ovl_type_origin(old)); if (err) iput(inode); out_nlink_end: ovl_nlink_end(old); out: return err; } static bool ovl_matches_upper(struct dentry *dentry, struct dentry *upper) { return d_inode(ovl_dentry_upper(dentry)) == d_inode(upper); } static int ovl_remove_and_whiteout(struct dentry *dentry, struct list_head *list) { struct ovl_fs *ofs = OVL_FS(dentry->d_sb); struct dentry *workdir = ovl_workdir(dentry); struct dentry *upperdir = ovl_dentry_upper(dentry->d_parent); struct dentry *upper; struct dentry *opaquedir = NULL; int err; if (WARN_ON(!workdir)) return -EROFS; if (!list_empty(list)) { opaquedir = ovl_clear_empty(dentry, list); err = PTR_ERR(opaquedir); if (IS_ERR(opaquedir)) goto out; } err = ovl_lock_rename_workdir(workdir, upperdir); if (err) goto out_dput; upper = ovl_lookup_upper(ofs, dentry->d_name.name, upperdir, dentry->d_name.len); err = PTR_ERR(upper); if (IS_ERR(upper)) goto out_unlock; err = -ESTALE; if ((opaquedir && upper != opaquedir) || (!opaquedir && ovl_dentry_upper(dentry) && !ovl_matches_upper(dentry, upper))) { goto out_dput_upper; } err = ovl_cleanup_and_whiteout(ofs, d_inode(upperdir), upper); if (err) goto out_d_drop; ovl_dir_modified(dentry->d_parent, true); out_d_drop: d_drop(dentry); out_dput_upper: dput(upper); out_unlock: unlock_rename(workdir, upperdir); out_dput: dput(opaquedir); out: return err; } static int ovl_remove_upper(struct dentry *dentry, bool is_dir, struct list_head *list) { struct ovl_fs *ofs = OVL_FS(dentry->d_sb); struct dentry *upperdir = ovl_dentry_upper(dentry->d_parent); struct inode *dir = upperdir->d_inode; struct dentry *upper; struct dentry *opaquedir = NULL; int err; if (!list_empty(list)) { opaquedir = ovl_clear_empty(dentry, list); err = PTR_ERR(opaquedir); if (IS_ERR(opaquedir)) goto out; } inode_lock_nested(dir, I_MUTEX_PARENT); upper = ovl_lookup_upper(ofs, dentry->d_name.name, upperdir, dentry->d_name.len); err = PTR_ERR(upper); if 
(IS_ERR(upper)) goto out_unlock; err = -ESTALE; if ((opaquedir && upper != opaquedir) || (!opaquedir && !ovl_matches_upper(dentry, upper))) goto out_dput_upper; if (is_dir) err = ovl_do_rmdir(ofs, dir, upper); else err = ovl_do_unlink(ofs, dir, upper); ovl_dir_modified(dentry->d_parent, ovl_type_origin(dentry)); /* * Keeping this dentry hashed would mean having to release * upperpath/lowerpath, which could only be done if we are the * sole user of this dentry. Too tricky... Just unhash for * now. */ if (!err) d_drop(dentry); out_dput_upper: dput(upper); out_unlock: inode_unlock(dir); dput(opaquedir); out: return err; } static bool ovl_pure_upper(struct dentry *dentry) { return !ovl_dentry_lower(dentry) && !ovl_test_flag(OVL_WHITEOUTS, d_inode(dentry)); } static void ovl_drop_nlink(struct dentry *dentry) { struct inode *inode = d_inode(dentry); struct dentry *alias; /* Try to find another, hashed alias */ spin_lock(&inode->i_lock); hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) { if (alias != dentry && !d_unhashed(alias)) break; } spin_unlock(&inode->i_lock); /* * Changes to underlying layers may cause i_nlink to lose sync with * reality. In this case prevent the link count from going to zero * prematurely. */ if (inode->i_nlink > !!alias) drop_nlink(inode); } static int ovl_do_remove(struct dentry *dentry, bool is_dir) { int err; const struct cred *old_cred; bool lower_positive = ovl_lower_positive(dentry); LIST_HEAD(list); /* No need to clean pure upper removed by vfs_rmdir() */ if (is_dir && (lower_positive || !ovl_pure_upper(dentry))) { err = ovl_check_empty_dir(dentry, &list); if (err) goto out; } err = ovl_copy_up(dentry->d_parent); if (err) goto out; err = ovl_nlink_start(dentry); if (err) goto out; old_cred = ovl_override_creds(dentry->d_sb); if (!lower_positive) err = ovl_remove_upper(dentry, is_dir, &list); else err = ovl_remove_and_whiteout(dentry, &list); ovl_revert_creds(old_cred); if (!err) { if (is_dir) clear_nlink(dentry->d_inode); else ovl_drop_nlink(dentry); } ovl_nlink_end(dentry); /* * Copy ctime * * Note: we fail to update ctime if there was no copy-up, only a * whiteout */ if (ovl_dentry_upper(dentry)) ovl_copyattr(d_inode(dentry)); out: ovl_cache_free(&list); return err; } static int ovl_unlink(struct inode *dir, struct dentry *dentry) { return ovl_do_remove(dentry, false); } static int ovl_rmdir(struct inode *dir, struct dentry *dentry) { return ovl_do_remove(dentry, true); } static bool ovl_type_merge_or_lower(struct dentry *dentry) { enum ovl_path_type type = ovl_path_type(dentry); return OVL_TYPE_MERGE(type) || !OVL_TYPE_UPPER(type); } static bool ovl_can_move(struct dentry *dentry) { return ovl_redirect_dir(OVL_FS(dentry->d_sb)) || !d_is_dir(dentry) || !ovl_type_merge_or_lower(dentry); } static char *ovl_get_redirect(struct dentry *dentry, bool abs_redirect) { char *buf, *ret; struct dentry *d, *tmp; int buflen = ovl_redirect_max + 1; if (!abs_redirect) { ret = kstrndup(dentry->d_name.name, dentry->d_name.len, GFP_KERNEL); goto out; } buf = ret = kmalloc(buflen, GFP_KERNEL); if (!buf) goto out; buflen--; buf[buflen] = '\0'; for (d = dget(dentry); !IS_ROOT(d);) { const char *name; int thislen; spin_lock(&d->d_lock); name = ovl_dentry_get_redirect(d); if (name) { thislen = strlen(name); } else { name = d->d_name.name; thislen = d->d_name.len; } /* If path is too long, fall back to userspace move */ if (thislen + (name[0] != '/') > buflen) { ret = ERR_PTR(-EXDEV); spin_unlock(&d->d_lock); goto out_put; } buflen -= thislen; memcpy(&buf[buflen], name, 
thislen); spin_unlock(&d->d_lock); tmp = dget_parent(d); dput(d); d = tmp; /* Absolute redirect: finished */ if (buf[buflen] == '/') break; buflen--; buf[buflen] = '/'; } ret = kstrdup(&buf[buflen], GFP_KERNEL); out_put: dput(d); kfree(buf); out: return ret ? ret : ERR_PTR(-ENOMEM); } static bool ovl_need_absolute_redirect(struct dentry *dentry, bool samedir) { struct dentry *lowerdentry; if (!samedir) return true; if (d_is_dir(dentry)) return false; /* * For non-dir hardlinked files, we need absolute redirects * in general as two upper hardlinks could be in different * dirs. We could put a relative redirect now and convert * it to absolute redirect later. But when nlink > 1 and * indexing is on, that means relative redirect needs to be * converted to absolute during copy up of another lower * hardllink as well. * * So without optimizing too much, just check if lower is * a hard link or not. If lower is hard link, put absolute * redirect. */ lowerdentry = ovl_dentry_lower(dentry); return (d_inode(lowerdentry)->i_nlink > 1); } static int ovl_set_redirect(struct dentry *dentry, bool samedir) { int err; struct ovl_fs *ofs = OVL_FS(dentry->d_sb); const char *redirect = ovl_dentry_get_redirect(dentry); bool absolute_redirect = ovl_need_absolute_redirect(dentry, samedir); if (redirect && (!absolute_redirect || redirect[0] == '/')) return 0; redirect = ovl_get_redirect(dentry, absolute_redirect); if (IS_ERR(redirect)) return PTR_ERR(redirect); err = ovl_check_setxattr(ofs, ovl_dentry_upper(dentry), OVL_XATTR_REDIRECT, redirect, strlen(redirect), -EXDEV); if (!err) { spin_lock(&dentry->d_lock); ovl_dentry_set_redirect(dentry, redirect); spin_unlock(&dentry->d_lock); } else { kfree(redirect); pr_warn_ratelimited("failed to set redirect (%i)\n", err); /* Fall back to userspace copy-up */ err = -EXDEV; } return err; } static int ovl_rename(struct mnt_idmap *idmap, struct inode *olddir, struct dentry *old, struct inode *newdir, struct dentry *new, unsigned int flags) { int err; struct dentry *old_upperdir; struct dentry *new_upperdir; struct dentry *olddentry; struct dentry *newdentry; struct dentry *trap; bool old_opaque; bool new_opaque; bool cleanup_whiteout = false; bool update_nlink = false; bool overwrite = !(flags & RENAME_EXCHANGE); bool is_dir = d_is_dir(old); bool new_is_dir = d_is_dir(new); bool samedir = olddir == newdir; struct dentry *opaquedir = NULL; const struct cred *old_cred = NULL; struct ovl_fs *ofs = OVL_FS(old->d_sb); LIST_HEAD(list); err = -EINVAL; if (flags & ~(RENAME_EXCHANGE | RENAME_NOREPLACE)) goto out; flags &= ~RENAME_NOREPLACE; /* Don't copy up directory trees */ err = -EXDEV; if (!ovl_can_move(old)) goto out; if (!overwrite && !ovl_can_move(new)) goto out; if (overwrite && new_is_dir && !ovl_pure_upper(new)) { err = ovl_check_empty_dir(new, &list); if (err) goto out; } if (overwrite) { if (ovl_lower_positive(old)) { if (!ovl_dentry_is_whiteout(new)) { /* Whiteout source */ flags |= RENAME_WHITEOUT; } else { /* Switch whiteouts */ flags |= RENAME_EXCHANGE; } } else if (is_dir && ovl_dentry_is_whiteout(new)) { flags |= RENAME_EXCHANGE; cleanup_whiteout = true; } } err = ovl_copy_up(old); if (err) goto out; err = ovl_copy_up(new->d_parent); if (err) goto out; if (!overwrite) { err = ovl_copy_up(new); if (err) goto out; } else if (d_inode(new)) { err = ovl_nlink_start(new); if (err) goto out; update_nlink = true; } if (!update_nlink) { /* ovl_nlink_start() took ovl_want_write() */ err = ovl_want_write(old); if (err) goto out; } old_cred = ovl_override_creds(old->d_sb); if 
(!list_empty(&list)) { opaquedir = ovl_clear_empty(new, &list); err = PTR_ERR(opaquedir); if (IS_ERR(opaquedir)) { opaquedir = NULL; goto out_revert_creds; } } old_upperdir = ovl_dentry_upper(old->d_parent); new_upperdir = ovl_dentry_upper(new->d_parent); if (!samedir) { /* * When moving a merge dir or non-dir with copy up origin into * a new parent, we are marking the new parent dir "impure". * When ovl_iterate() iterates an "impure" upper dir, it will * lookup the origin inodes of the entries to fill d_ino. */ if (ovl_type_origin(old)) { err = ovl_set_impure(new->d_parent, new_upperdir); if (err) goto out_revert_creds; } if (!overwrite && ovl_type_origin(new)) { err = ovl_set_impure(old->d_parent, old_upperdir); if (err) goto out_revert_creds; } } trap = lock_rename(new_upperdir, old_upperdir); if (IS_ERR(trap)) { err = PTR_ERR(trap); goto out_revert_creds; } olddentry = ovl_lookup_upper(ofs, old->d_name.name, old_upperdir, old->d_name.len); err = PTR_ERR(olddentry); if (IS_ERR(olddentry)) goto out_unlock; err = -ESTALE; if (!ovl_matches_upper(old, olddentry)) goto out_dput_old; newdentry = ovl_lookup_upper(ofs, new->d_name.name, new_upperdir, new->d_name.len); err = PTR_ERR(newdentry); if (IS_ERR(newdentry)) goto out_dput_old; old_opaque = ovl_dentry_is_opaque(old); new_opaque = ovl_dentry_is_opaque(new); err = -ESTALE; if (d_inode(new) && ovl_dentry_upper(new)) { if (opaquedir) { if (newdentry != opaquedir) goto out_dput; } else { if (!ovl_matches_upper(new, newdentry)) goto out_dput; } } else { if (!d_is_negative(newdentry)) { if (!new_opaque || !ovl_upper_is_whiteout(ofs, newdentry)) goto out_dput; } else { if (flags & RENAME_EXCHANGE) goto out_dput; } } if (olddentry == trap) goto out_dput; if (newdentry == trap) goto out_dput; if (olddentry->d_inode == newdentry->d_inode) goto out_dput; err = 0; if (ovl_type_merge_or_lower(old)) err = ovl_set_redirect(old, samedir); else if (is_dir && !old_opaque && ovl_type_merge(new->d_parent)) err = ovl_set_opaque_xerr(old, olddentry, -EXDEV); if (err) goto out_dput; if (!overwrite && ovl_type_merge_or_lower(new)) err = ovl_set_redirect(new, samedir); else if (!overwrite && new_is_dir && !new_opaque && ovl_type_merge(old->d_parent)) err = ovl_set_opaque_xerr(new, newdentry, -EXDEV); if (err) goto out_dput; err = ovl_do_rename(ofs, old_upperdir->d_inode, olddentry, new_upperdir->d_inode, newdentry, flags); if (err) goto out_dput; if (cleanup_whiteout) ovl_cleanup(ofs, old_upperdir->d_inode, newdentry); if (overwrite && d_inode(new)) { if (new_is_dir) clear_nlink(d_inode(new)); else ovl_drop_nlink(new); } ovl_dir_modified(old->d_parent, ovl_type_origin(old) || (!overwrite && ovl_type_origin(new))); ovl_dir_modified(new->d_parent, ovl_type_origin(old) || (d_inode(new) && ovl_type_origin(new))); /* copy ctime: */ ovl_copyattr(d_inode(old)); if (d_inode(new) && ovl_dentry_upper(new)) ovl_copyattr(d_inode(new)); out_dput: dput(newdentry); out_dput_old: dput(olddentry); out_unlock: unlock_rename(new_upperdir, old_upperdir); out_revert_creds: ovl_revert_creds(old_cred); if (update_nlink) ovl_nlink_end(new); else ovl_drop_write(old); out: dput(opaquedir); ovl_cache_free(&list); return err; } static int ovl_create_tmpfile(struct file *file, struct dentry *dentry, struct inode *inode, umode_t mode) { const struct cred *old_cred, *new_cred = NULL; struct path realparentpath; struct file *realfile; struct ovl_file *of; struct dentry *newdentry; /* It's okay to set O_NOATIME, since the owner will be current fsuid */ int flags = file->f_flags | OVL_OPEN_FLAGS; 
int err; old_cred = ovl_override_creds(dentry->d_sb); new_cred = ovl_setup_cred_for_create(dentry, inode, mode, old_cred); err = PTR_ERR(new_cred); if (IS_ERR(new_cred)) { new_cred = NULL; goto out_revert_creds; } ovl_path_upper(dentry->d_parent, &realparentpath); realfile = backing_tmpfile_open(&file->f_path, flags, &realparentpath, mode, current_cred()); err = PTR_ERR_OR_ZERO(realfile); pr_debug("tmpfile/open(%pd2, 0%o) = %i\n", realparentpath.dentry, mode, err); if (err) goto out_revert_creds; of = ovl_file_alloc(realfile); if (!of) { fput(realfile); err = -ENOMEM; goto out_revert_creds; } /* ovl_instantiate() consumes the newdentry reference on success */ newdentry = dget(realfile->f_path.dentry); err = ovl_instantiate(dentry, inode, newdentry, false, file); if (!err) { file->private_data = of; } else { dput(newdentry); ovl_file_free(of); } out_revert_creds: ovl_revert_creds(old_cred); put_cred(new_cred); return err; } static int ovl_dummy_open(struct inode *inode, struct file *file) { return 0; } static int ovl_tmpfile(struct mnt_idmap *idmap, struct inode *dir, struct file *file, umode_t mode) { int err; struct dentry *dentry = file->f_path.dentry; struct inode *inode; if (!OVL_FS(dentry->d_sb)->tmpfile) return -EOPNOTSUPP; err = ovl_copy_up(dentry->d_parent); if (err) return err; err = ovl_want_write(dentry); if (err) return err; err = -ENOMEM; inode = ovl_new_inode(dentry->d_sb, mode, 0); if (!inode) goto drop_write; inode_init_owner(&nop_mnt_idmap, inode, dir, mode); err = ovl_create_tmpfile(file, dentry, inode, inode->i_mode); if (err) goto put_inode; /* * Check if the preallocated inode was actually used. Having something * else assigned to the dentry shouldn't happen as that would indicate * that the backing tmpfile "leaked" out of overlayfs. */ err = -EIO; if (WARN_ON(inode != d_inode(dentry))) goto put_realfile; /* inode reference was transferred to dentry */ inode = NULL; err = finish_open(file, dentry, ovl_dummy_open); put_realfile: /* Without FMODE_OPENED ->release() won't be called on @file */ if (!(file->f_mode & FMODE_OPENED)) ovl_file_free(file->private_data); put_inode: iput(inode); drop_write: ovl_drop_write(dentry); return err; } const struct inode_operations ovl_dir_inode_operations = { .lookup = ovl_lookup, .mkdir = ovl_mkdir, .symlink = ovl_symlink, .unlink = ovl_unlink, .rmdir = ovl_rmdir, .rename = ovl_rename, .link = ovl_link, .setattr = ovl_setattr, .create = ovl_create, .mknod = ovl_mknod, .permission = ovl_permission, .getattr = ovl_getattr, .listxattr = ovl_listxattr, .get_inode_acl = ovl_get_inode_acl, .get_acl = ovl_get_acl, .set_acl = ovl_set_acl, .update_time = ovl_update_time, .fileattr_get = ovl_fileattr_get, .fileattr_set = ovl_fileattr_set, .tmpfile = ovl_tmpfile, }; |
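/*
 * A minimal userspace sketch (illustrative, not part of this file) showing
 * how the directory operations registered above get exercised: mounting an
 * overlay and then creating, renaming and removing entries in the merged
 * tree ends up in ovl_mkdir(), ovl_rename() and ovl_unlink().  The paths
 * /lower, /upper, /work, /merged and the file names are assumptions; the
 * unlink target is assumed to already exist in a lower or upper layer.
 */
#include <sys/mount.h>
#include <sys/stat.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* upperdir and workdir must be on the same writable filesystem */
	if (mount("overlay", "/merged", "overlay", 0,
		  "lowerdir=/lower,upperdir=/upper,workdir=/work")) {
		perror("mount");
		return 1;
	}

	mkdir("/merged/newdir", 0755);			/* -> ovl_mkdir()  */
	rename("/merged/newdir", "/merged/dir");	/* -> ovl_rename() */
	unlink("/merged/somefile");			/* -> ovl_unlink() */

	return 0;
}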
// SPDX-License-Identifier: GPL-2.0
/*
 * devtmpfs - kernel-maintained tmpfs-based /dev
 *
 * Copyright (C) 2009, Kay Sievers <kay.sievers@vrfy.org>
 *
 * During bootup, before any driver core device is registered,
 * devtmpfs, a tmpfs-based filesystem is created. Every driver-core
 * device which requests a device node, will add a node in this
 * filesystem.
 * By default, all devices are named after the name of the device,
 * owned by root and have a default mode of 0600. Subsystems can
 * overwrite the default setting if needed.
*/ #define pr_fmt(fmt) "devtmpfs: " fmt #include <linux/kernel.h> #include <linux/syscalls.h> #include <linux/mount.h> #include <linux/device.h> #include <linux/blkdev.h> #include <linux/namei.h> #include <linux/fs.h> #include <linux/shmem_fs.h> #include <linux/ramfs.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/kthread.h> #include <linux/init_syscalls.h> #include <uapi/linux/mount.h> #include "base.h" #ifdef CONFIG_DEVTMPFS_SAFE #define DEVTMPFS_MFLAGS (MS_SILENT | MS_NOEXEC | MS_NOSUID) #else #define DEVTMPFS_MFLAGS (MS_SILENT) #endif static struct task_struct *thread; static int __initdata mount_dev = IS_ENABLED(CONFIG_DEVTMPFS_MOUNT); static DEFINE_SPINLOCK(req_lock); static struct req { struct req *next; struct completion done; int err; const char *name; umode_t mode; /* 0 => delete */ kuid_t uid; kgid_t gid; struct device *dev; } *requests; static int __init mount_param(char *str) { mount_dev = simple_strtoul(str, NULL, 0); return 1; } __setup("devtmpfs.mount=", mount_param); static struct vfsmount *mnt; static struct dentry *public_dev_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { struct super_block *s = mnt->mnt_sb; int err; atomic_inc(&s->s_active); down_write(&s->s_umount); err = reconfigure_single(s, flags, data); if (err < 0) { deactivate_locked_super(s); return ERR_PTR(err); } return dget(s->s_root); } static struct file_system_type internal_fs_type = { .name = "devtmpfs", #ifdef CONFIG_TMPFS .init_fs_context = shmem_init_fs_context, #else .init_fs_context = ramfs_init_fs_context, #endif .kill_sb = kill_litter_super, }; static struct file_system_type dev_fs_type = { .name = "devtmpfs", .mount = public_dev_mount, }; static int devtmpfs_submit_req(struct req *req, const char *tmp) { init_completion(&req->done); spin_lock(&req_lock); req->next = requests; requests = req; spin_unlock(&req_lock); wake_up_process(thread); wait_for_completion(&req->done); kfree(tmp); return req->err; } int devtmpfs_create_node(struct device *dev) { const char *tmp = NULL; struct req req; if (!thread) return 0; req.mode = 0; req.uid = GLOBAL_ROOT_UID; req.gid = GLOBAL_ROOT_GID; req.name = device_get_devnode(dev, &req.mode, &req.uid, &req.gid, &tmp); if (!req.name) return -ENOMEM; if (req.mode == 0) req.mode = 0600; if (is_blockdev(dev)) req.mode |= S_IFBLK; else req.mode |= S_IFCHR; req.dev = dev; return devtmpfs_submit_req(&req, tmp); } int devtmpfs_delete_node(struct device *dev) { const char *tmp = NULL; struct req req; if (!thread) return 0; req.name = device_get_devnode(dev, NULL, NULL, NULL, &tmp); if (!req.name) return -ENOMEM; req.mode = 0; req.dev = dev; return devtmpfs_submit_req(&req, tmp); } static int dev_mkdir(const char *name, umode_t mode) { struct dentry *dentry; struct path path; int err; dentry = kern_path_create(AT_FDCWD, name, &path, LOOKUP_DIRECTORY); if (IS_ERR(dentry)) return PTR_ERR(dentry); err = vfs_mkdir(&nop_mnt_idmap, d_inode(path.dentry), dentry, mode); if (!err) /* mark as kernel-created inode */ d_inode(dentry)->i_private = &thread; done_path_create(&path, dentry); return err; } static int create_path(const char *nodepath) { char *path; char *s; int err = 0; /* parent directories do not exist, create them */ path = kstrdup(nodepath, GFP_KERNEL); if (!path) return -ENOMEM; s = path; for (;;) { s = strchr(s, '/'); if (!s) break; s[0] = '\0'; err = dev_mkdir(path, 0755); if (err && err != -EEXIST) break; s[0] = '/'; s++; } kfree(path); return err; } static int handle_create(const char *nodename, umode_t mode, 
kuid_t uid, kgid_t gid, struct device *dev) { struct dentry *dentry; struct path path; int err; dentry = kern_path_create(AT_FDCWD, nodename, &path, 0); if (dentry == ERR_PTR(-ENOENT)) { create_path(nodename); dentry = kern_path_create(AT_FDCWD, nodename, &path, 0); } if (IS_ERR(dentry)) return PTR_ERR(dentry); err = vfs_mknod(&nop_mnt_idmap, d_inode(path.dentry), dentry, mode, dev->devt); if (!err) { struct iattr newattrs; newattrs.ia_mode = mode; newattrs.ia_uid = uid; newattrs.ia_gid = gid; newattrs.ia_valid = ATTR_MODE|ATTR_UID|ATTR_GID; inode_lock(d_inode(dentry)); notify_change(&nop_mnt_idmap, dentry, &newattrs, NULL); inode_unlock(d_inode(dentry)); /* mark as kernel-created inode */ d_inode(dentry)->i_private = &thread; } done_path_create(&path, dentry); return err; } static int dev_rmdir(const char *name) { struct path parent; struct dentry *dentry; int err; dentry = kern_path_locked(name, &parent); if (IS_ERR(dentry)) return PTR_ERR(dentry); if (d_really_is_positive(dentry)) { if (d_inode(dentry)->i_private == &thread) err = vfs_rmdir(&nop_mnt_idmap, d_inode(parent.dentry), dentry); else err = -EPERM; } else { err = -ENOENT; } dput(dentry); inode_unlock(d_inode(parent.dentry)); path_put(&parent); return err; } static int delete_path(const char *nodepath) { char *path; int err = 0; path = kstrdup(nodepath, GFP_KERNEL); if (!path) return -ENOMEM; for (;;) { char *base; base = strrchr(path, '/'); if (!base) break; base[0] = '\0'; err = dev_rmdir(path); if (err) break; } kfree(path); return err; } static int dev_mynode(struct device *dev, struct inode *inode, struct kstat *stat) { /* did we create it */ if (inode->i_private != &thread) return 0; /* does the dev_t match */ if (is_blockdev(dev)) { if (!S_ISBLK(stat->mode)) return 0; } else { if (!S_ISCHR(stat->mode)) return 0; } if (stat->rdev != dev->devt) return 0; /* ours */ return 1; } static int handle_remove(const char *nodename, struct device *dev) { struct path parent; struct dentry *dentry; int deleted = 0; int err; dentry = kern_path_locked(nodename, &parent); if (IS_ERR(dentry)) return PTR_ERR(dentry); if (d_really_is_positive(dentry)) { struct kstat stat; struct path p = {.mnt = parent.mnt, .dentry = dentry}; err = vfs_getattr(&p, &stat, STATX_TYPE | STATX_MODE, AT_STATX_SYNC_AS_STAT); if (!err && dev_mynode(dev, d_inode(dentry), &stat)) { struct iattr newattrs; /* * before unlinking this node, reset permissions * of possible references like hardlinks */ newattrs.ia_uid = GLOBAL_ROOT_UID; newattrs.ia_gid = GLOBAL_ROOT_GID; newattrs.ia_mode = stat.mode & ~0777; newattrs.ia_valid = ATTR_UID|ATTR_GID|ATTR_MODE; inode_lock(d_inode(dentry)); notify_change(&nop_mnt_idmap, dentry, &newattrs, NULL); inode_unlock(d_inode(dentry)); err = vfs_unlink(&nop_mnt_idmap, d_inode(parent.dentry), dentry, NULL); if (!err || err == -ENOENT) deleted = 1; } } else { err = -ENOENT; } dput(dentry); inode_unlock(d_inode(parent.dentry)); path_put(&parent); if (deleted && strchr(nodename, '/')) delete_path(nodename); return err; } /* * If configured, or requested by the commandline, devtmpfs will be * auto-mounted after the kernel mounted the root filesystem. 
*/ int __init devtmpfs_mount(void) { int err; if (!mount_dev) return 0; if (!thread) return 0; err = init_mount("devtmpfs", "dev", "devtmpfs", DEVTMPFS_MFLAGS, NULL); if (err) pr_info("error mounting %d\n", err); else pr_info("mounted\n"); return err; } static __initdata DECLARE_COMPLETION(setup_done); static int handle(const char *name, umode_t mode, kuid_t uid, kgid_t gid, struct device *dev) { if (mode) return handle_create(name, mode, uid, gid, dev); else return handle_remove(name, dev); } static void __noreturn devtmpfs_work_loop(void) { while (1) { spin_lock(&req_lock); while (requests) { struct req *req = requests; requests = NULL; spin_unlock(&req_lock); while (req) { struct req *next = req->next; req->err = handle(req->name, req->mode, req->uid, req->gid, req->dev); complete(&req->done); req = next; } spin_lock(&req_lock); } __set_current_state(TASK_INTERRUPTIBLE); spin_unlock(&req_lock); schedule(); } } static noinline int __init devtmpfs_setup(void *p) { int err; err = ksys_unshare(CLONE_NEWNS); if (err) goto out; err = init_mount("devtmpfs", "/", "devtmpfs", DEVTMPFS_MFLAGS, NULL); if (err) goto out; init_chdir("/.."); /* will traverse into overmounted root */ init_chroot("."); out: *(int *)p = err; return err; } /* * The __ref is because devtmpfs_setup needs to be __init for the routines it * calls. That call is done while devtmpfs_init, which is marked __init, * synchronously waits for it to complete. */ static int __ref devtmpfsd(void *p) { int err = devtmpfs_setup(p); complete(&setup_done); if (err) return err; devtmpfs_work_loop(); return 0; } /* * Create devtmpfs instance, driver-core devices will add their device * nodes here. */ int __init devtmpfs_init(void) { char opts[] = "mode=0755"; int err; mnt = vfs_kern_mount(&internal_fs_type, 0, "devtmpfs", opts); if (IS_ERR(mnt)) { pr_err("unable to create devtmpfs %ld\n", PTR_ERR(mnt)); return PTR_ERR(mnt); } err = register_filesystem(&dev_fs_type); if (err) { pr_err("unable to register devtmpfs type %d\n", err); return err; } thread = kthread_run(devtmpfsd, &err, "kdevtmpfs"); if (!IS_ERR(thread)) { wait_for_completion(&setup_done); } else { err = PTR_ERR(thread); thread = NULL; } if (err) { pr_err("unable to create devtmpfs %d\n", err); unregister_filesystem(&dev_fs_type); thread = NULL; return err; } pr_info("initialized\n"); return 0; } |
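/*
 * A minimal driver-side sketch (illustrative, not part of this file):
 * registering a device with a dev_t makes the request path above
 * (devtmpfs_create_node()) add a node under /dev, owned by root with mode
 * 0600 by default.  A class ->devnode() callback is one way for a subsystem
 * to override that default, as the header comment notes.  The names
 * foo_class, foo_devnode and foo_create_node are assumptions, and the exact
 * devnode() prototype varies slightly across kernel versions.
 */
#include <linux/device.h>
#include <linux/err.h>

static char *foo_devnode(const struct device *dev, umode_t *mode)
{
	if (mode)
		*mode = 0666;	/* override the 0600 default applied above */
	return NULL;		/* NULL keeps the default node name */
}

static struct class foo_class = {
	.name		= "foo",
	.devnode	= foo_devnode,
};

static int __init foo_create_node(dev_t foo_devt)
{
	struct device *dev;
	int err;

	err = class_register(&foo_class);
	if (err)
		return err;

	/* device_add() ends up calling devtmpfs_create_node() -> /dev/foo0 */
	dev = device_create(&foo_class, NULL, foo_devt, NULL, "foo%d", 0);
	return PTR_ERR_OR_ZERO(dev);
}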
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <net/netfilter/nf_tables.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_arp.h>
#include <net/netfilter/nf_tables_ipv4.h>
#include <net/netfilter/nf_tables_ipv6.h>

#ifdef CONFIG_NF_TABLES_IPV4
static unsigned int nft_do_chain_ipv4(void *priv,
				      struct sk_buff *skb,
				      const struct nf_hook_state *state)
{
	struct nft_pktinfo pkt;

	nft_set_pktinfo(&pkt, skb, state);
	nft_set_pktinfo_ipv4(&pkt);

	return nft_do_chain(&pkt, priv);
}

static const struct nft_chain_type nft_chain_filter_ipv4 = {
	.name		= "filter",
	.type		= NFT_CHAIN_T_DEFAULT,
	.family		= NFPROTO_IPV4,
	.hook_mask	= (1 << NF_INET_LOCAL_IN) |
			  (1 << NF_INET_LOCAL_OUT) |
			  (1 << NF_INET_FORWARD) |
			  (1 << NF_INET_PRE_ROUTING) |
			  (1 << NF_INET_POST_ROUTING),
	.hooks		= {
		[NF_INET_LOCAL_IN]	= nft_do_chain_ipv4,
		[NF_INET_LOCAL_OUT]	= nft_do_chain_ipv4,
		[NF_INET_FORWARD]	= nft_do_chain_ipv4,
		[NF_INET_PRE_ROUTING]	= nft_do_chain_ipv4,
		[NF_INET_POST_ROUTING]	= nft_do_chain_ipv4,
	},
};

static void nft_chain_filter_ipv4_init(void)
{
	nft_register_chain_type(&nft_chain_filter_ipv4);
}

static void nft_chain_filter_ipv4_fini(void)
{
	nft_unregister_chain_type(&nft_chain_filter_ipv4);
}
#else
static inline void nft_chain_filter_ipv4_init(void) {}
static inline void nft_chain_filter_ipv4_fini(void) {}
#endif /* CONFIG_NF_TABLES_IPV4 */

#ifdef CONFIG_NF_TABLES_ARP
static unsigned int nft_do_chain_arp(void *priv, struct sk_buff *skb,
				     const struct nf_hook_state *state)
{
	struct nft_pktinfo pkt;

	nft_set_pktinfo(&pkt, skb, state);
	nft_set_pktinfo_unspec(&pkt);

	return nft_do_chain(&pkt, priv);
}

static const struct nft_chain_type
nft_chain_filter_arp = { .name = "filter", .type = NFT_CHAIN_T_DEFAULT, .family = NFPROTO_ARP, .owner = THIS_MODULE, .hook_mask = (1 << NF_ARP_IN) | (1 << NF_ARP_OUT), .hooks = { [NF_ARP_IN] = nft_do_chain_arp, [NF_ARP_OUT] = nft_do_chain_arp, }, }; static void nft_chain_filter_arp_init(void) { nft_register_chain_type(&nft_chain_filter_arp); } static void nft_chain_filter_arp_fini(void) { nft_unregister_chain_type(&nft_chain_filter_arp); } #else static inline void nft_chain_filter_arp_init(void) {} static inline void nft_chain_filter_arp_fini(void) {} #endif /* CONFIG_NF_TABLES_ARP */ #ifdef CONFIG_NF_TABLES_IPV6 static unsigned int nft_do_chain_ipv6(void *priv, struct sk_buff *skb, const struct nf_hook_state *state) { struct nft_pktinfo pkt; nft_set_pktinfo(&pkt, skb, state); nft_set_pktinfo_ipv6(&pkt); return nft_do_chain(&pkt, priv); } static const struct nft_chain_type nft_chain_filter_ipv6 = { .name = "filter", .type = NFT_CHAIN_T_DEFAULT, .family = NFPROTO_IPV6, .hook_mask = (1 << NF_INET_LOCAL_IN) | (1 << NF_INET_LOCAL_OUT) | (1 << NF_INET_FORWARD) | (1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_POST_ROUTING), .hooks = { [NF_INET_LOCAL_IN] = nft_do_chain_ipv6, [NF_INET_LOCAL_OUT] = nft_do_chain_ipv6, [NF_INET_FORWARD] = nft_do_chain_ipv6, [NF_INET_PRE_ROUTING] = nft_do_chain_ipv6, [NF_INET_POST_ROUTING] = nft_do_chain_ipv6, }, }; static void nft_chain_filter_ipv6_init(void) { nft_register_chain_type(&nft_chain_filter_ipv6); } static void nft_chain_filter_ipv6_fini(void) { nft_unregister_chain_type(&nft_chain_filter_ipv6); } #else static inline void nft_chain_filter_ipv6_init(void) {} static inline void nft_chain_filter_ipv6_fini(void) {} #endif /* CONFIG_NF_TABLES_IPV6 */ #ifdef CONFIG_NF_TABLES_INET static unsigned int nft_do_chain_inet(void *priv, struct sk_buff *skb, const struct nf_hook_state *state) { struct nft_pktinfo pkt; nft_set_pktinfo(&pkt, skb, state); switch (state->pf) { case NFPROTO_IPV4: nft_set_pktinfo_ipv4(&pkt); break; case NFPROTO_IPV6: nft_set_pktinfo_ipv6(&pkt); break; default: break; } return nft_do_chain(&pkt, priv); } static unsigned int nft_do_chain_inet_ingress(void *priv, struct sk_buff *skb, const struct nf_hook_state *state) { struct nf_hook_state ingress_state = *state; struct nft_pktinfo pkt; switch (skb->protocol) { case htons(ETH_P_IP): /* Original hook is NFPROTO_NETDEV and NF_NETDEV_INGRESS. 
*/ ingress_state.pf = NFPROTO_IPV4; ingress_state.hook = NF_INET_INGRESS; nft_set_pktinfo(&pkt, skb, &ingress_state); if (nft_set_pktinfo_ipv4_ingress(&pkt) < 0) return NF_DROP; break; case htons(ETH_P_IPV6): ingress_state.pf = NFPROTO_IPV6; ingress_state.hook = NF_INET_INGRESS; nft_set_pktinfo(&pkt, skb, &ingress_state); if (nft_set_pktinfo_ipv6_ingress(&pkt) < 0) return NF_DROP; break; default: return NF_ACCEPT; } return nft_do_chain(&pkt, priv); } static const struct nft_chain_type nft_chain_filter_inet = { .name = "filter", .type = NFT_CHAIN_T_DEFAULT, .family = NFPROTO_INET, .hook_mask = (1 << NF_INET_INGRESS) | (1 << NF_INET_LOCAL_IN) | (1 << NF_INET_LOCAL_OUT) | (1 << NF_INET_FORWARD) | (1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_POST_ROUTING), .hooks = { [NF_INET_INGRESS] = nft_do_chain_inet_ingress, [NF_INET_LOCAL_IN] = nft_do_chain_inet, [NF_INET_LOCAL_OUT] = nft_do_chain_inet, [NF_INET_FORWARD] = nft_do_chain_inet, [NF_INET_PRE_ROUTING] = nft_do_chain_inet, [NF_INET_POST_ROUTING] = nft_do_chain_inet, }, }; static void nft_chain_filter_inet_init(void) { nft_register_chain_type(&nft_chain_filter_inet); } static void nft_chain_filter_inet_fini(void) { nft_unregister_chain_type(&nft_chain_filter_inet); } #else static inline void nft_chain_filter_inet_init(void) {} static inline void nft_chain_filter_inet_fini(void) {} #endif /* CONFIG_NF_TABLES_IPV6 */ #if IS_ENABLED(CONFIG_NF_TABLES_BRIDGE) static unsigned int nft_do_chain_bridge(void *priv, struct sk_buff *skb, const struct nf_hook_state *state) { struct nft_pktinfo pkt; nft_set_pktinfo(&pkt, skb, state); switch (eth_hdr(skb)->h_proto) { case htons(ETH_P_IP): nft_set_pktinfo_ipv4_validate(&pkt); break; case htons(ETH_P_IPV6): nft_set_pktinfo_ipv6_validate(&pkt); break; default: nft_set_pktinfo_unspec(&pkt); break; } return nft_do_chain(&pkt, priv); } static const struct nft_chain_type nft_chain_filter_bridge = { .name = "filter", .type = NFT_CHAIN_T_DEFAULT, .family = NFPROTO_BRIDGE, .hook_mask = (1 << NF_BR_PRE_ROUTING) | (1 << NF_BR_LOCAL_IN) | (1 << NF_BR_FORWARD) | (1 << NF_BR_LOCAL_OUT) | (1 << NF_BR_POST_ROUTING), .hooks = { [NF_BR_PRE_ROUTING] = nft_do_chain_bridge, [NF_BR_LOCAL_IN] = nft_do_chain_bridge, [NF_BR_FORWARD] = nft_do_chain_bridge, [NF_BR_LOCAL_OUT] = nft_do_chain_bridge, [NF_BR_POST_ROUTING] = nft_do_chain_bridge, }, }; static void nft_chain_filter_bridge_init(void) { nft_register_chain_type(&nft_chain_filter_bridge); } static void nft_chain_filter_bridge_fini(void) { nft_unregister_chain_type(&nft_chain_filter_bridge); } #else static inline void nft_chain_filter_bridge_init(void) {} static inline void nft_chain_filter_bridge_fini(void) {} #endif /* CONFIG_NF_TABLES_BRIDGE */ #ifdef CONFIG_NF_TABLES_NETDEV static unsigned int nft_do_chain_netdev(void *priv, struct sk_buff *skb, const struct nf_hook_state *state) { struct nft_pktinfo pkt; nft_set_pktinfo(&pkt, skb, state); switch (skb->protocol) { case htons(ETH_P_IP): nft_set_pktinfo_ipv4_validate(&pkt); break; case htons(ETH_P_IPV6): nft_set_pktinfo_ipv6_validate(&pkt); break; default: nft_set_pktinfo_unspec(&pkt); break; } return nft_do_chain(&pkt, priv); } static const struct nft_chain_type nft_chain_filter_netdev = { .name = "filter", .type = NFT_CHAIN_T_DEFAULT, .family = NFPROTO_NETDEV, .hook_mask = (1 << NF_NETDEV_INGRESS) | (1 << NF_NETDEV_EGRESS), .hooks = { [NF_NETDEV_INGRESS] = nft_do_chain_netdev, [NF_NETDEV_EGRESS] = nft_do_chain_netdev, }, }; static void nft_netdev_event(unsigned long event, struct net_device *dev, struct nft_ctx *ctx) { struct 
nft_base_chain *basechain = nft_base_chain(ctx->chain); struct nft_hook *hook, *found = NULL; int n = 0; list_for_each_entry(hook, &basechain->hook_list, list) { if (hook->ops.dev == dev) found = hook; n++; } if (!found) return; if (n > 1) { if (!(ctx->chain->table->flags & NFT_TABLE_F_DORMANT)) nf_unregister_net_hook(ctx->net, &found->ops); list_del_rcu(&found->list); kfree_rcu(found, rcu); return; } /* UNREGISTER events are also happening on netns exit. * * Although nf_tables core releases all tables/chains, only this event * handler provides guarantee that hook->ops.dev is still accessible, * so we cannot skip exiting net namespaces. */ __nft_release_basechain(ctx); } static int nf_tables_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct nft_base_chain *basechain; struct nftables_pernet *nft_net; struct nft_chain *chain, *nr; struct nft_table *table; struct nft_ctx ctx = { .net = dev_net(dev), }; if (event != NETDEV_UNREGISTER) return NOTIFY_DONE; nft_net = nft_pernet(ctx.net); mutex_lock(&nft_net->commit_mutex); list_for_each_entry(table, &nft_net->tables, list) { if (table->family != NFPROTO_NETDEV && table->family != NFPROTO_INET) continue; ctx.family = table->family; ctx.table = table; list_for_each_entry_safe(chain, nr, &table->chains, list) { if (!nft_is_base_chain(chain)) continue; basechain = nft_base_chain(chain); if (table->family == NFPROTO_INET && basechain->ops.hooknum != NF_INET_INGRESS) continue; ctx.chain = chain; nft_netdev_event(event, dev, &ctx); } } mutex_unlock(&nft_net->commit_mutex); return NOTIFY_DONE; } static struct notifier_block nf_tables_netdev_notifier = { .notifier_call = nf_tables_netdev_event, }; static int nft_chain_filter_netdev_init(void) { int err; nft_register_chain_type(&nft_chain_filter_netdev); err = register_netdevice_notifier(&nf_tables_netdev_notifier); if (err) goto err_register_netdevice_notifier; return 0; err_register_netdevice_notifier: nft_unregister_chain_type(&nft_chain_filter_netdev); return err; } static void nft_chain_filter_netdev_fini(void) { nft_unregister_chain_type(&nft_chain_filter_netdev); unregister_netdevice_notifier(&nf_tables_netdev_notifier); } #else static inline int nft_chain_filter_netdev_init(void) { return 0; } static inline void nft_chain_filter_netdev_fini(void) {} #endif /* CONFIG_NF_TABLES_NETDEV */ int __init nft_chain_filter_init(void) { int err; err = nft_chain_filter_netdev_init(); if (err < 0) return err; nft_chain_filter_ipv4_init(); nft_chain_filter_ipv6_init(); nft_chain_filter_arp_init(); nft_chain_filter_inet_init(); nft_chain_filter_bridge_init(); return 0; } void nft_chain_filter_fini(void) { nft_chain_filter_bridge_fini(); nft_chain_filter_inet_fini(); nft_chain_filter_arp_fini(); nft_chain_filter_ipv6_fini(); nft_chain_filter_ipv4_fini(); nft_chain_filter_netdev_fini(); } |
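/*
 * Example usage (illustrative, assuming the nft(8) userspace tool): the base
 * chain types registered above are what back "type filter" chains, so a
 * ruleset like the following ends up dispatching packets through
 * nft_do_chain_inet() at the corresponding hooks:
 *
 *	nft add table inet filter
 *	nft add chain inet filter input \
 *		'{ type filter hook input priority 0; policy accept; }'
 *	nft add rule inet filter input ct state established,related accept
 *
 * The table and chain names ("filter", "input") are conventions, not
 * requirements; only the chain type name "filter" must match the
 * nft_chain_type definitions in this file.
 */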
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Cryptographic API.
 *
 * Single-block cipher operations.
 *
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 * Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <crypto/algapi.h>
#include <crypto/internal/cipher.h>
#include <linux/kernel.h>
#include <linux/crypto.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "internal.h"

static int setkey_unaligned(struct crypto_cipher *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct cipher_alg *cia = crypto_cipher_alg(tfm);
	unsigned long alignmask = crypto_cipher_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cia->cia_setkey(crypto_cipher_tfm(tfm), alignbuffer, keylen);
	kfree_sensitive(buffer);
	return ret;
}

int crypto_cipher_setkey(struct crypto_cipher *tfm,
			 const u8 *key, unsigned int keylen)
{
	struct cipher_alg *cia = crypto_cipher_alg(tfm);
	unsigned long alignmask = crypto_cipher_alignmask(tfm);

	if (keylen < cia->cia_min_keysize || keylen > cia->cia_max_keysize)
		return -EINVAL;

	if ((unsigned long)key & alignmask)
		return setkey_unaligned(tfm, key, keylen);

	return cia->cia_setkey(crypto_cipher_tfm(tfm), key, keylen);
}
EXPORT_SYMBOL_NS_GPL(crypto_cipher_setkey, "CRYPTO_INTERNAL");

static inline void cipher_crypt_one(struct crypto_cipher *tfm,
				    u8 *dst, const u8 *src, bool enc)
{
	unsigned long alignmask = crypto_cipher_alignmask(tfm);
	struct cipher_alg *cia = crypto_cipher_alg(tfm);
	void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
		enc ? cia->cia_encrypt : cia->cia_decrypt;

	if (unlikely(((unsigned long)dst | (unsigned long)src) & alignmask)) {
		unsigned int bs = crypto_cipher_blocksize(tfm);
		u8 buffer[MAX_CIPHER_BLOCKSIZE + MAX_CIPHER_ALIGNMASK];
		u8 *tmp = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);

		memcpy(tmp, src, bs);
		fn(crypto_cipher_tfm(tfm), tmp, tmp);
		memcpy(dst, tmp, bs);
	} else {
		fn(crypto_cipher_tfm(tfm), dst, src);
	}
}

void crypto_cipher_encrypt_one(struct crypto_cipher *tfm,
			       u8 *dst, const u8 *src)
{
	cipher_crypt_one(tfm, dst, src, true);
}
EXPORT_SYMBOL_NS_GPL(crypto_cipher_encrypt_one, "CRYPTO_INTERNAL");

void crypto_cipher_decrypt_one(struct crypto_cipher *tfm,
			       u8 *dst, const u8 *src)
{
	cipher_crypt_one(tfm, dst, src, false);
}
EXPORT_SYMBOL_NS_GPL(crypto_cipher_decrypt_one, "CRYPTO_INTERNAL");

struct crypto_cipher *crypto_clone_cipher(struct crypto_cipher *cipher)
{
	struct crypto_tfm *tfm = crypto_cipher_tfm(cipher);
	struct crypto_alg *alg = tfm->__crt_alg;
	struct crypto_cipher *ncipher;
	struct crypto_tfm *ntfm;

	if (alg->cra_init)
		return ERR_PTR(-ENOSYS);

	if (unlikely(!crypto_mod_get(alg)))
		return ERR_PTR(-ESTALE);

	ntfm = __crypto_alloc_tfmgfp(alg, CRYPTO_ALG_TYPE_CIPHER,
				     CRYPTO_ALG_TYPE_MASK, GFP_ATOMIC);
	if (IS_ERR(ntfm)) {
		crypto_mod_put(alg);
		return ERR_CAST(ntfm);
	}

	ntfm->crt_flags = tfm->crt_flags;

	ncipher = __crypto_cipher_cast(ntfm);

	return ncipher;
}
EXPORT_SYMBOL_GPL(crypto_clone_cipher);
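/*
 * Illustrative sketch (editor's addition, not part of the file above): how a
 * caller of the single-block cipher API might encrypt one AES block.  The
 * function name is hypothetical; since the symbols above are exported in the
 * CRYPTO_INTERNAL namespace, a module would also need
 * MODULE_IMPORT_NS("CRYPTO_INTERNAL").
 */
static int example_encrypt_one_block(const u8 *key, unsigned int keylen,
				     const u8 src[16], u8 dst[16])
{
	struct crypto_cipher *tfm;
	int err;

	tfm = crypto_alloc_cipher("aes", 0, 0);	/* single-block AES transform */
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_cipher_setkey(tfm, key, keylen);
	if (!err)
		crypto_cipher_encrypt_one(tfm, dst, src);	/* one 16-byte block */

	crypto_free_cipher(tfm);
	return err;
}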
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2002-2005, Instant802 Networks, Inc.
 * Copyright 2005-2006, Devicescape Software, Inc.
 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
 * Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
 * Copyright 2013-2014 Intel Mobile Communications GmbH
 * Copyright (C) 2015-2017 Intel Deutschland GmbH
 * Copyright (C) 2018-2024 Intel Corporation
 *
 * element parsing for mac80211
 */

#include <net/mac80211.h>
#include <linux/netdevice.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <linux/bitmap.h>
#include <linux/crc32.h>
#include <net/net_namespace.h>
#include <net/cfg80211.h>
#include <net/rtnetlink.h>
#include <kunit/visibility.h>

#include "ieee80211_i.h"
#include "driver-ops.h"
#include "rate.h"
#include "mesh.h"
#include "wme.h"
#include "led.h"
#include "wep.h"

struct ieee80211_elems_parse {
	/* must be first for kfree to work */
	struct ieee802_11_elems elems;

	/* The basic Multi-Link element in the original elements */
	const struct element *ml_basic_elem;

	/* The reconfiguration Multi-Link element in the original elements */
	const struct element *ml_reconf_elem;

	/*
	 * scratch buffer that can be used for various element parsing related
	 * tasks, e.g., element de-fragmentation etc.
*/ size_t scratch_len; u8 *scratch_pos; u8 scratch[] __counted_by(scratch_len); }; static void ieee80211_parse_extension_element(u32 *crc, const struct element *elem, struct ieee80211_elems_parse *elems_parse, struct ieee80211_elems_parse_params *params) { struct ieee802_11_elems *elems = &elems_parse->elems; const void *data = elem->data + 1; bool calc_crc = false; u8 len; if (!elem->datalen) return; len = elem->datalen - 1; switch (elem->data[0]) { case WLAN_EID_EXT_HE_MU_EDCA: if (params->mode < IEEE80211_CONN_MODE_HE) break; calc_crc = true; if (len >= sizeof(*elems->mu_edca_param_set)) elems->mu_edca_param_set = data; break; case WLAN_EID_EXT_HE_CAPABILITY: if (params->mode < IEEE80211_CONN_MODE_HE) break; if (ieee80211_he_capa_size_ok(data, len)) { elems->he_cap = data; elems->he_cap_len = len; } break; case WLAN_EID_EXT_HE_OPERATION: if (params->mode < IEEE80211_CONN_MODE_HE) break; calc_crc = true; if (len >= sizeof(*elems->he_operation) && len >= ieee80211_he_oper_size(data) - 1) elems->he_operation = data; break; case WLAN_EID_EXT_UORA: if (params->mode < IEEE80211_CONN_MODE_HE) break; if (len >= 1) elems->uora_element = data; break; case WLAN_EID_EXT_MAX_CHANNEL_SWITCH_TIME: if (len == 3) elems->max_channel_switch_time = data; break; case WLAN_EID_EXT_MULTIPLE_BSSID_CONFIGURATION: if (len >= sizeof(*elems->mbssid_config_ie)) elems->mbssid_config_ie = data; break; case WLAN_EID_EXT_HE_SPR: if (params->mode < IEEE80211_CONN_MODE_HE) break; if (len >= sizeof(*elems->he_spr) && len >= ieee80211_he_spr_size(data) - 1) elems->he_spr = data; break; case WLAN_EID_EXT_HE_6GHZ_CAPA: if (params->mode < IEEE80211_CONN_MODE_HE) break; if (len >= sizeof(*elems->he_6ghz_capa)) elems->he_6ghz_capa = data; break; case WLAN_EID_EXT_EHT_CAPABILITY: if (params->mode < IEEE80211_CONN_MODE_EHT) break; if (ieee80211_eht_capa_size_ok(elems->he_cap, data, len, params->from_ap)) { elems->eht_cap = data; elems->eht_cap_len = len; } break; case WLAN_EID_EXT_EHT_OPERATION: if (params->mode < IEEE80211_CONN_MODE_EHT) break; if (ieee80211_eht_oper_size_ok(data, len)) elems->eht_operation = data; calc_crc = true; break; case WLAN_EID_EXT_EHT_MULTI_LINK: if (params->mode < IEEE80211_CONN_MODE_EHT) break; calc_crc = true; if (ieee80211_mle_size_ok(data, len)) { const struct ieee80211_multi_link_elem *mle = (void *)data; switch (le16_get_bits(mle->control, IEEE80211_ML_CONTROL_TYPE)) { case IEEE80211_ML_CONTROL_TYPE_BASIC: if (elems_parse->ml_basic_elem) { elems->parse_error |= IEEE80211_PARSE_ERR_DUP_NEST_ML_BASIC; break; } elems_parse->ml_basic_elem = elem; break; case IEEE80211_ML_CONTROL_TYPE_RECONF: elems_parse->ml_reconf_elem = elem; break; default: break; } } break; case WLAN_EID_EXT_BANDWIDTH_INDICATION: if (params->mode < IEEE80211_CONN_MODE_EHT) break; if (ieee80211_bandwidth_indication_size_ok(data, len)) elems->bandwidth_indication = data; calc_crc = true; break; case WLAN_EID_EXT_TID_TO_LINK_MAPPING: if (params->mode < IEEE80211_CONN_MODE_EHT) break; calc_crc = true; if (ieee80211_tid_to_link_map_size_ok(data, len) && elems->ttlm_num < ARRAY_SIZE(elems->ttlm)) { elems->ttlm[elems->ttlm_num] = (void *)data; elems->ttlm_num++; } break; } if (crc && calc_crc) *crc = crc32_be(*crc, (void *)elem, elem->datalen + 2); } static void ieee80211_parse_tpe(struct ieee80211_parsed_tpe *tpe, const u8 *data, u8 len) { const struct ieee80211_tx_pwr_env *env = (const void *)data; u8 count, interpret, category; u8 *out, N, *cnt_out = NULL, *N_out = NULL; if (!ieee80211_valid_tpe_element(data, len)) return; count = 
u8_get_bits(env->info, IEEE80211_TX_PWR_ENV_INFO_COUNT); interpret = u8_get_bits(env->info, IEEE80211_TX_PWR_ENV_INFO_INTERPRET); category = u8_get_bits(env->info, IEEE80211_TX_PWR_ENV_INFO_CATEGORY); switch (interpret) { case IEEE80211_TPE_LOCAL_EIRP: out = tpe->max_local[category].power; cnt_out = &tpe->max_local[category].count; tpe->max_local[category].valid = true; break; case IEEE80211_TPE_REG_CLIENT_EIRP: out = tpe->max_reg_client[category].power; cnt_out = &tpe->max_reg_client[category].count; tpe->max_reg_client[category].valid = true; break; case IEEE80211_TPE_LOCAL_EIRP_PSD: out = tpe->psd_local[category].power; cnt_out = &tpe->psd_local[category].count; N_out = &tpe->psd_local[category].n; tpe->psd_local[category].valid = true; break; case IEEE80211_TPE_REG_CLIENT_EIRP_PSD: out = tpe->psd_reg_client[category].power; cnt_out = &tpe->psd_reg_client[category].count; N_out = &tpe->psd_reg_client[category].n; tpe->psd_reg_client[category].valid = true; break; } switch (interpret) { case IEEE80211_TPE_LOCAL_EIRP: case IEEE80211_TPE_REG_CLIENT_EIRP: /* count was validated <= 3, plus 320 MHz */ BUILD_BUG_ON(IEEE80211_TPE_EIRP_ENTRIES_320MHZ < 5); memcpy(out, env->variable, count + 1); *cnt_out = count + 1; /* separately take 320 MHz if present */ if (count == 3 && len > sizeof(*env) + count + 1) { out[4] = env->variable[4]; *cnt_out = 5; } break; case IEEE80211_TPE_LOCAL_EIRP_PSD: case IEEE80211_TPE_REG_CLIENT_EIRP_PSD: if (!count) { memset(out, env->variable[0], IEEE80211_TPE_PSD_ENTRIES_320MHZ); *cnt_out = IEEE80211_TPE_PSD_ENTRIES_320MHZ; break; } N = 1 << (count - 1); memcpy(out, env->variable, N); *cnt_out = N; *N_out = N; if (len > sizeof(*env) + N) { int K = u8_get_bits(env->variable[N], IEEE80211_TX_PWR_ENV_EXT_COUNT); K = min(K, IEEE80211_TPE_PSD_ENTRIES_320MHZ - N); memcpy(out + N, env->variable + N + 1, K); (*cnt_out) += K; } break; } } static u32 _ieee802_11_parse_elems_full(struct ieee80211_elems_parse_params *params, struct ieee80211_elems_parse *elems_parse, const struct element *check_inherit) { struct ieee802_11_elems *elems = &elems_parse->elems; const struct element *elem; bool calc_crc = params->filter != 0; DECLARE_BITMAP(seen_elems, 256); u32 crc = params->crc; bitmap_zero(seen_elems, 256); for_each_element(elem, params->start, params->len) { const struct element *subelem; u8 elem_parse_failed; u8 id = elem->id; u8 elen = elem->datalen; const u8 *pos = elem->data; if (check_inherit && !cfg80211_is_element_inherited(elem, check_inherit)) continue; switch (id) { case WLAN_EID_SSID: case WLAN_EID_SUPP_RATES: case WLAN_EID_FH_PARAMS: case WLAN_EID_DS_PARAMS: case WLAN_EID_CF_PARAMS: case WLAN_EID_TIM: case WLAN_EID_IBSS_PARAMS: case WLAN_EID_CHALLENGE: case WLAN_EID_RSN: case WLAN_EID_ERP_INFO: case WLAN_EID_EXT_SUPP_RATES: case WLAN_EID_HT_CAPABILITY: case WLAN_EID_HT_OPERATION: case WLAN_EID_VHT_CAPABILITY: case WLAN_EID_VHT_OPERATION: case WLAN_EID_MESH_ID: case WLAN_EID_MESH_CONFIG: case WLAN_EID_PEER_MGMT: case WLAN_EID_PREQ: case WLAN_EID_PREP: case WLAN_EID_PERR: case WLAN_EID_RANN: case WLAN_EID_CHANNEL_SWITCH: case WLAN_EID_EXT_CHANSWITCH_ANN: case WLAN_EID_COUNTRY: case WLAN_EID_PWR_CONSTRAINT: case WLAN_EID_TIMEOUT_INTERVAL: case WLAN_EID_SECONDARY_CHANNEL_OFFSET: case WLAN_EID_WIDE_BW_CHANNEL_SWITCH: case WLAN_EID_CHAN_SWITCH_PARAM: case WLAN_EID_EXT_CAPABILITY: case WLAN_EID_CHAN_SWITCH_TIMING: case WLAN_EID_LINK_ID: case WLAN_EID_BSS_MAX_IDLE_PERIOD: case WLAN_EID_RSNX: case WLAN_EID_S1G_BCN_COMPAT: case WLAN_EID_S1G_CAPABILITIES: case 
WLAN_EID_S1G_OPERATION: case WLAN_EID_AID_RESPONSE: case WLAN_EID_S1G_SHORT_BCN_INTERVAL: /* * not listing WLAN_EID_CHANNEL_SWITCH_WRAPPER -- it seems possible * that if the content gets bigger it might be needed more than once */ if (test_bit(id, seen_elems)) { elems->parse_error |= IEEE80211_PARSE_ERR_DUP_ELEM; continue; } break; } if (calc_crc && id < 64 && (params->filter & (1ULL << id))) crc = crc32_be(crc, pos - 2, elen + 2); elem_parse_failed = 0; switch (id) { case WLAN_EID_LINK_ID: if (elen + 2 < sizeof(struct ieee80211_tdls_lnkie)) { elem_parse_failed = IEEE80211_PARSE_ERR_BAD_ELEM_SIZE; break; } elems->lnk_id = (void *)(pos - 2); break; case WLAN_EID_CHAN_SWITCH_TIMING: if (elen < sizeof(struct ieee80211_ch_switch_timing)) { elem_parse_failed = IEEE80211_PARSE_ERR_BAD_ELEM_SIZE; break; } elems->ch_sw_timing = (void *)pos; break; case WLAN_EID_EXT_CAPABILITY: elems->ext_capab = pos; elems->ext_capab_len = elen; break; case WLAN_EID_SSID: elems->ssid = pos; elems->ssid_len = elen; break; case WLAN_EID_SUPP_RATES: elems->supp_rates = pos; elems->supp_rates_len = elen; break; case WLAN_EID_DS_PARAMS: if (elen >= 1) elems->ds_params = pos; else elem_parse_failed = IEEE80211_PARSE_ERR_BAD_ELEM_SIZE; break; case WLAN_EID_TIM: if (elen >= sizeof(struct ieee80211_tim_ie)) { elems->tim = (void *)pos; elems->tim_len = elen; } else elem_parse_failed = IEEE80211_PARSE_ERR_BAD_ELEM_SIZE; break; case WLAN_EID_VENDOR_SPECIFIC: if (elen >= 4 && pos[0] == 0x00 && pos[1] == 0x50 && pos[2] == 0xf2) { /* Microsoft OUI (00:50:F2) */ if (calc_crc) crc = crc32_be(crc, pos - 2, elen + 2); if (elen >= 5 && pos[3] == 2) { /* OUI Type 2 - WMM IE */ if (pos[4] == 0) { elems->wmm_info = pos; elems->wmm_info_len = elen; } else if (pos[4] == 1) { elems->wmm_param = pos; elems->wmm_param_len = elen; } } } break; case WLAN_EID_RSN: elems->rsn = pos; elems->rsn_len = elen; break; case WLAN_EID_ERP_INFO: if (elen >= 1) elems->erp_info = pos; else elem_parse_failed = IEEE80211_PARSE_ERR_BAD_ELEM_SIZE; break; case WLAN_EID_EXT_SUPP_RATES: elems->ext_supp_rates = pos; elems->ext_supp_rates_len = elen; break; case WLAN_EID_HT_CAPABILITY: if (params->mode < IEEE80211_CONN_MODE_HT) break; if (elen >= sizeof(struct ieee80211_ht_cap)) elems->ht_cap_elem = (void *)pos; else elem_parse_failed = IEEE80211_PARSE_ERR_BAD_ELEM_SIZE; break; case WLAN_EID_HT_OPERATION: if (params->mode < IEEE80211_CONN_MODE_HT) break; if (elen >= sizeof(struct ieee80211_ht_operation)) elems->ht_operation = (void *)pos; else elem_parse_failed = IEEE80211_PARSE_ERR_BAD_ELEM_SIZE; break; case WLAN_EID_VHT_CAPABILITY: if (params->mode < IEEE80211_CONN_MODE_VHT) break; if (elen >= sizeof(struct ieee80211_vht_cap)) elems->vht_cap_elem = (void *)pos; else elem_parse_failed = IEEE80211_PARSE_ERR_BAD_ELEM_SIZE; break; case WLAN_EID_VHT_OPERATION: if (params->mode < IEEE80211_CONN_MODE_VHT) break; if (elen >= sizeof(struct ieee80211_vht_operation)) { elems->vht_operation = (void *)pos; if (calc_crc) crc = crc32_be(crc, pos - 2, elen + 2); break; } elem_parse_failed = IEEE80211_PARSE_ERR_BAD_ELEM_SIZE; break; case WLAN_EID_OPMODE_NOTIF: if (params->mode < IEEE80211_CONN_MODE_VHT) break; if (elen > 0) { elems->opmode_notif = pos; if (calc_crc) crc = crc32_be(crc, pos - 2, elen + 2); break; } elem_parse_failed = IEEE80211_PARSE_ERR_BAD_ELEM_SIZE; break; case WLAN_EID_MESH_ID: elems->mesh_id = pos; elems->mesh_id_len = elen; break; case WLAN_EID_MESH_CONFIG: if (elen >= sizeof(struct ieee80211_meshconf_ie)) elems->mesh_config = (void *)pos; else 
elem_parse_failed = IEEE80211_PARSE_ERR_BAD_ELEM_SIZE; break; case WLAN_EID_PEER_MGMT: elems->peering = pos; elems->peering_len = elen; break; case WLAN_EID_MESH_AWAKE_WINDOW: if (elen >= 2) elems->awake_window = (void *)pos; break; case WLAN_EID_PREQ: elems->preq = pos; elems->preq_len = elen; break; case WLAN_EID_PREP: elems->prep = pos; elems->prep_len = elen; break; case WLAN_EID_PERR: elems->perr = pos; elems->perr_len = elen; break; case WLAN_EID_RANN: if (elen >= sizeof(struct ieee80211_rann_ie)) elems->rann = (void *)pos; else elem_parse_failed = IEEE80211_PARSE_ERR_BAD_ELEM_SIZE; break; case WLAN_EID_CHANNEL_SWITCH: if (elen != sizeof(struct ieee80211_channel_sw_ie)) { elem_parse_failed = IEEE80211_PARSE_ERR_BAD_ELEM_SIZE; break; } elems->ch_switch_ie = (void *)pos; break; case WLAN_EID_EXT_CHANSWITCH_ANN: if (elen != sizeof(struct ieee80211_ext_chansw_ie)) { elem_parse_failed = IEEE80211_PARSE_ERR_BAD_ELEM_SIZE; break; } elems->ext_chansw_ie = (void *)pos; break; case WLAN_EID_SECONDARY_CHANNEL_OFFSET: if (params->mode < IEEE80211_CONN_MODE_HT) break; if (elen != sizeof(struct ieee80211_sec_chan_offs_ie)) { elem_parse_failed = IEEE80211_PARSE_ERR_BAD_ELEM_SIZE; break; } elems->sec_chan_offs = (void *)pos; break; case WLAN_EID_CHAN_SWITCH_PARAM: if (elen < sizeof(*elems->mesh_chansw_params_ie)) { elem_parse_failed = IEEE80211_PARSE_ERR_BAD_ELEM_SIZE; break; } elems->mesh_chansw_params_ie = (void *)pos; break; case WLAN_EID_WIDE_BW_CHANNEL_SWITCH: if (params->mode < IEEE80211_CONN_MODE_VHT) break; if (!params->action) { elem_parse_failed = IEEE80211_PARSE_ERR_UNEXPECTED_ELEM; break; } if (elen < sizeof(*elems->wide_bw_chansw_ie)) { elem_parse_failed = IEEE80211_PARSE_ERR_BAD_ELEM_SIZE; break; } elems->wide_bw_chansw_ie = (void *)pos; break; case WLAN_EID_CHANNEL_SWITCH_WRAPPER: if (params->mode < IEEE80211_CONN_MODE_VHT) break; if (params->action) { elem_parse_failed = IEEE80211_PARSE_ERR_UNEXPECTED_ELEM; break; } /* * This is a bit tricky, but as we only care about * a few elements, parse them out manually. */ subelem = cfg80211_find_elem(WLAN_EID_WIDE_BW_CHANNEL_SWITCH, pos, elen); if (subelem) { if (subelem->datalen >= sizeof(*elems->wide_bw_chansw_ie)) elems->wide_bw_chansw_ie = (void *)subelem->data; else elem_parse_failed = IEEE80211_PARSE_ERR_BAD_ELEM_SIZE; } if (params->mode < IEEE80211_CONN_MODE_EHT) break; subelem = cfg80211_find_ext_elem(WLAN_EID_EXT_BANDWIDTH_INDICATION, pos, elen); if (subelem) { const void *edata = subelem->data + 1; u8 edatalen = subelem->datalen - 1; if (ieee80211_bandwidth_indication_size_ok(edata, edatalen)) elems->bandwidth_indication = edata; else elem_parse_failed = IEEE80211_PARSE_ERR_BAD_ELEM_SIZE; } subelem = cfg80211_find_ext_elem(WLAN_EID_TX_POWER_ENVELOPE, pos, elen); if (subelem) ieee80211_parse_tpe(&elems->csa_tpe, subelem->data + 1, subelem->datalen - 1); break; case WLAN_EID_COUNTRY: elems->country_elem = pos; elems->country_elem_len = elen; break; case WLAN_EID_PWR_CONSTRAINT: if (elen != 1) { elem_parse_failed = IEEE80211_PARSE_ERR_BAD_ELEM_SIZE; break; } elems->pwr_constr_elem = pos; break; case WLAN_EID_CISCO_VENDOR_SPECIFIC: /* Lots of different options exist, but we only care * about the Dynamic Transmit Power Control element. * First check for the Cisco OUI, then for the DTPC * tag (0x00). 
*/ if (elen < 4) { elem_parse_failed = IEEE80211_PARSE_ERR_BAD_ELEM_SIZE; break; } if (pos[0] != 0x00 || pos[1] != 0x40 || pos[2] != 0x96 || pos[3] != 0x00) break; if (elen != 6) { elem_parse_failed = IEEE80211_PARSE_ERR_BAD_ELEM_SIZE; break; } if (calc_crc) crc = crc32_be(crc, pos - 2, elen + 2); elems->cisco_dtpc_elem = pos; break; case WLAN_EID_ADDBA_EXT: if (elen < sizeof(struct ieee80211_addba_ext_ie)) { elem_parse_failed = IEEE80211_PARSE_ERR_BAD_ELEM_SIZE; break; } elems->addba_ext_ie = (void *)pos; break; case WLAN_EID_TIMEOUT_INTERVAL: if (elen >= sizeof(struct ieee80211_timeout_interval_ie)) elems->timeout_int = (void *)pos; else elem_parse_failed = IEEE80211_PARSE_ERR_BAD_ELEM_SIZE; break; case WLAN_EID_BSS_MAX_IDLE_PERIOD: if (elen >= sizeof(*elems->max_idle_period_ie)) elems->max_idle_period_ie = (void *)pos; break; case WLAN_EID_RSNX: elems->rsnx = pos; elems->rsnx_len = elen; break; case WLAN_EID_TX_POWER_ENVELOPE: if (params->mode < IEEE80211_CONN_MODE_HE) break; ieee80211_parse_tpe(&elems->tpe, pos, elen); break; case WLAN_EID_EXTENSION: ieee80211_parse_extension_element(calc_crc ? &crc : NULL, elem, elems_parse, params); break; case WLAN_EID_S1G_CAPABILITIES: if (params->mode != IEEE80211_CONN_MODE_S1G) break; if (elen >= sizeof(*elems->s1g_capab)) elems->s1g_capab = (void *)pos; else elem_parse_failed = IEEE80211_PARSE_ERR_BAD_ELEM_SIZE; break; case WLAN_EID_S1G_OPERATION: if (params->mode != IEEE80211_CONN_MODE_S1G) break; if (elen == sizeof(*elems->s1g_oper)) elems->s1g_oper = (void *)pos; else elem_parse_failed = IEEE80211_PARSE_ERR_BAD_ELEM_SIZE; break; case WLAN_EID_S1G_BCN_COMPAT: if (params->mode != IEEE80211_CONN_MODE_S1G) break; if (elen == sizeof(*elems->s1g_bcn_compat)) elems->s1g_bcn_compat = (void *)pos; else elem_parse_failed = IEEE80211_PARSE_ERR_BAD_ELEM_SIZE; break; case WLAN_EID_AID_RESPONSE: if (params->mode != IEEE80211_CONN_MODE_S1G) break; if (elen == sizeof(struct ieee80211_aid_response_ie)) elems->aid_resp = (void *)pos; else elem_parse_failed = IEEE80211_PARSE_ERR_BAD_ELEM_SIZE; break; default: break; } if (elem_parse_failed) elems->parse_error |= elem_parse_failed; else __set_bit(id, seen_elems); } if (!for_each_element_completed(elem, params->start, params->len)) elems->parse_error |= IEEE80211_PARSE_ERR_INVALID_END; return crc; } static size_t ieee802_11_find_bssid_profile(const u8 *start, size_t len, struct ieee802_11_elems *elems, struct cfg80211_bss *bss, u8 *nontransmitted_profile) { const struct element *elem, *sub; size_t profile_len = 0; bool found = false; if (!bss || !bss->transmitted_bss) return profile_len; for_each_element_id(elem, WLAN_EID_MULTIPLE_BSSID, start, len) { if (elem->datalen < 2) continue; if (elem->data[0] < 1 || elem->data[0] > 8) continue; for_each_element(sub, elem->data + 1, elem->datalen - 1) { u8 new_bssid[ETH_ALEN]; const u8 *index; if (sub->id != 0 || sub->datalen < 4) { /* not a valid BSS profile */ continue; } if (sub->data[0] != WLAN_EID_NON_TX_BSSID_CAP || sub->data[1] != 2) { /* The first element of the * Nontransmitted BSSID Profile is not * the Nontransmitted BSSID Capability * element. 
*/ continue; } memset(nontransmitted_profile, 0, len); profile_len = cfg80211_merge_profile(start, len, elem, sub, nontransmitted_profile, len); /* found a Nontransmitted BSSID Profile */ index = cfg80211_find_ie(WLAN_EID_MULTI_BSSID_IDX, nontransmitted_profile, profile_len); if (!index || index[1] < 1 || index[2] == 0) { /* Invalid MBSSID Index element */ continue; } cfg80211_gen_new_bssid(bss->transmitted_bss->bssid, elem->data[0], index[2], new_bssid); if (ether_addr_equal(new_bssid, bss->bssid)) { found = true; elems->bssid_index_len = index[1]; elems->bssid_index = (void *)&index[2]; break; } } } return found ? profile_len : 0; } static void ieee80211_mle_get_sta_prof(struct ieee80211_elems_parse *elems_parse, u8 link_id) { struct ieee802_11_elems *elems = &elems_parse->elems; const struct ieee80211_multi_link_elem *ml = elems->ml_basic; ssize_t ml_len = elems->ml_basic_len; const struct element *sub; for_each_mle_subelement(sub, (u8 *)ml, ml_len) { struct ieee80211_mle_per_sta_profile *prof = (void *)sub->data; ssize_t sta_prof_len; u16 control; if (sub->id != IEEE80211_MLE_SUBELEM_PER_STA_PROFILE) continue; if (!ieee80211_mle_basic_sta_prof_size_ok(sub->data, sub->datalen)) return; control = le16_to_cpu(prof->control); if (link_id != u16_get_bits(control, IEEE80211_MLE_STA_CONTROL_LINK_ID)) continue; if (!(control & IEEE80211_MLE_STA_CONTROL_COMPLETE_PROFILE)) return; /* the sub element can be fragmented */ sta_prof_len = cfg80211_defragment_element(sub, (u8 *)ml, ml_len, elems_parse->scratch_pos, elems_parse->scratch + elems_parse->scratch_len - elems_parse->scratch_pos, IEEE80211_MLE_SUBELEM_FRAGMENT); if (sta_prof_len < 0) return; elems->prof = (void *)elems_parse->scratch_pos; elems->sta_prof_len = sta_prof_len; elems_parse->scratch_pos += sta_prof_len; return; } } static void ieee80211_mle_parse_link(struct ieee80211_elems_parse *elems_parse, struct ieee80211_elems_parse_params *params) { struct ieee802_11_elems *elems = &elems_parse->elems; struct ieee80211_mle_per_sta_profile *prof; struct ieee80211_elems_parse_params sub = { .mode = params->mode, .action = params->action, .from_ap = params->from_ap, .link_id = -1, }; ssize_t ml_len = elems->ml_basic_len; const struct element *non_inherit = NULL; const u8 *end; ml_len = cfg80211_defragment_element(elems_parse->ml_basic_elem, elems->ie_start, elems->total_len, elems_parse->scratch_pos, elems_parse->scratch + elems_parse->scratch_len - elems_parse->scratch_pos, WLAN_EID_FRAGMENT); if (ml_len < 0) return; elems->ml_basic = (const void *)elems_parse->scratch_pos; elems->ml_basic_len = ml_len; elems_parse->scratch_pos += ml_len; if (params->link_id == -1) return; ieee80211_mle_get_sta_prof(elems_parse, params->link_id); prof = elems->prof; if (!prof) return; /* check if we have the 4 bytes for the fixed part in assoc response */ if (elems->sta_prof_len < sizeof(*prof) + prof->sta_info_len - 1 + 4) { elems->prof = NULL; elems->sta_prof_len = 0; return; } /* * Skip the capability information and the status code that are expected * as part of the station profile in association response frames. Note * the -1 is because the 'sta_info_len' is accounted to as part of the * per-STA profile, but not part of the 'u8 variable[]' portion. 
*/ sub.start = prof->variable + prof->sta_info_len - 1 + 4; end = (const u8 *)prof + elems->sta_prof_len; sub.len = end - sub.start; non_inherit = cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE, sub.start, sub.len); _ieee802_11_parse_elems_full(&sub, elems_parse, non_inherit); } static void ieee80211_mle_defrag_reconf(struct ieee80211_elems_parse *elems_parse) { struct ieee802_11_elems *elems = &elems_parse->elems; ssize_t ml_len; ml_len = cfg80211_defragment_element(elems_parse->ml_reconf_elem, elems->ie_start, elems->total_len, elems_parse->scratch_pos, elems_parse->scratch + elems_parse->scratch_len - elems_parse->scratch_pos, WLAN_EID_FRAGMENT); if (ml_len < 0) return; elems->ml_reconf = (void *)elems_parse->scratch_pos; elems->ml_reconf_len = ml_len; elems_parse->scratch_pos += ml_len; } struct ieee802_11_elems * ieee802_11_parse_elems_full(struct ieee80211_elems_parse_params *params) { struct ieee80211_elems_parse *elems_parse; struct ieee802_11_elems *elems; const struct element *non_inherit = NULL; u8 *nontransmitted_profile; int nontransmitted_profile_len = 0; size_t scratch_len = 3 * params->len; BUILD_BUG_ON(offsetof(typeof(*elems_parse), elems) != 0); elems_parse = kzalloc(struct_size(elems_parse, scratch, scratch_len), GFP_ATOMIC); if (!elems_parse) return NULL; elems_parse->scratch_len = scratch_len; elems_parse->scratch_pos = elems_parse->scratch; elems = &elems_parse->elems; elems->ie_start = params->start; elems->total_len = params->len; /* set all TPE entries to unlimited (but invalid) */ ieee80211_clear_tpe(&elems->tpe); ieee80211_clear_tpe(&elems->csa_tpe); nontransmitted_profile = elems_parse->scratch_pos; nontransmitted_profile_len = ieee802_11_find_bssid_profile(params->start, params->len, elems, params->bss, nontransmitted_profile); elems_parse->scratch_pos += nontransmitted_profile_len; non_inherit = cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE, nontransmitted_profile, nontransmitted_profile_len); elems->crc = _ieee802_11_parse_elems_full(params, elems_parse, non_inherit); /* Override with nontransmitted profile, if found */ if (nontransmitted_profile_len) { struct ieee80211_elems_parse_params sub = { .mode = params->mode, .start = nontransmitted_profile, .len = nontransmitted_profile_len, .action = params->action, .link_id = params->link_id, }; _ieee802_11_parse_elems_full(&sub, elems_parse, NULL); } ieee80211_mle_parse_link(elems_parse, params); ieee80211_mle_defrag_reconf(elems_parse); if (elems->tim && !elems->parse_error) { const struct ieee80211_tim_ie *tim_ie = elems->tim; elems->dtim_period = tim_ie->dtim_period; elems->dtim_count = tim_ie->dtim_count; } /* Override DTIM period and count if needed */ if (elems->bssid_index && elems->bssid_index_len >= offsetofend(struct ieee80211_bssid_index, dtim_period)) elems->dtim_period = elems->bssid_index->dtim_period; if (elems->bssid_index && elems->bssid_index_len >= offsetofend(struct ieee80211_bssid_index, dtim_count)) elems->dtim_count = elems->bssid_index->dtim_count; return elems; } EXPORT_SYMBOL_IF_KUNIT(ieee802_11_parse_elems_full); int ieee80211_parse_bitrates(enum nl80211_chan_width width, const struct ieee80211_supported_band *sband, const u8 *srates, int srates_len, u32 *rates) { u32 rate_flags = ieee80211_chanwidth_rate_flags(width); struct ieee80211_rate *br; int brate, rate, i, j, count = 0; *rates = 0; for (i = 0; i < srates_len; i++) { rate = srates[i] & 0x7f; for (j = 0; j < sband->n_bitrates; j++) { br = &sband->bitrates[j]; if ((rate_flags & br->flags) != rate_flags) continue; brate 
= DIV_ROUND_UP(br->bitrate, 5); if (brate == rate) { *rates |= BIT(j); count++; break; } } } return count; }
#ifndef INTERNAL_IO_SLIST_H
#define INTERNAL_IO_SLIST_H

#include <linux/io_uring_types.h>

#define __wq_list_for_each(pos, head)			\
	for (pos = (head)->first; pos; pos = (pos)->next)

#define wq_list_for_each(pos, prv, head)		\
	for (pos = (head)->first, prv = NULL; pos; prv = pos, pos = (pos)->next)

#define wq_list_for_each_resume(pos, prv)		\
	for (; pos; prv = pos, pos = (pos)->next)

#define wq_list_empty(list)	(READ_ONCE((list)->first) == NULL)

#define INIT_WQ_LIST(list)	do {			\
	(list)->first = NULL;				\
} while (0)

static inline void wq_list_add_after(struct io_wq_work_node *node,
				     struct io_wq_work_node *pos,
				     struct io_wq_work_list *list)
{
	struct io_wq_work_node *next = pos->next;

	pos->next = node;
	node->next = next;
	if (!next)
		list->last = node;
}

static inline void wq_list_add_tail(struct io_wq_work_node *node,
				    struct io_wq_work_list *list)
{
	node->next = NULL;
	if (!list->first) {
		list->last = node;
		WRITE_ONCE(list->first, node);
	} else {
		list->last->next = node;
		list->last = node;
	}
}

static inline void wq_list_add_head(struct io_wq_work_node *node,
				    struct io_wq_work_list *list)
{
	node->next = list->first;
	if (!node->next)
		list->last = node;
	WRITE_ONCE(list->first, node);
}

static inline void wq_list_cut(struct io_wq_work_list *list,
			       struct io_wq_work_node *last,
			       struct io_wq_work_node *prev)
{
	/* first in the list, if prev==NULL */
	if (!prev)
		WRITE_ONCE(list->first, last->next);
	else
		prev->next = last->next;

	if (last == list->last)
		list->last = prev;
	last->next = NULL;
}

static inline void __wq_list_splice(struct io_wq_work_list *list,
				    struct io_wq_work_node *to)
{
	list->last->next = to->next;
	to->next = list->first;
	INIT_WQ_LIST(list);
}

static inline bool wq_list_splice(struct io_wq_work_list *list,
				  struct io_wq_work_node *to)
{
	if (!wq_list_empty(list)) {
		__wq_list_splice(list, to);
		return true;
	}
	return false;
}

static inline void wq_stack_add_head(struct io_wq_work_node *node,
				     struct io_wq_work_node *stack)
{
	node->next = stack->next;
	stack->next = node;
}

static inline void wq_list_del(struct io_wq_work_list *list,
			       struct io_wq_work_node *node,
			       struct io_wq_work_node *prev)
{
	wq_list_cut(list, node, prev);
}

static inline
struct io_wq_work_node *wq_stack_extract(struct io_wq_work_node *stack)
{
	struct io_wq_work_node *node = stack->next;

	stack->next = node->next;
	return node;
}

static inline struct io_wq_work *wq_next_work(struct io_wq_work *work)
{
	if (!work->list.next)
		return NULL;

	return container_of(work->list.next, struct io_wq_work, list);
}

#endif // INTERNAL_IO_SLIST_H
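/*
 * Illustrative sketch (editor's addition, not part of the header above):
 * minimal use of the singly-linked work list.  The io_wq_work/io_wq_work_node
 * types come from <linux/io_uring_types.h>; the surrounding function is
 * hypothetical.
 */
static void example_wq_list_usage(struct io_wq_work_list *list,
				  struct io_wq_work *a, struct io_wq_work *b)
{
	struct io_wq_work_node *pos, *prv;

	INIT_WQ_LIST(list);
	wq_list_add_tail(&a->list, list);	/* list: a */
	wq_list_add_tail(&b->list, list);	/* list: a -> b */

	/* the walk tracks the previous node so callers can wq_list_del() safely */
	wq_list_for_each(pos, prv, list) {
		struct io_wq_work *w = container_of(pos, struct io_wq_work, list);

		(void)w;	/* process w here */
	}
}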
// SPDX-License-Identifier: GPL-2.0-or-later
/* net/sched/sch_ingress.c - Ingress and clsact qdisc
 *
 * Authors:     Jamal Hadi Salim 1999
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/tcx.h>

struct ingress_sched_data {
	struct tcf_block *block;
	struct tcf_block_ext_info block_info;
	struct mini_Qdisc_pair miniqp;
};

static struct Qdisc *ingress_leaf(struct Qdisc *sch, unsigned long arg)
{
	return NULL;
}

static unsigned long ingress_find(struct Qdisc *sch, u32 classid)
{
	return TC_H_MIN(classid) + 1;
}

static unsigned long ingress_bind_filter(struct Qdisc *sch,
					 unsigned long parent, u32 classid)
{
	return ingress_find(sch, classid);
}

static void ingress_unbind_filter(struct Qdisc *sch, unsigned long cl)
{
}

static void ingress_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
}

static struct tcf_block *ingress_tcf_block(struct Qdisc *sch, unsigned long cl,
					   struct netlink_ext_ack *extack)
{
	struct ingress_sched_data *q = qdisc_priv(sch);

	return q->block;
}

static void clsact_chain_head_change(struct tcf_proto *tp_head, void *priv)
{
	struct mini_Qdisc_pair *miniqp = priv;

	mini_qdisc_pair_swap(miniqp, tp_head);
};

static void ingress_ingress_block_set(struct Qdisc *sch, u32 block_index)
{
	struct ingress_sched_data *q = qdisc_priv(sch);

	q->block_info.block_index = block_index;
}

static u32 ingress_ingress_block_get(struct Qdisc *sch)
{
	struct ingress_sched_data *q = qdisc_priv(sch);

	return q->block_info.block_index;
}

static int ingress_init(struct Qdisc *sch, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	struct ingress_sched_data *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct bpf_mprog_entry *entry;
	bool created;
	int err;

	if (sch->parent != TC_H_INGRESS)
		return -EOPNOTSUPP;

	net_inc_ingress_queue();

	entry = tcx_entry_fetch_or_create(dev, true, &created);
	if (!entry)
		return -ENOMEM;
	tcx_miniq_inc(entry);
mini_qdisc_pair_init(&q->miniqp, sch, &tcx_entry(entry)->miniq); if (created) tcx_entry_update(dev, entry, true); q->block_info.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS; q->block_info.chain_head_change = clsact_chain_head_change; q->block_info.chain_head_change_priv = &q->miniqp; err = tcf_block_get_ext(&q->block, sch, &q->block_info, extack); if (err) return err; mini_qdisc_pair_block_init(&q->miniqp, q->block); return 0; } static void ingress_destroy(struct Qdisc *sch) { struct ingress_sched_data *q = qdisc_priv(sch); struct net_device *dev = qdisc_dev(sch); struct bpf_mprog_entry *entry = rtnl_dereference(dev->tcx_ingress); if (sch->parent != TC_H_INGRESS) return; tcf_block_put_ext(q->block, sch, &q->block_info); if (entry) { tcx_miniq_dec(entry); if (!tcx_entry_is_active(entry)) { tcx_entry_update(dev, NULL, true); tcx_entry_free(entry); } } net_dec_ingress_queue(); } static int ingress_dump(struct Qdisc *sch, struct sk_buff *skb) { struct nlattr *nest; nest = nla_nest_start_noflag(skb, TCA_OPTIONS); if (nest == NULL) goto nla_put_failure; return nla_nest_end(skb, nest); nla_put_failure: nla_nest_cancel(skb, nest); return -1; } static const struct Qdisc_class_ops ingress_class_ops = { .flags = QDISC_CLASS_OPS_DOIT_UNLOCKED, .leaf = ingress_leaf, .find = ingress_find, .walk = ingress_walk, .tcf_block = ingress_tcf_block, .bind_tcf = ingress_bind_filter, .unbind_tcf = ingress_unbind_filter, }; static struct Qdisc_ops ingress_qdisc_ops __read_mostly = { .cl_ops = &ingress_class_ops, .id = "ingress", .priv_size = sizeof(struct ingress_sched_data), .static_flags = TCQ_F_INGRESS | TCQ_F_CPUSTATS, .init = ingress_init, .destroy = ingress_destroy, .dump = ingress_dump, .ingress_block_set = ingress_ingress_block_set, .ingress_block_get = ingress_ingress_block_get, .owner = THIS_MODULE, }; MODULE_ALIAS_NET_SCH("ingress"); struct clsact_sched_data { struct tcf_block *ingress_block; struct tcf_block *egress_block; struct tcf_block_ext_info ingress_block_info; struct tcf_block_ext_info egress_block_info; struct mini_Qdisc_pair miniqp_ingress; struct mini_Qdisc_pair miniqp_egress; }; static unsigned long clsact_find(struct Qdisc *sch, u32 classid) { switch (TC_H_MIN(classid)) { case TC_H_MIN(TC_H_MIN_INGRESS): case TC_H_MIN(TC_H_MIN_EGRESS): return TC_H_MIN(classid); default: return 0; } } static unsigned long clsact_bind_filter(struct Qdisc *sch, unsigned long parent, u32 classid) { return clsact_find(sch, classid); } static struct tcf_block *clsact_tcf_block(struct Qdisc *sch, unsigned long cl, struct netlink_ext_ack *extack) { struct clsact_sched_data *q = qdisc_priv(sch); switch (cl) { case TC_H_MIN(TC_H_MIN_INGRESS): return q->ingress_block; case TC_H_MIN(TC_H_MIN_EGRESS): return q->egress_block; default: return NULL; } } static void clsact_ingress_block_set(struct Qdisc *sch, u32 block_index) { struct clsact_sched_data *q = qdisc_priv(sch); q->ingress_block_info.block_index = block_index; } static void clsact_egress_block_set(struct Qdisc *sch, u32 block_index) { struct clsact_sched_data *q = qdisc_priv(sch); q->egress_block_info.block_index = block_index; } static u32 clsact_ingress_block_get(struct Qdisc *sch) { struct clsact_sched_data *q = qdisc_priv(sch); return q->ingress_block_info.block_index; } static u32 clsact_egress_block_get(struct Qdisc *sch) { struct clsact_sched_data *q = qdisc_priv(sch); return q->egress_block_info.block_index; } static int clsact_init(struct Qdisc *sch, struct nlattr *opt, struct netlink_ext_ack *extack) { struct clsact_sched_data *q = 
qdisc_priv(sch); struct net_device *dev = qdisc_dev(sch); struct bpf_mprog_entry *entry; bool created; int err; if (sch->parent != TC_H_CLSACT) return -EOPNOTSUPP; net_inc_ingress_queue(); net_inc_egress_queue(); entry = tcx_entry_fetch_or_create(dev, true, &created); if (!entry) return -ENOMEM; tcx_miniq_inc(entry); mini_qdisc_pair_init(&q->miniqp_ingress, sch, &tcx_entry(entry)->miniq); if (created) tcx_entry_update(dev, entry, true); q->ingress_block_info.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS; q->ingress_block_info.chain_head_change = clsact_chain_head_change; q->ingress_block_info.chain_head_change_priv = &q->miniqp_ingress; err = tcf_block_get_ext(&q->ingress_block, sch, &q->ingress_block_info, extack); if (err) return err; mini_qdisc_pair_block_init(&q->miniqp_ingress, q->ingress_block); entry = tcx_entry_fetch_or_create(dev, false, &created); if (!entry) return -ENOMEM; tcx_miniq_inc(entry); mini_qdisc_pair_init(&q->miniqp_egress, sch, &tcx_entry(entry)->miniq); if (created) tcx_entry_update(dev, entry, false); q->egress_block_info.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS; q->egress_block_info.chain_head_change = clsact_chain_head_change; q->egress_block_info.chain_head_change_priv = &q->miniqp_egress; return tcf_block_get_ext(&q->egress_block, sch, &q->egress_block_info, extack); } static void clsact_destroy(struct Qdisc *sch) { struct clsact_sched_data *q = qdisc_priv(sch); struct net_device *dev = qdisc_dev(sch); struct bpf_mprog_entry *ingress_entry = rtnl_dereference(dev->tcx_ingress); struct bpf_mprog_entry *egress_entry = rtnl_dereference(dev->tcx_egress); if (sch->parent != TC_H_CLSACT) return; tcf_block_put_ext(q->ingress_block, sch, &q->ingress_block_info); tcf_block_put_ext(q->egress_block, sch, &q->egress_block_info); if (ingress_entry) { tcx_miniq_dec(ingress_entry); if (!tcx_entry_is_active(ingress_entry)) { tcx_entry_update(dev, NULL, true); tcx_entry_free(ingress_entry); } } if (egress_entry) { tcx_miniq_dec(egress_entry); if (!tcx_entry_is_active(egress_entry)) { tcx_entry_update(dev, NULL, false); tcx_entry_free(egress_entry); } } net_dec_ingress_queue(); net_dec_egress_queue(); } static const struct Qdisc_class_ops clsact_class_ops = { .flags = QDISC_CLASS_OPS_DOIT_UNLOCKED, .leaf = ingress_leaf, .find = clsact_find, .walk = ingress_walk, .tcf_block = clsact_tcf_block, .bind_tcf = clsact_bind_filter, .unbind_tcf = ingress_unbind_filter, }; static struct Qdisc_ops clsact_qdisc_ops __read_mostly = { .cl_ops = &clsact_class_ops, .id = "clsact", .priv_size = sizeof(struct clsact_sched_data), .static_flags = TCQ_F_INGRESS | TCQ_F_CPUSTATS, .init = clsact_init, .destroy = clsact_destroy, .dump = ingress_dump, .ingress_block_set = clsact_ingress_block_set, .egress_block_set = clsact_egress_block_set, .ingress_block_get = clsact_ingress_block_get, .egress_block_get = clsact_egress_block_get, .owner = THIS_MODULE, }; MODULE_ALIAS_NET_SCH("clsact"); static int __init ingress_module_init(void) { int ret; ret = register_qdisc(&ingress_qdisc_ops); if (!ret) { ret = register_qdisc(&clsact_qdisc_ops); if (ret) unregister_qdisc(&ingress_qdisc_ops); } return ret; } static void __exit ingress_module_exit(void) { unregister_qdisc(&ingress_qdisc_ops); unregister_qdisc(&clsact_qdisc_ops); } module_init(ingress_module_init); module_exit(ingress_module_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Ingress and clsact based ingress and egress qdiscs"); |
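/*
 * Usage note (editor's addition, not part of the file above): these qdiscs
 * are normally attached from userspace, e.g.
 *
 *   tc qdisc add dev eth0 ingress          # ingress hook only
 *   tc qdisc add dev eth0 clsact           # ingress + egress hooks
 *   tc filter add dev eth0 ingress ...     # filters then hang off the block
 *
 * clsact_init() above creates the separate ingress and egress tcf blocks
 * that such filters attach to.
 */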
/* SPDX-License-Identifier: GPL-2.0 */

#undef TRACE_SYSTEM
#define TRACE_SYSTEM page_pool

#if !defined(_TRACE_PAGE_POOL_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_PAGE_POOL_H

#include <linux/types.h>
#include <linux/tracepoint.h>

#include <trace/events/mmflags.h>
#include <net/page_pool/types.h>

TRACE_EVENT(page_pool_release,

	TP_PROTO(const struct page_pool *pool,
		 s32 inflight, u32 hold, u32 release),

	TP_ARGS(pool, inflight, hold, release),

	TP_STRUCT__entry(
		__field(const struct page_pool *, pool)
		__field(s32, inflight)
		__field(u32, hold)
		__field(u32, release)
		__field(u64, cnt)
	),

	TP_fast_assign(
		__entry->pool = pool;
		__entry->inflight = inflight;
		__entry->hold = hold;
		__entry->release = release;
		__entry->cnt = pool->destroy_cnt;
	),

	TP_printk("page_pool=%p inflight=%d hold=%u release=%u cnt=%llu",
		  __entry->pool, __entry->inflight, __entry->hold,
		  __entry->release, __entry->cnt)
);

TRACE_EVENT(page_pool_state_release,

	TP_PROTO(const struct page_pool *pool, netmem_ref netmem,
		 u32 release),

	TP_ARGS(pool, netmem, release),

	TP_STRUCT__entry(
		__field(const struct page_pool *, pool)
		__field(unsigned long, netmem)
		__field(u32, release)
		__field(unsigned long, pfn)
	),

	TP_fast_assign(
		__entry->pool = pool;
		__entry->netmem = (__force unsigned long)netmem;
		__entry->release = release;
		__entry->pfn = netmem_pfn_trace(netmem);
	),

	TP_printk("page_pool=%p netmem=%p is_net_iov=%lu pfn=0x%lx release=%u",
		  __entry->pool, (void *)__entry->netmem,
		  __entry->netmem & NET_IOV, __entry->pfn, __entry->release)
);

TRACE_EVENT(page_pool_state_hold,

	TP_PROTO(const struct page_pool *pool, netmem_ref netmem,
		 u32 hold),

	TP_ARGS(pool, netmem, hold),

	TP_STRUCT__entry(
		__field(const struct page_pool *, pool)
		__field(unsigned long, netmem)
		__field(u32, hold)
		__field(unsigned long, pfn)
	),

	TP_fast_assign(
		__entry->pool = pool;
		__entry->netmem = (__force unsigned long)netmem;
		__entry->hold = hold;
		__entry->pfn = netmem_pfn_trace(netmem);
	),

	TP_printk("page_pool=%p netmem=%p is_net_iov=%lu, pfn=0x%lx hold=%u",
		  __entry->pool, (void *)__entry->netmem,
		  __entry->netmem & NET_IOV, __entry->pfn, __entry->hold)
);

TRACE_EVENT(page_pool_update_nid,

	TP_PROTO(const struct page_pool *pool, int new_nid),

	TP_ARGS(pool, new_nid),

	TP_STRUCT__entry(
		__field(const struct page_pool *, pool)
		__field(int, pool_nid)
		__field(int, new_nid)
	),

	TP_fast_assign(
		__entry->pool = pool;
		__entry->pool_nid = pool->p.nid;
		__entry->new_nid = new_nid;
	),

	TP_printk("page_pool=%p pool_nid=%d new_nid=%d",
		  __entry->pool, __entry->pool_nid, __entry->new_nid)
);

#endif /* _TRACE_PAGE_POOL_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
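/*
 * Illustrative sketch (editor's addition, not part of the header above):
 * TRACE_EVENT() generates a trace_<name>() call (and a trace_<name>_enabled()
 * static-key helper) for each event.  A hypothetical caller that includes
 * <trace/events/page_pool.h> would emit the hold event roughly like this:
 */
static inline void example_trace_hold(struct page_pool *pool,
				      netmem_ref netmem, u32 hold)
{
	if (trace_page_pool_state_hold_enabled())	/* cheap when tracing is off */
		trace_page_pool_state_hold(pool, netmem, hold);
}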
// SPDX-License-Identifier: GPL-2.0-or-later
/* auditfilter.c -- filtering of audit events
 *
 * Copyright 2003-2004 Red Hat, Inc.
 * Copyright 2005 Hewlett-Packard Development Company, L.P.
 * Copyright 2005 IBM Corporation
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/audit.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/netlink.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/security.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include "audit.h"

/*
 * Locking model:
 *
 * audit_filter_mutex:
 *	Synchronizes writes and blocking reads of audit's filterlist
 *	data.  Rcu is used to traverse the filterlist and access
 *	contents of structs audit_entry, audit_watch and opaque
 *	LSM rules during filtering.
If modified, these structures * must be copied and replace their counterparts in the filterlist. * An audit_parent struct is not accessed during filtering, so may * be written directly provided audit_filter_mutex is held. */ /* Audit filter lists, defined in <linux/audit.h> */ struct list_head audit_filter_list[AUDIT_NR_FILTERS] = { LIST_HEAD_INIT(audit_filter_list[0]), LIST_HEAD_INIT(audit_filter_list[1]), LIST_HEAD_INIT(audit_filter_list[2]), LIST_HEAD_INIT(audit_filter_list[3]), LIST_HEAD_INIT(audit_filter_list[4]), LIST_HEAD_INIT(audit_filter_list[5]), LIST_HEAD_INIT(audit_filter_list[6]), LIST_HEAD_INIT(audit_filter_list[7]), #if AUDIT_NR_FILTERS != 8 #error Fix audit_filter_list initialiser #endif }; static struct list_head audit_rules_list[AUDIT_NR_FILTERS] = { LIST_HEAD_INIT(audit_rules_list[0]), LIST_HEAD_INIT(audit_rules_list[1]), LIST_HEAD_INIT(audit_rules_list[2]), LIST_HEAD_INIT(audit_rules_list[3]), LIST_HEAD_INIT(audit_rules_list[4]), LIST_HEAD_INIT(audit_rules_list[5]), LIST_HEAD_INIT(audit_rules_list[6]), LIST_HEAD_INIT(audit_rules_list[7]), }; DEFINE_MUTEX(audit_filter_mutex); static void audit_free_lsm_field(struct audit_field *f) { switch (f->type) { case AUDIT_SUBJ_USER: case AUDIT_SUBJ_ROLE: case AUDIT_SUBJ_TYPE: case AUDIT_SUBJ_SEN: case AUDIT_SUBJ_CLR: case AUDIT_OBJ_USER: case AUDIT_OBJ_ROLE: case AUDIT_OBJ_TYPE: case AUDIT_OBJ_LEV_LOW: case AUDIT_OBJ_LEV_HIGH: kfree(f->lsm_str); security_audit_rule_free(f->lsm_rule); } } static inline void audit_free_rule(struct audit_entry *e) { int i; struct audit_krule *erule = &e->rule; /* some rules don't have associated watches */ if (erule->watch) audit_put_watch(erule->watch); if (erule->fields) for (i = 0; i < erule->field_count; i++) audit_free_lsm_field(&erule->fields[i]); kfree(erule->fields); kfree(erule->filterkey); kfree(e); } void audit_free_rule_rcu(struct rcu_head *head) { struct audit_entry *e = container_of(head, struct audit_entry, rcu); audit_free_rule(e); } /* Initialize an audit filterlist entry. */ static inline struct audit_entry *audit_init_entry(u32 field_count) { struct audit_entry *entry; struct audit_field *fields; entry = kzalloc(sizeof(*entry), GFP_KERNEL); if (unlikely(!entry)) return NULL; fields = kcalloc(field_count, sizeof(*fields), GFP_KERNEL); if (unlikely(!fields)) { kfree(entry); return NULL; } entry->rule.fields = fields; return entry; } /* Unpack a filter field's string representation from user-space * buffer. */ char *audit_unpack_string(void **bufp, size_t *remain, size_t len) { char *str; if (!*bufp || (len == 0) || (len > *remain)) return ERR_PTR(-EINVAL); /* Of the currently implemented string fields, PATH_MAX * defines the longest valid length. */ if (len > PATH_MAX) return ERR_PTR(-ENAMETOOLONG); str = kmalloc(len + 1, GFP_KERNEL); if (unlikely(!str)) return ERR_PTR(-ENOMEM); memcpy(str, *bufp, len); str[len] = 0; *bufp += len; *remain -= len; return str; } /* Translate an inode field to kernel representation. 
*/ static inline int audit_to_inode(struct audit_krule *krule, struct audit_field *f) { if ((krule->listnr != AUDIT_FILTER_EXIT && krule->listnr != AUDIT_FILTER_URING_EXIT) || krule->inode_f || krule->watch || krule->tree || (f->op != Audit_equal && f->op != Audit_not_equal)) return -EINVAL; krule->inode_f = f; return 0; } static __u32 *classes[AUDIT_SYSCALL_CLASSES]; int __init audit_register_class(int class, unsigned *list) { __u32 *p = kcalloc(AUDIT_BITMASK_SIZE, sizeof(__u32), GFP_KERNEL); if (!p) return -ENOMEM; while (*list != ~0U) { unsigned n = *list++; if (n >= AUDIT_BITMASK_SIZE * 32 - AUDIT_SYSCALL_CLASSES) { kfree(p); return -EINVAL; } p[AUDIT_WORD(n)] |= AUDIT_BIT(n); } if (class >= AUDIT_SYSCALL_CLASSES || classes[class]) { kfree(p); return -EINVAL; } classes[class] = p; return 0; } int audit_match_class(int class, unsigned syscall) { if (unlikely(syscall >= AUDIT_BITMASK_SIZE * 32)) return 0; if (unlikely(class >= AUDIT_SYSCALL_CLASSES || !classes[class])) return 0; return classes[class][AUDIT_WORD(syscall)] & AUDIT_BIT(syscall); } #ifdef CONFIG_AUDITSYSCALL static inline int audit_match_class_bits(int class, u32 *mask) { int i; if (classes[class]) { for (i = 0; i < AUDIT_BITMASK_SIZE; i++) if (mask[i] & classes[class][i]) return 0; } return 1; } static int audit_match_signal(struct audit_entry *entry) { struct audit_field *arch = entry->rule.arch_f; if (!arch) { /* When arch is unspecified, we must check both masks on biarch * as syscall number alone is ambiguous. */ return (audit_match_class_bits(AUDIT_CLASS_SIGNAL, entry->rule.mask) && audit_match_class_bits(AUDIT_CLASS_SIGNAL_32, entry->rule.mask)); } switch (audit_classify_arch(arch->val)) { case 0: /* native */ return (audit_match_class_bits(AUDIT_CLASS_SIGNAL, entry->rule.mask)); case 1: /* 32bit on biarch */ return (audit_match_class_bits(AUDIT_CLASS_SIGNAL_32, entry->rule.mask)); default: return 1; } } #endif /* Common user-space to kernel rule translation. 
*/ static inline struct audit_entry *audit_to_entry_common(struct audit_rule_data *rule) { unsigned listnr; struct audit_entry *entry; int i, err; err = -EINVAL; listnr = rule->flags & ~AUDIT_FILTER_PREPEND; switch (listnr) { default: goto exit_err; #ifdef CONFIG_AUDITSYSCALL case AUDIT_FILTER_ENTRY: pr_err("AUDIT_FILTER_ENTRY is deprecated\n"); goto exit_err; case AUDIT_FILTER_EXIT: case AUDIT_FILTER_URING_EXIT: case AUDIT_FILTER_TASK: #endif case AUDIT_FILTER_USER: case AUDIT_FILTER_EXCLUDE: case AUDIT_FILTER_FS: ; } if (unlikely(rule->action == AUDIT_POSSIBLE)) { pr_err("AUDIT_POSSIBLE is deprecated\n"); goto exit_err; } if (rule->action != AUDIT_NEVER && rule->action != AUDIT_ALWAYS) goto exit_err; if (rule->field_count > AUDIT_MAX_FIELDS) goto exit_err; err = -ENOMEM; entry = audit_init_entry(rule->field_count); if (!entry) goto exit_err; entry->rule.flags = rule->flags & AUDIT_FILTER_PREPEND; entry->rule.listnr = listnr; entry->rule.action = rule->action; entry->rule.field_count = rule->field_count; for (i = 0; i < AUDIT_BITMASK_SIZE; i++) entry->rule.mask[i] = rule->mask[i]; for (i = 0; i < AUDIT_SYSCALL_CLASSES; i++) { int bit = AUDIT_BITMASK_SIZE * 32 - i - 1; __u32 *p = &entry->rule.mask[AUDIT_WORD(bit)]; __u32 *class; if (!(*p & AUDIT_BIT(bit))) continue; *p &= ~AUDIT_BIT(bit); class = classes[i]; if (class) { int j; for (j = 0; j < AUDIT_BITMASK_SIZE; j++) entry->rule.mask[j] |= class[j]; } } return entry; exit_err: return ERR_PTR(err); } static u32 audit_ops[] = { [Audit_equal] = AUDIT_EQUAL, [Audit_not_equal] = AUDIT_NOT_EQUAL, [Audit_bitmask] = AUDIT_BIT_MASK, [Audit_bittest] = AUDIT_BIT_TEST, [Audit_lt] = AUDIT_LESS_THAN, [Audit_gt] = AUDIT_GREATER_THAN, [Audit_le] = AUDIT_LESS_THAN_OR_EQUAL, [Audit_ge] = AUDIT_GREATER_THAN_OR_EQUAL, }; static u32 audit_to_op(u32 op) { u32 n; for (n = Audit_equal; n < Audit_bad && audit_ops[n] != op; n++) ; return n; } /* check if an audit field is valid */ static int audit_field_valid(struct audit_entry *entry, struct audit_field *f) { switch (f->type) { case AUDIT_MSGTYPE: if (entry->rule.listnr != AUDIT_FILTER_EXCLUDE && entry->rule.listnr != AUDIT_FILTER_USER) return -EINVAL; break; case AUDIT_FSTYPE: if (entry->rule.listnr != AUDIT_FILTER_FS) return -EINVAL; break; case AUDIT_PERM: if (entry->rule.listnr == AUDIT_FILTER_URING_EXIT) return -EINVAL; break; } switch (entry->rule.listnr) { case AUDIT_FILTER_FS: switch (f->type) { case AUDIT_FSTYPE: case AUDIT_FILTERKEY: break; default: return -EINVAL; } } /* Check for valid field type and op */ switch (f->type) { case AUDIT_ARG0: case AUDIT_ARG1: case AUDIT_ARG2: case AUDIT_ARG3: case AUDIT_PERS: /* <uapi/linux/personality.h> */ case AUDIT_DEVMINOR: /* all ops are valid */ break; case AUDIT_UID: case AUDIT_EUID: case AUDIT_SUID: case AUDIT_FSUID: case AUDIT_LOGINUID: case AUDIT_OBJ_UID: case AUDIT_GID: case AUDIT_EGID: case AUDIT_SGID: case AUDIT_FSGID: case AUDIT_OBJ_GID: case AUDIT_PID: case AUDIT_MSGTYPE: case AUDIT_PPID: case AUDIT_DEVMAJOR: case AUDIT_EXIT: case AUDIT_SUCCESS: case AUDIT_INODE: case AUDIT_SESSIONID: case AUDIT_SUBJ_SEN: case AUDIT_SUBJ_CLR: case AUDIT_OBJ_LEV_LOW: case AUDIT_OBJ_LEV_HIGH: case AUDIT_SADDR_FAM: /* bit ops are only useful on syscall args */ if (f->op == Audit_bitmask || f->op == Audit_bittest) return -EINVAL; break; case AUDIT_SUBJ_USER: case AUDIT_SUBJ_ROLE: case AUDIT_SUBJ_TYPE: case AUDIT_OBJ_USER: case AUDIT_OBJ_ROLE: case AUDIT_OBJ_TYPE: case AUDIT_WATCH: case AUDIT_DIR: case AUDIT_FILTERKEY: case AUDIT_LOGINUID_SET: case AUDIT_ARCH: case 
AUDIT_FSTYPE: case AUDIT_PERM: case AUDIT_FILETYPE: case AUDIT_FIELD_COMPARE: case AUDIT_EXE: /* only equal and not equal valid ops */ if (f->op != Audit_not_equal && f->op != Audit_equal) return -EINVAL; break; default: /* field not recognized */ return -EINVAL; } /* Check for select valid field values */ switch (f->type) { case AUDIT_LOGINUID_SET: if ((f->val != 0) && (f->val != 1)) return -EINVAL; break; case AUDIT_PERM: if (f->val & ~15) return -EINVAL; break; case AUDIT_FILETYPE: if (f->val & ~S_IFMT) return -EINVAL; break; case AUDIT_FIELD_COMPARE: if (f->val > AUDIT_MAX_FIELD_COMPARE) return -EINVAL; break; case AUDIT_SADDR_FAM: if (f->val >= AF_MAX) return -EINVAL; break; default: break; } return 0; } /* Translate struct audit_rule_data to kernel's rule representation. */ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data, size_t datasz) { int err = 0; struct audit_entry *entry; void *bufp; size_t remain = datasz - sizeof(struct audit_rule_data); int i; char *str; struct audit_fsnotify_mark *audit_mark; entry = audit_to_entry_common(data); if (IS_ERR(entry)) goto exit_nofree; bufp = data->buf; for (i = 0; i < data->field_count; i++) { struct audit_field *f = &entry->rule.fields[i]; u32 f_val; err = -EINVAL; f->op = audit_to_op(data->fieldflags[i]); if (f->op == Audit_bad) goto exit_free; f->type = data->fields[i]; f_val = data->values[i]; /* Support legacy tests for a valid loginuid */ if ((f->type == AUDIT_LOGINUID) && (f_val == AUDIT_UID_UNSET)) { f->type = AUDIT_LOGINUID_SET; f_val = 0; entry->rule.pflags |= AUDIT_LOGINUID_LEGACY; } err = audit_field_valid(entry, f); if (err) goto exit_free; err = -EINVAL; switch (f->type) { case AUDIT_LOGINUID: case AUDIT_UID: case AUDIT_EUID: case AUDIT_SUID: case AUDIT_FSUID: case AUDIT_OBJ_UID: f->uid = make_kuid(current_user_ns(), f_val); if (!uid_valid(f->uid)) goto exit_free; break; case AUDIT_GID: case AUDIT_EGID: case AUDIT_SGID: case AUDIT_FSGID: case AUDIT_OBJ_GID: f->gid = make_kgid(current_user_ns(), f_val); if (!gid_valid(f->gid)) goto exit_free; break; case AUDIT_ARCH: f->val = f_val; entry->rule.arch_f = f; break; case AUDIT_SUBJ_USER: case AUDIT_SUBJ_ROLE: case AUDIT_SUBJ_TYPE: case AUDIT_SUBJ_SEN: case AUDIT_SUBJ_CLR: case AUDIT_OBJ_USER: case AUDIT_OBJ_ROLE: case AUDIT_OBJ_TYPE: case AUDIT_OBJ_LEV_LOW: case AUDIT_OBJ_LEV_HIGH: str = audit_unpack_string(&bufp, &remain, f_val); if (IS_ERR(str)) { err = PTR_ERR(str); goto exit_free; } entry->rule.buflen += f_val; f->lsm_str = str; err = security_audit_rule_init(f->type, f->op, str, (void **)&f->lsm_rule, GFP_KERNEL); /* Keep currently invalid fields around in case they * become valid after a policy reload. 
*/ if (err == -EINVAL) { pr_warn("audit rule for LSM \'%s\' is invalid\n", str); err = 0; } else if (err) goto exit_free; break; case AUDIT_WATCH: str = audit_unpack_string(&bufp, &remain, f_val); if (IS_ERR(str)) { err = PTR_ERR(str); goto exit_free; } err = audit_to_watch(&entry->rule, str, f_val, f->op); if (err) { kfree(str); goto exit_free; } entry->rule.buflen += f_val; break; case AUDIT_DIR: str = audit_unpack_string(&bufp, &remain, f_val); if (IS_ERR(str)) { err = PTR_ERR(str); goto exit_free; } err = audit_make_tree(&entry->rule, str, f->op); kfree(str); if (err) goto exit_free; entry->rule.buflen += f_val; break; case AUDIT_INODE: f->val = f_val; err = audit_to_inode(&entry->rule, f); if (err) goto exit_free; break; case AUDIT_FILTERKEY: if (entry->rule.filterkey || f_val > AUDIT_MAX_KEY_LEN) goto exit_free; str = audit_unpack_string(&bufp, &remain, f_val); if (IS_ERR(str)) { err = PTR_ERR(str); goto exit_free; } entry->rule.buflen += f_val; entry->rule.filterkey = str; break; case AUDIT_EXE: if (entry->rule.exe || f_val > PATH_MAX) goto exit_free; str = audit_unpack_string(&bufp, &remain, f_val); if (IS_ERR(str)) { err = PTR_ERR(str); goto exit_free; } audit_mark = audit_alloc_mark(&entry->rule, str, f_val); if (IS_ERR(audit_mark)) { kfree(str); err = PTR_ERR(audit_mark); goto exit_free; } entry->rule.buflen += f_val; entry->rule.exe = audit_mark; break; default: f->val = f_val; break; } } if (entry->rule.inode_f && entry->rule.inode_f->op == Audit_not_equal) entry->rule.inode_f = NULL; exit_nofree: return entry; exit_free: if (entry->rule.tree) audit_put_tree(entry->rule.tree); /* that's the temporary one */ if (entry->rule.exe) audit_remove_mark(entry->rule.exe); /* that's the template one */ audit_free_rule(entry); return ERR_PTR(err); } /* Pack a filter field's string representation into data block. */ static inline size_t audit_pack_string(void **bufp, const char *str) { size_t len = strlen(str); memcpy(*bufp, str, len); *bufp += len; return len; } /* Translate kernel rule representation to struct audit_rule_data. 
*/ static struct audit_rule_data *audit_krule_to_data(struct audit_krule *krule) { struct audit_rule_data *data; void *bufp; int i; data = kmalloc(struct_size(data, buf, krule->buflen), GFP_KERNEL); if (unlikely(!data)) return NULL; memset(data, 0, sizeof(*data)); data->flags = krule->flags | krule->listnr; data->action = krule->action; data->field_count = krule->field_count; bufp = data->buf; for (i = 0; i < data->field_count; i++) { struct audit_field *f = &krule->fields[i]; data->fields[i] = f->type; data->fieldflags[i] = audit_ops[f->op]; switch (f->type) { case AUDIT_SUBJ_USER: case AUDIT_SUBJ_ROLE: case AUDIT_SUBJ_TYPE: case AUDIT_SUBJ_SEN: case AUDIT_SUBJ_CLR: case AUDIT_OBJ_USER: case AUDIT_OBJ_ROLE: case AUDIT_OBJ_TYPE: case AUDIT_OBJ_LEV_LOW: case AUDIT_OBJ_LEV_HIGH: data->buflen += data->values[i] = audit_pack_string(&bufp, f->lsm_str); break; case AUDIT_WATCH: data->buflen += data->values[i] = audit_pack_string(&bufp, audit_watch_path(krule->watch)); break; case AUDIT_DIR: data->buflen += data->values[i] = audit_pack_string(&bufp, audit_tree_path(krule->tree)); break; case AUDIT_FILTERKEY: data->buflen += data->values[i] = audit_pack_string(&bufp, krule->filterkey); break; case AUDIT_EXE: data->buflen += data->values[i] = audit_pack_string(&bufp, audit_mark_path(krule->exe)); break; case AUDIT_LOGINUID_SET: if (krule->pflags & AUDIT_LOGINUID_LEGACY && !f->val) { data->fields[i] = AUDIT_LOGINUID; data->values[i] = AUDIT_UID_UNSET; break; } fallthrough; /* if set */ default: data->values[i] = f->val; } } for (i = 0; i < AUDIT_BITMASK_SIZE; i++) data->mask[i] = krule->mask[i]; return data; } /* Compare two rules in kernel format. Considered success if rules * don't match. */ static int audit_compare_rule(struct audit_krule *a, struct audit_krule *b) { int i; if (a->flags != b->flags || a->pflags != b->pflags || a->listnr != b->listnr || a->action != b->action || a->field_count != b->field_count) return 1; for (i = 0; i < a->field_count; i++) { if (a->fields[i].type != b->fields[i].type || a->fields[i].op != b->fields[i].op) return 1; switch (a->fields[i].type) { case AUDIT_SUBJ_USER: case AUDIT_SUBJ_ROLE: case AUDIT_SUBJ_TYPE: case AUDIT_SUBJ_SEN: case AUDIT_SUBJ_CLR: case AUDIT_OBJ_USER: case AUDIT_OBJ_ROLE: case AUDIT_OBJ_TYPE: case AUDIT_OBJ_LEV_LOW: case AUDIT_OBJ_LEV_HIGH: if (strcmp(a->fields[i].lsm_str, b->fields[i].lsm_str)) return 1; break; case AUDIT_WATCH: if (strcmp(audit_watch_path(a->watch), audit_watch_path(b->watch))) return 1; break; case AUDIT_DIR: if (strcmp(audit_tree_path(a->tree), audit_tree_path(b->tree))) return 1; break; case AUDIT_FILTERKEY: /* both filterkeys exist based on above type compare */ if (strcmp(a->filterkey, b->filterkey)) return 1; break; case AUDIT_EXE: /* both paths exist based on above type compare */ if (strcmp(audit_mark_path(a->exe), audit_mark_path(b->exe))) return 1; break; case AUDIT_UID: case AUDIT_EUID: case AUDIT_SUID: case AUDIT_FSUID: case AUDIT_LOGINUID: case AUDIT_OBJ_UID: if (!uid_eq(a->fields[i].uid, b->fields[i].uid)) return 1; break; case AUDIT_GID: case AUDIT_EGID: case AUDIT_SGID: case AUDIT_FSGID: case AUDIT_OBJ_GID: if (!gid_eq(a->fields[i].gid, b->fields[i].gid)) return 1; break; default: if (a->fields[i].val != b->fields[i].val) return 1; } } for (i = 0; i < AUDIT_BITMASK_SIZE; i++) if (a->mask[i] != b->mask[i]) return 1; return 0; } /* Duplicate LSM field information. The lsm_rule is opaque, so must be * re-initialized. 
*/ static inline int audit_dupe_lsm_field(struct audit_field *df, struct audit_field *sf) { int ret; char *lsm_str; /* our own copy of lsm_str */ lsm_str = kstrdup(sf->lsm_str, GFP_KERNEL); if (unlikely(!lsm_str)) return -ENOMEM; df->lsm_str = lsm_str; /* our own (refreshed) copy of lsm_rule */ ret = security_audit_rule_init(df->type, df->op, df->lsm_str, (void **)&df->lsm_rule, GFP_KERNEL); /* Keep currently invalid fields around in case they * become valid after a policy reload. */ if (ret == -EINVAL) { pr_warn("audit rule for LSM \'%s\' is invalid\n", df->lsm_str); ret = 0; } return ret; } /* Duplicate an audit rule. This will be a deep copy with the exception * of the watch - that pointer is carried over. The LSM specific fields * will be updated in the copy. The point is to be able to replace the old * rule with the new rule in the filterlist, then free the old rule. * The rlist element is undefined; list manipulations are handled apart from * the initial copy. */ struct audit_entry *audit_dupe_rule(struct audit_krule *old) { u32 fcount = old->field_count; struct audit_entry *entry; struct audit_krule *new; char *fk; int i, err = 0; entry = audit_init_entry(fcount); if (unlikely(!entry)) return ERR_PTR(-ENOMEM); new = &entry->rule; new->flags = old->flags; new->pflags = old->pflags; new->listnr = old->listnr; new->action = old->action; for (i = 0; i < AUDIT_BITMASK_SIZE; i++) new->mask[i] = old->mask[i]; new->prio = old->prio; new->buflen = old->buflen; new->inode_f = old->inode_f; new->field_count = old->field_count; /* * note that we are OK with not refcounting here; audit_match_tree() * never dereferences tree and we can't get false positives there * since we'd have to have rule gone from the list *and* removed * before the chunks found by lookup had been allocated, i.e. before * the beginning of list scan. */ new->tree = old->tree; memcpy(new->fields, old->fields, sizeof(struct audit_field) * fcount); /* deep copy this information, updating the lsm_rule fields, because * the originals will all be freed when the old rule is freed. */ for (i = 0; i < fcount; i++) { switch (new->fields[i].type) { case AUDIT_SUBJ_USER: case AUDIT_SUBJ_ROLE: case AUDIT_SUBJ_TYPE: case AUDIT_SUBJ_SEN: case AUDIT_SUBJ_CLR: case AUDIT_OBJ_USER: case AUDIT_OBJ_ROLE: case AUDIT_OBJ_TYPE: case AUDIT_OBJ_LEV_LOW: case AUDIT_OBJ_LEV_HIGH: err = audit_dupe_lsm_field(&new->fields[i], &old->fields[i]); break; case AUDIT_FILTERKEY: fk = kstrdup(old->filterkey, GFP_KERNEL); if (unlikely(!fk)) err = -ENOMEM; else new->filterkey = fk; break; case AUDIT_EXE: err = audit_dupe_exe(new, old); break; } if (err) { if (new->exe) audit_remove_mark(new->exe); audit_free_rule(entry); return ERR_PTR(err); } } if (old->watch) { audit_get_watch(old->watch); new->watch = old->watch; } return entry; } /* Find an existing audit rule. * Caller must hold audit_filter_mutex to prevent stale rule data. 
*/ static struct audit_entry *audit_find_rule(struct audit_entry *entry, struct list_head **p) { struct audit_entry *e, *found = NULL; struct list_head *list; int h; if (entry->rule.inode_f) { h = audit_hash_ino(entry->rule.inode_f->val); *p = list = &audit_inode_hash[h]; } else if (entry->rule.watch) { /* we don't know the inode number, so must walk entire hash */ for (h = 0; h < AUDIT_INODE_BUCKETS; h++) { list = &audit_inode_hash[h]; list_for_each_entry(e, list, list) if (!audit_compare_rule(&entry->rule, &e->rule)) { found = e; goto out; } } goto out; } else { *p = list = &audit_filter_list[entry->rule.listnr]; } list_for_each_entry(e, list, list) if (!audit_compare_rule(&entry->rule, &e->rule)) { found = e; goto out; } out: return found; } static u64 prio_low = ~0ULL/2; static u64 prio_high = ~0ULL/2 - 1; /* Add rule to given filterlist if not a duplicate. */ static inline int audit_add_rule(struct audit_entry *entry) { struct audit_entry *e; struct audit_watch *watch = entry->rule.watch; struct audit_tree *tree = entry->rule.tree; struct list_head *list; int err = 0; #ifdef CONFIG_AUDITSYSCALL int dont_count = 0; /* If any of these, don't count towards total */ switch (entry->rule.listnr) { case AUDIT_FILTER_USER: case AUDIT_FILTER_EXCLUDE: case AUDIT_FILTER_FS: dont_count = 1; } #endif mutex_lock(&audit_filter_mutex); e = audit_find_rule(entry, &list); if (e) { mutex_unlock(&audit_filter_mutex); err = -EEXIST; /* normally audit_add_tree_rule() will free it on failure */ if (tree) audit_put_tree(tree); return err; } if (watch) { /* audit_filter_mutex is dropped and re-taken during this call */ err = audit_add_watch(&entry->rule, &list); if (err) { mutex_unlock(&audit_filter_mutex); /* * normally audit_add_tree_rule() will free it * on failure */ if (tree) audit_put_tree(tree); return err; } } if (tree) { err = audit_add_tree_rule(&entry->rule); if (err) { mutex_unlock(&audit_filter_mutex); return err; } } entry->rule.prio = ~0ULL; if (entry->rule.listnr == AUDIT_FILTER_EXIT || entry->rule.listnr == AUDIT_FILTER_URING_EXIT) { if (entry->rule.flags & AUDIT_FILTER_PREPEND) entry->rule.prio = ++prio_high; else entry->rule.prio = --prio_low; } if (entry->rule.flags & AUDIT_FILTER_PREPEND) { list_add(&entry->rule.list, &audit_rules_list[entry->rule.listnr]); list_add_rcu(&entry->list, list); entry->rule.flags &= ~AUDIT_FILTER_PREPEND; } else { list_add_tail(&entry->rule.list, &audit_rules_list[entry->rule.listnr]); list_add_tail_rcu(&entry->list, list); } #ifdef CONFIG_AUDITSYSCALL if (!dont_count) audit_n_rules++; if (!audit_match_signal(entry)) audit_signals++; #endif mutex_unlock(&audit_filter_mutex); return err; } /* Remove an existing rule from filterlist. 
*/ int audit_del_rule(struct audit_entry *entry) { struct audit_entry *e; struct audit_tree *tree = entry->rule.tree; struct list_head *list; int ret = 0; #ifdef CONFIG_AUDITSYSCALL int dont_count = 0; /* If any of these, don't count towards total */ switch (entry->rule.listnr) { case AUDIT_FILTER_USER: case AUDIT_FILTER_EXCLUDE: case AUDIT_FILTER_FS: dont_count = 1; } #endif mutex_lock(&audit_filter_mutex); e = audit_find_rule(entry, &list); if (!e) { ret = -ENOENT; goto out; } if (e->rule.watch) audit_remove_watch_rule(&e->rule); if (e->rule.tree) audit_remove_tree_rule(&e->rule); if (e->rule.exe) audit_remove_mark_rule(&e->rule); #ifdef CONFIG_AUDITSYSCALL if (!dont_count) audit_n_rules--; if (!audit_match_signal(entry)) audit_signals--; #endif list_del_rcu(&e->list); list_del(&e->rule.list); call_rcu(&e->rcu, audit_free_rule_rcu); out: mutex_unlock(&audit_filter_mutex); if (tree) audit_put_tree(tree); /* that's the temporary one */ return ret; } /* List rules using struct audit_rule_data. */ static void audit_list_rules(int seq, struct sk_buff_head *q) { struct sk_buff *skb; struct audit_krule *r; int i; /* This is a blocking read, so use audit_filter_mutex instead of rcu * iterator to sync with list writers. */ for (i = 0; i < AUDIT_NR_FILTERS; i++) { list_for_each_entry(r, &audit_rules_list[i], list) { struct audit_rule_data *data; data = audit_krule_to_data(r); if (unlikely(!data)) break; skb = audit_make_reply(seq, AUDIT_LIST_RULES, 0, 1, data, struct_size(data, buf, data->buflen)); if (skb) skb_queue_tail(q, skb); kfree(data); } } skb = audit_make_reply(seq, AUDIT_LIST_RULES, 1, 1, NULL, 0); if (skb) skb_queue_tail(q, skb); } /* Log rule additions and removals */ static void audit_log_rule_change(char *action, struct audit_krule *rule, int res) { struct audit_buffer *ab; if (!audit_enabled) return; ab = audit_log_start(audit_context(), GFP_KERNEL, AUDIT_CONFIG_CHANGE); if (!ab) return; audit_log_session_info(ab); audit_log_task_context(ab); audit_log_format(ab, " op=%s", action); audit_log_key(ab, rule->filterkey); audit_log_format(ab, " list=%d res=%d", rule->listnr, res); audit_log_end(ab); } /** * audit_rule_change - apply all rules to the specified message type * @type: audit message type * @seq: netlink audit message sequence (serial) number * @data: payload data * @datasz: size of payload data */ int audit_rule_change(int type, int seq, void *data, size_t datasz) { int err = 0; struct audit_entry *entry; switch (type) { case AUDIT_ADD_RULE: entry = audit_data_to_entry(data, datasz); if (IS_ERR(entry)) return PTR_ERR(entry); err = audit_add_rule(entry); audit_log_rule_change("add_rule", &entry->rule, !err); break; case AUDIT_DEL_RULE: entry = audit_data_to_entry(data, datasz); if (IS_ERR(entry)) return PTR_ERR(entry); err = audit_del_rule(entry); audit_log_rule_change("remove_rule", &entry->rule, !err); break; default: WARN_ON(1); return -EINVAL; } if (err || type == AUDIT_DEL_RULE) { if (entry->rule.exe) audit_remove_mark(entry->rule.exe); audit_free_rule(entry); } return err; } /** * audit_list_rules_send - list the audit rules * @request_skb: skb of request we are replying to (used to target the reply) * @seq: netlink audit message sequence (serial) number */ int audit_list_rules_send(struct sk_buff *request_skb, int seq) { struct task_struct *tsk; struct audit_netlink_list *dest; /* We can't just spew out the rules here because we might fill * the available socket buffer space and deadlock waiting for * auditctl to read from it... 
which isn't ever going to * happen if we're actually running in the context of auditctl * trying to _send_ the stuff */ dest = kmalloc(sizeof(*dest), GFP_KERNEL); if (!dest) return -ENOMEM; dest->net = get_net(sock_net(NETLINK_CB(request_skb).sk)); dest->portid = NETLINK_CB(request_skb).portid; skb_queue_head_init(&dest->q); mutex_lock(&audit_filter_mutex); audit_list_rules(seq, &dest->q); mutex_unlock(&audit_filter_mutex); tsk = kthread_run(audit_send_list_thread, dest, "audit_send_list"); if (IS_ERR(tsk)) { skb_queue_purge(&dest->q); put_net(dest->net); kfree(dest); return PTR_ERR(tsk); } return 0; } int audit_comparator(u32 left, u32 op, u32 right) { switch (op) { case Audit_equal: return (left == right); case Audit_not_equal: return (left != right); case Audit_lt: return (left < right); case Audit_le: return (left <= right); case Audit_gt: return (left > right); case Audit_ge: return (left >= right); case Audit_bitmask: return (left & right); case Audit_bittest: return ((left & right) == right); default: return 0; } } int audit_uid_comparator(kuid_t left, u32 op, kuid_t right) { switch (op) { case Audit_equal: return uid_eq(left, right); case Audit_not_equal: return !uid_eq(left, right); case Audit_lt: return uid_lt(left, right); case Audit_le: return uid_lte(left, right); case Audit_gt: return uid_gt(left, right); case Audit_ge: return uid_gte(left, right); case Audit_bitmask: case Audit_bittest: default: return 0; } } int audit_gid_comparator(kgid_t left, u32 op, kgid_t right) { switch (op) { case Audit_equal: return gid_eq(left, right); case Audit_not_equal: return !gid_eq(left, right); case Audit_lt: return gid_lt(left, right); case Audit_le: return gid_lte(left, right); case Audit_gt: return gid_gt(left, right); case Audit_ge: return gid_gte(left, right); case Audit_bitmask: case Audit_bittest: default: return 0; } } /** * parent_len - find the length of the parent portion of a pathname * @path: pathname of which to determine length */ int parent_len(const char *path) { int plen; const char *p; plen = strlen(path); if (plen == 0) return plen; /* disregard trailing slashes */ p = path + plen - 1; while ((*p == '/') && (p > path)) p--; /* walk backward until we find the next slash or hit beginning */ while ((*p != '/') && (p > path)) p--; /* did we find a slash? Then increment to include it in path */ if (*p == '/') p++; return p - path; } /** * audit_compare_dname_path - compare given dentry name with last component in * given path. Return of 0 indicates a match. * @dname: dentry name that we're comparing * @path: full pathname that we're comparing * @parentlen: length of the parent if known. Passing in AUDIT_NAME_FULL * here indicates that we must compute this value. */ int audit_compare_dname_path(const struct qstr *dname, const char *path, int parentlen) { int dlen, pathlen; const char *p; dlen = dname->len; pathlen = strlen(path); if (pathlen < dlen) return 1; parentlen = parentlen == AUDIT_NAME_FULL ? 
parent_len(path) : parentlen; if (pathlen - parentlen != dlen) return 1; p = path + parentlen; return strncmp(p, dname->name, dlen); } int audit_filter(int msgtype, unsigned int listtype) { struct audit_entry *e; int ret = 1; /* Audit by default */ rcu_read_lock(); list_for_each_entry_rcu(e, &audit_filter_list[listtype], list) { int i, result = 0; for (i = 0; i < e->rule.field_count; i++) { struct audit_field *f = &e->rule.fields[i]; struct lsm_prop prop = { }; pid_t pid; switch (f->type) { case AUDIT_PID: pid = task_tgid_nr(current); result = audit_comparator(pid, f->op, f->val); break; case AUDIT_UID: result = audit_uid_comparator(current_uid(), f->op, f->uid); break; case AUDIT_GID: result = audit_gid_comparator(current_gid(), f->op, f->gid); break; case AUDIT_LOGINUID: result = audit_uid_comparator(audit_get_loginuid(current), f->op, f->uid); break; case AUDIT_LOGINUID_SET: result = audit_comparator(audit_loginuid_set(current), f->op, f->val); break; case AUDIT_MSGTYPE: result = audit_comparator(msgtype, f->op, f->val); break; case AUDIT_SUBJ_USER: case AUDIT_SUBJ_ROLE: case AUDIT_SUBJ_TYPE: case AUDIT_SUBJ_SEN: case AUDIT_SUBJ_CLR: if (f->lsm_rule) { security_current_getlsmprop_subj(&prop); result = security_audit_rule_match( &prop, f->type, f->op, f->lsm_rule); } break; case AUDIT_EXE: result = audit_exe_compare(current, e->rule.exe); if (f->op == Audit_not_equal) result = !result; break; default: goto unlock_and_return; } if (result < 0) /* error */ goto unlock_and_return; if (!result) break; } if (result > 0) { if (e->rule.action == AUDIT_NEVER || listtype == AUDIT_FILTER_EXCLUDE) ret = 0; break; } } unlock_and_return: rcu_read_unlock(); return ret; } static int update_lsm_rule(struct audit_krule *r) { struct audit_entry *entry = container_of(r, struct audit_entry, rule); struct audit_entry *nentry; int err = 0; if (!security_audit_rule_known(r)) return 0; nentry = audit_dupe_rule(r); if (entry->rule.exe) audit_remove_mark(entry->rule.exe); if (IS_ERR(nentry)) { /* save the first error encountered for the * return value */ err = PTR_ERR(nentry); audit_panic("error updating LSM filters"); if (r->watch) list_del(&r->rlist); list_del_rcu(&entry->list); list_del(&r->list); } else { if (r->watch || r->tree) list_replace_init(&r->rlist, &nentry->rule.rlist); list_replace_rcu(&entry->list, &nentry->list); list_replace(&r->list, &nentry->rule.list); } call_rcu(&entry->rcu, audit_free_rule_rcu); return err; } /* This function will re-initialize the lsm_rule field of all applicable rules. * It will traverse the filter lists searching for rules that contain LSM * specific filter fields. When such a rule is found, it is copied, the * LSM field is re-initialized, and the old rule is replaced with the * updated rule. */ int audit_update_lsm_rules(void) { struct audit_krule *r, *n; int i, err = 0; /* audit_filter_mutex synchronizes the writers */ mutex_lock(&audit_filter_mutex); for (i = 0; i < AUDIT_NR_FILTERS; i++) { list_for_each_entry_safe(r, n, &audit_rules_list[i], list) { int res = update_lsm_rule(r); if (!err) err = res; } } mutex_unlock(&audit_filter_mutex); return err; }
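/*
 * Editor's example (not part of auditfilter.c): a minimal user-space
 * sketch re-implementing the parent_len() logic above, to illustrate how
 * the parent portion of a pathname -- including its trailing slash -- is
 * measured. All names here (demo_parent_len, main) are illustrative only.
 */
#include <stdio.h>
#include <string.h>

static int demo_parent_len(const char *path)
{
	int plen = strlen(path);
	const char *p;

	if (plen == 0)
		return 0;

	/* disregard trailing slashes */
	p = path + plen - 1;
	while ((*p == '/') && (p > path))
		p--;

	/* walk backward until the next slash or the beginning of the string */
	while ((*p != '/') && (p > path))
		p--;

	/* include the slash itself in the parent portion */
	if (*p == '/')
		p++;

	return p - path;
}

int main(void)
{
	/* expected: 9 ("/usr/bin/"), 0 (no parent), 1 ("/") */
	printf("%d\n", demo_parent_len("/usr/bin/ls"));
	printf("%d\n", demo_parent_len("ls"));
	printf("%d\n", demo_parent_len("/ls"));
	return 0;
}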
/* mii.c: MII interface library Maintained by Jeff Garzik <jgarzik@pobox.com> Copyright 2001,2002 Jeff Garzik Various code came from myson803.c and other files by Donald Becker. Copyright: Written 1998-2002 by Donald Becker. This software may be used and distributed according to the terms of the GNU General Public License (GPL), incorporated herein by reference. Drivers based on or derived from this code fall under the GPL and must retain the authorship, copyright and license notice. This file is not a complete program and may only be used when the entire operating system is licensed under the GPL.
The author may be reached as becker@scyld.com, or C/O Scyld Computing Corporation 410 Severn Ave., Suite 210 Annapolis MD 21403 */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/ethtool.h> #include <linux/mii.h> static u32 mii_get_an(struct mii_if_info *mii, u16 addr) { int advert; advert = mii->mdio_read(mii->dev, mii->phy_id, addr); return mii_lpa_to_ethtool_lpa_t(advert); } /** * mii_ethtool_gset - get settings that are specified in @ecmd * @mii: MII interface * @ecmd: requested ethtool_cmd * * The @ecmd parameter is expected to have been cleared before calling * mii_ethtool_gset(). */ void mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd) { struct net_device *dev = mii->dev; u16 bmcr, bmsr, ctrl1000 = 0, stat1000 = 0; u32 nego; ecmd->supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII); if (mii->supports_gmii) ecmd->supported |= SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full; /* only supports twisted-pair */ ecmd->port = PORT_MII; /* only supports internal transceiver */ ecmd->transceiver = XCVR_INTERNAL; /* this isn't fully supported at higher layers */ ecmd->phy_address = mii->phy_id; ecmd->mdio_support = ETH_MDIO_SUPPORTS_C22; ecmd->advertising = ADVERTISED_TP | ADVERTISED_MII; bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR); bmsr = mii->mdio_read(dev, mii->phy_id, MII_BMSR); if (mii->supports_gmii) { ctrl1000 = mii->mdio_read(dev, mii->phy_id, MII_CTRL1000); stat1000 = mii->mdio_read(dev, mii->phy_id, MII_STAT1000); } ecmd->advertising |= mii_get_an(mii, MII_ADVERTISE); if (mii->supports_gmii) ecmd->advertising |= mii_ctrl1000_to_ethtool_adv_t(ctrl1000); if (bmcr & BMCR_ANENABLE) { ecmd->advertising |= ADVERTISED_Autoneg; ecmd->autoneg = AUTONEG_ENABLE; if (bmsr & BMSR_ANEGCOMPLETE) { ecmd->lp_advertising = mii_get_an(mii, MII_LPA); ecmd->lp_advertising |= mii_stat1000_to_ethtool_lpa_t(stat1000); } else { ecmd->lp_advertising = 0; } nego = ecmd->advertising & ecmd->lp_advertising; if (nego & (ADVERTISED_1000baseT_Full | ADVERTISED_1000baseT_Half)) { ethtool_cmd_speed_set(ecmd, SPEED_1000); ecmd->duplex = !!(nego & ADVERTISED_1000baseT_Full); } else if (nego & (ADVERTISED_100baseT_Full | ADVERTISED_100baseT_Half)) { ethtool_cmd_speed_set(ecmd, SPEED_100); ecmd->duplex = !!(nego & ADVERTISED_100baseT_Full); } else { ethtool_cmd_speed_set(ecmd, SPEED_10); ecmd->duplex = !!(nego & ADVERTISED_10baseT_Full); } } else { ecmd->autoneg = AUTONEG_DISABLE; ethtool_cmd_speed_set(ecmd, ((bmcr & BMCR_SPEED1000 && (bmcr & BMCR_SPEED100) == 0) ? SPEED_1000 : ((bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10))); ecmd->duplex = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF; } mii->full_duplex = ecmd->duplex; /* ignore maxtxpkt, maxrxpkt for now */ } /** * mii_ethtool_get_link_ksettings - get settings that are specified in @cmd * @mii: MII interface * @cmd: requested ethtool_link_ksettings * * The @cmd parameter is expected to have been cleared before calling * mii_ethtool_get_link_ksettings(). 
*/ void mii_ethtool_get_link_ksettings(struct mii_if_info *mii, struct ethtool_link_ksettings *cmd) { struct net_device *dev = mii->dev; u16 bmcr, bmsr, ctrl1000 = 0, stat1000 = 0; u32 nego, supported, advertising, lp_advertising; supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII); if (mii->supports_gmii) supported |= SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full; /* only supports twisted-pair */ cmd->base.port = PORT_MII; /* this isn't fully supported at higher layers */ cmd->base.phy_address = mii->phy_id; cmd->base.mdio_support = ETH_MDIO_SUPPORTS_C22; advertising = ADVERTISED_TP | ADVERTISED_MII; bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR); bmsr = mii->mdio_read(dev, mii->phy_id, MII_BMSR); if (mii->supports_gmii) { ctrl1000 = mii->mdio_read(dev, mii->phy_id, MII_CTRL1000); stat1000 = mii->mdio_read(dev, mii->phy_id, MII_STAT1000); } advertising |= mii_get_an(mii, MII_ADVERTISE); if (mii->supports_gmii) advertising |= mii_ctrl1000_to_ethtool_adv_t(ctrl1000); if (bmcr & BMCR_ANENABLE) { advertising |= ADVERTISED_Autoneg; cmd->base.autoneg = AUTONEG_ENABLE; if (bmsr & BMSR_ANEGCOMPLETE) { lp_advertising = mii_get_an(mii, MII_LPA); lp_advertising |= mii_stat1000_to_ethtool_lpa_t(stat1000); } else { lp_advertising = 0; } nego = advertising & lp_advertising; if (nego & (ADVERTISED_1000baseT_Full | ADVERTISED_1000baseT_Half)) { cmd->base.speed = SPEED_1000; cmd->base.duplex = !!(nego & ADVERTISED_1000baseT_Full); } else if (nego & (ADVERTISED_100baseT_Full | ADVERTISED_100baseT_Half)) { cmd->base.speed = SPEED_100; cmd->base.duplex = !!(nego & ADVERTISED_100baseT_Full); } else { cmd->base.speed = SPEED_10; cmd->base.duplex = !!(nego & ADVERTISED_10baseT_Full); } } else { cmd->base.autoneg = AUTONEG_DISABLE; cmd->base.speed = ((bmcr & BMCR_SPEED1000 && (bmcr & BMCR_SPEED100) == 0) ? SPEED_1000 : ((bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10)); cmd->base.duplex = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF; lp_advertising = 0; } mii->full_duplex = cmd->base.duplex; ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, supported); ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, advertising); ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising, lp_advertising); /* ignore maxtxpkt, maxrxpkt for now */ } /** * mii_ethtool_sset - set settings that are specified in @ecmd * @mii: MII interface * @ecmd: requested ethtool_cmd * * Returns 0 for success, negative on error. 
*/ int mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd) { struct net_device *dev = mii->dev; u32 speed = ethtool_cmd_speed(ecmd); if (speed != SPEED_10 && speed != SPEED_100 && speed != SPEED_1000) return -EINVAL; if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL) return -EINVAL; if (ecmd->port != PORT_MII) return -EINVAL; if (ecmd->transceiver != XCVR_INTERNAL) return -EINVAL; if (ecmd->phy_address != mii->phy_id) return -EINVAL; if (ecmd->autoneg != AUTONEG_DISABLE && ecmd->autoneg != AUTONEG_ENABLE) return -EINVAL; if ((speed == SPEED_1000) && (!mii->supports_gmii)) return -EINVAL; /* ignore supported, maxtxpkt, maxrxpkt */ if (ecmd->autoneg == AUTONEG_ENABLE) { u32 bmcr, advert, tmp; u32 advert2 = 0, tmp2 = 0; if ((ecmd->advertising & (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) == 0) return -EINVAL; /* advertise only what has been requested */ advert = mii->mdio_read(dev, mii->phy_id, MII_ADVERTISE); tmp = advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4); if (mii->supports_gmii) { advert2 = mii->mdio_read(dev, mii->phy_id, MII_CTRL1000); tmp2 = advert2 & ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL); } tmp |= ethtool_adv_to_mii_adv_t(ecmd->advertising); if (mii->supports_gmii) tmp2 |= ethtool_adv_to_mii_ctrl1000_t(ecmd->advertising); if (advert != tmp) { mii->mdio_write(dev, mii->phy_id, MII_ADVERTISE, tmp); mii->advertising = tmp; } if ((mii->supports_gmii) && (advert2 != tmp2)) mii->mdio_write(dev, mii->phy_id, MII_CTRL1000, tmp2); /* turn on autonegotiation, and force a renegotiate */ bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR); bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); mii->mdio_write(dev, mii->phy_id, MII_BMCR, bmcr); mii->force_media = 0; } else { u32 bmcr, tmp; /* turn off auto negotiation, set speed and duplexity */ bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR); tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_FULLDPLX); if (speed == SPEED_1000) tmp |= BMCR_SPEED1000; else if (speed == SPEED_100) tmp |= BMCR_SPEED100; if (ecmd->duplex == DUPLEX_FULL) { tmp |= BMCR_FULLDPLX; mii->full_duplex = 1; } else mii->full_duplex = 0; if (bmcr != tmp) mii->mdio_write(dev, mii->phy_id, MII_BMCR, tmp); mii->force_media = 1; } return 0; } /** * mii_ethtool_set_link_ksettings - set settings that are specified in @cmd * @mii: MII interfaces * @cmd: requested ethtool_link_ksettings * * Returns 0 for success, negative on error. 
*/ int mii_ethtool_set_link_ksettings(struct mii_if_info *mii, const struct ethtool_link_ksettings *cmd) { struct net_device *dev = mii->dev; u32 speed = cmd->base.speed; if (speed != SPEED_10 && speed != SPEED_100 && speed != SPEED_1000) return -EINVAL; if (cmd->base.duplex != DUPLEX_HALF && cmd->base.duplex != DUPLEX_FULL) return -EINVAL; if (cmd->base.port != PORT_MII) return -EINVAL; if (cmd->base.phy_address != mii->phy_id) return -EINVAL; if (cmd->base.autoneg != AUTONEG_DISABLE && cmd->base.autoneg != AUTONEG_ENABLE) return -EINVAL; if ((speed == SPEED_1000) && (!mii->supports_gmii)) return -EINVAL; /* ignore supported, maxtxpkt, maxrxpkt */ if (cmd->base.autoneg == AUTONEG_ENABLE) { u32 bmcr, advert, tmp; u32 advert2 = 0, tmp2 = 0; u32 advertising; ethtool_convert_link_mode_to_legacy_u32( &advertising, cmd->link_modes.advertising); if ((advertising & (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) == 0) return -EINVAL; /* advertise only what has been requested */ advert = mii->mdio_read(dev, mii->phy_id, MII_ADVERTISE); tmp = advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4); if (mii->supports_gmii) { advert2 = mii->mdio_read(dev, mii->phy_id, MII_CTRL1000); tmp2 = advert2 & ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL); } tmp |= ethtool_adv_to_mii_adv_t(advertising); if (mii->supports_gmii) tmp2 |= ethtool_adv_to_mii_ctrl1000_t(advertising); if (advert != tmp) { mii->mdio_write(dev, mii->phy_id, MII_ADVERTISE, tmp); mii->advertising = tmp; } if ((mii->supports_gmii) && (advert2 != tmp2)) mii->mdio_write(dev, mii->phy_id, MII_CTRL1000, tmp2); /* turn on autonegotiation, and force a renegotiate */ bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR); bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); mii->mdio_write(dev, mii->phy_id, MII_BMCR, bmcr); mii->force_media = 0; } else { u32 bmcr, tmp; /* turn off auto negotiation, set speed and duplexity */ bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR); tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_FULLDPLX); if (speed == SPEED_1000) tmp |= BMCR_SPEED1000; else if (speed == SPEED_100) tmp |= BMCR_SPEED100; if (cmd->base.duplex == DUPLEX_FULL) { tmp |= BMCR_FULLDPLX; mii->full_duplex = 1; } else { mii->full_duplex = 0; } if (bmcr != tmp) mii->mdio_write(dev, mii->phy_id, MII_BMCR, tmp); mii->force_media = 1; } return 0; } /** * mii_check_gmii_support - check if the MII supports Gb interfaces * @mii: the MII interface */ int mii_check_gmii_support(struct mii_if_info *mii) { int reg; reg = mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR); if (reg & BMSR_ESTATEN) { reg = mii->mdio_read(mii->dev, mii->phy_id, MII_ESTATUS); if (reg & (ESTATUS_1000_TFULL | ESTATUS_1000_THALF)) return 1; } return 0; } /** * mii_link_ok - is link status up/ok * @mii: the MII interface * * Returns 1 if the MII reports link status up/ok, 0 otherwise. */ int mii_link_ok (struct mii_if_info *mii) { /* first, a dummy read, needed to latch some MII phys */ mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR); if (mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR) & BMSR_LSTATUS) return 1; return 0; } /** * mii_nway_restart - restart NWay (autonegotiation) for this interface * @mii: the MII interface * * Returns 0 on success, negative on error. 
*/ int mii_nway_restart (struct mii_if_info *mii) { int bmcr; int r = -EINVAL; /* if autoneg is off, it's an error */ bmcr = mii->mdio_read(mii->dev, mii->phy_id, MII_BMCR); if (bmcr & BMCR_ANENABLE) { bmcr |= BMCR_ANRESTART; mii->mdio_write(mii->dev, mii->phy_id, MII_BMCR, bmcr); r = 0; } return r; } /** * mii_check_link - check MII link status * @mii: MII interface * * If the link status changed (previous != current), call * netif_carrier_on() if current link status is Up or call * netif_carrier_off() if current link status is Down. */ void mii_check_link (struct mii_if_info *mii) { int cur_link = mii_link_ok(mii); int prev_link = netif_carrier_ok(mii->dev); if (cur_link && !prev_link) netif_carrier_on(mii->dev); else if (prev_link && !cur_link) netif_carrier_off(mii->dev); } /** * mii_check_media - check the MII interface for a carrier/speed/duplex change * @mii: the MII interface * @ok_to_print: OK to print link up/down messages * @init_media: OK to save duplex mode in @mii * * Returns 1 if the duplex mode changed, 0 if not. * If the media type is forced, always returns 0. */ unsigned int mii_check_media (struct mii_if_info *mii, unsigned int ok_to_print, unsigned int init_media) { unsigned int old_carrier, new_carrier; int advertise, lpa, media, duplex; int lpa2 = 0; /* check current and old link status */ old_carrier = netif_carrier_ok(mii->dev) ? 1 : 0; new_carrier = (unsigned int) mii_link_ok(mii); /* if carrier state did not change, this is a "bounce", * just exit as everything is already set correctly */ if ((!init_media) && (old_carrier == new_carrier)) return 0; /* duplex did not change */ /* no carrier, nothing much to do */ if (!new_carrier) { netif_carrier_off(mii->dev); if (ok_to_print) netdev_info(mii->dev, "link down\n"); return 0; /* duplex did not change */ } /* * we have carrier, see who's on the other end */ netif_carrier_on(mii->dev); if (mii->force_media) { if (ok_to_print) netdev_info(mii->dev, "link up\n"); return 0; /* duplex did not change */ } /* get MII advertise and LPA values */ if ((!init_media) && (mii->advertising)) advertise = mii->advertising; else { advertise = mii->mdio_read(mii->dev, mii->phy_id, MII_ADVERTISE); mii->advertising = advertise; } lpa = mii->mdio_read(mii->dev, mii->phy_id, MII_LPA); if (mii->supports_gmii) lpa2 = mii->mdio_read(mii->dev, mii->phy_id, MII_STAT1000); /* figure out media and duplex from advertise and LPA values */ media = mii_nway_result(lpa & advertise); duplex = (media & ADVERTISE_FULL) ? 1 : 0; if (lpa2 & LPA_1000FULL) duplex = 1; if (ok_to_print) netdev_info(mii->dev, "link up, %uMbps, %s-duplex, lpa 0x%04X\n", lpa2 & (LPA_1000FULL | LPA_1000HALF) ? 1000 : media & (ADVERTISE_100FULL | ADVERTISE_100HALF) ? 100 : 10, duplex ? "full" : "half", lpa); if ((init_media) || (mii->full_duplex != duplex)) { mii->full_duplex = duplex; return 1; /* duplex changed */ } return 0; /* duplex did not change */ } /** * generic_mii_ioctl - main MII ioctl interface * @mii_if: the MII interface * @mii_data: MII ioctl data structure * @cmd: MII ioctl command * @duplex_chg_out: pointer to @duplex_changed status if there was no * ioctl error * * Returns 0 on success, negative on error. 
*/ int generic_mii_ioctl(struct mii_if_info *mii_if, struct mii_ioctl_data *mii_data, int cmd, unsigned int *duplex_chg_out) { int rc = 0; unsigned int duplex_changed = 0; if (duplex_chg_out) *duplex_chg_out = 0; mii_data->phy_id &= mii_if->phy_id_mask; mii_data->reg_num &= mii_if->reg_num_mask; switch(cmd) { case SIOCGMIIPHY: mii_data->phy_id = mii_if->phy_id; fallthrough; case SIOCGMIIREG: mii_data->val_out = mii_if->mdio_read(mii_if->dev, mii_data->phy_id, mii_data->reg_num); break; case SIOCSMIIREG: { u16 val = mii_data->val_in; if (mii_data->phy_id == mii_if->phy_id) { switch(mii_data->reg_num) { case MII_BMCR: { unsigned int new_duplex = 0; if (val & (BMCR_RESET|BMCR_ANENABLE)) mii_if->force_media = 0; else mii_if->force_media = 1; if (mii_if->force_media && (val & BMCR_FULLDPLX)) new_duplex = 1; if (mii_if->full_duplex != new_duplex) { duplex_changed = 1; mii_if->full_duplex = new_duplex; } break; } case MII_ADVERTISE: mii_if->advertising = val; break; default: /* do nothing */ break; } } mii_if->mdio_write(mii_if->dev, mii_data->phy_id, mii_data->reg_num, val); break; } default: rc = -EOPNOTSUPP; break; } if ((rc == 0) && (duplex_chg_out) && (duplex_changed)) *duplex_chg_out = 1; return rc; } MODULE_AUTHOR ("Jeff Garzik <jgarzik@pobox.com>"); MODULE_DESCRIPTION ("MII hardware support library"); MODULE_LICENSE("GPL"); EXPORT_SYMBOL(mii_link_ok); EXPORT_SYMBOL(mii_nway_restart); EXPORT_SYMBOL(mii_ethtool_gset); EXPORT_SYMBOL(mii_ethtool_get_link_ksettings); EXPORT_SYMBOL(mii_ethtool_sset); EXPORT_SYMBOL(mii_ethtool_set_link_ksettings); EXPORT_SYMBOL(mii_check_link); EXPORT_SYMBOL(mii_check_media); EXPORT_SYMBOL(mii_check_gmii_support); EXPORT_SYMBOL(generic_mii_ioctl);
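/*
 * Editor's example (not part of mii.c): a hedged sketch of how a NIC
 * driver typically wires up struct mii_if_info and drives this library.
 * struct my_nic and the my_* functions are hypothetical stand-ins; only
 * the mii_*, if_mii() and netdev_priv() calls are real kernel APIs.
 */
#include <linux/netdevice.h>
#include <linux/mii.h>

struct my_nic {
	struct mii_if_info mii;
	/* ... device registers, locks, etc. ... */
};

/* hypothetical MDIO accessors; a real driver would touch hardware here */
static int my_mdio_read(struct net_device *dev, int phy_id, int location)
{
	return 0xffff;	/* placeholder value */
}

static void my_mdio_write(struct net_device *dev, int phy_id, int location,
			  int val)
{
}

static void my_nic_mii_setup(struct net_device *dev, struct my_nic *priv)
{
	priv->mii.dev = dev;
	priv->mii.phy_id = 1;		/* assumed PHY address */
	priv->mii.phy_id_mask = 0x1f;
	priv->mii.reg_num_mask = 0x1f;
	priv->mii.mdio_read = my_mdio_read;
	priv->mii.mdio_write = my_mdio_write;
	priv->mii.supports_gmii = mii_check_gmii_support(&priv->mii);

	/* set initial carrier state and remember the duplex mode */
	mii_check_media(&priv->mii, 1, 1);
}

/* an ndo_eth_ioctl handler built on generic_mii_ioctl() */
static int my_nic_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct my_nic *priv = netdev_priv(dev);
	unsigned int duplex_changed;
	int rc;

	rc = generic_mii_ioctl(&priv->mii, if_mii(rq), cmd, &duplex_changed);
	if (!rc && duplex_changed) {
		/* a real driver would reprogram its MAC for the new duplex */
	}
	return rc;
}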
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/uaccess.h>
#include <linux/kernel.h>

#include <asm/vsyscall.h>

#ifdef CONFIG_X86_64
bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
{
	unsigned long vaddr = (unsigned long)unsafe_src;

	/*
	 * Do not allow userspace addresses. This disallows
	 * normal userspace and the userspace guard page:
	 */
	if (vaddr < TASK_SIZE_MAX + PAGE_SIZE)
		return false;

	/*
	 * Reading from the vsyscall page may cause an unhandled fault in
	 * certain cases. Though it is at an address above TASK_SIZE_MAX, it is
	 * usually considered as a user space address.
	 */
	if (is_vsyscall_vaddr(vaddr))
		return false;

	/*
	 * Allow everything during early boot before 'x86_virt_bits'
	 * is initialized. Needed for instruction decoding in early
	 * exception handlers.
	 */
	if (!boot_cpu_data.x86_virt_bits)
		return true;

	return __is_canonical_address(vaddr, boot_cpu_data.x86_virt_bits);
}
#else
bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
{
	return (unsigned long)unsafe_src >= TASK_SIZE_MAX;
}
#endif
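/*
 * Editor's example (not part of this file): a hedged sketch of the caller
 * side. copy_from_kernel_nofault() from <linux/uaccess.h> consults the
 * copy_from_kernel_nofault_allowed() hook above before dereferencing, so
 * a bad pointer yields an error code instead of an unhandled fault.
 * demo_peek_kernel_word() is a hypothetical helper.
 */
#include <linux/uaccess.h>
#include <linux/printk.h>

static void demo_peek_kernel_word(const void *addr)
{
	unsigned long val;

	if (copy_from_kernel_nofault(&val, addr, sizeof(val)))
		pr_info("%px is not safely readable\n", addr);
	else
		pr_info("%px contains 0x%lx\n", addr, val);
}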
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C)2003,2004 USAGI/WIDE Project * * Authors Mitsuru KANDA <mk@linux-ipv6.org> * YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org> * * Based on net/ipv4/xfrm4_tunnel.c */ #include <linux/module.h> #include <linux/xfrm.h> #include <linux/slab.h> #include <linux/rculist.h> #include <net/ip.h> #include <net/xfrm.h> #include <net/ipv6.h> #include <linux/ipv6.h> #include <linux/icmpv6.h> #include <linux/mutex.h> #include <net/netns/generic.h> #define XFRM6_TUNNEL_SPI_BYADDR_HSIZE 256 #define XFRM6_TUNNEL_SPI_BYSPI_HSIZE 256 #define XFRM6_TUNNEL_SPI_MIN 1 #define XFRM6_TUNNEL_SPI_MAX 0xffffffff struct xfrm6_tunnel_net { struct hlist_head spi_byaddr[XFRM6_TUNNEL_SPI_BYADDR_HSIZE]; struct hlist_head spi_byspi[XFRM6_TUNNEL_SPI_BYSPI_HSIZE]; u32 spi; }; static unsigned int xfrm6_tunnel_net_id __read_mostly; static inline struct xfrm6_tunnel_net *xfrm6_tunnel_pernet(struct net *net) { return net_generic(net, xfrm6_tunnel_net_id); } /* * xfrm_tunnel_spi things are for allocating unique id ("spi") * per xfrm_address_t.
*/ struct xfrm6_tunnel_spi { struct hlist_node list_byaddr; struct hlist_node list_byspi; xfrm_address_t addr; u32 spi; refcount_t refcnt; struct rcu_head rcu_head; }; static DEFINE_SPINLOCK(xfrm6_tunnel_spi_lock); static struct kmem_cache *xfrm6_tunnel_spi_kmem __read_mostly; static inline unsigned int xfrm6_tunnel_spi_hash_byaddr(const xfrm_address_t *addr) { unsigned int h; h = ipv6_addr_hash((const struct in6_addr *)addr); h ^= h >> 16; h ^= h >> 8; h &= XFRM6_TUNNEL_SPI_BYADDR_HSIZE - 1; return h; } static inline unsigned int xfrm6_tunnel_spi_hash_byspi(u32 spi) { return spi % XFRM6_TUNNEL_SPI_BYSPI_HSIZE; } static struct xfrm6_tunnel_spi *__xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr) { struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net); struct xfrm6_tunnel_spi *x6spi; hlist_for_each_entry_rcu(x6spi, &xfrm6_tn->spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)], list_byaddr, lockdep_is_held(&xfrm6_tunnel_spi_lock)) { if (xfrm6_addr_equal(&x6spi->addr, saddr)) return x6spi; } return NULL; } __be32 xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr) { struct xfrm6_tunnel_spi *x6spi; u32 spi; rcu_read_lock_bh(); x6spi = __xfrm6_tunnel_spi_lookup(net, saddr); spi = x6spi ? x6spi->spi : 0; rcu_read_unlock_bh(); return htonl(spi); } EXPORT_SYMBOL(xfrm6_tunnel_spi_lookup); static int __xfrm6_tunnel_spi_check(struct net *net, u32 spi) { struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net); struct xfrm6_tunnel_spi *x6spi; int index = xfrm6_tunnel_spi_hash_byspi(spi); hlist_for_each_entry(x6spi, &xfrm6_tn->spi_byspi[index], list_byspi) { if (x6spi->spi == spi) return -1; } return index; } static u32 __xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr) { struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net); u32 spi; struct xfrm6_tunnel_spi *x6spi; int index; if (xfrm6_tn->spi < XFRM6_TUNNEL_SPI_MIN || xfrm6_tn->spi >= XFRM6_TUNNEL_SPI_MAX) xfrm6_tn->spi = XFRM6_TUNNEL_SPI_MIN; else xfrm6_tn->spi++; for (spi = xfrm6_tn->spi; spi <= XFRM6_TUNNEL_SPI_MAX; spi++) { index = __xfrm6_tunnel_spi_check(net, spi); if (index >= 0) goto alloc_spi; if (spi == XFRM6_TUNNEL_SPI_MAX) break; } for (spi = XFRM6_TUNNEL_SPI_MIN; spi < xfrm6_tn->spi; spi++) { index = __xfrm6_tunnel_spi_check(net, spi); if (index >= 0) goto alloc_spi; } spi = 0; goto out; alloc_spi: xfrm6_tn->spi = spi; x6spi = kmem_cache_alloc(xfrm6_tunnel_spi_kmem, GFP_ATOMIC); if (!x6spi) goto out; memcpy(&x6spi->addr, saddr, sizeof(x6spi->addr)); x6spi->spi = spi; refcount_set(&x6spi->refcnt, 1); hlist_add_head_rcu(&x6spi->list_byspi, &xfrm6_tn->spi_byspi[index]); index = xfrm6_tunnel_spi_hash_byaddr(saddr); hlist_add_head_rcu(&x6spi->list_byaddr, &xfrm6_tn->spi_byaddr[index]); out: return spi; } __be32 xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr) { struct xfrm6_tunnel_spi *x6spi; u32 spi; spin_lock_bh(&xfrm6_tunnel_spi_lock); x6spi = __xfrm6_tunnel_spi_lookup(net, saddr); if (x6spi) { refcount_inc(&x6spi->refcnt); spi = x6spi->spi; } else spi = __xfrm6_tunnel_alloc_spi(net, saddr); spin_unlock_bh(&xfrm6_tunnel_spi_lock); return htonl(spi); } EXPORT_SYMBOL(xfrm6_tunnel_alloc_spi); static void x6spi_destroy_rcu(struct rcu_head *head) { kmem_cache_free(xfrm6_tunnel_spi_kmem, container_of(head, struct xfrm6_tunnel_spi, rcu_head)); } static void xfrm6_tunnel_free_spi(struct net *net, xfrm_address_t *saddr) { struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net); struct xfrm6_tunnel_spi *x6spi; struct hlist_node *n; 
spin_lock_bh(&xfrm6_tunnel_spi_lock); hlist_for_each_entry_safe(x6spi, n, &xfrm6_tn->spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)], list_byaddr) { if (xfrm6_addr_equal(&x6spi->addr, saddr)) { if (refcount_dec_and_test(&x6spi->refcnt)) { hlist_del_rcu(&x6spi->list_byaddr); hlist_del_rcu(&x6spi->list_byspi); call_rcu(&x6spi->rcu_head, x6spi_destroy_rcu); break; } } } spin_unlock_bh(&xfrm6_tunnel_spi_lock); } static int xfrm6_tunnel_output(struct xfrm_state *x, struct sk_buff *skb) { skb_push(skb, -skb_network_offset(skb)); return 0; } static int xfrm6_tunnel_input(struct xfrm_state *x, struct sk_buff *skb) { return skb_network_header(skb)[IP6CB(skb)->nhoff]; } static int xfrm6_tunnel_rcv(struct sk_buff *skb) { struct net *net = dev_net(skb->dev); const struct ipv6hdr *iph = ipv6_hdr(skb); __be32 spi; spi = xfrm6_tunnel_spi_lookup(net, (const xfrm_address_t *)&iph->saddr); return xfrm6_rcv_spi(skb, IPPROTO_IPV6, spi, NULL); } static int xfrm6_tunnel_err(struct sk_buff *skb, struct inet6_skb_parm *opt, u8 type, u8 code, int offset, __be32 info) { /* xfrm6_tunnel native err handling */ switch (type) { case ICMPV6_DEST_UNREACH: switch (code) { case ICMPV6_NOROUTE: case ICMPV6_ADM_PROHIBITED: case ICMPV6_NOT_NEIGHBOUR: case ICMPV6_ADDR_UNREACH: case ICMPV6_PORT_UNREACH: default: break; } break; case ICMPV6_PKT_TOOBIG: break; case ICMPV6_TIME_EXCEED: switch (code) { case ICMPV6_EXC_HOPLIMIT: break; case ICMPV6_EXC_FRAGTIME: default: break; } break; case ICMPV6_PARAMPROB: switch (code) { case ICMPV6_HDR_FIELD: break; case ICMPV6_UNK_NEXTHDR: break; case ICMPV6_UNK_OPTION: break; } break; default: break; } return 0; } static int xfrm6_tunnel_init_state(struct xfrm_state *x, struct netlink_ext_ack *extack) { if (x->props.mode != XFRM_MODE_TUNNEL) { NL_SET_ERR_MSG(extack, "IPv6 tunnel can only be used with tunnel mode"); return -EINVAL; } if (x->encap) { NL_SET_ERR_MSG(extack, "IPv6 tunnel is not compatible with encapsulation"); return -EINVAL; } x->props.header_len = sizeof(struct ipv6hdr); return 0; } static void xfrm6_tunnel_destroy(struct xfrm_state *x) { struct net *net = xs_net(x); xfrm6_tunnel_free_spi(net, (xfrm_address_t *)&x->props.saddr); } static const struct xfrm_type xfrm6_tunnel_type = { .owner = THIS_MODULE, .proto = IPPROTO_IPV6, .init_state = xfrm6_tunnel_init_state, .destructor = xfrm6_tunnel_destroy, .input = xfrm6_tunnel_input, .output = xfrm6_tunnel_output, }; static struct xfrm6_tunnel xfrm6_tunnel_handler __read_mostly = { .handler = xfrm6_tunnel_rcv, .err_handler = xfrm6_tunnel_err, .priority = 3, }; static struct xfrm6_tunnel xfrm46_tunnel_handler __read_mostly = { .handler = xfrm6_tunnel_rcv, .err_handler = xfrm6_tunnel_err, .priority = 3, }; static int __net_init xfrm6_tunnel_net_init(struct net *net) { struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net); unsigned int i; for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++) INIT_HLIST_HEAD(&xfrm6_tn->spi_byaddr[i]); for (i = 0; i < XFRM6_TUNNEL_SPI_BYSPI_HSIZE; i++) INIT_HLIST_HEAD(&xfrm6_tn->spi_byspi[i]); xfrm6_tn->spi = 0; return 0; } static void __net_exit xfrm6_tunnel_net_exit(struct net *net) { struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net); unsigned int i; xfrm_flush_gc(); xfrm_state_flush(net, 0, false, true); for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++) WARN_ON_ONCE(!hlist_empty(&xfrm6_tn->spi_byaddr[i])); for (i = 0; i < XFRM6_TUNNEL_SPI_BYSPI_HSIZE; i++) WARN_ON_ONCE(!hlist_empty(&xfrm6_tn->spi_byspi[i])); } static struct pernet_operations xfrm6_tunnel_net_ops = { .init = 
xfrm6_tunnel_net_init, .exit = xfrm6_tunnel_net_exit, .id = &xfrm6_tunnel_net_id, .size = sizeof(struct xfrm6_tunnel_net), }; static int __init xfrm6_tunnel_init(void) { int rv; xfrm6_tunnel_spi_kmem = KMEM_CACHE(xfrm6_tunnel_spi, SLAB_HWCACHE_ALIGN); if (!xfrm6_tunnel_spi_kmem) return -ENOMEM; rv = register_pernet_subsys(&xfrm6_tunnel_net_ops); if (rv < 0) goto out_pernet; rv = xfrm_register_type(&xfrm6_tunnel_type, AF_INET6); if (rv < 0) goto out_type; rv = xfrm6_tunnel_register(&xfrm6_tunnel_handler, AF_INET6); if (rv < 0) goto out_xfrm6; rv = xfrm6_tunnel_register(&xfrm46_tunnel_handler, AF_INET); if (rv < 0) goto out_xfrm46; return 0; out_xfrm46: xfrm6_tunnel_deregister(&xfrm6_tunnel_handler, AF_INET6); out_xfrm6: xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6); out_type: unregister_pernet_subsys(&xfrm6_tunnel_net_ops); out_pernet: kmem_cache_destroy(xfrm6_tunnel_spi_kmem); return rv; } static void __exit xfrm6_tunnel_fini(void) { xfrm6_tunnel_deregister(&xfrm46_tunnel_handler, AF_INET); xfrm6_tunnel_deregister(&xfrm6_tunnel_handler, AF_INET6); xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6); unregister_pernet_subsys(&xfrm6_tunnel_net_ops); /* Someone may still hold a reference to an xfrm6_tunnel_spi, * so wait for pending RCU callbacks before destroying the cache. */ rcu_barrier(); kmem_cache_destroy(xfrm6_tunnel_spi_kmem); } module_init(xfrm6_tunnel_init); module_exit(xfrm6_tunnel_fini); MODULE_DESCRIPTION("IPv6 XFRM tunnel driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_IPV6);
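/*
 * Editor's note: a minimal user-space model of the dual-index SPI table
 * used above. Each allocated SPI sits on two hash chains at once, one
 * keyed by the peer's source address and one by the SPI value itself,
 * so xfrm6_tunnel_spi_lookup() and __xfrm6_tunnel_spi_check() both stay
 * O(1); a refcount lets repeated requests for the same peer share one
 * SPI. Everything here (spi_node, string "addresses", the tiny SPI
 * window) is an illustrative assumption, not kernel code.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define BYADDR_HSIZE 16
#define BYSPI_HSIZE 16
#define SPI_MIN 1
#define SPI_MAX 64 /* tiny window so the wrap-around logic is easy to watch */

struct spi_node {
	struct spi_node *next_byaddr;
	struct spi_node *next_byspi;
	char addr[40]; /* stands in for an IPv6 source address */
	unsigned int spi;
	int refcnt;
};

static struct spi_node *byaddr[BYADDR_HSIZE];
static struct spi_node *byspi[BYSPI_HSIZE];
static unsigned int last_spi;

static unsigned int hash_addr(const char *a)
{
	unsigned int h = 5381;

	while (*a)
		h = h * 33 + (unsigned char)*a++;
	return h % BYADDR_HSIZE;
}

static struct spi_node *lookup_byaddr(const char *a)
{
	struct spi_node *n;

	for (n = byaddr[hash_addr(a)]; n; n = n->next_byaddr)
		if (!strcmp(n->addr, a))
			return n;
	return NULL;
}

static int spi_in_use(unsigned int spi)
{
	struct spi_node *n;

	for (n = byspi[spi % BYSPI_HSIZE]; n; n = n->next_byspi)
		if (n->spi == spi)
			return 1;
	return 0;
}

/* Like __xfrm6_tunnel_alloc_spi(): resume after the last value handed
 * out, wrap at SPI_MAX, return 0 when the whole window is taken. */
static unsigned int alloc_spi(const char *a)
{
	struct spi_node *n = lookup_byaddr(a);
	unsigned int spi, tried;

	if (n) { /* same peer asks again: share the SPI, bump the count */
		n->refcnt++;
		return n->spi;
	}
	spi = last_spi + 1;
	for (tried = 0; tried < SPI_MAX; tried++, spi++) {
		if (spi > SPI_MAX)
			spi = SPI_MIN;
		if (!spi_in_use(spi))
			break;
	}
	if (tried == SPI_MAX)
		return 0;
	n = calloc(1, sizeof(*n));
	if (!n)
		return 0;
	last_spi = spi;
	snprintf(n->addr, sizeof(n->addr), "%s", a);
	n->spi = spi;
	n->refcnt = 1;
	n->next_byaddr = byaddr[hash_addr(a)];
	byaddr[hash_addr(a)] = n;
	n->next_byspi = byspi[spi % BYSPI_HSIZE];
	byspi[spi % BYSPI_HSIZE] = n;
	return spi;
}

int main(void)
{
	printf("peer A -> spi %u\n", alloc_spi("2001:db8::1"));
	printf("peer B -> spi %u\n", alloc_spi("2001:db8::2"));
	printf("peer A -> spi %u (shared, refcount bumped)\n",
	       alloc_spi("2001:db8::1"));
	return 0;
}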
// SPDX-License-Identifier: GPL-2.0-or-later /* * cpia CPiA (1) gspca driver * * Copyright (C) 2010-2011 Hans de Goede <hdegoede@redhat.com> * * This module is adapted from the in-kernel v4l1 cpia driver, which is: * * (C) Copyright 1999-2000 Peter Pregler * (C) Copyright 1999-2000 Scott J. Bertin * (C) Copyright 1999-2000 Johannes Erdfelt <johannes@erdfelt.com> * (C) Copyright 2000 STMicroelectronics */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define MODULE_NAME "cpia1" #include <linux/input.h> #include <linux/sched/signal.h> #include <linux/bitops.h> #include "gspca.h" MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>"); MODULE_DESCRIPTION("Vision CPiA"); MODULE_LICENSE("GPL"); /* constant values */ #define MAGIC_0 0x19 #define MAGIC_1 0x68 #define DATA_IN 0xc0 #define DATA_OUT 0x40 #define VIDEOSIZE_QCIF 0 /* 176x144 */ #define VIDEOSIZE_CIF 1 /* 352x288 */ #define SUBSAMPLE_420 0 #define SUBSAMPLE_422 1 #define YUVORDER_YUYV 0 #define YUVORDER_UYVY 1 #define NOT_COMPRESSED 0 #define COMPRESSED 1 #define NO_DECIMATION 0 #define DECIMATION_ENAB 1 #define EOI 0xff /* End Of Image */ #define EOL 0xfd /* End Of Line */ #define FRAME_HEADER_SIZE 64 /* Image grab modes */ #define CPIA_GRAB_SINGLE 0 #define CPIA_GRAB_CONTINEOUS 1 /* Compression parameters */ #define CPIA_COMPRESSION_NONE 0 #define CPIA_COMPRESSION_AUTO 1 #define CPIA_COMPRESSION_MANUAL 2 #define CPIA_COMPRESSION_TARGET_QUALITY 0 #define CPIA_COMPRESSION_TARGET_FRAMERATE 1 /* Return offsets for GetCameraState */ #define SYSTEMSTATE 0 #define GRABSTATE 1 #define STREAMSTATE 2 #define FATALERROR 3 #define CMDERROR 4 #define DEBUGFLAGS 5 #define VPSTATUS 6 #define ERRORCODE 7 /* SystemState */ #define UNINITIALISED_STATE 0 #define PASS_THROUGH_STATE 1 #define LO_POWER_STATE 2 #define HI_POWER_STATE 3 #define WARM_BOOT_STATE 4 /* GrabState */ #define GRAB_IDLE 0 #define GRAB_ACTIVE 1 #define GRAB_DONE 2 /* StreamState */ #define STREAM_NOT_READY 0 #define STREAM_READY 1 #define STREAM_OPEN 2 #define STREAM_PAUSED 3 #define STREAM_FINISHED 4 /* Fatal Error, CmdError, and DebugFlags */ #define CPIA_FLAG 1 #define SYSTEM_FLAG 2 #define INT_CTRL_FLAG 4 #define PROCESS_FLAG 8 #define COM_FLAG 16 #define
VP_CTRL_FLAG 32 #define CAPTURE_FLAG 64 #define DEBUG_FLAG 128 /* VPStatus */ #define VP_STATE_OK 0x00 #define VP_STATE_FAILED_VIDEOINIT 0x01 #define VP_STATE_FAILED_AECACBINIT 0x02 #define VP_STATE_AEC_MAX 0x04 #define VP_STATE_ACB_BMAX 0x08 #define VP_STATE_ACB_RMIN 0x10 #define VP_STATE_ACB_GMIN 0x20 #define VP_STATE_ACB_RMAX 0x40 #define VP_STATE_ACB_GMAX 0x80 /* default (minimum) compensation values */ #define COMP_RED 220 #define COMP_GREEN1 214 #define COMP_GREEN2 COMP_GREEN1 #define COMP_BLUE 230 /* exposure status */ #define EXPOSURE_VERY_LIGHT 0 #define EXPOSURE_LIGHT 1 #define EXPOSURE_NORMAL 2 #define EXPOSURE_DARK 3 #define EXPOSURE_VERY_DARK 4 #define CPIA_MODULE_CPIA (0 << 5) #define CPIA_MODULE_SYSTEM (1 << 5) #define CPIA_MODULE_VP_CTRL (5 << 5) #define CPIA_MODULE_CAPTURE (6 << 5) #define CPIA_MODULE_DEBUG (7 << 5) #define INPUT (DATA_IN << 8) #define OUTPUT (DATA_OUT << 8) #define CPIA_COMMAND_GetCPIAVersion (INPUT | CPIA_MODULE_CPIA | 1) #define CPIA_COMMAND_GetPnPID (INPUT | CPIA_MODULE_CPIA | 2) #define CPIA_COMMAND_GetCameraStatus (INPUT | CPIA_MODULE_CPIA | 3) #define CPIA_COMMAND_GotoHiPower (OUTPUT | CPIA_MODULE_CPIA | 4) #define CPIA_COMMAND_GotoLoPower (OUTPUT | CPIA_MODULE_CPIA | 5) #define CPIA_COMMAND_GotoSuspend (OUTPUT | CPIA_MODULE_CPIA | 7) #define CPIA_COMMAND_GotoPassThrough (OUTPUT | CPIA_MODULE_CPIA | 8) #define CPIA_COMMAND_ModifyCameraStatus (OUTPUT | CPIA_MODULE_CPIA | 10) #define CPIA_COMMAND_ReadVCRegs (INPUT | CPIA_MODULE_SYSTEM | 1) #define CPIA_COMMAND_WriteVCReg (OUTPUT | CPIA_MODULE_SYSTEM | 2) #define CPIA_COMMAND_ReadMCPorts (INPUT | CPIA_MODULE_SYSTEM | 3) #define CPIA_COMMAND_WriteMCPort (OUTPUT | CPIA_MODULE_SYSTEM | 4) #define CPIA_COMMAND_SetBaudRate (OUTPUT | CPIA_MODULE_SYSTEM | 5) #define CPIA_COMMAND_SetECPTiming (OUTPUT | CPIA_MODULE_SYSTEM | 6) #define CPIA_COMMAND_ReadIDATA (INPUT | CPIA_MODULE_SYSTEM | 7) #define CPIA_COMMAND_WriteIDATA (OUTPUT | CPIA_MODULE_SYSTEM | 8) #define CPIA_COMMAND_GenericCall (OUTPUT | CPIA_MODULE_SYSTEM | 9) #define CPIA_COMMAND_I2CStart (OUTPUT | CPIA_MODULE_SYSTEM | 10) #define CPIA_COMMAND_I2CStop (OUTPUT | CPIA_MODULE_SYSTEM | 11) #define CPIA_COMMAND_I2CWrite (OUTPUT | CPIA_MODULE_SYSTEM | 12) #define CPIA_COMMAND_I2CRead (INPUT | CPIA_MODULE_SYSTEM | 13) #define CPIA_COMMAND_GetVPVersion (INPUT | CPIA_MODULE_VP_CTRL | 1) #define CPIA_COMMAND_ResetFrameCounter (INPUT | CPIA_MODULE_VP_CTRL | 2) #define CPIA_COMMAND_SetColourParams (OUTPUT | CPIA_MODULE_VP_CTRL | 3) #define CPIA_COMMAND_SetExposure (OUTPUT | CPIA_MODULE_VP_CTRL | 4) #define CPIA_COMMAND_SetColourBalance (OUTPUT | CPIA_MODULE_VP_CTRL | 6) #define CPIA_COMMAND_SetSensorFPS (OUTPUT | CPIA_MODULE_VP_CTRL | 7) #define CPIA_COMMAND_SetVPDefaults (OUTPUT | CPIA_MODULE_VP_CTRL | 8) #define CPIA_COMMAND_SetApcor (OUTPUT | CPIA_MODULE_VP_CTRL | 9) #define CPIA_COMMAND_SetFlickerCtrl (OUTPUT | CPIA_MODULE_VP_CTRL | 10) #define CPIA_COMMAND_SetVLOffset (OUTPUT | CPIA_MODULE_VP_CTRL | 11) #define CPIA_COMMAND_GetColourParams (INPUT | CPIA_MODULE_VP_CTRL | 16) #define CPIA_COMMAND_GetColourBalance (INPUT | CPIA_MODULE_VP_CTRL | 17) #define CPIA_COMMAND_GetExposure (INPUT | CPIA_MODULE_VP_CTRL | 18) #define CPIA_COMMAND_SetSensorMatrix (OUTPUT | CPIA_MODULE_VP_CTRL | 19) #define CPIA_COMMAND_ColourBars (OUTPUT | CPIA_MODULE_VP_CTRL | 25) #define CPIA_COMMAND_ReadVPRegs (INPUT | CPIA_MODULE_VP_CTRL | 30) #define CPIA_COMMAND_WriteVPReg (OUTPUT | CPIA_MODULE_VP_CTRL | 31) #define CPIA_COMMAND_GrabFrame (OUTPUT | CPIA_MODULE_CAPTURE | 1) #define 
CPIA_COMMAND_UploadFrame (OUTPUT | CPIA_MODULE_CAPTURE | 2) #define CPIA_COMMAND_SetGrabMode (OUTPUT | CPIA_MODULE_CAPTURE | 3) #define CPIA_COMMAND_InitStreamCap (OUTPUT | CPIA_MODULE_CAPTURE | 4) #define CPIA_COMMAND_FiniStreamCap (OUTPUT | CPIA_MODULE_CAPTURE | 5) #define CPIA_COMMAND_StartStreamCap (OUTPUT | CPIA_MODULE_CAPTURE | 6) #define CPIA_COMMAND_EndStreamCap (OUTPUT | CPIA_MODULE_CAPTURE | 7) #define CPIA_COMMAND_SetFormat (OUTPUT | CPIA_MODULE_CAPTURE | 8) #define CPIA_COMMAND_SetROI (OUTPUT | CPIA_MODULE_CAPTURE | 9) #define CPIA_COMMAND_SetCompression (OUTPUT | CPIA_MODULE_CAPTURE | 10) #define CPIA_COMMAND_SetCompressionTarget (OUTPUT | CPIA_MODULE_CAPTURE | 11) #define CPIA_COMMAND_SetYUVThresh (OUTPUT | CPIA_MODULE_CAPTURE | 12) #define CPIA_COMMAND_SetCompressionParams (OUTPUT | CPIA_MODULE_CAPTURE | 13) #define CPIA_COMMAND_DiscardFrame (OUTPUT | CPIA_MODULE_CAPTURE | 14) #define CPIA_COMMAND_GrabReset (OUTPUT | CPIA_MODULE_CAPTURE | 15) #define CPIA_COMMAND_OutputRS232 (OUTPUT | CPIA_MODULE_DEBUG | 1) #define CPIA_COMMAND_AbortProcess (OUTPUT | CPIA_MODULE_DEBUG | 4) #define CPIA_COMMAND_SetDramPage (OUTPUT | CPIA_MODULE_DEBUG | 5) #define CPIA_COMMAND_StartDramUpload (OUTPUT | CPIA_MODULE_DEBUG | 6) #define CPIA_COMMAND_StartDummyDtream (OUTPUT | CPIA_MODULE_DEBUG | 8) #define CPIA_COMMAND_AbortStream (OUTPUT | CPIA_MODULE_DEBUG | 9) #define CPIA_COMMAND_DownloadDRAM (OUTPUT | CPIA_MODULE_DEBUG | 10) #define CPIA_COMMAND_Null (OUTPUT | CPIA_MODULE_DEBUG | 11) #define ROUND_UP_EXP_FOR_FLICKER 15 /* Constants for automatic frame rate adjustment */ #define MAX_EXP 302 #define MAX_EXP_102 255 #define LOW_EXP 140 #define VERY_LOW_EXP 70 #define TC 94 #define EXP_ACC_DARK 50 #define EXP_ACC_LIGHT 90 #define HIGH_COMP_102 160 #define MAX_COMP 239 #define DARK_TIME 3 #define LIGHT_TIME 3 #define FIRMWARE_VERSION(x, y) (sd->params.version.firmwareVersion == (x) && \ sd->params.version.firmwareRevision == (y)) #define CPIA1_CID_COMP_TARGET (V4L2_CTRL_CLASS_USER + 0x1000) #define BRIGHTNESS_DEF 50 #define CONTRAST_DEF 48 #define SATURATION_DEF 50 #define FREQ_DEF V4L2_CID_POWER_LINE_FREQUENCY_50HZ #define ILLUMINATORS_1_DEF 0 #define ILLUMINATORS_2_DEF 0 #define COMP_TARGET_DEF CPIA_COMPRESSION_TARGET_QUALITY /* Developer's Guide Table 5 p 3-34 * indexed by [mains][sensorFps.baserate][sensorFps.divisor]*/ static u8 flicker_jumps[2][2][4] = { { { 76, 38, 19, 9 }, { 92, 46, 23, 11 } }, { { 64, 32, 16, 8 }, { 76, 38, 19, 9} } }; struct cam_params { struct { u8 firmwareVersion; u8 firmwareRevision; u8 vcVersion; u8 vcRevision; } version; struct { u16 vendor; u16 product; u16 deviceRevision; } pnpID; struct { u8 vpVersion; u8 vpRevision; u16 cameraHeadID; } vpVersion; struct { u8 systemState; u8 grabState; u8 streamState; u8 fatalError; u8 cmdError; u8 debugFlags; u8 vpStatus; u8 errorCode; } status; struct { u8 brightness; u8 contrast; u8 saturation; } colourParams; struct { u8 gainMode; u8 expMode; u8 compMode; u8 centreWeight; u8 gain; u8 fineExp; u8 coarseExpLo; u8 coarseExpHi; u8 redComp; u8 green1Comp; u8 green2Comp; u8 blueComp; } exposure; struct { u8 balanceMode; u8 redGain; u8 greenGain; u8 blueGain; } colourBalance; struct { u8 divisor; u8 baserate; } sensorFps; struct { u8 gain1; u8 gain2; u8 gain4; u8 gain8; } apcor; struct { u8 disabled; u8 flickerMode; u8 coarseJump; u8 allowableOverExposure; } flickerControl; struct { u8 gain1; u8 gain2; u8 gain4; u8 gain8; } vlOffset; struct { u8 mode; u8 decimation; } compression; struct { u8 frTargeting; u8 targetFR; u8 targetQ; } 
compressionTarget; struct { u8 yThreshold; u8 uvThreshold; } yuvThreshold; struct { u8 hysteresis; u8 threshMax; u8 smallStep; u8 largeStep; u8 decimationHysteresis; u8 frDiffStepThresh; u8 qDiffStepThresh; u8 decimationThreshMod; } compressionParams; struct { u8 videoSize; /* CIF/QCIF */ u8 subSample; u8 yuvOrder; } format; struct { /* Intel QX3 specific data */ u8 qx3_detected; /* a QX3 is present */ u8 toplight; /* top light lit , R/W */ u8 bottomlight; /* bottom light lit, R/W */ u8 button; /* snapshot button pressed (R/O) */ u8 cradled; /* microscope is in cradle (R/O) */ } qx3; struct { u8 colStart; /* skip first 8*colStart pixels */ u8 colEnd; /* finish at 8*colEnd pixels */ u8 rowStart; /* skip first 4*rowStart lines */ u8 rowEnd; /* finish at 4*rowEnd lines */ } roi; u8 ecpTiming; u8 streamStartLine; }; /* specific webcam descriptor */ struct sd { struct gspca_dev gspca_dev; /* !! must be the first item */ struct cam_params params; /* camera settings */ atomic_t cam_exposure; atomic_t fps; int exposure_count; u8 exposure_status; struct v4l2_ctrl *freq; u8 mainsFreq; /* 0 = 50hz, 1 = 60hz */ u8 first_frame; }; static const struct v4l2_pix_format mode[] = { {160, 120, V4L2_PIX_FMT_CPIA1, V4L2_FIELD_NONE, /* The sizeimage is trial and error, as with low framerates * the camera will pad out usb frames, making the image * data larger than strictly necessary */ .bytesperline = 160, .sizeimage = 65536, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 3}, {176, 144, V4L2_PIX_FMT_CPIA1, V4L2_FIELD_NONE, .bytesperline = 172, .sizeimage = 65536, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 2}, {320, 240, V4L2_PIX_FMT_CPIA1, V4L2_FIELD_NONE, .bytesperline = 320, .sizeimage = 262144, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 1}, {352, 288, V4L2_PIX_FMT_CPIA1, V4L2_FIELD_NONE, .bytesperline = 352, .sizeimage = 262144, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 0}, }; /********************************************************************** * * General functions * **********************************************************************/ static int cpia_usb_transferCmd(struct gspca_dev *gspca_dev, u8 *command) { u8 requesttype; unsigned int pipe; int ret, databytes = command[6] | (command[7] << 8); /* Sometimes we see spurious EPIPE errors */ int retries = 3; if (command[0] == DATA_IN) { pipe = usb_rcvctrlpipe(gspca_dev->dev, 0); requesttype = USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE; } else if (command[0] == DATA_OUT) { pipe = usb_sndctrlpipe(gspca_dev->dev, 0); requesttype = USB_TYPE_VENDOR | USB_RECIP_DEVICE; } else { gspca_err(gspca_dev, "Unexpected first byte of command: %x\n", command[0]); return -EINVAL; } retry: ret = usb_control_msg(gspca_dev->dev, pipe, command[1], requesttype, command[2] | (command[3] << 8), command[4] | (command[5] << 8), gspca_dev->usb_buf, databytes, 1000); if (ret < 0) pr_err("usb_control_msg %02x, error %d\n", command[1], ret); if (ret == -EPIPE && retries > 0) { retries--; goto retry; } return (ret < 0) ? 
ret : 0; } /* send an arbitrary command to the camera */ static int do_command(struct gspca_dev *gspca_dev, u16 command, u8 a, u8 b, u8 c, u8 d) { struct sd *sd = (struct sd *) gspca_dev; int ret, datasize; u8 cmd[8]; switch (command) { case CPIA_COMMAND_GetCPIAVersion: case CPIA_COMMAND_GetPnPID: case CPIA_COMMAND_GetCameraStatus: case CPIA_COMMAND_GetVPVersion: case CPIA_COMMAND_GetColourParams: case CPIA_COMMAND_GetColourBalance: case CPIA_COMMAND_GetExposure: datasize = 8; break; case CPIA_COMMAND_ReadMCPorts: case CPIA_COMMAND_ReadVCRegs: datasize = 4; break; default: datasize = 0; break; } cmd[0] = command >> 8; cmd[1] = command & 0xff; cmd[2] = a; cmd[3] = b; cmd[4] = c; cmd[5] = d; cmd[6] = datasize; cmd[7] = 0; ret = cpia_usb_transferCmd(gspca_dev, cmd); if (ret) return ret; switch (command) { case CPIA_COMMAND_GetCPIAVersion: sd->params.version.firmwareVersion = gspca_dev->usb_buf[0]; sd->params.version.firmwareRevision = gspca_dev->usb_buf[1]; sd->params.version.vcVersion = gspca_dev->usb_buf[2]; sd->params.version.vcRevision = gspca_dev->usb_buf[3]; break; case CPIA_COMMAND_GetPnPID: sd->params.pnpID.vendor = gspca_dev->usb_buf[0] | (gspca_dev->usb_buf[1] << 8); sd->params.pnpID.product = gspca_dev->usb_buf[2] | (gspca_dev->usb_buf[3] << 8); sd->params.pnpID.deviceRevision = gspca_dev->usb_buf[4] | (gspca_dev->usb_buf[5] << 8); break; case CPIA_COMMAND_GetCameraStatus: sd->params.status.systemState = gspca_dev->usb_buf[0]; sd->params.status.grabState = gspca_dev->usb_buf[1]; sd->params.status.streamState = gspca_dev->usb_buf[2]; sd->params.status.fatalError = gspca_dev->usb_buf[3]; sd->params.status.cmdError = gspca_dev->usb_buf[4]; sd->params.status.debugFlags = gspca_dev->usb_buf[5]; sd->params.status.vpStatus = gspca_dev->usb_buf[6]; sd->params.status.errorCode = gspca_dev->usb_buf[7]; break; case CPIA_COMMAND_GetVPVersion: sd->params.vpVersion.vpVersion = gspca_dev->usb_buf[0]; sd->params.vpVersion.vpRevision = gspca_dev->usb_buf[1]; sd->params.vpVersion.cameraHeadID = gspca_dev->usb_buf[2] | (gspca_dev->usb_buf[3] << 8); break; case CPIA_COMMAND_GetColourParams: sd->params.colourParams.brightness = gspca_dev->usb_buf[0]; sd->params.colourParams.contrast = gspca_dev->usb_buf[1]; sd->params.colourParams.saturation = gspca_dev->usb_buf[2]; break; case CPIA_COMMAND_GetColourBalance: sd->params.colourBalance.redGain = gspca_dev->usb_buf[0]; sd->params.colourBalance.greenGain = gspca_dev->usb_buf[1]; sd->params.colourBalance.blueGain = gspca_dev->usb_buf[2]; break; case CPIA_COMMAND_GetExposure: sd->params.exposure.gain = gspca_dev->usb_buf[0]; sd->params.exposure.fineExp = gspca_dev->usb_buf[1]; sd->params.exposure.coarseExpLo = gspca_dev->usb_buf[2]; sd->params.exposure.coarseExpHi = gspca_dev->usb_buf[3]; sd->params.exposure.redComp = gspca_dev->usb_buf[4]; sd->params.exposure.green1Comp = gspca_dev->usb_buf[5]; sd->params.exposure.green2Comp = gspca_dev->usb_buf[6]; sd->params.exposure.blueComp = gspca_dev->usb_buf[7]; break; case CPIA_COMMAND_ReadMCPorts: /* test button press */ a = ((gspca_dev->usb_buf[1] & 0x02) == 0); if (a != sd->params.qx3.button) { #if IS_ENABLED(CONFIG_INPUT) input_report_key(gspca_dev->input_dev, KEY_CAMERA, a); input_sync(gspca_dev->input_dev); #endif sd->params.qx3.button = a; } if (sd->params.qx3.button) { /* button pressed - unlock the latch */ ret = do_command(gspca_dev, CPIA_COMMAND_WriteMCPort, 3, 0xdf, 0xdf, 0); if (ret) return ret; ret = do_command(gspca_dev, CPIA_COMMAND_WriteMCPort, 3, 0xff, 0xff, 0); if (ret) return ret; } /* test 
whether microscope is cradled */ sd->params.qx3.cradled = ((gspca_dev->usb_buf[2] & 0x40) == 0); break; } return 0; } /* send a command to the camera with an additional data transaction */ static int do_command_extended(struct gspca_dev *gspca_dev, u16 command, u8 a, u8 b, u8 c, u8 d, u8 e, u8 f, u8 g, u8 h, u8 i, u8 j, u8 k, u8 l) { u8 cmd[8]; cmd[0] = command >> 8; cmd[1] = command & 0xff; cmd[2] = a; cmd[3] = b; cmd[4] = c; cmd[5] = d; cmd[6] = 8; cmd[7] = 0; gspca_dev->usb_buf[0] = e; gspca_dev->usb_buf[1] = f; gspca_dev->usb_buf[2] = g; gspca_dev->usb_buf[3] = h; gspca_dev->usb_buf[4] = i; gspca_dev->usb_buf[5] = j; gspca_dev->usb_buf[6] = k; gspca_dev->usb_buf[7] = l; return cpia_usb_transferCmd(gspca_dev, cmd); } /* find_over_exposure * Finds a suitable value of OverExposure for use with SetFlickerCtrl * Some calculation is required because this value changes with the brightness * set with SetColourParameters * * Parameters: Brightness - last brightness value set with SetColourParameters * * Returns: OverExposure value to use with SetFlickerCtrl */ #define FLICKER_MAX_EXPOSURE 250 #define FLICKER_ALLOWABLE_OVER_EXPOSURE 146 #define FLICKER_BRIGHTNESS_CONSTANT 59 static int find_over_exposure(int brightness) { int MaxAllowableOverExposure, OverExposure; MaxAllowableOverExposure = FLICKER_MAX_EXPOSURE - brightness - FLICKER_BRIGHTNESS_CONSTANT; OverExposure = min(MaxAllowableOverExposure, FLICKER_ALLOWABLE_OVER_EXPOSURE); return OverExposure; } #undef FLICKER_MAX_EXPOSURE #undef FLICKER_ALLOWABLE_OVER_EXPOSURE #undef FLICKER_BRIGHTNESS_CONSTANT /* initialise cam_data structure */ static void reset_camera_params(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; struct cam_params *params = &sd->params; /* The following parameter values are the defaults from * "Software Developer's Guide for CPiA Cameras". Any changes * to the defaults are noted in comments. */ params->colourParams.brightness = BRIGHTNESS_DEF; params->colourParams.contrast = CONTRAST_DEF; params->colourParams.saturation = SATURATION_DEF; params->exposure.gainMode = 4; params->exposure.expMode = 2; /* AEC */ params->exposure.compMode = 1; params->exposure.centreWeight = 1; params->exposure.gain = 0; params->exposure.fineExp = 0; params->exposure.coarseExpLo = 185; params->exposure.coarseExpHi = 0; params->exposure.redComp = COMP_RED; params->exposure.green1Comp = COMP_GREEN1; params->exposure.green2Comp = COMP_GREEN2; params->exposure.blueComp = COMP_BLUE; params->colourBalance.balanceMode = 2; /* ACB */ params->colourBalance.redGain = 32; params->colourBalance.greenGain = 6; params->colourBalance.blueGain = 92; params->apcor.gain1 = 0x18; params->apcor.gain2 = 0x16; params->apcor.gain4 = 0x24; params->apcor.gain8 = 0x34; params->vlOffset.gain1 = 20; params->vlOffset.gain2 = 24; params->vlOffset.gain4 = 26; params->vlOffset.gain8 = 26; params->compressionParams.hysteresis = 3; params->compressionParams.threshMax = 11; params->compressionParams.smallStep = 1; params->compressionParams.largeStep = 3; params->compressionParams.decimationHysteresis = 2; params->compressionParams.frDiffStepThresh = 5; params->compressionParams.qDiffStepThresh = 3; params->compressionParams.decimationThreshMod = 2; /* End of default values from Software Developer's Guide */ /* Set Sensor FPS to 15fps. This seems better than 30fps * for indoor lighting. 
*/ params->sensorFps.divisor = 1; params->sensorFps.baserate = 1; params->flickerControl.flickerMode = 0; params->flickerControl.disabled = 1; params->flickerControl.coarseJump = flicker_jumps[sd->mainsFreq] [params->sensorFps.baserate] [params->sensorFps.divisor]; params->flickerControl.allowableOverExposure = find_over_exposure(params->colourParams.brightness); params->yuvThreshold.yThreshold = 6; /* From windows driver */ params->yuvThreshold.uvThreshold = 6; /* From windows driver */ params->format.subSample = SUBSAMPLE_420; params->format.yuvOrder = YUVORDER_YUYV; params->compression.mode = CPIA_COMPRESSION_AUTO; params->compression.decimation = NO_DECIMATION; params->compressionTarget.frTargeting = COMP_TARGET_DEF; params->compressionTarget.targetFR = 15; /* From windows driver */ params->compressionTarget.targetQ = 5; /* From windows driver */ params->qx3.qx3_detected = 0; params->qx3.toplight = 0; params->qx3.bottomlight = 0; params->qx3.button = 0; params->qx3.cradled = 0; } static void printstatus(struct gspca_dev *gspca_dev, struct cam_params *params) { gspca_dbg(gspca_dev, D_PROBE, "status: %02x %02x %02x %02x %02x %02x %02x %02x\n", params->status.systemState, params->status.grabState, params->status.streamState, params->status.fatalError, params->status.cmdError, params->status.debugFlags, params->status.vpStatus, params->status.errorCode); } static int goto_low_power(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; int ret; ret = do_command(gspca_dev, CPIA_COMMAND_GotoLoPower, 0, 0, 0, 0); if (ret) return ret; ret = do_command(gspca_dev, CPIA_COMMAND_GetCameraStatus, 0, 0, 0, 0); if (ret) return ret; if (sd->params.status.systemState != LO_POWER_STATE) { if (sd->params.status.systemState != WARM_BOOT_STATE) { gspca_err(gspca_dev, "unexpected state after lo power cmd: %02x\n", sd->params.status.systemState); printstatus(gspca_dev, &sd->params); } return -EIO; } gspca_dbg(gspca_dev, D_CONF, "camera now in LOW power state\n"); return 0; } static int goto_high_power(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; int ret; ret = do_command(gspca_dev, CPIA_COMMAND_GotoHiPower, 0, 0, 0, 0); if (ret) return ret; msleep_interruptible(40); /* windows driver does it too */ if (signal_pending(current)) return -EINTR; ret = do_command(gspca_dev, CPIA_COMMAND_GetCameraStatus, 0, 0, 0, 0); if (ret) return ret; if (sd->params.status.systemState != HI_POWER_STATE) { gspca_err(gspca_dev, "unexpected state after hi power cmd: %02x\n", sd->params.status.systemState); printstatus(gspca_dev, &sd->params); return -EIO; } gspca_dbg(gspca_dev, D_CONF, "camera now in HIGH power state\n"); return 0; } static int get_version_information(struct gspca_dev *gspca_dev) { int ret; /* GetCPIAVersion */ ret = do_command(gspca_dev, CPIA_COMMAND_GetCPIAVersion, 0, 0, 0, 0); if (ret) return ret; /* GetPnPID */ return do_command(gspca_dev, CPIA_COMMAND_GetPnPID, 0, 0, 0, 0); } static int save_camera_state(struct gspca_dev *gspca_dev) { int ret; ret = do_command(gspca_dev, CPIA_COMMAND_GetColourBalance, 0, 0, 0, 0); if (ret) return ret; return do_command(gspca_dev, CPIA_COMMAND_GetExposure, 0, 0, 0, 0); } static int command_setformat(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; int ret; ret = do_command(gspca_dev, CPIA_COMMAND_SetFormat, sd->params.format.videoSize, sd->params.format.subSample, sd->params.format.yuvOrder, 0); if (ret) return ret; return do_command(gspca_dev, CPIA_COMMAND_SetROI, sd->params.roi.colStart, 
sd->params.roi.colEnd, sd->params.roi.rowStart, sd->params.roi.rowEnd); } static int command_setcolourparams(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; return do_command(gspca_dev, CPIA_COMMAND_SetColourParams, sd->params.colourParams.brightness, sd->params.colourParams.contrast, sd->params.colourParams.saturation, 0); } static int command_setapcor(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; return do_command(gspca_dev, CPIA_COMMAND_SetApcor, sd->params.apcor.gain1, sd->params.apcor.gain2, sd->params.apcor.gain4, sd->params.apcor.gain8); } static int command_setvloffset(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; return do_command(gspca_dev, CPIA_COMMAND_SetVLOffset, sd->params.vlOffset.gain1, sd->params.vlOffset.gain2, sd->params.vlOffset.gain4, sd->params.vlOffset.gain8); } static int command_setexposure(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; int ret; ret = do_command_extended(gspca_dev, CPIA_COMMAND_SetExposure, sd->params.exposure.gainMode, 1, sd->params.exposure.compMode, sd->params.exposure.centreWeight, sd->params.exposure.gain, sd->params.exposure.fineExp, sd->params.exposure.coarseExpLo, sd->params.exposure.coarseExpHi, sd->params.exposure.redComp, sd->params.exposure.green1Comp, sd->params.exposure.green2Comp, sd->params.exposure.blueComp); if (ret) return ret; if (sd->params.exposure.expMode != 1) { ret = do_command_extended(gspca_dev, CPIA_COMMAND_SetExposure, 0, sd->params.exposure.expMode, 0, 0, sd->params.exposure.gain, sd->params.exposure.fineExp, sd->params.exposure.coarseExpLo, sd->params.exposure.coarseExpHi, 0, 0, 0, 0); } return ret; } static int command_setcolourbalance(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; if (sd->params.colourBalance.balanceMode == 1) { int ret; ret = do_command(gspca_dev, CPIA_COMMAND_SetColourBalance, 1, sd->params.colourBalance.redGain, sd->params.colourBalance.greenGain, sd->params.colourBalance.blueGain); if (ret) return ret; return do_command(gspca_dev, CPIA_COMMAND_SetColourBalance, 3, 0, 0, 0); } if (sd->params.colourBalance.balanceMode == 2) { return do_command(gspca_dev, CPIA_COMMAND_SetColourBalance, 2, 0, 0, 0); } if (sd->params.colourBalance.balanceMode == 3) { return do_command(gspca_dev, CPIA_COMMAND_SetColourBalance, 3, 0, 0, 0); } return -EINVAL; } static int command_setcompressiontarget(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; return do_command(gspca_dev, CPIA_COMMAND_SetCompressionTarget, sd->params.compressionTarget.frTargeting, sd->params.compressionTarget.targetFR, sd->params.compressionTarget.targetQ, 0); } static int command_setyuvtresh(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; return do_command(gspca_dev, CPIA_COMMAND_SetYUVThresh, sd->params.yuvThreshold.yThreshold, sd->params.yuvThreshold.uvThreshold, 0, 0); } static int command_setcompressionparams(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; return do_command_extended(gspca_dev, CPIA_COMMAND_SetCompressionParams, 0, 0, 0, 0, sd->params.compressionParams.hysteresis, sd->params.compressionParams.threshMax, sd->params.compressionParams.smallStep, sd->params.compressionParams.largeStep, sd->params.compressionParams.decimationHysteresis, sd->params.compressionParams.frDiffStepThresh, sd->params.compressionParams.qDiffStepThresh, sd->params.compressionParams.decimationThreshMod); } static int command_setcompression(struct gspca_dev 
*gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; return do_command(gspca_dev, CPIA_COMMAND_SetCompression, sd->params.compression.mode, sd->params.compression.decimation, 0, 0); } static int command_setsensorfps(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; return do_command(gspca_dev, CPIA_COMMAND_SetSensorFPS, sd->params.sensorFps.divisor, sd->params.sensorFps.baserate, 0, 0); } static int command_setflickerctrl(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; return do_command(gspca_dev, CPIA_COMMAND_SetFlickerCtrl, sd->params.flickerControl.flickerMode, sd->params.flickerControl.coarseJump, sd->params.flickerControl.allowableOverExposure, 0); } static int command_setecptiming(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; return do_command(gspca_dev, CPIA_COMMAND_SetECPTiming, sd->params.ecpTiming, 0, 0, 0); } static int command_pause(struct gspca_dev *gspca_dev) { return do_command(gspca_dev, CPIA_COMMAND_EndStreamCap, 0, 0, 0, 0); } static int command_resume(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; return do_command(gspca_dev, CPIA_COMMAND_InitStreamCap, 0, sd->params.streamStartLine, 0, 0); } static int command_setlights(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; int ret, p1, p2; p1 = (sd->params.qx3.bottomlight == 0) << 1; p2 = (sd->params.qx3.toplight == 0) << 3; ret = do_command(gspca_dev, CPIA_COMMAND_WriteVCReg, 0x90, 0x8f, 0x50, 0); if (ret) return ret; return do_command(gspca_dev, CPIA_COMMAND_WriteMCPort, 2, 0, p1 | p2 | 0xe0, 0); } static int set_flicker(struct gspca_dev *gspca_dev, int on, int apply) { /* Everything in here is from the Windows driver */ /* define for compgain calculation */ #if 0 #define COMPGAIN(base, curexp, newexp) \ (u8) ((((float) base - 128.0) * ((float) curexp / (float) newexp)) + 128.5) #define EXP_FROM_COMP(basecomp, curcomp, curexp) \ (u16)((float)curexp * (float)(u8)(curcomp + 128) / \ (float)(u8)(basecomp - 128)) #else /* equivalent functions without floating point math */ #define COMPGAIN(base, curexp, newexp) \ (u8)(128 + (((u32)(2*(base-128)*curexp + newexp)) / (2 * newexp))) #define EXP_FROM_COMP(basecomp, curcomp, curexp) \ (u16)(((u32)(curexp * (u8)(curcomp + 128)) / (u8)(basecomp - 128))) #endif struct sd *sd = (struct sd *) gspca_dev; int currentexp = sd->params.exposure.coarseExpLo + sd->params.exposure.coarseExpHi * 256; int ret, startexp; if (on) { int cj = sd->params.flickerControl.coarseJump; sd->params.flickerControl.flickerMode = 1; sd->params.flickerControl.disabled = 0; if (sd->params.exposure.expMode != 2) { sd->params.exposure.expMode = 2; sd->exposure_status = EXPOSURE_NORMAL; } if (sd->params.exposure.gain >= BITS_PER_TYPE(currentexp)) return -EINVAL; currentexp = currentexp << sd->params.exposure.gain; sd->params.exposure.gain = 0; /* round down current exposure to nearest value */ startexp = (currentexp + ROUND_UP_EXP_FOR_FLICKER) / cj; if (startexp < 1) startexp = 1; startexp = (startexp * cj) - 1; if (FIRMWARE_VERSION(1, 2)) while (startexp > MAX_EXP_102) startexp -= cj; else while (startexp > MAX_EXP) startexp -= cj; sd->params.exposure.coarseExpLo = startexp & 0xff; sd->params.exposure.coarseExpHi = startexp >> 8; if (currentexp > startexp) { if (currentexp > (2 * startexp)) currentexp = 2 * startexp; sd->params.exposure.redComp = COMPGAIN(COMP_RED, currentexp, startexp); sd->params.exposure.green1Comp = COMPGAIN(COMP_GREEN1, currentexp, startexp); sd->params.exposure.green2Comp = 
COMPGAIN(COMP_GREEN2, currentexp, startexp); sd->params.exposure.blueComp = COMPGAIN(COMP_BLUE, currentexp, startexp); } else { sd->params.exposure.redComp = COMP_RED; sd->params.exposure.green1Comp = COMP_GREEN1; sd->params.exposure.green2Comp = COMP_GREEN2; sd->params.exposure.blueComp = COMP_BLUE; } if (FIRMWARE_VERSION(1, 2)) sd->params.exposure.compMode = 0; else sd->params.exposure.compMode = 1; sd->params.apcor.gain1 = 0x18; sd->params.apcor.gain2 = 0x18; sd->params.apcor.gain4 = 0x16; sd->params.apcor.gain8 = 0x14; } else { sd->params.flickerControl.flickerMode = 0; sd->params.flickerControl.disabled = 1; /* Average equivalent coarse for each comp channel */ startexp = EXP_FROM_COMP(COMP_RED, sd->params.exposure.redComp, currentexp); startexp += EXP_FROM_COMP(COMP_GREEN1, sd->params.exposure.green1Comp, currentexp); startexp += EXP_FROM_COMP(COMP_GREEN2, sd->params.exposure.green2Comp, currentexp); startexp += EXP_FROM_COMP(COMP_BLUE, sd->params.exposure.blueComp, currentexp); startexp = startexp >> 2; while (startexp > MAX_EXP && sd->params.exposure.gain < sd->params.exposure.gainMode - 1) { startexp = startexp >> 1; ++sd->params.exposure.gain; } if (FIRMWARE_VERSION(1, 2) && startexp > MAX_EXP_102) startexp = MAX_EXP_102; if (startexp > MAX_EXP) startexp = MAX_EXP; sd->params.exposure.coarseExpLo = startexp & 0xff; sd->params.exposure.coarseExpHi = startexp >> 8; sd->params.exposure.redComp = COMP_RED; sd->params.exposure.green1Comp = COMP_GREEN1; sd->params.exposure.green2Comp = COMP_GREEN2; sd->params.exposure.blueComp = COMP_BLUE; sd->params.exposure.compMode = 1; sd->params.apcor.gain1 = 0x18; sd->params.apcor.gain2 = 0x16; sd->params.apcor.gain4 = 0x24; sd->params.apcor.gain8 = 0x34; } sd->params.vlOffset.gain1 = 20; sd->params.vlOffset.gain2 = 24; sd->params.vlOffset.gain4 = 26; sd->params.vlOffset.gain8 = 26; if (apply) { ret = command_setexposure(gspca_dev); if (ret) return ret; ret = command_setapcor(gspca_dev); if (ret) return ret; ret = command_setvloffset(gspca_dev); if (ret) return ret; ret = command_setflickerctrl(gspca_dev); if (ret) return ret; } return 0; #undef EXP_FROM_COMP #undef COMPGAIN } /* monitor the exposure and adjust the sensor frame rate if needed */ static void monitor_exposure(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; u8 exp_acc, bcomp, cmd[8]; int ret, light_exp, dark_exp, very_dark_exp; int old_exposure, new_exposure, framerate; int setfps = 0, setexp = 0, setflicker = 0; /* get necessary stats and register settings from camera */ /* do_command can't handle this, so do it ourselves */ cmd[0] = CPIA_COMMAND_ReadVPRegs >> 8; cmd[1] = CPIA_COMMAND_ReadVPRegs & 0xff; cmd[2] = 30; cmd[3] = 4; cmd[4] = 9; cmd[5] = 8; cmd[6] = 8; cmd[7] = 0; ret = cpia_usb_transferCmd(gspca_dev, cmd); if (ret) { pr_err("ReadVPRegs(30,4,9,8) - failed: %d\n", ret); return; } exp_acc = gspca_dev->usb_buf[0]; bcomp = gspca_dev->usb_buf[1]; light_exp = sd->params.colourParams.brightness + TC - 50 + EXP_ACC_LIGHT; if (light_exp > 255) light_exp = 255; dark_exp = sd->params.colourParams.brightness + TC - 50 - EXP_ACC_DARK; if (dark_exp < 0) dark_exp = 0; very_dark_exp = dark_exp / 2; old_exposure = sd->params.exposure.coarseExpHi * 256 + sd->params.exposure.coarseExpLo; if (!sd->params.flickerControl.disabled) { /* Flicker control on */ int max_comp = FIRMWARE_VERSION(1, 2) ? 
MAX_COMP : HIGH_COMP_102; bcomp += 128; /* decode */ if (bcomp >= max_comp && exp_acc < dark_exp) { /* dark */ if (exp_acc < very_dark_exp) { /* very dark */ if (sd->exposure_status == EXPOSURE_VERY_DARK) ++sd->exposure_count; else { sd->exposure_status = EXPOSURE_VERY_DARK; sd->exposure_count = 1; } } else { /* just dark */ if (sd->exposure_status == EXPOSURE_DARK) ++sd->exposure_count; else { sd->exposure_status = EXPOSURE_DARK; sd->exposure_count = 1; } } } else if (old_exposure <= LOW_EXP || exp_acc > light_exp) { /* light */ if (old_exposure <= VERY_LOW_EXP) { /* very light */ if (sd->exposure_status == EXPOSURE_VERY_LIGHT) ++sd->exposure_count; else { sd->exposure_status = EXPOSURE_VERY_LIGHT; sd->exposure_count = 1; } } else { /* just light */ if (sd->exposure_status == EXPOSURE_LIGHT) ++sd->exposure_count; else { sd->exposure_status = EXPOSURE_LIGHT; sd->exposure_count = 1; } } } else { /* not dark or light */ sd->exposure_status = EXPOSURE_NORMAL; } } else { /* Flicker control off */ if (old_exposure >= MAX_EXP && exp_acc < dark_exp) { /* dark */ if (exp_acc < very_dark_exp) { /* very dark */ if (sd->exposure_status == EXPOSURE_VERY_DARK) ++sd->exposure_count; else { sd->exposure_status = EXPOSURE_VERY_DARK; sd->exposure_count = 1; } } else { /* just dark */ if (sd->exposure_status == EXPOSURE_DARK) ++sd->exposure_count; else { sd->exposure_status = EXPOSURE_DARK; sd->exposure_count = 1; } } } else if (old_exposure <= LOW_EXP || exp_acc > light_exp) { /* light */ if (old_exposure <= VERY_LOW_EXP) { /* very light */ if (sd->exposure_status == EXPOSURE_VERY_LIGHT) ++sd->exposure_count; else { sd->exposure_status = EXPOSURE_VERY_LIGHT; sd->exposure_count = 1; } } else { /* just light */ if (sd->exposure_status == EXPOSURE_LIGHT) ++sd->exposure_count; else { sd->exposure_status = EXPOSURE_LIGHT; sd->exposure_count = 1; } } } else { /* not dark or light */ sd->exposure_status = EXPOSURE_NORMAL; } } framerate = atomic_read(&sd->fps); if (framerate > 30 || framerate < 1) framerate = 1; if (!sd->params.flickerControl.disabled) { /* Flicker control on */ if ((sd->exposure_status == EXPOSURE_VERY_DARK || sd->exposure_status == EXPOSURE_DARK) && sd->exposure_count >= DARK_TIME * framerate && sd->params.sensorFps.divisor < 2) { /* dark for too long */ ++sd->params.sensorFps.divisor; setfps = 1; sd->params.flickerControl.coarseJump = flicker_jumps[sd->mainsFreq] [sd->params.sensorFps.baserate] [sd->params.sensorFps.divisor]; setflicker = 1; new_exposure = sd->params.flickerControl.coarseJump-1; while (new_exposure < old_exposure / 2) new_exposure += sd->params.flickerControl.coarseJump; sd->params.exposure.coarseExpLo = new_exposure & 0xff; sd->params.exposure.coarseExpHi = new_exposure >> 8; setexp = 1; sd->exposure_status = EXPOSURE_NORMAL; gspca_dbg(gspca_dev, D_CONF, "Automatically decreasing sensor_fps\n"); } else if ((sd->exposure_status == EXPOSURE_VERY_LIGHT || sd->exposure_status == EXPOSURE_LIGHT) && sd->exposure_count >= LIGHT_TIME * framerate && sd->params.sensorFps.divisor > 0) { /* light for too long */ int max_exp = FIRMWARE_VERSION(1, 2) ? 
MAX_EXP_102 : MAX_EXP; --sd->params.sensorFps.divisor; setfps = 1; sd->params.flickerControl.coarseJump = flicker_jumps[sd->mainsFreq] [sd->params.sensorFps.baserate] [sd->params.sensorFps.divisor]; setflicker = 1; new_exposure = sd->params.flickerControl.coarseJump-1; while (new_exposure < 2 * old_exposure && new_exposure + sd->params.flickerControl.coarseJump < max_exp) new_exposure += sd->params.flickerControl.coarseJump; sd->params.exposure.coarseExpLo = new_exposure & 0xff; sd->params.exposure.coarseExpHi = new_exposure >> 8; setexp = 1; sd->exposure_status = EXPOSURE_NORMAL; gspca_dbg(gspca_dev, D_CONF, "Automatically increasing sensor_fps\n"); } } else { /* Flicker control off */ if ((sd->exposure_status == EXPOSURE_VERY_DARK || sd->exposure_status == EXPOSURE_DARK) && sd->exposure_count >= DARK_TIME * framerate && sd->params.sensorFps.divisor < 2) { /* dark for too long */ ++sd->params.sensorFps.divisor; setfps = 1; if (sd->params.exposure.gain > 0) { --sd->params.exposure.gain; setexp = 1; } sd->exposure_status = EXPOSURE_NORMAL; gspca_dbg(gspca_dev, D_CONF, "Automatically decreasing sensor_fps\n"); } else if ((sd->exposure_status == EXPOSURE_VERY_LIGHT || sd->exposure_status == EXPOSURE_LIGHT) && sd->exposure_count >= LIGHT_TIME * framerate && sd->params.sensorFps.divisor > 0) { /* light for too long */ --sd->params.sensorFps.divisor; setfps = 1; if (sd->params.exposure.gain < sd->params.exposure.gainMode - 1) { ++sd->params.exposure.gain; setexp = 1; } sd->exposure_status = EXPOSURE_NORMAL; gspca_dbg(gspca_dev, D_CONF, "Automatically increasing sensor_fps\n"); } } if (setexp) command_setexposure(gspca_dev); if (setfps) command_setsensorfps(gspca_dev); if (setflicker) command_setflickerctrl(gspca_dev); } /*-----------------------------------------------------------------*/ /* If flicker is switched off, this function switches it back on. It checks, however, that conditions are suitable before restarting it. This should only be called for firmware version 1.2. It also adjusts the colour balance when an exposure step is detected, as long as flicker is running */ static void restart_flicker(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; int cam_exposure, old_exp; if (!FIRMWARE_VERSION(1, 2)) return; cam_exposure = atomic_read(&sd->cam_exposure); if (sd->params.flickerControl.flickerMode == 0 || cam_exposure == 0) return; old_exp = sd->params.exposure.coarseExpLo + sd->params.exposure.coarseExpHi*256; /* see how far away camera exposure is from a valid flicker exposure value */ cam_exposure %= sd->params.flickerControl.coarseJump; if (!sd->params.flickerControl.disabled && cam_exposure <= sd->params.flickerControl.coarseJump - 3) { /* Flicker control auto-disabled */ sd->params.flickerControl.disabled = 1; } if (sd->params.flickerControl.disabled && old_exp > sd->params.flickerControl.coarseJump + ROUND_UP_EXP_FOR_FLICKER) { /* exposure is now high enough to switch flicker control back on */ set_flicker(gspca_dev, 1, 1); } } /* this function is called at probe time */ static int sd_config(struct gspca_dev *gspca_dev, const struct usb_device_id *id) { struct sd *sd = (struct sd *) gspca_dev; struct cam *cam; sd->mainsFreq = FREQ_DEF == V4L2_CID_POWER_LINE_FREQUENCY_60HZ; reset_camera_params(gspca_dev); gspca_dbg(gspca_dev, D_PROBE, "cpia CPiA camera detected (vid/pid 0x%04X:0x%04X)\n", id->idVendor, id->idProduct); cam = &gspca_dev->cam; cam->cam_mode = mode; cam->nmodes = ARRAY_SIZE(mode); goto_low_power(gspca_dev); /* Check the firmware version.
*/ sd->params.version.firmwareVersion = 0; get_version_information(gspca_dev); if (sd->params.version.firmwareVersion != 1) { gspca_err(gspca_dev, "only firmware version 1 is supported (got: %d)\n", sd->params.version.firmwareVersion); return -ENODEV; } /* A bug in firmware 1-02 limits gainMode to 2 */ if (sd->params.version.firmwareRevision <= 2 && sd->params.exposure.gainMode > 2) { sd->params.exposure.gainMode = 2; } /* set QX3 detected flag */ sd->params.qx3.qx3_detected = (sd->params.pnpID.vendor == 0x0813 && sd->params.pnpID.product == 0x0001); return 0; } /* -- start the camera -- */ static int sd_start(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; int priv, ret; /* Start the camera in low power mode */ if (goto_low_power(gspca_dev)) { if (sd->params.status.systemState != WARM_BOOT_STATE) { gspca_err(gspca_dev, "unexpected systemstate: %02x\n", sd->params.status.systemState); printstatus(gspca_dev, &sd->params); return -ENODEV; } /* FIXME: this is just dirty trial and error */ ret = goto_high_power(gspca_dev); if (ret) return ret; ret = do_command(gspca_dev, CPIA_COMMAND_DiscardFrame, 0, 0, 0, 0); if (ret) return ret; ret = goto_low_power(gspca_dev); if (ret) return ret; } /* procedure described in developer's guide p3-28 */ /* Check the firmware version. */ sd->params.version.firmwareVersion = 0; get_version_information(gspca_dev); /* The fatal error checking should be done after * the camera powers up (developer's guide p 3-38) */ /* Set streamState before transition to high power to avoid bug * in firmware 1-02 */ ret = do_command(gspca_dev, CPIA_COMMAND_ModifyCameraStatus, STREAMSTATE, 0, STREAM_NOT_READY, 0); if (ret) return ret; /* GotoHiPower */ ret = goto_high_power(gspca_dev); if (ret) return ret; /* Check the camera status */ ret = do_command(gspca_dev, CPIA_COMMAND_GetCameraStatus, 0, 0, 0, 0); if (ret) return ret; if (sd->params.status.fatalError) { gspca_err(gspca_dev, "fatal_error: %04x, vp_status: %04x\n", sd->params.status.fatalError, sd->params.status.vpStatus); return -EIO; } /* VPVersion can't be retrieved before the camera is in HiPower, * so get it here instead of in get_version_information. 
*/ ret = do_command(gspca_dev, CPIA_COMMAND_GetVPVersion, 0, 0, 0, 0); if (ret) return ret; /* Determine video mode settings */ sd->params.streamStartLine = 120; priv = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv; if (priv & 0x01) { /* crop */ sd->params.roi.colStart = 2; sd->params.roi.rowStart = 6; } else { sd->params.roi.colStart = 0; sd->params.roi.rowStart = 0; } if (priv & 0x02) { /* quarter */ sd->params.format.videoSize = VIDEOSIZE_QCIF; sd->params.roi.colStart /= 2; sd->params.roi.rowStart /= 2; sd->params.streamStartLine /= 2; } else sd->params.format.videoSize = VIDEOSIZE_CIF; sd->params.roi.colEnd = sd->params.roi.colStart + (gspca_dev->pixfmt.width >> 3); sd->params.roi.rowEnd = sd->params.roi.rowStart + (gspca_dev->pixfmt.height >> 2); /* And now set the camera to a known state */ ret = do_command(gspca_dev, CPIA_COMMAND_SetGrabMode, CPIA_GRAB_CONTINEOUS, 0, 0, 0); if (ret) return ret; /* We start with compression disabled, as we need one uncompressed frame to handle later compressed frames */ ret = do_command(gspca_dev, CPIA_COMMAND_SetCompression, CPIA_COMPRESSION_NONE, NO_DECIMATION, 0, 0); if (ret) return ret; ret = command_setcompressiontarget(gspca_dev); if (ret) return ret; ret = command_setcolourparams(gspca_dev); if (ret) return ret; ret = command_setformat(gspca_dev); if (ret) return ret; ret = command_setyuvtresh(gspca_dev); if (ret) return ret; ret = command_setecptiming(gspca_dev); if (ret) return ret; ret = command_setcompressionparams(gspca_dev); if (ret) return ret; ret = command_setexposure(gspca_dev); if (ret) return ret; ret = command_setcolourbalance(gspca_dev); if (ret) return ret; ret = command_setsensorfps(gspca_dev); if (ret) return ret; ret = command_setapcor(gspca_dev); if (ret) return ret; ret = command_setflickerctrl(gspca_dev); if (ret) return ret; ret = command_setvloffset(gspca_dev); if (ret) return ret; /* Start stream */ ret = command_resume(gspca_dev); if (ret) return ret; /* Wait 6 frames before turning compression on for the sensor to get all settings and AEC/ACB to settle */ sd->first_frame = 6; sd->exposure_status = EXPOSURE_NORMAL; sd->exposure_count = 0; atomic_set(&sd->cam_exposure, 0); atomic_set(&sd->fps, 0); return 0; } static void sd_stopN(struct gspca_dev *gspca_dev) { struct sd *sd __maybe_unused = (struct sd *) gspca_dev; command_pause(gspca_dev); /* save camera state for later open (developers guide ch 3.5.3) */ save_camera_state(gspca_dev); /* GotoLoPower */ goto_low_power(gspca_dev); /* Update the camera status */ do_command(gspca_dev, CPIA_COMMAND_GetCameraStatus, 0, 0, 0, 0); #if IS_ENABLED(CONFIG_INPUT) /* If the last button state is pressed, release it now! */ if (sd->params.qx3.button) { /* The camera latch will hold the pressed state until we reset the latch, so we do not reset sd->params.qx3.button now, to avoid a false keypress being reported the next sd_start */ input_report_key(gspca_dev->input_dev, KEY_CAMERA, 0); input_sync(gspca_dev->input_dev); } #endif } /* this function is called at probe and resume time */ static int sd_init(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; int ret; /* Start / Stop the camera to make sure we are talking to a supported camera, and to get some information from it to print. 
*/ ret = sd_start(gspca_dev); if (ret) return ret; /* Ensure the QX3 illuminators' states are restored upon resume, or disable the illuminator controls, if this isn't a QX3 */ if (sd->params.qx3.qx3_detected) command_setlights(gspca_dev); sd_stopN(gspca_dev); gspca_dbg(gspca_dev, D_PROBE, "CPIA Version: %d.%02d (%d.%d)\n", sd->params.version.firmwareVersion, sd->params.version.firmwareRevision, sd->params.version.vcVersion, sd->params.version.vcRevision); gspca_dbg(gspca_dev, D_PROBE, "CPIA PnP-ID: %04x:%04x:%04x", sd->params.pnpID.vendor, sd->params.pnpID.product, sd->params.pnpID.deviceRevision); gspca_dbg(gspca_dev, D_PROBE, "VP-Version: %d.%d %04x", sd->params.vpVersion.vpVersion, sd->params.vpVersion.vpRevision, sd->params.vpVersion.cameraHeadID); return 0; } static void sd_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, int len) { struct sd *sd = (struct sd *) gspca_dev; /* Check for SOF */ if (len >= 64 && data[0] == MAGIC_0 && data[1] == MAGIC_1 && data[16] == sd->params.format.videoSize && data[17] == sd->params.format.subSample && data[18] == sd->params.format.yuvOrder && data[24] == sd->params.roi.colStart && data[25] == sd->params.roi.colEnd && data[26] == sd->params.roi.rowStart && data[27] == sd->params.roi.rowEnd) { u8 *image; atomic_set(&sd->cam_exposure, data[39] * 2); atomic_set(&sd->fps, data[41]); /* Check for proper EOF for last frame */ image = gspca_dev->image; if (image != NULL && gspca_dev->image_len > 4 && image[gspca_dev->image_len - 4] == 0xff && image[gspca_dev->image_len - 3] == 0xff && image[gspca_dev->image_len - 2] == 0xff && image[gspca_dev->image_len - 1] == 0xff) gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0); gspca_frame_add(gspca_dev, FIRST_PACKET, data, len); return; } gspca_frame_add(gspca_dev, INTER_PACKET, data, len); } static void sd_dq_callback(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; /* Set the normal compression settings once we have captured a few uncompressed frames (and AEC has hopefully settled) */ if (sd->first_frame) { sd->first_frame--; if (sd->first_frame == 0) command_setcompression(gspca_dev); } /* Switch flicker control back on if it got turned off */ restart_flicker(gspca_dev); /* If AEC is enabled, monitor the exposure and adjust the sensor frame rate if needed */ if (sd->params.exposure.expMode == 2) monitor_exposure(gspca_dev); /* Update our knowledge of the camera state */ do_command(gspca_dev, CPIA_COMMAND_GetExposure, 0, 0, 0, 0); do_command(gspca_dev, CPIA_COMMAND_ReadMCPorts, 0, 0, 0, 0); } static int sd_s_ctrl(struct v4l2_ctrl *ctrl) { struct gspca_dev *gspca_dev = container_of(ctrl->handler, struct gspca_dev, ctrl_handler); struct sd *sd = (struct sd *)gspca_dev; gspca_dev->usb_err = 0; if (!gspca_dev->streaming && ctrl->id != V4L2_CID_POWER_LINE_FREQUENCY) return 0; switch (ctrl->id) { case V4L2_CID_BRIGHTNESS: sd->params.colourParams.brightness = ctrl->val; sd->params.flickerControl.allowableOverExposure = find_over_exposure(sd->params.colourParams.brightness); gspca_dev->usb_err = command_setcolourparams(gspca_dev); if (!gspca_dev->usb_err) gspca_dev->usb_err = command_setflickerctrl(gspca_dev); break; case V4L2_CID_CONTRAST: sd->params.colourParams.contrast = ctrl->val; gspca_dev->usb_err = command_setcolourparams(gspca_dev); break; case V4L2_CID_SATURATION: sd->params.colourParams.saturation = ctrl->val; gspca_dev->usb_err = command_setcolourparams(gspca_dev); break; case V4L2_CID_POWER_LINE_FREQUENCY: sd->mainsFreq = ctrl->val == V4L2_CID_POWER_LINE_FREQUENCY_60HZ; 
sd->params.flickerControl.coarseJump = flicker_jumps[sd->mainsFreq] [sd->params.sensorFps.baserate] [sd->params.sensorFps.divisor]; gspca_dev->usb_err = set_flicker(gspca_dev, ctrl->val != V4L2_CID_POWER_LINE_FREQUENCY_DISABLED, gspca_dev->streaming); break; case V4L2_CID_ILLUMINATORS_1: sd->params.qx3.bottomlight = ctrl->val; gspca_dev->usb_err = command_setlights(gspca_dev); break; case V4L2_CID_ILLUMINATORS_2: sd->params.qx3.toplight = ctrl->val; gspca_dev->usb_err = command_setlights(gspca_dev); break; case CPIA1_CID_COMP_TARGET: sd->params.compressionTarget.frTargeting = ctrl->val; gspca_dev->usb_err = command_setcompressiontarget(gspca_dev); break; } return gspca_dev->usb_err; } static const struct v4l2_ctrl_ops sd_ctrl_ops = { .s_ctrl = sd_s_ctrl, }; static int sd_init_controls(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *)gspca_dev; struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler; static const char * const comp_target_menu[] = { "Quality", "Framerate", NULL }; static const struct v4l2_ctrl_config comp_target = { .ops = &sd_ctrl_ops, .id = CPIA1_CID_COMP_TARGET, .type = V4L2_CTRL_TYPE_MENU, .name = "Compression Target", .qmenu = comp_target_menu, .max = 1, .def = COMP_TARGET_DEF, }; gspca_dev->vdev.ctrl_handler = hdl; v4l2_ctrl_handler_init(hdl, 7); v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_BRIGHTNESS, 0, 100, 1, BRIGHTNESS_DEF); v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_CONTRAST, 0, 96, 8, CONTRAST_DEF); v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_SATURATION, 0, 100, 1, SATURATION_DEF); sd->freq = v4l2_ctrl_new_std_menu(hdl, &sd_ctrl_ops, V4L2_CID_POWER_LINE_FREQUENCY, V4L2_CID_POWER_LINE_FREQUENCY_60HZ, 0, FREQ_DEF); if (sd->params.qx3.qx3_detected) { v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_ILLUMINATORS_1, 0, 1, 1, ILLUMINATORS_1_DEF); v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_ILLUMINATORS_2, 0, 1, 1, ILLUMINATORS_2_DEF); } v4l2_ctrl_new_custom(hdl, &comp_target, NULL); if (hdl->error) { pr_err("Could not initialize controls\n"); return hdl->error; } return 0; } /* sub-driver description */ static const struct sd_desc sd_desc = { .name = MODULE_NAME, .config = sd_config, .init = sd_init, .init_controls = sd_init_controls, .start = sd_start, .stopN = sd_stopN, .dq_callback = sd_dq_callback, .pkt_scan = sd_pkt_scan, #if IS_ENABLED(CONFIG_INPUT) .other_input = 1, #endif }; /* -- module initialisation -- */ static const struct usb_device_id device_table[] = { {USB_DEVICE(0x0553, 0x0002)}, {USB_DEVICE(0x0813, 0x0001)}, {} }; MODULE_DEVICE_TABLE(usb, device_table); /* -- device connect -- */ static int sd_probe(struct usb_interface *intf, const struct usb_device_id *id) { return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd), THIS_MODULE); } static struct usb_driver sd_driver = { .name = MODULE_NAME, .id_table = device_table, .probe = sd_probe, .disconnect = gspca_disconnect, #ifdef CONFIG_PM .suspend = gspca_suspend, .resume = gspca_resume, .reset_resume = gspca_resume, #endif }; module_usb_driver(sd_driver); |
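For reference, the end-of-frame test that sd_pkt_scan() above applies before flushing a frame can be read in isolation: a CPiA frame is treated as complete once it ends in four 0xff bytes. A minimal, userspace-compilable sketch (the helper name is ours, not the driver's):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Mirrors the check in sd_pkt_scan(): a frame is complete when it is
 * longer than four bytes and ends in four 0xff bytes.
 */
static bool cpia_frame_complete(const uint8_t *image, size_t len)
{
	return len > 4 &&
	       image[len - 4] == 0xff && image[len - 3] == 0xff &&
	       image[len - 2] == 0xff && image[len - 1] == 0xff;
}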
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2012 Red Hat.  All rights reserved.
 */

#ifndef BTRFS_RCU_STRING_H
#define BTRFS_RCU_STRING_H

#include <linux/types.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/printk.h>

struct rcu_string {
	struct rcu_head rcu;
	char str[];
};

static inline struct rcu_string *rcu_string_strdup(const char *src, gfp_t mask)
{
	size_t len = strlen(src) + 1;
	struct rcu_string *ret = kzalloc(sizeof(struct rcu_string) +
					 (len * sizeof(char)), mask);
	if (!ret)
		return ret;
	/* Warn if the source got unexpectedly truncated. */
	if (WARN_ON(strscpy(ret->str, src, len) < 0)) {
		kfree(ret);
		return NULL;
	}
	return ret;
}

static inline void rcu_string_free(struct rcu_string *str)
{
	if (str)
		kfree_rcu(str, rcu);
}

#define printk_in_rcu(fmt, ...) do {	\
	rcu_read_lock();		\
	printk(fmt, __VA_ARGS__);	\
	rcu_read_unlock();		\
} while (0)

#define printk_ratelimited_in_rcu(fmt, ...) do {	\
	rcu_read_lock();				\
	printk_ratelimited(fmt, __VA_ARGS__);		\
	rcu_read_unlock();				\
} while (0)

#define rcu_str_deref(rcu_str) ({				\
	struct rcu_string *__str = rcu_dereference(rcu_str);	\
	__str->str;						\
})

#endif
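A minimal usage sketch for the helpers above. The struct and both functions are hypothetical (not part of btrfs): a writer publishes a new string with rcu_assign_pointer() and frees the old one with rcu_string_free(), while readers dereference with rcu_str_deref() under RCU protection.

struct my_dev {
	struct rcu_string __rcu *name;	/* hypothetical example field */
};

/* The caller is assumed to hold a lock that serializes writers. */
static int my_dev_set_name(struct my_dev *dev, const char *new_name)
{
	struct rcu_string *name, *old;

	name = rcu_string_strdup(new_name, GFP_KERNEL);
	if (!name)
		return -ENOMEM;

	old = rcu_dereference_protected(dev->name, 1);
	rcu_assign_pointer(dev->name, name);
	rcu_string_free(old);	/* old string is freed after a grace period */
	return 0;
}

static void my_dev_log_name(struct my_dev *dev)
{
	/* printk_in_rcu() wraps the dereference in rcu_read_lock/unlock. */
	printk_in_rcu(KERN_INFO "device name: %s\n",
		      rcu_str_deref(dev->name));
}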
// SPDX-License-Identifier: GPL-2.0+
//
// Copyright (C) 2018 Sean Young <sean@mess.org>

#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb/input.h>
#include <media/rc-core.h>

/* Each bit is 250us */
#define BIT_DURATION 250

struct imon {
	struct device *dev;
	struct urb *ir_urb;
	struct rc_dev *rcdev;
	__be64 *ir_buf;
	char phys[64];
};

/*
 * The first 5 bytes of data represent IR pulse or space. Each bit, starting
 * from the highest bit in the first byte, represents 250µs of data. It is 1
 * for space and 0 for pulse.
 *
 * The station sends 10 packets, and the 7th byte will be number 1 to 10, so
 * when we receive 10 we assume all the data has arrived.
 */
static void imon_ir_data(struct imon *imon)
{
	struct ir_raw_event rawir = {};
	u64 data = be64_to_cpup(imon->ir_buf);
	u8 packet_no = data & 0xff;
	int offset = 40;
	int bit;

	if (packet_no == 0xff)
		return;

	dev_dbg(imon->dev, "data: %*ph", 8, imon->ir_buf);

	/*
	 * Only the first 5 bytes contain IR data. Right shift so we move
	 * the IR bits to the lower 40 bits.
	 */
	data >>= 24;

	do {
		/*
		 * Find the highest set bit which is less than or equal to
		 * offset.
		 *
		 * offset is the bit above (base 0) where we start looking.
		 *
		 * data & (BIT_ULL(offset) - 1) masks off any unwanted bits,
		 * so we have just bits less than offset.
		 *
		 * fls will tell us the highest bit set plus 1 (or 0 if no
		 * bits are set).
		 */
		rawir.pulse = !rawir.pulse;
		bit = fls64(data & (BIT_ULL(offset) - 1));
		if (bit < offset) {
			dev_dbg(imon->dev, "%s: %d bits", rawir.pulse ?
"pulse" : "space", offset - bit); rawir.duration = (offset - bit) * BIT_DURATION; ir_raw_event_store_with_filter(imon->rcdev, &rawir); offset = bit; } data = ~data; } while (offset > 0); if (packet_no == 0x0a && !imon->rcdev->idle) { ir_raw_event_set_idle(imon->rcdev, true); ir_raw_event_handle(imon->rcdev); } } static void imon_ir_rx(struct urb *urb) { struct imon *imon = urb->context; int ret; switch (urb->status) { case 0: imon_ir_data(imon); break; case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: usb_unlink_urb(urb); return; case -EPIPE: default: dev_dbg(imon->dev, "error: urb status = %d", urb->status); break; } ret = usb_submit_urb(urb, GFP_ATOMIC); if (ret && ret != -ENODEV) dev_warn(imon->dev, "failed to resubmit urb: %d", ret); } static int imon_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_endpoint_descriptor *ir_ep = NULL; struct usb_host_interface *idesc; struct usb_device *udev; struct rc_dev *rcdev; struct imon *imon; int i, ret; udev = interface_to_usbdev(intf); idesc = intf->cur_altsetting; for (i = 0; i < idesc->desc.bNumEndpoints; i++) { struct usb_endpoint_descriptor *ep = &idesc->endpoint[i].desc; if (usb_endpoint_is_int_in(ep)) { ir_ep = ep; break; } } if (!ir_ep) { dev_err(&intf->dev, "IR endpoint missing"); return -ENODEV; } imon = devm_kmalloc(&intf->dev, sizeof(*imon), GFP_KERNEL); if (!imon) return -ENOMEM; imon->ir_urb = usb_alloc_urb(0, GFP_KERNEL); if (!imon->ir_urb) return -ENOMEM; imon->ir_buf = kmalloc(sizeof(__be64), GFP_KERNEL); if (!imon->ir_buf) { ret = -ENOMEM; goto free_urb; } imon->dev = &intf->dev; usb_fill_int_urb(imon->ir_urb, udev, usb_rcvintpipe(udev, ir_ep->bEndpointAddress), imon->ir_buf, sizeof(__be64), imon_ir_rx, imon, ir_ep->bInterval); rcdev = devm_rc_allocate_device(&intf->dev, RC_DRIVER_IR_RAW); if (!rcdev) { ret = -ENOMEM; goto free_urb; } usb_make_path(udev, imon->phys, sizeof(imon->phys)); rcdev->device_name = "iMON Station"; rcdev->driver_name = KBUILD_MODNAME; rcdev->input_phys = imon->phys; usb_to_input_id(udev, &rcdev->input_id); rcdev->dev.parent = &intf->dev; rcdev->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER; rcdev->map_name = RC_MAP_IMON_RSC; rcdev->rx_resolution = BIT_DURATION; rcdev->priv = imon; ret = devm_rc_register_device(&intf->dev, rcdev); if (ret) goto free_urb; imon->rcdev = rcdev; ret = usb_submit_urb(imon->ir_urb, GFP_KERNEL); if (ret) goto free_urb; usb_set_intfdata(intf, imon); return 0; free_urb: usb_free_urb(imon->ir_urb); kfree(imon->ir_buf); return ret; } static void imon_disconnect(struct usb_interface *intf) { struct imon *imon = usb_get_intfdata(intf); usb_kill_urb(imon->ir_urb); usb_free_urb(imon->ir_urb); kfree(imon->ir_buf); } static const struct usb_device_id imon_table[] = { /* SoundGraph iMON (IR only) -- sg_imon.inf */ { USB_DEVICE(0x04e8, 0xff30) }, {} }; static struct usb_driver imon_driver = { .name = KBUILD_MODNAME, .probe = imon_probe, .disconnect = imon_disconnect, .id_table = imon_table }; module_usb_driver(imon_driver); MODULE_DESCRIPTION("Early raw iMON IR devices"); MODULE_AUTHOR("Sean Young <sean@mess.org>"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(usb, imon_table); |
// SPDX-License-Identifier: GPL-2.0-only
/*
 * CAIA Delay-Gradient (CDG) congestion control
 *
 * This implementation is based on the paper:
 *   D.A. Hayes and G. Armitage. "Revisiting TCP congestion control using
 *   delay gradients." In IFIP Networking, pages 328-341. Springer, 2011.
 *
 * Scavenger traffic (Less-than-Best-Effort) should disable coexistence
 * heuristics using parameters use_shadow=0 and use_ineff=0.
 *
 * Parameters window, backoff_beta, and backoff_factor are crucial for
 * throughput and delay. Future work is needed to determine better defaults,
 * and to provide guidelines for use in different environments/contexts.
 *
 * Except for window, knobs are configured via /sys/module/tcp_cdg/parameters/.
 * Parameter window is only configurable when loading tcp_cdg as a module.
 *
 * Notable differences from paper/FreeBSD:
 *   o Using Hybrid Slow start and Proportional Rate Reduction.
 *   o Add toggle for shadow window mechanism. Suggested by David Hayes.
 *   o Add toggle for non-congestion loss tolerance.
 *   o Scaling parameter G is changed to a backoff factor;
 *     conversion is given by: backoff_factor = 1000/(G * window).
 *   o Limit shadow window to 2 * cwnd, or to cwnd when application limited.
 *   o More accurate e^-x.
*/ #include <linux/kernel.h> #include <linux/random.h> #include <linux/module.h> #include <linux/sched/clock.h> #include <net/tcp.h> #define HYSTART_ACK_TRAIN 1 #define HYSTART_DELAY 2 static int window __read_mostly = 8; static unsigned int backoff_beta __read_mostly = 0.7071 * 1024; /* sqrt 0.5 */ static unsigned int backoff_factor __read_mostly = 42; static unsigned int hystart_detect __read_mostly = 3; static unsigned int use_ineff __read_mostly = 5; static bool use_shadow __read_mostly = true; static bool use_tolerance __read_mostly; module_param(window, int, 0444); MODULE_PARM_DESC(window, "gradient window size (power of two <= 256)"); module_param(backoff_beta, uint, 0644); MODULE_PARM_DESC(backoff_beta, "backoff beta (0-1024)"); module_param(backoff_factor, uint, 0644); MODULE_PARM_DESC(backoff_factor, "backoff probability scale factor"); module_param(hystart_detect, uint, 0644); MODULE_PARM_DESC(hystart_detect, "use Hybrid Slow start " "(0: disabled, 1: ACK train, 2: delay threshold, 3: both)"); module_param(use_ineff, uint, 0644); MODULE_PARM_DESC(use_ineff, "use ineffectual backoff detection (threshold)"); module_param(use_shadow, bool, 0644); MODULE_PARM_DESC(use_shadow, "use shadow window heuristic"); module_param(use_tolerance, bool, 0644); MODULE_PARM_DESC(use_tolerance, "use loss tolerance heuristic"); struct cdg_minmax { union { struct { s32 min; s32 max; }; u64 v64; }; }; enum cdg_state { CDG_UNKNOWN = 0, CDG_NONFULL = 1, CDG_FULL = 2, CDG_BACKOFF = 3, }; struct cdg { struct cdg_minmax rtt; struct cdg_minmax rtt_prev; struct cdg_minmax *gradients; struct cdg_minmax gsum; bool gfilled; u8 tail; u8 state; u8 delack; u32 rtt_seq; u32 shadow_wnd; u16 backoff_cnt; u16 sample_cnt; s32 delay_min; u32 last_ack; u32 round_start; }; /** * nexp_u32 - negative base-e exponential * @ux: x in units of micro * * Returns exp(ux * -1e-6) * U32_MAX. */ static u32 __pure nexp_u32(u32 ux) { static const u16 v[] = { /* exp(-x)*65536-1 for x = 0, 0.000256, 0.000512, ... */ 65535, 65518, 65501, 65468, 65401, 65267, 65001, 64470, 63422, 61378, 57484, 50423, 38795, 22965, 8047, 987, 14, }; u32 msb = ux >> 8; u32 res; int i; /* Cut off when ux >= 2^24 (actual result is <= 222/U32_MAX). */ if (msb > U16_MAX) return 0; /* Scale first eight bits linearly: */ res = U32_MAX - (ux & 0xff) * (U32_MAX / 1000000); /* Obtain e^(x + y + ...) by computing e^x * e^y * ...: */ for (i = 1; msb; i++, msb >>= 1) { u32 y = v[i & -(msb & 1)] + U32_C(1); res = ((u64)res * y) >> 16; } return res; } /* Based on the HyStart algorithm (by Ha et al.) that is implemented in * tcp_cubic. Differences/experimental changes: * o Using Hayes' delayed ACK filter. * o Using a usec clock for the ACK train. * o Reset ACK train when application limited. * o Invoked at any cwnd (i.e. also when cwnd < 16). * o Invoked only when cwnd < ssthresh (i.e. not when cwnd == ssthresh). 
*/ static void tcp_cdg_hystart_update(struct sock *sk) { struct cdg *ca = inet_csk_ca(sk); struct tcp_sock *tp = tcp_sk(sk); ca->delay_min = min_not_zero(ca->delay_min, ca->rtt.min); if (ca->delay_min == 0) return; if (hystart_detect & HYSTART_ACK_TRAIN) { u32 now_us = tp->tcp_mstamp; if (ca->last_ack == 0 || !tcp_is_cwnd_limited(sk)) { ca->last_ack = now_us; ca->round_start = now_us; } else if (before(now_us, ca->last_ack + 3000)) { u32 base_owd = max(ca->delay_min / 2U, 125U); ca->last_ack = now_us; if (after(now_us, ca->round_start + base_owd)) { NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHYSTARTTRAINDETECT); NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPHYSTARTTRAINCWND, tcp_snd_cwnd(tp)); tp->snd_ssthresh = tcp_snd_cwnd(tp); return; } } } if (hystart_detect & HYSTART_DELAY) { if (ca->sample_cnt < 8) { ca->sample_cnt++; } else { s32 thresh = max(ca->delay_min + ca->delay_min / 8U, 125U); if (ca->rtt.min > thresh) { NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHYSTARTDELAYDETECT); NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPHYSTARTDELAYCWND, tcp_snd_cwnd(tp)); tp->snd_ssthresh = tcp_snd_cwnd(tp); } } } } static s32 tcp_cdg_grad(struct cdg *ca) { s32 gmin = ca->rtt.min - ca->rtt_prev.min; s32 gmax = ca->rtt.max - ca->rtt_prev.max; s32 grad; if (ca->gradients) { ca->gsum.min += gmin - ca->gradients[ca->tail].min; ca->gsum.max += gmax - ca->gradients[ca->tail].max; ca->gradients[ca->tail].min = gmin; ca->gradients[ca->tail].max = gmax; ca->tail = (ca->tail + 1) & (window - 1); gmin = ca->gsum.min; gmax = ca->gsum.max; } /* We keep sums to ignore gradients during cwnd reductions; * the paper's smoothed gradients otherwise simplify to: * (rtt_latest - rtt_oldest) / window. * * We also drop division by window here. */ grad = gmin > 0 ? gmin : gmax; /* Extrapolate missing values in gradient window: */ if (!ca->gfilled) { if (!ca->gradients && window > 1) grad *= window; /* Memory allocation failed. */ else if (ca->tail == 0) ca->gfilled = true; else grad = (grad * window) / (int)ca->tail; } /* Backoff was effectual: */ if (gmin <= -32 || gmax <= -32) ca->backoff_cnt = 0; if (use_tolerance) { /* Reduce small variations to zero: */ gmin = DIV_ROUND_CLOSEST(gmin, 64); gmax = DIV_ROUND_CLOSEST(gmax, 64); if (gmin > 0 && gmax <= 0) ca->state = CDG_FULL; else if ((gmin > 0 && gmax > 0) || gmax < 0) ca->state = CDG_NONFULL; } return grad; } static bool tcp_cdg_backoff(struct sock *sk, u32 grad) { struct cdg *ca = inet_csk_ca(sk); struct tcp_sock *tp = tcp_sk(sk); if (get_random_u32() <= nexp_u32(grad * backoff_factor)) return false; if (use_ineff) { ca->backoff_cnt++; if (ca->backoff_cnt > use_ineff) return false; } ca->shadow_wnd = max(ca->shadow_wnd, tcp_snd_cwnd(tp)); ca->state = CDG_BACKOFF; tcp_enter_cwr(sk); return true; } /* Not called in CWR or Recovery state. 
*/ static void tcp_cdg_cong_avoid(struct sock *sk, u32 ack, u32 acked) { struct cdg *ca = inet_csk_ca(sk); struct tcp_sock *tp = tcp_sk(sk); u32 prior_snd_cwnd; u32 incr; if (tcp_in_slow_start(tp) && hystart_detect) tcp_cdg_hystart_update(sk); if (after(ack, ca->rtt_seq) && ca->rtt.v64) { s32 grad = 0; if (ca->rtt_prev.v64) grad = tcp_cdg_grad(ca); ca->rtt_seq = tp->snd_nxt; ca->rtt_prev = ca->rtt; ca->rtt.v64 = 0; ca->last_ack = 0; ca->sample_cnt = 0; if (grad > 0 && tcp_cdg_backoff(sk, grad)) return; } if (!tcp_is_cwnd_limited(sk)) { ca->shadow_wnd = min(ca->shadow_wnd, tcp_snd_cwnd(tp)); return; } prior_snd_cwnd = tcp_snd_cwnd(tp); tcp_reno_cong_avoid(sk, ack, acked); incr = tcp_snd_cwnd(tp) - prior_snd_cwnd; ca->shadow_wnd = max(ca->shadow_wnd, ca->shadow_wnd + incr); } static void tcp_cdg_acked(struct sock *sk, const struct ack_sample *sample) { struct cdg *ca = inet_csk_ca(sk); struct tcp_sock *tp = tcp_sk(sk); if (sample->rtt_us <= 0) return; /* A heuristic for filtering delayed ACKs, adapted from: * D.A. Hayes. "Timing enhancements to the FreeBSD kernel to support * delay and rate based TCP mechanisms." TR 100219A. CAIA, 2010. */ if (tp->sacked_out == 0) { if (sample->pkts_acked == 1 && ca->delack) { /* A delayed ACK is only used for the minimum if it is * provenly lower than an existing non-zero minimum. */ ca->rtt.min = min(ca->rtt.min, sample->rtt_us); ca->delack--; return; } else if (sample->pkts_acked > 1 && ca->delack < 5) { ca->delack++; } } ca->rtt.min = min_not_zero(ca->rtt.min, sample->rtt_us); ca->rtt.max = max(ca->rtt.max, sample->rtt_us); } static u32 tcp_cdg_ssthresh(struct sock *sk) { struct cdg *ca = inet_csk_ca(sk); struct tcp_sock *tp = tcp_sk(sk); if (ca->state == CDG_BACKOFF) return max(2U, (tcp_snd_cwnd(tp) * min(1024U, backoff_beta)) >> 10); if (ca->state == CDG_NONFULL && use_tolerance) return tcp_snd_cwnd(tp); ca->shadow_wnd = min(ca->shadow_wnd >> 1, tcp_snd_cwnd(tp)); if (use_shadow) return max3(2U, ca->shadow_wnd, tcp_snd_cwnd(tp) >> 1); return max(2U, tcp_snd_cwnd(tp) >> 1); } static void tcp_cdg_cwnd_event(struct sock *sk, const enum tcp_ca_event ev) { struct cdg *ca = inet_csk_ca(sk); struct tcp_sock *tp = tcp_sk(sk); struct cdg_minmax *gradients; switch (ev) { case CA_EVENT_CWND_RESTART: gradients = ca->gradients; if (gradients) memset(gradients, 0, window * sizeof(gradients[0])); memset(ca, 0, sizeof(*ca)); ca->gradients = gradients; ca->rtt_seq = tp->snd_nxt; ca->shadow_wnd = tcp_snd_cwnd(tp); break; case CA_EVENT_COMPLETE_CWR: ca->state = CDG_UNKNOWN; ca->rtt_seq = tp->snd_nxt; ca->rtt_prev = ca->rtt; ca->rtt.v64 = 0; break; default: break; } } static void tcp_cdg_init(struct sock *sk) { struct cdg *ca = inet_csk_ca(sk); struct tcp_sock *tp = tcp_sk(sk); ca->gradients = NULL; /* We silently fall back to window = 1 if allocation fails. 
 */
	if (window > 1)
		ca->gradients = kcalloc(window, sizeof(ca->gradients[0]),
					GFP_NOWAIT | __GFP_NOWARN);
	ca->rtt_seq = tp->snd_nxt;
	ca->shadow_wnd = tcp_snd_cwnd(tp);
}

static void tcp_cdg_release(struct sock *sk)
{
	struct cdg *ca = inet_csk_ca(sk);

	kfree(ca->gradients);
	ca->gradients = NULL;
}

static struct tcp_congestion_ops tcp_cdg __read_mostly = {
	.cong_avoid = tcp_cdg_cong_avoid,
	.cwnd_event = tcp_cdg_cwnd_event,
	.pkts_acked = tcp_cdg_acked,
	.undo_cwnd = tcp_reno_undo_cwnd,
	.ssthresh = tcp_cdg_ssthresh,
	.release = tcp_cdg_release,
	.init = tcp_cdg_init,
	.owner = THIS_MODULE,
	.name = "cdg",
};

static int __init tcp_cdg_register(void)
{
	if (backoff_beta > 1024 || window < 1 || window > 256)
		return -ERANGE;
	if (!is_power_of_2(window))
		return -EINVAL;

	BUILD_BUG_ON(sizeof(struct cdg) > ICSK_CA_PRIV_SIZE);
	tcp_register_congestion_control(&tcp_cdg);
	return 0;
}

static void __exit tcp_cdg_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_cdg);
}

module_init(tcp_cdg_register);
module_exit(tcp_cdg_unregister);

MODULE_AUTHOR("Kenneth Klette Jonassen");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TCP CDG");
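For intuition about the fixed-point exponential used above: tcp_cdg_backoff() backs off with probability roughly 1 - exp(-grad * backoff_factor / 1e6), since it returns false when get_random_u32() falls at or below nexp_u32(grad * backoff_factor). Below is a self-contained userspace sketch (not part of the module; the comparison harness and sample gradient are ours) that copies nexp_u32() and checks it against libm with the default backoff_factor of 42.

#include <stdio.h>
#include <stdint.h>
#include <math.h>

/* Userspace copy of the kernel's nexp_u32(): exp(ux * -1e-6) * UINT32_MAX. */
static uint32_t nexp_u32(uint32_t ux)
{
	static const uint16_t v[] = {
		/* exp(-x)*65536-1 for x = 0, 0.000256, 0.000512, ... */
		65535, 65518, 65501, 65468, 65401, 65267, 65001, 64470,
		63422, 61378, 57484, 50423, 38795, 22965, 8047, 987, 14,
	};
	uint32_t msb = ux >> 8;
	uint32_t res;
	int i;

	/* Cut off when ux >= 2^24. */
	if (msb > UINT16_MAX)
		return 0;

	/* Scale the first eight bits linearly: */
	res = UINT32_MAX - (ux & 0xff) * (UINT32_MAX / 1000000);

	/* Obtain e^(x + y + ...) by computing e^x * e^y * ...: */
	for (i = 1; msb; i++, msb >>= 1) {
		uint32_t y = v[i & -(msb & 1)] + 1U;

		res = ((uint64_t)res * y) >> 16;
	}
	return res;
}

int main(void)
{
	/* Sample gradient of 500 us with the default backoff_factor = 42. */
	uint32_t ux = 500 * 42;
	double approx = (double)nexp_u32(ux) / UINT32_MAX;
	double exact = exp(-(double)ux * 1e-6);

	printf("approx=%.6f exact=%.6f backoff prob=%.6f\n",
	       approx, exact, 1.0 - approx);
	return 0;
}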
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Pointer abstraction for IO/system memory
 */

#ifndef __IOSYS_MAP_H__
#define __IOSYS_MAP_H__

#include <linux/compiler_types.h>
#include <linux/io.h>
#include <linux/string.h>

/**
 * DOC: overview
 *
 * When accessing a memory region, depending on its location, users may have to
 * access it with I/O operations or memory load/store operations. For example,
 * copying to system memory could be done with memcpy(), copying to I/O memory
 * would be done with memcpy_toio().
 *
 * .. code-block:: c
 *
 *	void *vaddr = ...; // pointer to system memory
 *	memcpy(vaddr, src, len);
 *
 *	void *vaddr_iomem = ...; // pointer to I/O memory
 *	memcpy_toio(vaddr_iomem, src, len);
 *
 * The user of such a pointer may not have information about the mapping of
 * that region or may want to have a single code path to handle operations on
 * that buffer, regardless of whether it's located in system or I/O memory. The
 * type :c:type:`struct iosys_map <iosys_map>` and its helpers abstract that so
 * the buffer can be passed around to other drivers or have separate duties
 * inside the same driver for allocation, read and write operations.
 *
 * Open-coding access to :c:type:`struct iosys_map <iosys_map>` is considered
 * bad style. Rather than accessing its fields directly, use one of the provided
 * helper functions, or implement your own. For example, instances of
 * :c:type:`struct iosys_map <iosys_map>` can be initialized statically with
 * IOSYS_MAP_INIT_VADDR(), or at runtime with iosys_map_set_vaddr(). These
 * helpers will set an address in system memory.
 *
 * ..
code-block:: c * * struct iosys_map map = IOSYS_MAP_INIT_VADDR(0xdeadbeaf); * * iosys_map_set_vaddr(&map, 0xdeadbeaf); * * To set an address in I/O memory, use IOSYS_MAP_INIT_VADDR_IOMEM() or * iosys_map_set_vaddr_iomem(). * * .. code-block:: c * * struct iosys_map map = IOSYS_MAP_INIT_VADDR_IOMEM(0xdeadbeaf); * * iosys_map_set_vaddr_iomem(&map, 0xdeadbeaf); * * Instances of struct iosys_map do not have to be cleaned up, but * can be cleared to NULL with iosys_map_clear(). Cleared mappings * always refer to system memory. * * .. code-block:: c * * iosys_map_clear(&map); * * Test if a mapping is valid with either iosys_map_is_set() or * iosys_map_is_null(). * * .. code-block:: c * * if (iosys_map_is_set(&map) != iosys_map_is_null(&map)) * // always true * * Instances of :c:type:`struct iosys_map <iosys_map>` can be compared for * equality with iosys_map_is_equal(). Mappings that point to different memory * spaces, system or I/O, are never equal. That's even true if both spaces are * located in the same address space, both mappings contain the same address * value, or both mappings refer to NULL. * * .. code-block:: c * * struct iosys_map sys_map; // refers to system memory * struct iosys_map io_map; // refers to I/O memory * * if (iosys_map_is_equal(&sys_map, &io_map)) * // always false * * A set up instance of struct iosys_map can be used to access or manipulate the * buffer memory. Depending on the location of the memory, the provided helpers * will pick the correct operations. Data can be copied into the memory with * iosys_map_memcpy_to(). The address can be manipulated with iosys_map_incr(). * * .. code-block:: c * * const void *src = ...; // source buffer * size_t len = ...; // length of src * * iosys_map_memcpy_to(&map, src, len); * iosys_map_incr(&map, len); // go to first byte after the memcpy */ /** * struct iosys_map - Pointer to IO/system memory * @vaddr_iomem: The buffer's address if in I/O memory * @vaddr: The buffer's address if in system memory * @is_iomem: True if the buffer is located in I/O memory, or false * otherwise. */ struct iosys_map { union { void __iomem *vaddr_iomem; void *vaddr; }; bool is_iomem; }; /** * IOSYS_MAP_INIT_VADDR - Initializes struct iosys_map to an address in system memory * @vaddr_: A system-memory address */ #define IOSYS_MAP_INIT_VADDR(vaddr_) \ { \ .vaddr = (vaddr_), \ .is_iomem = false, \ } /** * IOSYS_MAP_INIT_VADDR_IOMEM - Initializes struct iosys_map to an address in I/O memory * @vaddr_iomem_: An I/O-memory address */ #define IOSYS_MAP_INIT_VADDR_IOMEM(vaddr_iomem_) \ { \ .vaddr_iomem = (vaddr_iomem_), \ .is_iomem = true, \ } /** * IOSYS_MAP_INIT_OFFSET - Initializes struct iosys_map from another iosys_map * @map_: The dma-buf mapping structure to copy from * @offset_: Offset to add to the other mapping * * Initializes a new iosys_map struct based on another passed as argument. It * does a shallow copy of the struct so it's possible to update the back storage * without changing where the original map points to. It is the equivalent of * doing: * * .. code-block:: c * * iosys_map map = other_map; * iosys_map_incr(&map, &offset); * * Example usage: * * .. code-block:: c * * void foo(struct device *dev, struct iosys_map *base_map) * { * ... * struct iosys_map map = IOSYS_MAP_INIT_OFFSET(base_map, FIELD_OFFSET); * ... * } * * The advantage of using the initializer over just increasing the offset with * iosys_map_incr() like above is that the new map will always point to the * right place of the buffer during its scope. 
It reduces the risk of updating
 * the wrong part of the buffer and having no compiler warning about that. If
 * the assignment to IOSYS_MAP_INIT_OFFSET() is forgotten, the compiler can
 * warn about the use of an uninitialized variable.
 */
#define IOSYS_MAP_INIT_OFFSET(map_, offset_)	({	\
	struct iosys_map copy_ = *map_;			\
	iosys_map_incr(&copy_, offset_);		\
	copy_;						\
})

/**
 * iosys_map_set_vaddr - Sets an iosys mapping structure to an address in system memory
 * @map: The iosys_map structure
 * @vaddr: A system-memory address
 *
 * Sets the address and clears the I/O-memory flag.
 */
static inline void iosys_map_set_vaddr(struct iosys_map *map, void *vaddr)
{
	map->vaddr = vaddr;
	map->is_iomem = false;
}

/**
 * iosys_map_set_vaddr_iomem - Sets an iosys mapping structure to an address in I/O memory
 * @map: The iosys_map structure
 * @vaddr_iomem: An I/O-memory address
 *
 * Sets the address and the I/O-memory flag.
 */
static inline void iosys_map_set_vaddr_iomem(struct iosys_map *map,
					     void __iomem *vaddr_iomem)
{
	map->vaddr_iomem = vaddr_iomem;
	map->is_iomem = true;
}

/**
 * iosys_map_is_equal - Compares two iosys mapping structures for equality
 * @lhs: The iosys_map structure
 * @rhs: An iosys_map structure to compare with
 *
 * Two iosys mapping structures are equal if they both refer to the same type
 * of memory and to the same address within that memory.
 *
 * Returns:
 * True if both structures are equal, or false otherwise.
 */
static inline bool iosys_map_is_equal(const struct iosys_map *lhs,
				      const struct iosys_map *rhs)
{
	if (lhs->is_iomem != rhs->is_iomem)
		return false;
	else if (lhs->is_iomem)
		return lhs->vaddr_iomem == rhs->vaddr_iomem;
	else
		return lhs->vaddr == rhs->vaddr;
}

/**
 * iosys_map_is_null - Tests if an iosys mapping is NULL
 * @map: The iosys_map structure
 *
 * Depending on the state of struct iosys_map.is_iomem, tests if the
 * mapping is NULL.
 *
 * Returns:
 * True if the mapping is NULL, or false otherwise.
 */
static inline bool iosys_map_is_null(const struct iosys_map *map)
{
	if (map->is_iomem)
		return !map->vaddr_iomem;
	return !map->vaddr;
}

/**
 * iosys_map_is_set - Tests if the iosys mapping has been set
 * @map: The iosys_map structure
 *
 * Depending on the state of struct iosys_map.is_iomem, tests if the
 * mapping has been set.
 *
 * Returns:
 * True if the mapping has been set, or false otherwise.
 */
static inline bool iosys_map_is_set(const struct iosys_map *map)
{
	return !iosys_map_is_null(map);
}

/**
 * iosys_map_clear - Clears an iosys mapping structure
 * @map: The iosys_map structure
 *
 * Clears all fields to zero, including struct iosys_map.is_iomem, so
 * mapping structures that were set to point to I/O memory are reset for
 * system memory. Pointers are cleared to NULL. This is the default.
 */
static inline void iosys_map_clear(struct iosys_map *map)
{
	if (map->is_iomem) {
		map->vaddr_iomem = NULL;
		map->is_iomem = false;
	} else {
		map->vaddr = NULL;
	}
}

/**
 * iosys_map_memcpy_to - Memcpy into offset of iosys_map
 * @dst: The iosys_map structure
 * @dst_offset: The offset into the mapping to copy to
 * @src: The source buffer
 * @len: The number of bytes in src
 *
 * Copies data into an iosys_map with an offset. The source buffer is in
 * system memory. Depending on the buffer's location, the helper picks the
 * correct method of accessing the memory.
*/ static inline void iosys_map_memcpy_to(struct iosys_map *dst, size_t dst_offset, const void *src, size_t len) { if (dst->is_iomem) memcpy_toio(dst->vaddr_iomem + dst_offset, src, len); else memcpy(dst->vaddr + dst_offset, src, len); } /** * iosys_map_memcpy_from - Memcpy from iosys_map into system memory * @dst: Destination in system memory * @src: The iosys_map structure * @src_offset: The offset from which to copy * @len: The number of byte in src * * Copies data from a iosys_map with an offset. The dest buffer is in * system memory. Depending on the mapping location, the helper picks the * correct method of accessing the memory. */ static inline void iosys_map_memcpy_from(void *dst, const struct iosys_map *src, size_t src_offset, size_t len) { if (src->is_iomem) memcpy_fromio(dst, src->vaddr_iomem + src_offset, len); else memcpy(dst, src->vaddr + src_offset, len); } /** * iosys_map_incr - Increments the address stored in a iosys mapping * @map: The iosys_map structure * @incr: The number of bytes to increment * * Increments the address stored in a iosys mapping. Depending on the * buffer's location, the correct value will be updated. */ static inline void iosys_map_incr(struct iosys_map *map, size_t incr) { if (map->is_iomem) map->vaddr_iomem += incr; else map->vaddr += incr; } /** * iosys_map_memset - Memset iosys_map * @dst: The iosys_map structure * @offset: Offset from dst where to start setting value * @value: The value to set * @len: The number of bytes to set in dst * * Set value in iosys_map. Depending on the buffer's location, the helper * picks the correct method of accessing the memory. */ static inline void iosys_map_memset(struct iosys_map *dst, size_t offset, int value, size_t len) { if (dst->is_iomem) memset_io(dst->vaddr_iomem + offset, value, len); else memset(dst->vaddr + offset, value, len); } #ifdef CONFIG_64BIT #define __iosys_map_rd_io_u64_case(val_, vaddr_iomem_) \ u64: val_ = readq(vaddr_iomem_) #define __iosys_map_wr_io_u64_case(val_, vaddr_iomem_) \ u64: writeq(val_, vaddr_iomem_) #else #define __iosys_map_rd_io_u64_case(val_, vaddr_iomem_) \ u64: memcpy_fromio(&(val_), vaddr_iomem_, sizeof(u64)) #define __iosys_map_wr_io_u64_case(val_, vaddr_iomem_) \ u64: memcpy_toio(vaddr_iomem_, &(val_), sizeof(u64)) #endif #define __iosys_map_rd_io(val__, vaddr_iomem__, type__) _Generic(val__, \ u8: val__ = readb(vaddr_iomem__), \ u16: val__ = readw(vaddr_iomem__), \ u32: val__ = readl(vaddr_iomem__), \ __iosys_map_rd_io_u64_case(val__, vaddr_iomem__)) #define __iosys_map_rd_sys(val__, vaddr__, type__) \ val__ = READ_ONCE(*(type__ *)(vaddr__)) #define __iosys_map_wr_io(val__, vaddr_iomem__, type__) _Generic(val__, \ u8: writeb(val__, vaddr_iomem__), \ u16: writew(val__, vaddr_iomem__), \ u32: writel(val__, vaddr_iomem__), \ __iosys_map_wr_io_u64_case(val__, vaddr_iomem__)) #define __iosys_map_wr_sys(val__, vaddr__, type__) \ WRITE_ONCE(*(type__ *)(vaddr__), val__) /** * iosys_map_rd - Read a C-type value from the iosys_map * * @map__: The iosys_map structure * @offset__: The offset from which to read * @type__: Type of the value being read * * Read a C type value (u8, u16, u32 and u64) from iosys_map. For other types or * if pointer may be unaligned (and problematic for the architecture supported), * use iosys_map_memcpy_from(). * * Returns: * The value read from the mapping. 
*/ #define iosys_map_rd(map__, offset__, type__) ({ \ type__ val_; \ if ((map__)->is_iomem) { \ __iosys_map_rd_io(val_, (map__)->vaddr_iomem + (offset__), type__); \ } else { \ __iosys_map_rd_sys(val_, (map__)->vaddr + (offset__), type__); \ } \ val_; \ }) /** * iosys_map_wr - Write a C-type value to the iosys_map * * @map__: The iosys_map structure * @offset__: The offset from the mapping to write to * @type__: Type of the value being written * @val__: Value to write * * Write a C type value (u8, u16, u32 and u64) to the iosys_map. For other types * or if pointer may be unaligned (and problematic for the architecture * supported), use iosys_map_memcpy_to() */ #define iosys_map_wr(map__, offset__, type__, val__) ({ \ type__ val_ = (val__); \ if ((map__)->is_iomem) { \ __iosys_map_wr_io(val_, (map__)->vaddr_iomem + (offset__), type__); \ } else { \ __iosys_map_wr_sys(val_, (map__)->vaddr + (offset__), type__); \ } \ }) /** * iosys_map_rd_field - Read a member from a struct in the iosys_map * * @map__: The iosys_map structure * @struct_offset__: Offset from the beginning of the map, where the struct * is located * @struct_type__: The struct describing the layout of the mapping * @field__: Member of the struct to read * * Read a value from iosys_map considering its layout is described by a C struct * starting at @struct_offset__. The field offset and size is calculated and its * value read. If the field access would incur in un-aligned access, then either * iosys_map_memcpy_from() needs to be used or the architecture must support it. * For example: suppose there is a @struct foo defined as below and the value * ``foo.field2.inner2`` needs to be read from the iosys_map: * * .. code-block:: c * * struct foo { * int field1; * struct { * int inner1; * int inner2; * } field2; * int field3; * } __packed; * * This is the expected memory layout of a buffer using iosys_map_rd_field(): * * +------------------------------+--------------------------+ * | Address | Content | * +==============================+==========================+ * | buffer + 0000 | start of mmapped buffer | * | | pointed by iosys_map | * +------------------------------+--------------------------+ * | ... | ... | * +------------------------------+--------------------------+ * | buffer + ``struct_offset__`` | start of ``struct foo`` | * +------------------------------+--------------------------+ * | ... | ... | * +------------------------------+--------------------------+ * | buffer + wwww | ``foo.field2.inner2`` | * +------------------------------+--------------------------+ * | ... | ... | * +------------------------------+--------------------------+ * | buffer + yyyy | end of ``struct foo`` | * +------------------------------+--------------------------+ * | ... | ... | * +------------------------------+--------------------------+ * | buffer + zzzz | end of mmaped buffer | * +------------------------------+--------------------------+ * * Values automatically calculated by this macro or not needed are denoted by * wwww, yyyy and zzzz. This is the code to read that value: * * .. code-block:: c * * x = iosys_map_rd_field(&map, offset, struct foo, field2.inner2); * * Returns: * The value read from the mapping. 
*/ #define iosys_map_rd_field(map__, struct_offset__, struct_type__, field__) ({ \ struct_type__ *s_; \ iosys_map_rd(map__, struct_offset__ + offsetof(struct_type__, field__), \ typeof(s_->field__)); \ }) /** * iosys_map_wr_field - Write to a member of a struct in the iosys_map * * @map__: The iosys_map structure * @struct_offset__: Offset from the beginning of the map, where the struct * is located * @struct_type__: The struct describing the layout of the mapping * @field__: Member of the struct to read * @val__: Value to write * * Write a value to the iosys_map considering its layout is described by a C * struct starting at @struct_offset__. The field offset and size is calculated * and the @val__ is written. If the field access would incur in un-aligned * access, then either iosys_map_memcpy_to() needs to be used or the * architecture must support it. Refer to iosys_map_rd_field() for expected * usage and memory layout. */ #define iosys_map_wr_field(map__, struct_offset__, struct_type__, field__, val__) ({ \ struct_type__ *s_; \ iosys_map_wr(map__, struct_offset__ + offsetof(struct_type__, field__), \ typeof(s_->field__), val__); \ }) #endif /* __IOSYS_MAP_H__ */ |
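A short usage sketch of the header above. The function and the field layout are hypothetical (not from the header); the point is that the same call sites serve both branches of the union, whether the mapping was set up for system or I/O memory.

#include <linux/errno.h>
#include <linux/iosys-map.h>

/* Hypothetical layout: 4 magic bytes at offset 0, a u32 version at offset 4. */
static int fill_ring_header(struct iosys_map *map)
{
	static const u8 magic[4] = { 'R', 'I', 'N', 'G' };

	if (iosys_map_is_null(map))
		return -EINVAL;

	/* Both helpers dispatch on map->is_iomem internally: */
	iosys_map_memcpy_to(map, 0, magic, sizeof(magic));
	iosys_map_wr(map, 4, u32, 1);

	/* Read the version field back through the same abstraction. */
	return iosys_map_rd(map, 4, u32) == 1 ? 0 : -EIO;
}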
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * V4L2 controls framework core implementation.
* * Copyright (C) 2010-2021 Hans Verkuil <hverkuil-cisco@xs4all.nl> */ #include <linux/export.h> #include <linux/mm.h> #include <linux/slab.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-event.h> #include <media/v4l2-fwnode.h> #include "v4l2-ctrls-priv.h" static const union v4l2_ctrl_ptr ptr_null; static void fill_event(struct v4l2_event *ev, struct v4l2_ctrl *ctrl, u32 changes) { memset(ev, 0, sizeof(*ev)); ev->type = V4L2_EVENT_CTRL; ev->id = ctrl->id; ev->u.ctrl.changes = changes; ev->u.ctrl.type = ctrl->type; ev->u.ctrl.flags = user_flags(ctrl); if (ctrl->is_ptr) ev->u.ctrl.value64 = 0; else ev->u.ctrl.value64 = *ctrl->p_cur.p_s64; ev->u.ctrl.minimum = ctrl->minimum; ev->u.ctrl.maximum = ctrl->maximum; if (ctrl->type == V4L2_CTRL_TYPE_MENU || ctrl->type == V4L2_CTRL_TYPE_INTEGER_MENU) ev->u.ctrl.step = 1; else ev->u.ctrl.step = ctrl->step; ev->u.ctrl.default_value = ctrl->default_value; } void send_initial_event(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl) { struct v4l2_event ev; u32 changes = V4L2_EVENT_CTRL_CH_FLAGS; if (!(ctrl->flags & V4L2_CTRL_FLAG_WRITE_ONLY)) changes |= V4L2_EVENT_CTRL_CH_VALUE; fill_event(&ev, ctrl, changes); v4l2_event_queue_fh(fh, &ev); } void send_event(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, u32 changes) { struct v4l2_event ev; struct v4l2_subscribed_event *sev; if (list_empty(&ctrl->ev_subs)) return; fill_event(&ev, ctrl, changes); list_for_each_entry(sev, &ctrl->ev_subs, node) if (sev->fh != fh || (sev->flags & V4L2_EVENT_SUB_FL_ALLOW_FEEDBACK)) v4l2_event_queue_fh(sev->fh, &ev); } bool v4l2_ctrl_type_op_equal(const struct v4l2_ctrl *ctrl, union v4l2_ctrl_ptr ptr1, union v4l2_ctrl_ptr ptr2) { unsigned int i; switch (ctrl->type) { case V4L2_CTRL_TYPE_BUTTON: return false; case V4L2_CTRL_TYPE_STRING: for (i = 0; i < ctrl->elems; i++) { unsigned int idx = i * ctrl->elem_size; /* strings are always 0-terminated */ if (strcmp(ptr1.p_char + idx, ptr2.p_char + idx)) return false; } return true; default: return !memcmp(ptr1.p_const, ptr2.p_const, ctrl->elems * ctrl->elem_size); } } EXPORT_SYMBOL(v4l2_ctrl_type_op_equal); /* Default intra MPEG-2 quantisation coefficients, from the specification. 
*/ static const u8 mpeg2_intra_quant_matrix[64] = { 8, 16, 16, 19, 16, 19, 22, 22, 22, 22, 22, 22, 26, 24, 26, 27, 27, 27, 26, 26, 26, 26, 27, 27, 27, 29, 29, 29, 34, 34, 34, 29, 29, 29, 27, 27, 29, 29, 32, 32, 34, 34, 37, 38, 37, 35, 35, 34, 35, 38, 38, 40, 40, 40, 48, 48, 46, 46, 56, 56, 58, 69, 69, 83 }; static void std_init_compound(const struct v4l2_ctrl *ctrl, u32 idx, union v4l2_ctrl_ptr ptr) { struct v4l2_ctrl_mpeg2_sequence *p_mpeg2_sequence; struct v4l2_ctrl_mpeg2_picture *p_mpeg2_picture; struct v4l2_ctrl_mpeg2_quantisation *p_mpeg2_quant; struct v4l2_ctrl_vp8_frame *p_vp8_frame; struct v4l2_ctrl_vp9_frame *p_vp9_frame; struct v4l2_ctrl_fwht_params *p_fwht_params; struct v4l2_ctrl_h264_scaling_matrix *p_h264_scaling_matrix; struct v4l2_ctrl_av1_sequence *p_av1_sequence; void *p = ptr.p + idx * ctrl->elem_size; if (ctrl->p_def.p_const) memcpy(p, ctrl->p_def.p_const, ctrl->elem_size); else memset(p, 0, ctrl->elem_size); switch ((u32)ctrl->type) { case V4L2_CTRL_TYPE_MPEG2_SEQUENCE: p_mpeg2_sequence = p; /* 4:2:0 */ p_mpeg2_sequence->chroma_format = 1; break; case V4L2_CTRL_TYPE_MPEG2_PICTURE: p_mpeg2_picture = p; /* interlaced top field */ p_mpeg2_picture->picture_structure = V4L2_MPEG2_PIC_TOP_FIELD; p_mpeg2_picture->picture_coding_type = V4L2_MPEG2_PIC_CODING_TYPE_I; break; case V4L2_CTRL_TYPE_MPEG2_QUANTISATION: p_mpeg2_quant = p; memcpy(p_mpeg2_quant->intra_quantiser_matrix, mpeg2_intra_quant_matrix, ARRAY_SIZE(mpeg2_intra_quant_matrix)); /* * The default non-intra MPEG-2 quantisation * coefficients are all 16, as per the specification. */ memset(p_mpeg2_quant->non_intra_quantiser_matrix, 16, sizeof(p_mpeg2_quant->non_intra_quantiser_matrix)); break; case V4L2_CTRL_TYPE_VP8_FRAME: p_vp8_frame = p; p_vp8_frame->num_dct_parts = 1; break; case V4L2_CTRL_TYPE_VP9_FRAME: p_vp9_frame = p; p_vp9_frame->profile = 0; p_vp9_frame->bit_depth = 8; p_vp9_frame->flags |= V4L2_VP9_FRAME_FLAG_X_SUBSAMPLING | V4L2_VP9_FRAME_FLAG_Y_SUBSAMPLING; break; case V4L2_CTRL_TYPE_AV1_SEQUENCE: p_av1_sequence = p; p_av1_sequence->bit_depth = 8; break; case V4L2_CTRL_TYPE_FWHT_PARAMS: p_fwht_params = p; p_fwht_params->version = V4L2_FWHT_VERSION; p_fwht_params->width = 1280; p_fwht_params->height = 720; p_fwht_params->flags = V4L2_FWHT_FL_PIXENC_YUV | (2 << V4L2_FWHT_FL_COMPONENTS_NUM_OFFSET); break; case V4L2_CTRL_TYPE_H264_SCALING_MATRIX: p_h264_scaling_matrix = p; /* * The default (flat) H.264 scaling matrix when none are * specified in the bitstream, this is according to formulas * (7-8) and (7-9) of the specification. 
*/ memset(p_h264_scaling_matrix, 16, sizeof(*p_h264_scaling_matrix)); break; } } void v4l2_ctrl_type_op_init(const struct v4l2_ctrl *ctrl, u32 from_idx, union v4l2_ctrl_ptr ptr) { unsigned int i; u32 tot_elems = ctrl->elems; u32 elems = tot_elems - from_idx; if (from_idx >= tot_elems) return; switch (ctrl->type) { case V4L2_CTRL_TYPE_STRING: for (i = from_idx; i < tot_elems; i++) { unsigned int offset = i * ctrl->elem_size; memset(ptr.p_char + offset, ' ', ctrl->minimum); ptr.p_char[offset + ctrl->minimum] = '\0'; } break; case V4L2_CTRL_TYPE_INTEGER64: if (ctrl->default_value) { for (i = from_idx; i < tot_elems; i++) ptr.p_s64[i] = ctrl->default_value; } else { memset(ptr.p_s64 + from_idx, 0, elems * sizeof(s64)); } break; case V4L2_CTRL_TYPE_INTEGER: case V4L2_CTRL_TYPE_INTEGER_MENU: case V4L2_CTRL_TYPE_MENU: case V4L2_CTRL_TYPE_BITMASK: case V4L2_CTRL_TYPE_BOOLEAN: if (ctrl->default_value) { for (i = from_idx; i < tot_elems; i++) ptr.p_s32[i] = ctrl->default_value; } else { memset(ptr.p_s32 + from_idx, 0, elems * sizeof(s32)); } break; case V4L2_CTRL_TYPE_BUTTON: case V4L2_CTRL_TYPE_CTRL_CLASS: memset(ptr.p_s32 + from_idx, 0, elems * sizeof(s32)); break; case V4L2_CTRL_TYPE_U8: memset(ptr.p_u8 + from_idx, ctrl->default_value, elems); break; case V4L2_CTRL_TYPE_U16: if (ctrl->default_value) { for (i = from_idx; i < tot_elems; i++) ptr.p_u16[i] = ctrl->default_value; } else { memset(ptr.p_u16 + from_idx, 0, elems * sizeof(u16)); } break; case V4L2_CTRL_TYPE_U32: if (ctrl->default_value) { for (i = from_idx; i < tot_elems; i++) ptr.p_u32[i] = ctrl->default_value; } else { memset(ptr.p_u32 + from_idx, 0, elems * sizeof(u32)); } break; default: for (i = from_idx; i < tot_elems; i++) std_init_compound(ctrl, i, ptr); break; } } EXPORT_SYMBOL(v4l2_ctrl_type_op_init); void v4l2_ctrl_type_op_log(const struct v4l2_ctrl *ctrl) { union v4l2_ctrl_ptr ptr = ctrl->p_cur; if (ctrl->is_array) { unsigned i; for (i = 0; i < ctrl->nr_of_dims; i++) pr_cont("[%u]", ctrl->dims[i]); pr_cont(" "); } switch (ctrl->type) { case V4L2_CTRL_TYPE_INTEGER: pr_cont("%d", *ptr.p_s32); break; case V4L2_CTRL_TYPE_BOOLEAN: pr_cont("%s", *ptr.p_s32 ? 
"true" : "false"); break; case V4L2_CTRL_TYPE_MENU: pr_cont("%s", ctrl->qmenu[*ptr.p_s32]); break; case V4L2_CTRL_TYPE_INTEGER_MENU: pr_cont("%lld", ctrl->qmenu_int[*ptr.p_s32]); break; case V4L2_CTRL_TYPE_BITMASK: pr_cont("0x%08x", *ptr.p_s32); break; case V4L2_CTRL_TYPE_INTEGER64: pr_cont("%lld", *ptr.p_s64); break; case V4L2_CTRL_TYPE_STRING: pr_cont("%s", ptr.p_char); break; case V4L2_CTRL_TYPE_U8: pr_cont("%u", (unsigned)*ptr.p_u8); break; case V4L2_CTRL_TYPE_U16: pr_cont("%u", (unsigned)*ptr.p_u16); break; case V4L2_CTRL_TYPE_U32: pr_cont("%u", (unsigned)*ptr.p_u32); break; case V4L2_CTRL_TYPE_AREA: pr_cont("%ux%u", ptr.p_area->width, ptr.p_area->height); break; case V4L2_CTRL_TYPE_H264_SPS: pr_cont("H264_SPS"); break; case V4L2_CTRL_TYPE_H264_PPS: pr_cont("H264_PPS"); break; case V4L2_CTRL_TYPE_H264_SCALING_MATRIX: pr_cont("H264_SCALING_MATRIX"); break; case V4L2_CTRL_TYPE_H264_SLICE_PARAMS: pr_cont("H264_SLICE_PARAMS"); break; case V4L2_CTRL_TYPE_H264_DECODE_PARAMS: pr_cont("H264_DECODE_PARAMS"); break; case V4L2_CTRL_TYPE_H264_PRED_WEIGHTS: pr_cont("H264_PRED_WEIGHTS"); break; case V4L2_CTRL_TYPE_FWHT_PARAMS: pr_cont("FWHT_PARAMS"); break; case V4L2_CTRL_TYPE_VP8_FRAME: pr_cont("VP8_FRAME"); break; case V4L2_CTRL_TYPE_HDR10_CLL_INFO: pr_cont("HDR10_CLL_INFO"); break; case V4L2_CTRL_TYPE_HDR10_MASTERING_DISPLAY: pr_cont("HDR10_MASTERING_DISPLAY"); break; case V4L2_CTRL_TYPE_MPEG2_QUANTISATION: pr_cont("MPEG2_QUANTISATION"); break; case V4L2_CTRL_TYPE_MPEG2_SEQUENCE: pr_cont("MPEG2_SEQUENCE"); break; case V4L2_CTRL_TYPE_MPEG2_PICTURE: pr_cont("MPEG2_PICTURE"); break; case V4L2_CTRL_TYPE_VP9_COMPRESSED_HDR: pr_cont("VP9_COMPRESSED_HDR"); break; case V4L2_CTRL_TYPE_VP9_FRAME: pr_cont("VP9_FRAME"); break; case V4L2_CTRL_TYPE_HEVC_SPS: pr_cont("HEVC_SPS"); break; case V4L2_CTRL_TYPE_HEVC_PPS: pr_cont("HEVC_PPS"); break; case V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS: pr_cont("HEVC_SLICE_PARAMS"); break; case V4L2_CTRL_TYPE_HEVC_SCALING_MATRIX: pr_cont("HEVC_SCALING_MATRIX"); break; case V4L2_CTRL_TYPE_HEVC_DECODE_PARAMS: pr_cont("HEVC_DECODE_PARAMS"); break; case V4L2_CTRL_TYPE_AV1_SEQUENCE: pr_cont("AV1_SEQUENCE"); break; case V4L2_CTRL_TYPE_AV1_TILE_GROUP_ENTRY: pr_cont("AV1_TILE_GROUP_ENTRY"); break; case V4L2_CTRL_TYPE_AV1_FRAME: pr_cont("AV1_FRAME"); break; case V4L2_CTRL_TYPE_AV1_FILM_GRAIN: pr_cont("AV1_FILM_GRAIN"); break; default: pr_cont("unknown type %d", ctrl->type); break; } } EXPORT_SYMBOL(v4l2_ctrl_type_op_log); /* * Round towards the closest legal value. Be careful when we are * close to the maximum range of the control type to prevent * wrap-arounds. */ #define ROUND_TO_RANGE(val, offset_type, ctrl) \ ({ \ offset_type offset; \ if ((ctrl)->maximum >= 0 && \ val >= (ctrl)->maximum - (s32)((ctrl)->step / 2)) \ val = (ctrl)->maximum; \ else \ val += (s32)((ctrl)->step / 2); \ val = clamp_t(typeof(val), val, \ (ctrl)->minimum, (ctrl)->maximum); \ offset = (val) - (ctrl)->minimum; \ offset = (ctrl)->step * (offset / (u32)(ctrl)->step); \ val = (ctrl)->minimum + offset; \ 0; \ }) /* Validate a new control */ #define zero_padding(s) \ memset(&(s).padding, 0, sizeof((s).padding)) #define zero_reserved(s) \ memset(&(s).reserved, 0, sizeof((s).reserved)) static int validate_vp9_lf_params(struct v4l2_vp9_loop_filter *lf) { unsigned int i; if (lf->flags & ~(V4L2_VP9_LOOP_FILTER_FLAG_DELTA_ENABLED | V4L2_VP9_LOOP_FILTER_FLAG_DELTA_UPDATE)) return -EINVAL; /* That all values are in the accepted range. 
*/ if (lf->level > GENMASK(5, 0)) return -EINVAL; if (lf->sharpness > GENMASK(2, 0)) return -EINVAL; for (i = 0; i < ARRAY_SIZE(lf->ref_deltas); i++) if (lf->ref_deltas[i] < -63 || lf->ref_deltas[i] > 63) return -EINVAL; for (i = 0; i < ARRAY_SIZE(lf->mode_deltas); i++) if (lf->mode_deltas[i] < -63 || lf->mode_deltas[i] > 63) return -EINVAL; zero_reserved(*lf); return 0; } static int validate_vp9_quant_params(struct v4l2_vp9_quantization *quant) { if (quant->delta_q_y_dc < -15 || quant->delta_q_y_dc > 15 || quant->delta_q_uv_dc < -15 || quant->delta_q_uv_dc > 15 || quant->delta_q_uv_ac < -15 || quant->delta_q_uv_ac > 15) return -EINVAL; zero_reserved(*quant); return 0; } static int validate_vp9_seg_params(struct v4l2_vp9_segmentation *seg) { unsigned int i, j; if (seg->flags & ~(V4L2_VP9_SEGMENTATION_FLAG_ENABLED | V4L2_VP9_SEGMENTATION_FLAG_UPDATE_MAP | V4L2_VP9_SEGMENTATION_FLAG_TEMPORAL_UPDATE | V4L2_VP9_SEGMENTATION_FLAG_UPDATE_DATA | V4L2_VP9_SEGMENTATION_FLAG_ABS_OR_DELTA_UPDATE)) return -EINVAL; for (i = 0; i < ARRAY_SIZE(seg->feature_enabled); i++) { if (seg->feature_enabled[i] & ~V4L2_VP9_SEGMENT_FEATURE_ENABLED_MASK) return -EINVAL; } for (i = 0; i < ARRAY_SIZE(seg->feature_data); i++) { static const int range[] = { 255, 63, 3, 0 }; for (j = 0; j < ARRAY_SIZE(seg->feature_data[j]); j++) { if (seg->feature_data[i][j] < -range[j] || seg->feature_data[i][j] > range[j]) return -EINVAL; } } zero_reserved(*seg); return 0; } static int validate_vp9_compressed_hdr(struct v4l2_ctrl_vp9_compressed_hdr *hdr) { if (hdr->tx_mode > V4L2_VP9_TX_MODE_SELECT) return -EINVAL; return 0; } static int validate_vp9_frame(struct v4l2_ctrl_vp9_frame *frame) { int ret; /* Make sure we're not passed invalid flags. */ if (frame->flags & ~(V4L2_VP9_FRAME_FLAG_KEY_FRAME | V4L2_VP9_FRAME_FLAG_SHOW_FRAME | V4L2_VP9_FRAME_FLAG_ERROR_RESILIENT | V4L2_VP9_FRAME_FLAG_INTRA_ONLY | V4L2_VP9_FRAME_FLAG_ALLOW_HIGH_PREC_MV | V4L2_VP9_FRAME_FLAG_REFRESH_FRAME_CTX | V4L2_VP9_FRAME_FLAG_PARALLEL_DEC_MODE | V4L2_VP9_FRAME_FLAG_X_SUBSAMPLING | V4L2_VP9_FRAME_FLAG_Y_SUBSAMPLING | V4L2_VP9_FRAME_FLAG_COLOR_RANGE_FULL_SWING)) return -EINVAL; if (frame->flags & V4L2_VP9_FRAME_FLAG_ERROR_RESILIENT && frame->flags & V4L2_VP9_FRAME_FLAG_REFRESH_FRAME_CTX) return -EINVAL; if (frame->profile > V4L2_VP9_PROFILE_MAX) return -EINVAL; if (frame->reset_frame_context > V4L2_VP9_RESET_FRAME_CTX_ALL) return -EINVAL; if (frame->frame_context_idx >= V4L2_VP9_NUM_FRAME_CTX) return -EINVAL; /* * Profiles 0 and 1 only support 8-bit depth, profiles 2 and 3 only 10 * and 12 bit depths. */ if ((frame->profile < 2 && frame->bit_depth != 8) || (frame->profile >= 2 && (frame->bit_depth != 10 && frame->bit_depth != 12))) return -EINVAL; /* Profile 0 and 2 only accept YUV 4:2:0. */ if ((frame->profile == 0 || frame->profile == 2) && (!(frame->flags & V4L2_VP9_FRAME_FLAG_X_SUBSAMPLING) || !(frame->flags & V4L2_VP9_FRAME_FLAG_Y_SUBSAMPLING))) return -EINVAL; /* Profile 1 and 3 only accept YUV 4:2:2, 4:4:0 and 4:4:4. */ if ((frame->profile == 1 || frame->profile == 3) && ((frame->flags & V4L2_VP9_FRAME_FLAG_X_SUBSAMPLING) && (frame->flags & V4L2_VP9_FRAME_FLAG_Y_SUBSAMPLING))) return -EINVAL; if (frame->interpolation_filter > V4L2_VP9_INTERP_FILTER_SWITCHABLE) return -EINVAL; /* * According to the spec, tile_cols_log2 shall be less than or equal * to 6. 
*/ if (frame->tile_cols_log2 > 6) return -EINVAL; if (frame->reference_mode > V4L2_VP9_REFERENCE_MODE_SELECT) return -EINVAL; ret = validate_vp9_lf_params(&frame->lf); if (ret) return ret; ret = validate_vp9_quant_params(&frame->quant); if (ret) return ret; ret = validate_vp9_seg_params(&frame->seg); if (ret) return ret; zero_reserved(*frame); return 0; } static int validate_av1_quantization(struct v4l2_av1_quantization *q) { if (q->flags > GENMASK(2, 0)) return -EINVAL; if (q->delta_q_y_dc < -64 || q->delta_q_y_dc > 63 || q->delta_q_u_dc < -64 || q->delta_q_u_dc > 63 || q->delta_q_v_dc < -64 || q->delta_q_v_dc > 63 || q->delta_q_u_ac < -64 || q->delta_q_u_ac > 63 || q->delta_q_v_ac < -64 || q->delta_q_v_ac > 63 || q->delta_q_res > GENMASK(1, 0)) return -EINVAL; if (q->qm_y > GENMASK(3, 0) || q->qm_u > GENMASK(3, 0) || q->qm_v > GENMASK(3, 0)) return -EINVAL; return 0; } static int validate_av1_segmentation(struct v4l2_av1_segmentation *s) { u32 i; u32 j; if (s->flags > GENMASK(4, 0)) return -EINVAL; for (i = 0; i < ARRAY_SIZE(s->feature_data); i++) { static const int segmentation_feature_signed[] = { 1, 1, 1, 1, 1, 0, 0, 0 }; static const int segmentation_feature_max[] = { 255, 63, 63, 63, 63, 7, 0, 0}; for (j = 0; j < ARRAY_SIZE(s->feature_data[j]); j++) { s32 limit = segmentation_feature_max[j]; if (segmentation_feature_signed[j]) { if (s->feature_data[i][j] < -limit || s->feature_data[i][j] > limit) return -EINVAL; } else { if (s->feature_data[i][j] < 0 || s->feature_data[i][j] > limit) return -EINVAL; } } } return 0; } static int validate_av1_loop_filter(struct v4l2_av1_loop_filter *lf) { u32 i; if (lf->flags > GENMASK(3, 0)) return -EINVAL; for (i = 0; i < ARRAY_SIZE(lf->level); i++) { if (lf->level[i] > GENMASK(5, 0)) return -EINVAL; } if (lf->sharpness > GENMASK(2, 0)) return -EINVAL; for (i = 0; i < ARRAY_SIZE(lf->ref_deltas); i++) { if (lf->ref_deltas[i] < -64 || lf->ref_deltas[i] > 63) return -EINVAL; } for (i = 0; i < ARRAY_SIZE(lf->mode_deltas); i++) { if (lf->mode_deltas[i] < -64 || lf->mode_deltas[i] > 63) return -EINVAL; } return 0; } static int validate_av1_cdef(struct v4l2_av1_cdef *cdef) { u32 i; if (cdef->damping_minus_3 > GENMASK(1, 0) || cdef->bits > GENMASK(1, 0)) return -EINVAL; for (i = 0; i < 1 << cdef->bits; i++) { if (cdef->y_pri_strength[i] > GENMASK(3, 0) || cdef->y_sec_strength[i] > 4 || cdef->uv_pri_strength[i] > GENMASK(3, 0) || cdef->uv_sec_strength[i] > 4) return -EINVAL; } return 0; } static int validate_av1_loop_restauration(struct v4l2_av1_loop_restoration *lr) { if (lr->lr_unit_shift > 3 || lr->lr_uv_shift > 1) return -EINVAL; return 0; } static int validate_av1_film_grain(struct v4l2_ctrl_av1_film_grain *fg) { u32 i; if (fg->flags > GENMASK(4, 0)) return -EINVAL; if (fg->film_grain_params_ref_idx > GENMASK(2, 0) || fg->num_y_points > 14 || fg->num_cb_points > 10 || fg->num_cr_points > GENMASK(3, 0) || fg->grain_scaling_minus_8 > GENMASK(1, 0) || fg->ar_coeff_lag > GENMASK(1, 0) || fg->ar_coeff_shift_minus_6 > GENMASK(1, 0) || fg->grain_scale_shift > GENMASK(1, 0)) return -EINVAL; if (!(fg->flags & V4L2_AV1_FILM_GRAIN_FLAG_APPLY_GRAIN)) return 0; for (i = 1; i < fg->num_y_points; i++) if (fg->point_y_value[i] <= fg->point_y_value[i - 1]) return -EINVAL; for (i = 1; i < fg->num_cb_points; i++) if (fg->point_cb_value[i] <= fg->point_cb_value[i - 1]) return -EINVAL; for (i = 1; i < fg->num_cr_points; i++) if (fg->point_cr_value[i] <= fg->point_cr_value[i - 1]) return -EINVAL; return 0; } static int validate_av1_frame(struct v4l2_ctrl_av1_frame *f) { int 
ret = 0; ret = validate_av1_quantization(&f->quantization); if (ret) return ret; ret = validate_av1_segmentation(&f->segmentation); if (ret) return ret; ret = validate_av1_loop_filter(&f->loop_filter); if (ret) return ret; ret = validate_av1_cdef(&f->cdef); if (ret) return ret; ret = validate_av1_loop_restauration(&f->loop_restoration); if (ret) return ret; if (f->flags & ~(V4L2_AV1_FRAME_FLAG_SHOW_FRAME | V4L2_AV1_FRAME_FLAG_SHOWABLE_FRAME | V4L2_AV1_FRAME_FLAG_ERROR_RESILIENT_MODE | V4L2_AV1_FRAME_FLAG_DISABLE_CDF_UPDATE | V4L2_AV1_FRAME_FLAG_ALLOW_SCREEN_CONTENT_TOOLS | V4L2_AV1_FRAME_FLAG_FORCE_INTEGER_MV | V4L2_AV1_FRAME_FLAG_ALLOW_INTRABC | V4L2_AV1_FRAME_FLAG_USE_SUPERRES | V4L2_AV1_FRAME_FLAG_ALLOW_HIGH_PRECISION_MV | V4L2_AV1_FRAME_FLAG_IS_MOTION_MODE_SWITCHABLE | V4L2_AV1_FRAME_FLAG_USE_REF_FRAME_MVS | V4L2_AV1_FRAME_FLAG_DISABLE_FRAME_END_UPDATE_CDF | V4L2_AV1_FRAME_FLAG_ALLOW_WARPED_MOTION | V4L2_AV1_FRAME_FLAG_REFERENCE_SELECT | V4L2_AV1_FRAME_FLAG_REDUCED_TX_SET | V4L2_AV1_FRAME_FLAG_SKIP_MODE_ALLOWED | V4L2_AV1_FRAME_FLAG_SKIP_MODE_PRESENT | V4L2_AV1_FRAME_FLAG_FRAME_SIZE_OVERRIDE | V4L2_AV1_FRAME_FLAG_BUFFER_REMOVAL_TIME_PRESENT | V4L2_AV1_FRAME_FLAG_FRAME_REFS_SHORT_SIGNALING)) return -EINVAL; if (f->superres_denom > GENMASK(2, 0) + 9) return -EINVAL; return 0; } static int validate_av1_sequence(struct v4l2_ctrl_av1_sequence *s) { if (s->flags & ~(V4L2_AV1_SEQUENCE_FLAG_STILL_PICTURE | V4L2_AV1_SEQUENCE_FLAG_USE_128X128_SUPERBLOCK | V4L2_AV1_SEQUENCE_FLAG_ENABLE_FILTER_INTRA | V4L2_AV1_SEQUENCE_FLAG_ENABLE_INTRA_EDGE_FILTER | V4L2_AV1_SEQUENCE_FLAG_ENABLE_INTERINTRA_COMPOUND | V4L2_AV1_SEQUENCE_FLAG_ENABLE_MASKED_COMPOUND | V4L2_AV1_SEQUENCE_FLAG_ENABLE_WARPED_MOTION | V4L2_AV1_SEQUENCE_FLAG_ENABLE_DUAL_FILTER | V4L2_AV1_SEQUENCE_FLAG_ENABLE_ORDER_HINT | V4L2_AV1_SEQUENCE_FLAG_ENABLE_JNT_COMP | V4L2_AV1_SEQUENCE_FLAG_ENABLE_REF_FRAME_MVS | V4L2_AV1_SEQUENCE_FLAG_ENABLE_SUPERRES | V4L2_AV1_SEQUENCE_FLAG_ENABLE_CDEF | V4L2_AV1_SEQUENCE_FLAG_ENABLE_RESTORATION | V4L2_AV1_SEQUENCE_FLAG_MONO_CHROME | V4L2_AV1_SEQUENCE_FLAG_COLOR_RANGE | V4L2_AV1_SEQUENCE_FLAG_SUBSAMPLING_X | V4L2_AV1_SEQUENCE_FLAG_SUBSAMPLING_Y | V4L2_AV1_SEQUENCE_FLAG_FILM_GRAIN_PARAMS_PRESENT | V4L2_AV1_SEQUENCE_FLAG_SEPARATE_UV_DELTA_Q)) return -EINVAL; if (s->seq_profile == 1 && s->flags & V4L2_AV1_SEQUENCE_FLAG_MONO_CHROME) return -EINVAL; /* reserved */ if (s->seq_profile > 2) return -EINVAL; /* TODO: PROFILES */ return 0; } /* * Compound controls validation requires setting unused fields/flags to zero * in order to properly detect unchanged controls with v4l2_ctrl_type_op_equal's * memcmp. 
*/ static int std_validate_compound(const struct v4l2_ctrl *ctrl, u32 idx, union v4l2_ctrl_ptr ptr) { struct v4l2_ctrl_mpeg2_sequence *p_mpeg2_sequence; struct v4l2_ctrl_mpeg2_picture *p_mpeg2_picture; struct v4l2_ctrl_vp8_frame *p_vp8_frame; struct v4l2_ctrl_fwht_params *p_fwht_params; struct v4l2_ctrl_h264_sps *p_h264_sps; struct v4l2_ctrl_h264_pps *p_h264_pps; struct v4l2_ctrl_h264_pred_weights *p_h264_pred_weights; struct v4l2_ctrl_h264_slice_params *p_h264_slice_params; struct v4l2_ctrl_h264_decode_params *p_h264_dec_params; struct v4l2_ctrl_hevc_sps *p_hevc_sps; struct v4l2_ctrl_hevc_pps *p_hevc_pps; struct v4l2_ctrl_hdr10_mastering_display *p_hdr10_mastering; struct v4l2_ctrl_hevc_decode_params *p_hevc_decode_params; struct v4l2_area *area; void *p = ptr.p + idx * ctrl->elem_size; unsigned int i; switch ((u32)ctrl->type) { case V4L2_CTRL_TYPE_MPEG2_SEQUENCE: p_mpeg2_sequence = p; switch (p_mpeg2_sequence->chroma_format) { case 1: /* 4:2:0 */ case 2: /* 4:2:2 */ case 3: /* 4:4:4 */ break; default: return -EINVAL; } break; case V4L2_CTRL_TYPE_MPEG2_PICTURE: p_mpeg2_picture = p; switch (p_mpeg2_picture->intra_dc_precision) { case 0: /* 8 bits */ case 1: /* 9 bits */ case 2: /* 10 bits */ case 3: /* 11 bits */ break; default: return -EINVAL; } switch (p_mpeg2_picture->picture_structure) { case V4L2_MPEG2_PIC_TOP_FIELD: case V4L2_MPEG2_PIC_BOTTOM_FIELD: case V4L2_MPEG2_PIC_FRAME: break; default: return -EINVAL; } switch (p_mpeg2_picture->picture_coding_type) { case V4L2_MPEG2_PIC_CODING_TYPE_I: case V4L2_MPEG2_PIC_CODING_TYPE_P: case V4L2_MPEG2_PIC_CODING_TYPE_B: break; default: return -EINVAL; } zero_reserved(*p_mpeg2_picture); break; case V4L2_CTRL_TYPE_MPEG2_QUANTISATION: break; case V4L2_CTRL_TYPE_FWHT_PARAMS: p_fwht_params = p; if (p_fwht_params->version < V4L2_FWHT_VERSION) return -EINVAL; if (!p_fwht_params->width || !p_fwht_params->height) return -EINVAL; break; case V4L2_CTRL_TYPE_H264_SPS: p_h264_sps = p; /* Some syntax elements are only conditionally valid */ if (p_h264_sps->pic_order_cnt_type != 0) { p_h264_sps->log2_max_pic_order_cnt_lsb_minus4 = 0; } else if (p_h264_sps->pic_order_cnt_type != 1) { p_h264_sps->num_ref_frames_in_pic_order_cnt_cycle = 0; p_h264_sps->offset_for_non_ref_pic = 0; p_h264_sps->offset_for_top_to_bottom_field = 0; memset(&p_h264_sps->offset_for_ref_frame, 0, sizeof(p_h264_sps->offset_for_ref_frame)); } if (!V4L2_H264_SPS_HAS_CHROMA_FORMAT(p_h264_sps)) { p_h264_sps->chroma_format_idc = 1; p_h264_sps->bit_depth_luma_minus8 = 0; p_h264_sps->bit_depth_chroma_minus8 = 0; p_h264_sps->flags &= ~V4L2_H264_SPS_FLAG_QPPRIME_Y_ZERO_TRANSFORM_BYPASS; if (p_h264_sps->chroma_format_idc < 3) p_h264_sps->flags &= ~V4L2_H264_SPS_FLAG_SEPARATE_COLOUR_PLANE; } if (p_h264_sps->flags & V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY) p_h264_sps->flags &= ~V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD; /* * Chroma 4:2:2 format require at least High 4:2:2 profile. * * The H264 specification and well-known parser implementations * use profile-idc values directly, as that is clearer and * less ambiguous. We do the same here. 
*/ if (p_h264_sps->profile_idc < 122 && p_h264_sps->chroma_format_idc > 1) return -EINVAL; /* Chroma 4:4:4 format require at least High 4:2:2 profile */ if (p_h264_sps->profile_idc < 244 && p_h264_sps->chroma_format_idc > 2) return -EINVAL; if (p_h264_sps->chroma_format_idc > 3) return -EINVAL; if (p_h264_sps->bit_depth_luma_minus8 > 6) return -EINVAL; if (p_h264_sps->bit_depth_chroma_minus8 > 6) return -EINVAL; if (p_h264_sps->log2_max_frame_num_minus4 > 12) return -EINVAL; if (p_h264_sps->pic_order_cnt_type > 2) return -EINVAL; if (p_h264_sps->log2_max_pic_order_cnt_lsb_minus4 > 12) return -EINVAL; if (p_h264_sps->max_num_ref_frames > V4L2_H264_REF_LIST_LEN) return -EINVAL; break; case V4L2_CTRL_TYPE_H264_PPS: p_h264_pps = p; if (p_h264_pps->num_slice_groups_minus1 > 7) return -EINVAL; if (p_h264_pps->num_ref_idx_l0_default_active_minus1 > (V4L2_H264_REF_LIST_LEN - 1)) return -EINVAL; if (p_h264_pps->num_ref_idx_l1_default_active_minus1 > (V4L2_H264_REF_LIST_LEN - 1)) return -EINVAL; if (p_h264_pps->weighted_bipred_idc > 2) return -EINVAL; /* * pic_init_qp_minus26 shall be in the range of * -(26 + QpBdOffset_y) to +25, inclusive, * where QpBdOffset_y is 6 * bit_depth_luma_minus8 */ if (p_h264_pps->pic_init_qp_minus26 < -62 || p_h264_pps->pic_init_qp_minus26 > 25) return -EINVAL; if (p_h264_pps->pic_init_qs_minus26 < -26 || p_h264_pps->pic_init_qs_minus26 > 25) return -EINVAL; if (p_h264_pps->chroma_qp_index_offset < -12 || p_h264_pps->chroma_qp_index_offset > 12) return -EINVAL; if (p_h264_pps->second_chroma_qp_index_offset < -12 || p_h264_pps->second_chroma_qp_index_offset > 12) return -EINVAL; break; case V4L2_CTRL_TYPE_H264_SCALING_MATRIX: break; case V4L2_CTRL_TYPE_H264_PRED_WEIGHTS: p_h264_pred_weights = p; if (p_h264_pred_weights->luma_log2_weight_denom > 7) return -EINVAL; if (p_h264_pred_weights->chroma_log2_weight_denom > 7) return -EINVAL; break; case V4L2_CTRL_TYPE_H264_SLICE_PARAMS: p_h264_slice_params = p; if (p_h264_slice_params->slice_type != V4L2_H264_SLICE_TYPE_B) p_h264_slice_params->flags &= ~V4L2_H264_SLICE_FLAG_DIRECT_SPATIAL_MV_PRED; if (p_h264_slice_params->colour_plane_id > 2) return -EINVAL; if (p_h264_slice_params->cabac_init_idc > 2) return -EINVAL; if (p_h264_slice_params->disable_deblocking_filter_idc > 2) return -EINVAL; if (p_h264_slice_params->slice_alpha_c0_offset_div2 < -6 || p_h264_slice_params->slice_alpha_c0_offset_div2 > 6) return -EINVAL; if (p_h264_slice_params->slice_beta_offset_div2 < -6 || p_h264_slice_params->slice_beta_offset_div2 > 6) return -EINVAL; if (p_h264_slice_params->slice_type == V4L2_H264_SLICE_TYPE_I || p_h264_slice_params->slice_type == V4L2_H264_SLICE_TYPE_SI) p_h264_slice_params->num_ref_idx_l0_active_minus1 = 0; if (p_h264_slice_params->slice_type != V4L2_H264_SLICE_TYPE_B) p_h264_slice_params->num_ref_idx_l1_active_minus1 = 0; if (p_h264_slice_params->num_ref_idx_l0_active_minus1 > (V4L2_H264_REF_LIST_LEN - 1)) return -EINVAL; if (p_h264_slice_params->num_ref_idx_l1_active_minus1 > (V4L2_H264_REF_LIST_LEN - 1)) return -EINVAL; zero_reserved(*p_h264_slice_params); break; case V4L2_CTRL_TYPE_H264_DECODE_PARAMS: p_h264_dec_params = p; if (p_h264_dec_params->nal_ref_idc > 3) return -EINVAL; for (i = 0; i < V4L2_H264_NUM_DPB_ENTRIES; i++) { struct v4l2_h264_dpb_entry *dpb_entry = &p_h264_dec_params->dpb[i]; zero_reserved(*dpb_entry); } zero_reserved(*p_h264_dec_params); break; case V4L2_CTRL_TYPE_VP8_FRAME: p_vp8_frame = p; switch (p_vp8_frame->num_dct_parts) { case 1: case 2: case 4: case 8: break; default: return -EINVAL; } 
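/*
 * The VP8 bitstream format (RFC 6386) splits the coefficient token data
 * into 2^log2_nbr_of_DCT_partitions partitions, so 1, 2, 4 and 8 are the
 * only valid partition counts accepted above.
 */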
zero_padding(p_vp8_frame->segment); zero_padding(p_vp8_frame->lf); zero_padding(p_vp8_frame->quant); zero_padding(p_vp8_frame->entropy); zero_padding(p_vp8_frame->coder_state); break; case V4L2_CTRL_TYPE_HEVC_SPS: p_hevc_sps = p; if (!(p_hevc_sps->flags & V4L2_HEVC_SPS_FLAG_PCM_ENABLED)) { p_hevc_sps->pcm_sample_bit_depth_luma_minus1 = 0; p_hevc_sps->pcm_sample_bit_depth_chroma_minus1 = 0; p_hevc_sps->log2_min_pcm_luma_coding_block_size_minus3 = 0; p_hevc_sps->log2_diff_max_min_pcm_luma_coding_block_size = 0; } if (!(p_hevc_sps->flags & V4L2_HEVC_SPS_FLAG_LONG_TERM_REF_PICS_PRESENT)) p_hevc_sps->num_long_term_ref_pics_sps = 0; break; case V4L2_CTRL_TYPE_HEVC_PPS: p_hevc_pps = p; if (!(p_hevc_pps->flags & V4L2_HEVC_PPS_FLAG_CU_QP_DELTA_ENABLED)) p_hevc_pps->diff_cu_qp_delta_depth = 0; if (!(p_hevc_pps->flags & V4L2_HEVC_PPS_FLAG_TILES_ENABLED)) { p_hevc_pps->num_tile_columns_minus1 = 0; p_hevc_pps->num_tile_rows_minus1 = 0; memset(&p_hevc_pps->column_width_minus1, 0, sizeof(p_hevc_pps->column_width_minus1)); memset(&p_hevc_pps->row_height_minus1, 0, sizeof(p_hevc_pps->row_height_minus1)); p_hevc_pps->flags &= ~V4L2_HEVC_PPS_FLAG_LOOP_FILTER_ACROSS_TILES_ENABLED; } if (p_hevc_pps->flags & V4L2_HEVC_PPS_FLAG_PPS_DISABLE_DEBLOCKING_FILTER) { p_hevc_pps->pps_beta_offset_div2 = 0; p_hevc_pps->pps_tc_offset_div2 = 0; } break; case V4L2_CTRL_TYPE_HEVC_DECODE_PARAMS: p_hevc_decode_params = p; if (p_hevc_decode_params->num_active_dpb_entries > V4L2_HEVC_DPB_ENTRIES_NUM_MAX) return -EINVAL; break; case V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS: break; case V4L2_CTRL_TYPE_HDR10_CLL_INFO: break; case V4L2_CTRL_TYPE_HDR10_MASTERING_DISPLAY: p_hdr10_mastering = p; for (i = 0; i < 3; ++i) { if (p_hdr10_mastering->display_primaries_x[i] < V4L2_HDR10_MASTERING_PRIMARIES_X_LOW || p_hdr10_mastering->display_primaries_x[i] > V4L2_HDR10_MASTERING_PRIMARIES_X_HIGH || p_hdr10_mastering->display_primaries_y[i] < V4L2_HDR10_MASTERING_PRIMARIES_Y_LOW || p_hdr10_mastering->display_primaries_y[i] > V4L2_HDR10_MASTERING_PRIMARIES_Y_HIGH) return -EINVAL; } if (p_hdr10_mastering->white_point_x < V4L2_HDR10_MASTERING_WHITE_POINT_X_LOW || p_hdr10_mastering->white_point_x > V4L2_HDR10_MASTERING_WHITE_POINT_X_HIGH || p_hdr10_mastering->white_point_y < V4L2_HDR10_MASTERING_WHITE_POINT_Y_LOW || p_hdr10_mastering->white_point_y > V4L2_HDR10_MASTERING_WHITE_POINT_Y_HIGH) return -EINVAL; if (p_hdr10_mastering->max_display_mastering_luminance < V4L2_HDR10_MASTERING_MAX_LUMA_LOW || p_hdr10_mastering->max_display_mastering_luminance > V4L2_HDR10_MASTERING_MAX_LUMA_HIGH || p_hdr10_mastering->min_display_mastering_luminance < V4L2_HDR10_MASTERING_MIN_LUMA_LOW || p_hdr10_mastering->min_display_mastering_luminance > V4L2_HDR10_MASTERING_MIN_LUMA_HIGH) return -EINVAL; /* The following restriction comes from ITU-T Rec. 
H.265 spec */ if (p_hdr10_mastering->max_display_mastering_luminance == V4L2_HDR10_MASTERING_MAX_LUMA_LOW && p_hdr10_mastering->min_display_mastering_luminance == V4L2_HDR10_MASTERING_MIN_LUMA_HIGH) return -EINVAL; break; case V4L2_CTRL_TYPE_HEVC_SCALING_MATRIX: break; case V4L2_CTRL_TYPE_VP9_COMPRESSED_HDR: return validate_vp9_compressed_hdr(p); case V4L2_CTRL_TYPE_VP9_FRAME: return validate_vp9_frame(p); case V4L2_CTRL_TYPE_AV1_FRAME: return validate_av1_frame(p); case V4L2_CTRL_TYPE_AV1_SEQUENCE: return validate_av1_sequence(p); case V4L2_CTRL_TYPE_AV1_TILE_GROUP_ENTRY: break; case V4L2_CTRL_TYPE_AV1_FILM_GRAIN: return validate_av1_film_grain(p); case V4L2_CTRL_TYPE_AREA: area = p; if (!area->width || !area->height) return -EINVAL; break; default: return -EINVAL; } return 0; } static int std_validate_elem(const struct v4l2_ctrl *ctrl, u32 idx, union v4l2_ctrl_ptr ptr) { size_t len; u64 offset; s64 val; switch ((u32)ctrl->type) { case V4L2_CTRL_TYPE_INTEGER: return ROUND_TO_RANGE(ptr.p_s32[idx], u32, ctrl); case V4L2_CTRL_TYPE_INTEGER64: /* * We can't use the ROUND_TO_RANGE define here due to * the u64 divide that needs special care. */ val = ptr.p_s64[idx]; if (ctrl->maximum >= 0 && val >= ctrl->maximum - (s64)(ctrl->step / 2)) val = ctrl->maximum; else val += (s64)(ctrl->step / 2); val = clamp_t(s64, val, ctrl->minimum, ctrl->maximum); offset = val - ctrl->minimum; do_div(offset, ctrl->step); ptr.p_s64[idx] = ctrl->minimum + offset * ctrl->step; return 0; case V4L2_CTRL_TYPE_U8: return ROUND_TO_RANGE(ptr.p_u8[idx], u8, ctrl); case V4L2_CTRL_TYPE_U16: return ROUND_TO_RANGE(ptr.p_u16[idx], u16, ctrl); case V4L2_CTRL_TYPE_U32: return ROUND_TO_RANGE(ptr.p_u32[idx], u32, ctrl); case V4L2_CTRL_TYPE_BOOLEAN: ptr.p_s32[idx] = !!ptr.p_s32[idx]; return 0; case V4L2_CTRL_TYPE_MENU: case V4L2_CTRL_TYPE_INTEGER_MENU: if (ptr.p_s32[idx] < ctrl->minimum || ptr.p_s32[idx] > ctrl->maximum) return -ERANGE; if (ptr.p_s32[idx] < BITS_PER_LONG_LONG && (ctrl->menu_skip_mask & BIT_ULL(ptr.p_s32[idx]))) return -EINVAL; if (ctrl->type == V4L2_CTRL_TYPE_MENU && ctrl->qmenu[ptr.p_s32[idx]][0] == '\0') return -EINVAL; return 0; case V4L2_CTRL_TYPE_BITMASK: ptr.p_s32[idx] &= ctrl->maximum; return 0; case V4L2_CTRL_TYPE_BUTTON: case V4L2_CTRL_TYPE_CTRL_CLASS: ptr.p_s32[idx] = 0; return 0; case V4L2_CTRL_TYPE_STRING: idx *= ctrl->elem_size; len = strlen(ptr.p_char + idx); if (len < ctrl->minimum) return -ERANGE; if ((len - (u32)ctrl->minimum) % (u32)ctrl->step) return -ERANGE; return 0; default: return std_validate_compound(ctrl, idx, ptr); } } int v4l2_ctrl_type_op_validate(const struct v4l2_ctrl *ctrl, union v4l2_ctrl_ptr ptr) { unsigned int i; int ret = 0; switch ((u32)ctrl->type) { case V4L2_CTRL_TYPE_U8: if (ctrl->maximum == 0xff && ctrl->minimum == 0 && ctrl->step == 1) return 0; break; case V4L2_CTRL_TYPE_U16: if (ctrl->maximum == 0xffff && ctrl->minimum == 0 && ctrl->step == 1) return 0; break; case V4L2_CTRL_TYPE_U32: if (ctrl->maximum == 0xffffffff && ctrl->minimum == 0 && ctrl->step == 1) return 0; break; case V4L2_CTRL_TYPE_BUTTON: case V4L2_CTRL_TYPE_CTRL_CLASS: memset(ptr.p_s32, 0, ctrl->new_elems * sizeof(s32)); return 0; } for (i = 0; !ret && i < ctrl->new_elems; i++) ret = std_validate_elem(ctrl, i, ptr); return ret; } EXPORT_SYMBOL(v4l2_ctrl_type_op_validate); static const struct v4l2_ctrl_type_ops std_type_ops = { .equal = v4l2_ctrl_type_op_equal, .init = v4l2_ctrl_type_op_init, .log = v4l2_ctrl_type_op_log, .validate = v4l2_ctrl_type_op_validate, }; void v4l2_ctrl_notify(struct v4l2_ctrl *ctrl, 
v4l2_ctrl_notify_fnc notify, void *priv) { if (!ctrl) return; if (!notify) { ctrl->call_notify = 0; return; } if (WARN_ON(ctrl->handler->notify && ctrl->handler->notify != notify)) return; ctrl->handler->notify = notify; ctrl->handler->notify_priv = priv; ctrl->call_notify = 1; } EXPORT_SYMBOL(v4l2_ctrl_notify); /* Copy the one value to another. */ static void ptr_to_ptr(struct v4l2_ctrl *ctrl, union v4l2_ctrl_ptr from, union v4l2_ctrl_ptr to, unsigned int elems) { if (ctrl == NULL) return; memcpy(to.p, from.p_const, elems * ctrl->elem_size); } /* Copy the new value to the current value. */ void new_to_cur(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, u32 ch_flags) { bool changed; if (ctrl == NULL) return; /* has_changed is set by cluster_changed */ changed = ctrl->has_changed; if (changed) { if (ctrl->is_dyn_array) ctrl->elems = ctrl->new_elems; ptr_to_ptr(ctrl, ctrl->p_new, ctrl->p_cur, ctrl->elems); } if (ch_flags & V4L2_EVENT_CTRL_CH_FLAGS) { /* Note: CH_FLAGS is only set for auto clusters. */ ctrl->flags &= ~(V4L2_CTRL_FLAG_INACTIVE | V4L2_CTRL_FLAG_VOLATILE); if (!is_cur_manual(ctrl->cluster[0])) { ctrl->flags |= V4L2_CTRL_FLAG_INACTIVE; if (ctrl->cluster[0]->has_volatiles) ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE; } fh = NULL; } if (changed || ch_flags) { /* If a control was changed that was not one of the controls modified by the application, then send the event to all. */ if (!ctrl->is_new) fh = NULL; send_event(fh, ctrl, (changed ? V4L2_EVENT_CTRL_CH_VALUE : 0) | ch_flags); if (ctrl->call_notify && changed && ctrl->handler->notify) ctrl->handler->notify(ctrl, ctrl->handler->notify_priv); } } /* Copy the current value to the new value */ void cur_to_new(struct v4l2_ctrl *ctrl) { if (ctrl == NULL) return; if (ctrl->is_dyn_array) ctrl->new_elems = ctrl->elems; ptr_to_ptr(ctrl, ctrl->p_cur, ctrl->p_new, ctrl->new_elems); } static bool req_alloc_array(struct v4l2_ctrl_ref *ref, u32 elems) { void *tmp; if (elems == ref->p_req_array_alloc_elems) return true; if (ref->ctrl->is_dyn_array && elems < ref->p_req_array_alloc_elems) return true; tmp = kvmalloc(elems * ref->ctrl->elem_size, GFP_KERNEL); if (!tmp) { ref->p_req_array_enomem = true; return false; } ref->p_req_array_enomem = false; kvfree(ref->p_req.p); ref->p_req.p = tmp; ref->p_req_array_alloc_elems = elems; return true; } /* Copy the new value to the request value */ void new_to_req(struct v4l2_ctrl_ref *ref) { struct v4l2_ctrl *ctrl; if (!ref) return; ctrl = ref->ctrl; if (ctrl->is_array && !req_alloc_array(ref, ctrl->new_elems)) return; ref->p_req_elems = ctrl->new_elems; ptr_to_ptr(ctrl, ctrl->p_new, ref->p_req, ref->p_req_elems); ref->p_req_valid = true; } /* Copy the current value to the request value */ void cur_to_req(struct v4l2_ctrl_ref *ref) { struct v4l2_ctrl *ctrl; if (!ref) return; ctrl = ref->ctrl; if (ctrl->is_array && !req_alloc_array(ref, ctrl->elems)) return; ref->p_req_elems = ctrl->elems; ptr_to_ptr(ctrl, ctrl->p_cur, ref->p_req, ctrl->elems); ref->p_req_valid = true; } /* Copy the request value to the new value */ int req_to_new(struct v4l2_ctrl_ref *ref) { struct v4l2_ctrl *ctrl; if (!ref) return 0; ctrl = ref->ctrl; /* * This control was never set in the request, so just use the current * value. 
*/ if (!ref->p_req_valid) { if (ctrl->is_dyn_array) ctrl->new_elems = ctrl->elems; ptr_to_ptr(ctrl, ctrl->p_cur, ctrl->p_new, ctrl->new_elems); return 0; } /* Not an array, so just copy the request value */ if (!ctrl->is_array) { ptr_to_ptr(ctrl, ref->p_req, ctrl->p_new, ctrl->new_elems); return 0; } /* Sanity check, should never happen */ if (WARN_ON(!ref->p_req_array_alloc_elems)) return -ENOMEM; if (!ctrl->is_dyn_array && ref->p_req_elems != ctrl->p_array_alloc_elems) return -ENOMEM; /* * Check if the number of elements in the request is more than the * elements in ctrl->p_array. If so, attempt to realloc ctrl->p_array. * Note that p_array is allocated with twice the number of elements * in the dynamic array since it has to store both the current and * new value of such a control. */ if (ref->p_req_elems > ctrl->p_array_alloc_elems) { unsigned int sz = ref->p_req_elems * ctrl->elem_size; void *old = ctrl->p_array; void *tmp = kvzalloc(2 * sz, GFP_KERNEL); if (!tmp) return -ENOMEM; memcpy(tmp, ctrl->p_new.p, ctrl->elems * ctrl->elem_size); memcpy(tmp + sz, ctrl->p_cur.p, ctrl->elems * ctrl->elem_size); ctrl->p_new.p = tmp; ctrl->p_cur.p = tmp + sz; ctrl->p_array = tmp; ctrl->p_array_alloc_elems = ref->p_req_elems; kvfree(old); } ctrl->new_elems = ref->p_req_elems; ptr_to_ptr(ctrl, ref->p_req, ctrl->p_new, ctrl->new_elems); return 0; } /* Control range checking */ int check_range(enum v4l2_ctrl_type type, s64 min, s64 max, u64 step, s64 def) { switch (type) { case V4L2_CTRL_TYPE_BOOLEAN: if (step != 1 || max > 1 || min < 0) return -ERANGE; fallthrough; case V4L2_CTRL_TYPE_U8: case V4L2_CTRL_TYPE_U16: case V4L2_CTRL_TYPE_U32: case V4L2_CTRL_TYPE_INTEGER: case V4L2_CTRL_TYPE_INTEGER64: if (step == 0 || min > max || def < min || def > max) return -ERANGE; return 0; case V4L2_CTRL_TYPE_BITMASK: if (step || min || !max || (def & ~max)) return -ERANGE; return 0; case V4L2_CTRL_TYPE_MENU: case V4L2_CTRL_TYPE_INTEGER_MENU: if (min > max || def < min || def > max || min < 0 || (step && max >= BITS_PER_LONG_LONG)) return -ERANGE; /* Note: step == menu_skip_mask for menu controls. So here we check if the default value is masked out. */ if (def < BITS_PER_LONG_LONG && (step & BIT_ULL(def))) return -EINVAL; return 0; case V4L2_CTRL_TYPE_STRING: if (min > max || min < 0 || step < 1 || def) return -ERANGE; return 0; default: return 0; } } /* Set the handler's error code if it wasn't set earlier already */ static inline int handler_set_err(struct v4l2_ctrl_handler *hdl, int err) { if (hdl->error == 0) hdl->error = err; return err; } /* Initialize the handler */ int v4l2_ctrl_handler_init_class(struct v4l2_ctrl_handler *hdl, unsigned nr_of_controls_hint, struct lock_class_key *key, const char *name) { mutex_init(&hdl->_lock); hdl->lock = &hdl->_lock; lockdep_set_class_and_name(hdl->lock, key, name); INIT_LIST_HEAD(&hdl->ctrls); INIT_LIST_HEAD(&hdl->ctrl_refs); hdl->nr_of_buckets = 1 + nr_of_controls_hint / 8; hdl->buckets = kvcalloc(hdl->nr_of_buckets, sizeof(hdl->buckets[0]), GFP_KERNEL); hdl->error = hdl->buckets ? 
0 : -ENOMEM; v4l2_ctrl_handler_init_request(hdl); return hdl->error; } EXPORT_SYMBOL(v4l2_ctrl_handler_init_class); /* Free all controls and control refs */ void v4l2_ctrl_handler_free(struct v4l2_ctrl_handler *hdl) { struct v4l2_ctrl_ref *ref, *next_ref; struct v4l2_ctrl *ctrl, *next_ctrl; struct v4l2_subscribed_event *sev, *next_sev; if (hdl == NULL || hdl->buckets == NULL) return; v4l2_ctrl_handler_free_request(hdl); mutex_lock(hdl->lock); /* Free all nodes */ list_for_each_entry_safe(ref, next_ref, &hdl->ctrl_refs, node) { list_del(&ref->node); if (ref->p_req_array_alloc_elems) kvfree(ref->p_req.p); kfree(ref); } /* Free all controls owned by the handler */ list_for_each_entry_safe(ctrl, next_ctrl, &hdl->ctrls, node) { list_del(&ctrl->node); list_for_each_entry_safe(sev, next_sev, &ctrl->ev_subs, node) list_del(&sev->node); kvfree(ctrl->p_array); kvfree(ctrl); } kvfree(hdl->buckets); hdl->buckets = NULL; hdl->cached = NULL; hdl->error = 0; mutex_unlock(hdl->lock); mutex_destroy(&hdl->_lock); } EXPORT_SYMBOL(v4l2_ctrl_handler_free); /* For backwards compatibility: V4L2_CID_PRIVATE_BASE should no longer be used except in G_CTRL, S_CTRL, QUERYCTRL and QUERYMENU when dealing with applications that do not use the NEXT_CTRL flag. We just find the n-th private user control. It's O(N), but that should not be an issue in this particular case. */ static struct v4l2_ctrl_ref *find_private_ref( struct v4l2_ctrl_handler *hdl, u32 id) { struct v4l2_ctrl_ref *ref; id -= V4L2_CID_PRIVATE_BASE; list_for_each_entry(ref, &hdl->ctrl_refs, node) { /* Search for private user controls that are compatible with VIDIOC_G/S_CTRL. */ if (V4L2_CTRL_ID2WHICH(ref->ctrl->id) == V4L2_CTRL_CLASS_USER && V4L2_CTRL_DRIVER_PRIV(ref->ctrl->id)) { if (!ref->ctrl->is_int) continue; if (id == 0) return ref; id--; } } return NULL; } /* Find a control with the given ID. */ struct v4l2_ctrl_ref *find_ref(struct v4l2_ctrl_handler *hdl, u32 id) { struct v4l2_ctrl_ref *ref; int bucket; id &= V4L2_CTRL_ID_MASK; /* Old-style private controls need special handling */ if (id >= V4L2_CID_PRIVATE_BASE) return find_private_ref(hdl, id); bucket = id % hdl->nr_of_buckets; /* Simple optimization: cache the last control found */ if (hdl->cached && hdl->cached->ctrl->id == id) return hdl->cached; /* Not in cache, search the hash */ ref = hdl->buckets ? hdl->buckets[bucket] : NULL; while (ref && ref->ctrl->id != id) ref = ref->next; if (ref) hdl->cached = ref; /* cache it! */ return ref; } /* Find a control with the given ID. Take the handler's lock first. */ struct v4l2_ctrl_ref *find_ref_lock(struct v4l2_ctrl_handler *hdl, u32 id) { struct v4l2_ctrl_ref *ref = NULL; if (hdl) { mutex_lock(hdl->lock); ref = find_ref(hdl, id); mutex_unlock(hdl->lock); } return ref; } /* Find a control with the given ID. */ struct v4l2_ctrl *v4l2_ctrl_find(struct v4l2_ctrl_handler *hdl, u32 id) { struct v4l2_ctrl_ref *ref = find_ref_lock(hdl, id); return ref ? ref->ctrl : NULL; } EXPORT_SYMBOL(v4l2_ctrl_find); /* Allocate a new v4l2_ctrl_ref and hook it into the handler. 
*/ int handler_new_ref(struct v4l2_ctrl_handler *hdl, struct v4l2_ctrl *ctrl, struct v4l2_ctrl_ref **ctrl_ref, bool from_other_dev, bool allocate_req) { struct v4l2_ctrl_ref *ref; struct v4l2_ctrl_ref *new_ref; u32 id = ctrl->id; u32 class_ctrl = V4L2_CTRL_ID2WHICH(id) | 1; int bucket = id % hdl->nr_of_buckets; /* which bucket to use */ unsigned int size_extra_req = 0; if (ctrl_ref) *ctrl_ref = NULL; /* * Automatically add the control class if it is not yet present and * the new control is not a compound control. */ if (ctrl->type < V4L2_CTRL_COMPOUND_TYPES && id != class_ctrl && find_ref_lock(hdl, class_ctrl) == NULL) if (!v4l2_ctrl_new_std(hdl, NULL, class_ctrl, 0, 0, 0, 0)) return hdl->error; if (hdl->error) return hdl->error; if (allocate_req && !ctrl->is_array) size_extra_req = ctrl->elems * ctrl->elem_size; new_ref = kzalloc(sizeof(*new_ref) + size_extra_req, GFP_KERNEL); if (!new_ref) return handler_set_err(hdl, -ENOMEM); new_ref->ctrl = ctrl; new_ref->from_other_dev = from_other_dev; if (size_extra_req) new_ref->p_req.p = &new_ref[1]; INIT_LIST_HEAD(&new_ref->node); mutex_lock(hdl->lock); /* Add immediately at the end of the list if the list is empty, or if the last element in the list has a lower ID. This ensures that when elements are added in ascending order the insertion is an O(1) operation. */ if (list_empty(&hdl->ctrl_refs) || id > node2id(hdl->ctrl_refs.prev)) { list_add_tail(&new_ref->node, &hdl->ctrl_refs); goto insert_in_hash; } /* Find insert position in sorted list */ list_for_each_entry(ref, &hdl->ctrl_refs, node) { if (ref->ctrl->id < id) continue; /* Don't add duplicates */ if (ref->ctrl->id == id) { kfree(new_ref); goto unlock; } list_add(&new_ref->node, ref->node.prev); break; } insert_in_hash: /* Insert the control node in the hash */ new_ref->next = hdl->buckets[bucket]; hdl->buckets[bucket] = new_ref; if (ctrl_ref) *ctrl_ref = new_ref; if (ctrl->handler == hdl) { /* By default each control starts in a cluster of its own. * new_ref->ctrl is basically a cluster array with one * element, so that's perfect to use as the cluster pointer. * But only do this for the handler that owns the control. 
*/ ctrl->cluster = &new_ref->ctrl; ctrl->ncontrols = 1; } unlock: mutex_unlock(hdl->lock); return 0; } /* Add a new control */ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl, const struct v4l2_ctrl_ops *ops, const struct v4l2_ctrl_type_ops *type_ops, u32 id, const char *name, enum v4l2_ctrl_type type, s64 min, s64 max, u64 step, s64 def, const u32 dims[V4L2_CTRL_MAX_DIMS], u32 elem_size, u32 flags, const char * const *qmenu, const s64 *qmenu_int, const union v4l2_ctrl_ptr p_def, void *priv) { struct v4l2_ctrl *ctrl; unsigned sz_extra; unsigned nr_of_dims = 0; unsigned elems = 1; bool is_array; unsigned tot_ctrl_size; void *data; int err; if (hdl->error) return NULL; while (dims && dims[nr_of_dims]) { elems *= dims[nr_of_dims]; nr_of_dims++; if (nr_of_dims == V4L2_CTRL_MAX_DIMS) break; } is_array = nr_of_dims > 0; /* Prefill elem_size for all types handled by std_type_ops */ switch ((u32)type) { case V4L2_CTRL_TYPE_INTEGER64: elem_size = sizeof(s64); break; case V4L2_CTRL_TYPE_STRING: elem_size = max + 1; break; case V4L2_CTRL_TYPE_U8: elem_size = sizeof(u8); break; case V4L2_CTRL_TYPE_U16: elem_size = sizeof(u16); break; case V4L2_CTRL_TYPE_U32: elem_size = sizeof(u32); break; case V4L2_CTRL_TYPE_MPEG2_SEQUENCE: elem_size = sizeof(struct v4l2_ctrl_mpeg2_sequence); break; case V4L2_CTRL_TYPE_MPEG2_PICTURE: elem_size = sizeof(struct v4l2_ctrl_mpeg2_picture); break; case V4L2_CTRL_TYPE_MPEG2_QUANTISATION: elem_size = sizeof(struct v4l2_ctrl_mpeg2_quantisation); break; case V4L2_CTRL_TYPE_FWHT_PARAMS: elem_size = sizeof(struct v4l2_ctrl_fwht_params); break; case V4L2_CTRL_TYPE_H264_SPS: elem_size = sizeof(struct v4l2_ctrl_h264_sps); break; case V4L2_CTRL_TYPE_H264_PPS: elem_size = sizeof(struct v4l2_ctrl_h264_pps); break; case V4L2_CTRL_TYPE_H264_SCALING_MATRIX: elem_size = sizeof(struct v4l2_ctrl_h264_scaling_matrix); break; case V4L2_CTRL_TYPE_H264_SLICE_PARAMS: elem_size = sizeof(struct v4l2_ctrl_h264_slice_params); break; case V4L2_CTRL_TYPE_H264_DECODE_PARAMS: elem_size = sizeof(struct v4l2_ctrl_h264_decode_params); break; case V4L2_CTRL_TYPE_H264_PRED_WEIGHTS: elem_size = sizeof(struct v4l2_ctrl_h264_pred_weights); break; case V4L2_CTRL_TYPE_VP8_FRAME: elem_size = sizeof(struct v4l2_ctrl_vp8_frame); break; case V4L2_CTRL_TYPE_HEVC_SPS: elem_size = sizeof(struct v4l2_ctrl_hevc_sps); break; case V4L2_CTRL_TYPE_HEVC_PPS: elem_size = sizeof(struct v4l2_ctrl_hevc_pps); break; case V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS: elem_size = sizeof(struct v4l2_ctrl_hevc_slice_params); break; case V4L2_CTRL_TYPE_HEVC_SCALING_MATRIX: elem_size = sizeof(struct v4l2_ctrl_hevc_scaling_matrix); break; case V4L2_CTRL_TYPE_HEVC_DECODE_PARAMS: elem_size = sizeof(struct v4l2_ctrl_hevc_decode_params); break; case V4L2_CTRL_TYPE_HDR10_CLL_INFO: elem_size = sizeof(struct v4l2_ctrl_hdr10_cll_info); break; case V4L2_CTRL_TYPE_HDR10_MASTERING_DISPLAY: elem_size = sizeof(struct v4l2_ctrl_hdr10_mastering_display); break; case V4L2_CTRL_TYPE_VP9_COMPRESSED_HDR: elem_size = sizeof(struct v4l2_ctrl_vp9_compressed_hdr); break; case V4L2_CTRL_TYPE_VP9_FRAME: elem_size = sizeof(struct v4l2_ctrl_vp9_frame); break; case V4L2_CTRL_TYPE_AV1_SEQUENCE: elem_size = sizeof(struct v4l2_ctrl_av1_sequence); break; case V4L2_CTRL_TYPE_AV1_TILE_GROUP_ENTRY: elem_size = sizeof(struct v4l2_ctrl_av1_tile_group_entry); break; case V4L2_CTRL_TYPE_AV1_FRAME: elem_size = sizeof(struct v4l2_ctrl_av1_frame); break; case V4L2_CTRL_TYPE_AV1_FILM_GRAIN: elem_size = sizeof(struct v4l2_ctrl_av1_film_grain); break; case 
V4L2_CTRL_TYPE_AREA: elem_size = sizeof(struct v4l2_area); break; default: if (type < V4L2_CTRL_COMPOUND_TYPES) elem_size = sizeof(s32); break; } /* Sanity checks */ if (id == 0 || name == NULL || !elem_size || id >= V4L2_CID_PRIVATE_BASE || (type == V4L2_CTRL_TYPE_MENU && qmenu == NULL) || (type == V4L2_CTRL_TYPE_INTEGER_MENU && qmenu_int == NULL)) { handler_set_err(hdl, -ERANGE); return NULL; } err = check_range(type, min, max, step, def); if (err) { handler_set_err(hdl, err); return NULL; } if (is_array && (type == V4L2_CTRL_TYPE_BUTTON || type == V4L2_CTRL_TYPE_CTRL_CLASS)) { handler_set_err(hdl, -EINVAL); return NULL; } if (flags & V4L2_CTRL_FLAG_DYNAMIC_ARRAY) { /* * For now only support this for one-dimensional arrays only. * * This can be relaxed in the future, but this will * require more effort. */ if (nr_of_dims != 1) { handler_set_err(hdl, -EINVAL); return NULL; } /* Start with just 1 element */ elems = 1; } tot_ctrl_size = elem_size * elems; sz_extra = 0; if (type == V4L2_CTRL_TYPE_BUTTON) flags |= V4L2_CTRL_FLAG_WRITE_ONLY | V4L2_CTRL_FLAG_EXECUTE_ON_WRITE; else if (type == V4L2_CTRL_TYPE_CTRL_CLASS) flags |= V4L2_CTRL_FLAG_READ_ONLY; else if (!is_array && (type == V4L2_CTRL_TYPE_INTEGER64 || type == V4L2_CTRL_TYPE_STRING || type >= V4L2_CTRL_COMPOUND_TYPES)) sz_extra += 2 * tot_ctrl_size; if (type >= V4L2_CTRL_COMPOUND_TYPES && p_def.p_const) sz_extra += elem_size; ctrl = kvzalloc(sizeof(*ctrl) + sz_extra, GFP_KERNEL); if (ctrl == NULL) { handler_set_err(hdl, -ENOMEM); return NULL; } INIT_LIST_HEAD(&ctrl->node); INIT_LIST_HEAD(&ctrl->ev_subs); ctrl->handler = hdl; ctrl->ops = ops; ctrl->type_ops = type_ops ? type_ops : &std_type_ops; ctrl->id = id; ctrl->name = name; ctrl->type = type; ctrl->flags = flags; ctrl->minimum = min; ctrl->maximum = max; ctrl->step = step; ctrl->default_value = def; ctrl->is_string = !is_array && type == V4L2_CTRL_TYPE_STRING; ctrl->is_ptr = is_array || type >= V4L2_CTRL_COMPOUND_TYPES || ctrl->is_string; ctrl->is_int = !ctrl->is_ptr && type != V4L2_CTRL_TYPE_INTEGER64; ctrl->is_array = is_array; ctrl->is_dyn_array = !!(flags & V4L2_CTRL_FLAG_DYNAMIC_ARRAY); ctrl->elems = elems; ctrl->new_elems = elems; ctrl->nr_of_dims = nr_of_dims; if (nr_of_dims) memcpy(ctrl->dims, dims, nr_of_dims * sizeof(dims[0])); ctrl->elem_size = elem_size; if (type == V4L2_CTRL_TYPE_MENU) ctrl->qmenu = qmenu; else if (type == V4L2_CTRL_TYPE_INTEGER_MENU) ctrl->qmenu_int = qmenu_int; ctrl->priv = priv; ctrl->cur.val = ctrl->val = def; data = &ctrl[1]; if (ctrl->is_array) { ctrl->p_array_alloc_elems = elems; ctrl->p_array = kvzalloc(2 * elems * elem_size, GFP_KERNEL); if (!ctrl->p_array) { kvfree(ctrl); return NULL; } data = ctrl->p_array; } if (!ctrl->is_int) { ctrl->p_new.p = data; ctrl->p_cur.p = data + tot_ctrl_size; } else { ctrl->p_new.p = &ctrl->val; ctrl->p_cur.p = &ctrl->cur.val; } if (type >= V4L2_CTRL_COMPOUND_TYPES && p_def.p_const) { if (ctrl->is_array) ctrl->p_def.p = &ctrl[1]; else ctrl->p_def.p = ctrl->p_cur.p + tot_ctrl_size; memcpy(ctrl->p_def.p, p_def.p_const, elem_size); } ctrl->type_ops->init(ctrl, 0, ctrl->p_cur); cur_to_new(ctrl); if (handler_new_ref(hdl, ctrl, NULL, false, false)) { kvfree(ctrl->p_array); kvfree(ctrl); return NULL; } mutex_lock(hdl->lock); list_add_tail(&ctrl->node, &hdl->ctrls); mutex_unlock(hdl->lock); return ctrl; } struct v4l2_ctrl *v4l2_ctrl_new_custom(struct v4l2_ctrl_handler *hdl, const struct v4l2_ctrl_config *cfg, void *priv) { bool is_menu; struct v4l2_ctrl *ctrl; const char *name = cfg->name; const char * const *qmenu = 
cfg->qmenu; const s64 *qmenu_int = cfg->qmenu_int; enum v4l2_ctrl_type type = cfg->type; u32 flags = cfg->flags; s64 min = cfg->min; s64 max = cfg->max; u64 step = cfg->step; s64 def = cfg->def; if (name == NULL) v4l2_ctrl_fill(cfg->id, &name, &type, &min, &max, &step, &def, &flags); is_menu = (type == V4L2_CTRL_TYPE_MENU || type == V4L2_CTRL_TYPE_INTEGER_MENU); if (is_menu) WARN_ON(step); else WARN_ON(cfg->menu_skip_mask); if (type == V4L2_CTRL_TYPE_MENU && !qmenu) { qmenu = v4l2_ctrl_get_menu(cfg->id); } else if (type == V4L2_CTRL_TYPE_INTEGER_MENU && !qmenu_int) { handler_set_err(hdl, -EINVAL); return NULL; } ctrl = v4l2_ctrl_new(hdl, cfg->ops, cfg->type_ops, cfg->id, name, type, min, max, is_menu ? cfg->menu_skip_mask : step, def, cfg->dims, cfg->elem_size, flags, qmenu, qmenu_int, cfg->p_def, priv); if (ctrl) ctrl->is_private = cfg->is_private; return ctrl; } EXPORT_SYMBOL(v4l2_ctrl_new_custom); /* Helper function for standard non-menu controls */ struct v4l2_ctrl *v4l2_ctrl_new_std(struct v4l2_ctrl_handler *hdl, const struct v4l2_ctrl_ops *ops, u32 id, s64 min, s64 max, u64 step, s64 def) { const char *name; enum v4l2_ctrl_type type; u32 flags; v4l2_ctrl_fill(id, &name, &type, &min, &max, &step, &def, &flags); if (type == V4L2_CTRL_TYPE_MENU || type == V4L2_CTRL_TYPE_INTEGER_MENU || type >= V4L2_CTRL_COMPOUND_TYPES) { handler_set_err(hdl, -EINVAL); return NULL; } return v4l2_ctrl_new(hdl, ops, NULL, id, name, type, min, max, step, def, NULL, 0, flags, NULL, NULL, ptr_null, NULL); } EXPORT_SYMBOL(v4l2_ctrl_new_std); /* Helper function for standard menu controls */ struct v4l2_ctrl *v4l2_ctrl_new_std_menu(struct v4l2_ctrl_handler *hdl, const struct v4l2_ctrl_ops *ops, u32 id, u8 _max, u64 mask, u8 _def) { const char * const *qmenu = NULL; const s64 *qmenu_int = NULL; unsigned int qmenu_int_len = 0; const char *name; enum v4l2_ctrl_type type; s64 min; s64 max = _max; s64 def = _def; u64 step; u32 flags; v4l2_ctrl_fill(id, &name, &type, &min, &max, &step, &def, &flags); if (type == V4L2_CTRL_TYPE_MENU) qmenu = v4l2_ctrl_get_menu(id); else if (type == V4L2_CTRL_TYPE_INTEGER_MENU) qmenu_int = v4l2_ctrl_get_int_menu(id, &qmenu_int_len); if ((!qmenu && !qmenu_int) || (qmenu_int && max >= qmenu_int_len)) { handler_set_err(hdl, -EINVAL); return NULL; } return v4l2_ctrl_new(hdl, ops, NULL, id, name, type, 0, max, mask, def, NULL, 0, flags, qmenu, qmenu_int, ptr_null, NULL); } EXPORT_SYMBOL(v4l2_ctrl_new_std_menu); /* Helper function for standard menu controls with driver defined menu */ struct v4l2_ctrl *v4l2_ctrl_new_std_menu_items(struct v4l2_ctrl_handler *hdl, const struct v4l2_ctrl_ops *ops, u32 id, u8 _max, u64 mask, u8 _def, const char * const *qmenu) { enum v4l2_ctrl_type type; const char *name; u32 flags; u64 step; s64 min; s64 max = _max; s64 def = _def; /* v4l2_ctrl_new_std_menu_items() should only be called for * standard controls without a standard menu. 
*/ if (v4l2_ctrl_get_menu(id)) { handler_set_err(hdl, -EINVAL); return NULL; } v4l2_ctrl_fill(id, &name, &type, &min, &max, &step, &def, &flags); if (type != V4L2_CTRL_TYPE_MENU || qmenu == NULL) { handler_set_err(hdl, -EINVAL); return NULL; } return v4l2_ctrl_new(hdl, ops, NULL, id, name, type, 0, max, mask, def, NULL, 0, flags, qmenu, NULL, ptr_null, NULL); } EXPORT_SYMBOL(v4l2_ctrl_new_std_menu_items); /* Helper function for standard compound controls */ struct v4l2_ctrl *v4l2_ctrl_new_std_compound(struct v4l2_ctrl_handler *hdl, const struct v4l2_ctrl_ops *ops, u32 id, const union v4l2_ctrl_ptr p_def) { const char *name; enum v4l2_ctrl_type type; u32 flags; s64 min, max, step, def; v4l2_ctrl_fill(id, &name, &type, &min, &max, &step, &def, &flags); if (type < V4L2_CTRL_COMPOUND_TYPES) { handler_set_err(hdl, -EINVAL); return NULL; } return v4l2_ctrl_new(hdl, ops, NULL, id, name, type, min, max, step, def, NULL, 0, flags, NULL, NULL, p_def, NULL); } EXPORT_SYMBOL(v4l2_ctrl_new_std_compound); /* Helper function for standard integer menu controls */ struct v4l2_ctrl *v4l2_ctrl_new_int_menu(struct v4l2_ctrl_handler *hdl, const struct v4l2_ctrl_ops *ops, u32 id, u8 _max, u8 _def, const s64 *qmenu_int) { const char *name; enum v4l2_ctrl_type type; s64 min; u64 step; s64 max = _max; s64 def = _def; u32 flags; v4l2_ctrl_fill(id, &name, &type, &min, &max, &step, &def, &flags); if (type != V4L2_CTRL_TYPE_INTEGER_MENU) { handler_set_err(hdl, -EINVAL); return NULL; } return v4l2_ctrl_new(hdl, ops, NULL, id, name, type, 0, max, 0, def, NULL, 0, flags, NULL, qmenu_int, ptr_null, NULL); } EXPORT_SYMBOL(v4l2_ctrl_new_int_menu); /* Add the controls from another handler to our own. */ int v4l2_ctrl_add_handler(struct v4l2_ctrl_handler *hdl, struct v4l2_ctrl_handler *add, bool (*filter)(const struct v4l2_ctrl *ctrl), bool from_other_dev) { struct v4l2_ctrl_ref *ref; int ret = 0; /* Do nothing if either handler is NULL or if they are the same */ if (!hdl || !add || hdl == add) return 0; if (hdl->error) return hdl->error; mutex_lock(add->lock); list_for_each_entry(ref, &add->ctrl_refs, node) { struct v4l2_ctrl *ctrl = ref->ctrl; /* Skip handler-private controls. 
*/ if (ctrl->is_private) continue; /* And control classes */ if (ctrl->type == V4L2_CTRL_TYPE_CTRL_CLASS) continue; /* Filter any unwanted controls */ if (filter && !filter(ctrl)) continue; ret = handler_new_ref(hdl, ctrl, NULL, from_other_dev, false); if (ret) break; } mutex_unlock(add->lock); return ret; } EXPORT_SYMBOL(v4l2_ctrl_add_handler); bool v4l2_ctrl_radio_filter(const struct v4l2_ctrl *ctrl) { if (V4L2_CTRL_ID2WHICH(ctrl->id) == V4L2_CTRL_CLASS_FM_TX) return true; if (V4L2_CTRL_ID2WHICH(ctrl->id) == V4L2_CTRL_CLASS_FM_RX) return true; switch (ctrl->id) { case V4L2_CID_AUDIO_MUTE: case V4L2_CID_AUDIO_VOLUME: case V4L2_CID_AUDIO_BALANCE: case V4L2_CID_AUDIO_BASS: case V4L2_CID_AUDIO_TREBLE: case V4L2_CID_AUDIO_LOUDNESS: return true; default: break; } return false; } EXPORT_SYMBOL(v4l2_ctrl_radio_filter); /* Cluster controls */ void v4l2_ctrl_cluster(unsigned ncontrols, struct v4l2_ctrl **controls) { bool has_volatiles = false; int i; /* The first control is the master control and it must not be NULL */ if (WARN_ON(ncontrols == 0 || controls[0] == NULL)) return; for (i = 0; i < ncontrols; i++) { if (controls[i]) { controls[i]->cluster = controls; controls[i]->ncontrols = ncontrols; if (controls[i]->flags & V4L2_CTRL_FLAG_VOLATILE) has_volatiles = true; } } controls[0]->has_volatiles = has_volatiles; } EXPORT_SYMBOL(v4l2_ctrl_cluster); void v4l2_ctrl_auto_cluster(unsigned ncontrols, struct v4l2_ctrl **controls, u8 manual_val, bool set_volatile) { struct v4l2_ctrl *master = controls[0]; u32 flag = 0; int i; v4l2_ctrl_cluster(ncontrols, controls); WARN_ON(ncontrols <= 1); WARN_ON(manual_val < master->minimum || manual_val > master->maximum); WARN_ON(set_volatile && !has_op(master, g_volatile_ctrl)); master->is_auto = true; master->has_volatiles = set_volatile; master->manual_mode_value = manual_val; master->flags |= V4L2_CTRL_FLAG_UPDATE; if (!is_cur_manual(master)) flag = V4L2_CTRL_FLAG_INACTIVE | (set_volatile ? V4L2_CTRL_FLAG_VOLATILE : 0); for (i = 1; i < ncontrols; i++) if (controls[i]) controls[i]->flags |= flag; } EXPORT_SYMBOL(v4l2_ctrl_auto_cluster); /* * Obtain the current volatile values of an autocluster and mark them * as new. */ void update_from_auto_cluster(struct v4l2_ctrl *master) { int i; for (i = 1; i < master->ncontrols; i++) cur_to_new(master->cluster[i]); if (!call_op(master, g_volatile_ctrl)) for (i = 1; i < master->ncontrols; i++) if (master->cluster[i]) master->cluster[i]->is_new = 1; } /* * Return non-zero if one or more of the controls in the cluster has a new * value that differs from the current value. */ static int cluster_changed(struct v4l2_ctrl *master) { bool changed = false; int i; for (i = 0; i < master->ncontrols; i++) { struct v4l2_ctrl *ctrl = master->cluster[i]; bool ctrl_changed = false; if (!ctrl) continue; if (ctrl->flags & V4L2_CTRL_FLAG_EXECUTE_ON_WRITE) { changed = true; ctrl_changed = true; } /* * Set has_changed to false to avoid generating * the event V4L2_EVENT_CTRL_CH_VALUE */ if (ctrl->flags & V4L2_CTRL_FLAG_VOLATILE) { ctrl->has_changed = false; continue; } if (ctrl->elems != ctrl->new_elems) ctrl_changed = true; if (!ctrl_changed) ctrl_changed = !ctrl->type_ops->equal(ctrl, ctrl->p_cur, ctrl->p_new); ctrl->has_changed = ctrl_changed; changed |= ctrl->has_changed; } return changed; } /* * Core function that calls try/s_ctrl and ensures that the new value is * copied to the current value on a set. * Must be called with ctrl->handler->lock held. 
*/ int try_or_set_cluster(struct v4l2_fh *fh, struct v4l2_ctrl *master, bool set, u32 ch_flags) { bool update_flag; int ret; int i; /* * Go through the cluster and either validate the new value or * (if no new value was set), copy the current value to the new * value, ensuring a consistent view for the control ops when * called. */ for (i = 0; i < master->ncontrols; i++) { struct v4l2_ctrl *ctrl = master->cluster[i]; if (!ctrl) continue; if (!ctrl->is_new) { cur_to_new(ctrl); continue; } /* * Check again: it may have changed since the * previous check in try_or_set_ext_ctrls(). */ if (set && (ctrl->flags & V4L2_CTRL_FLAG_GRABBED)) return -EBUSY; } ret = call_op(master, try_ctrl); /* Don't set if there is no change */ if (ret || !set || !cluster_changed(master)) return ret; ret = call_op(master, s_ctrl); if (ret) return ret; /* If OK, then make the new values permanent. */ update_flag = is_cur_manual(master) != is_new_manual(master); for (i = 0; i < master->ncontrols; i++) { /* * If we switch from auto to manual mode, and this cluster * contains volatile controls, then all non-master controls * have to be marked as changed. The 'new' value contains * the volatile value (obtained by update_from_auto_cluster), * which now has to become the current value. */ if (i && update_flag && is_new_manual(master) && master->has_volatiles && master->cluster[i]) master->cluster[i]->has_changed = true; new_to_cur(fh, master->cluster[i], ch_flags | ((update_flag && i > 0) ? V4L2_EVENT_CTRL_CH_FLAGS : 0)); } return 0; } /* Activate/deactivate a control. */ void v4l2_ctrl_activate(struct v4l2_ctrl *ctrl, bool active) { /* invert since the actual flag is called 'inactive' */ bool inactive = !active; bool old; if (ctrl == NULL) return; if (inactive) /* set V4L2_CTRL_FLAG_INACTIVE */ old = test_and_set_bit(4, &ctrl->flags); else /* clear V4L2_CTRL_FLAG_INACTIVE */ old = test_and_clear_bit(4, &ctrl->flags); if (old != inactive) send_event(NULL, ctrl, V4L2_EVENT_CTRL_CH_FLAGS); } EXPORT_SYMBOL(v4l2_ctrl_activate); void __v4l2_ctrl_grab(struct v4l2_ctrl *ctrl, bool grabbed) { bool old; if (ctrl == NULL) return; lockdep_assert_held(ctrl->handler->lock); if (grabbed) /* set V4L2_CTRL_FLAG_GRABBED */ old = test_and_set_bit(1, &ctrl->flags); else /* clear V4L2_CTRL_FLAG_GRABBED */ old = test_and_clear_bit(1, &ctrl->flags); if (old != grabbed) send_event(NULL, ctrl, V4L2_EVENT_CTRL_CH_FLAGS); } EXPORT_SYMBOL(__v4l2_ctrl_grab); /* Call s_ctrl for all controls owned by the handler */ int __v4l2_ctrl_handler_setup(struct v4l2_ctrl_handler *hdl) { struct v4l2_ctrl *ctrl; int ret = 0; if (hdl == NULL) return 0; lockdep_assert_held(hdl->lock); list_for_each_entry(ctrl, &hdl->ctrls, node) ctrl->done = false; list_for_each_entry(ctrl, &hdl->ctrls, node) { struct v4l2_ctrl *master = ctrl->cluster[0]; int i; /* Skip if this control was already handled by a cluster. */ /* Skip button controls and read-only controls. 
*/ if (ctrl->done || ctrl->type == V4L2_CTRL_TYPE_BUTTON || (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY)) continue; for (i = 0; i < master->ncontrols; i++) { if (master->cluster[i]) { cur_to_new(master->cluster[i]); master->cluster[i]->is_new = 1; master->cluster[i]->done = true; } } ret = call_op(master, s_ctrl); if (ret) break; } return ret; } EXPORT_SYMBOL_GPL(__v4l2_ctrl_handler_setup); int v4l2_ctrl_handler_setup(struct v4l2_ctrl_handler *hdl) { int ret; if (hdl == NULL) return 0; mutex_lock(hdl->lock); ret = __v4l2_ctrl_handler_setup(hdl); mutex_unlock(hdl->lock); return ret; } EXPORT_SYMBOL(v4l2_ctrl_handler_setup); /* Log the control name and value */ static void log_ctrl(const struct v4l2_ctrl *ctrl, const char *prefix, const char *colon) { if (ctrl->flags & (V4L2_CTRL_FLAG_DISABLED | V4L2_CTRL_FLAG_WRITE_ONLY)) return; if (ctrl->type == V4L2_CTRL_TYPE_CTRL_CLASS) return; pr_info("%s%s%s: ", prefix, colon, ctrl->name); ctrl->type_ops->log(ctrl); if (ctrl->flags & (V4L2_CTRL_FLAG_INACTIVE | V4L2_CTRL_FLAG_GRABBED | V4L2_CTRL_FLAG_VOLATILE)) { if (ctrl->flags & V4L2_CTRL_FLAG_INACTIVE) pr_cont(" inactive"); if (ctrl->flags & V4L2_CTRL_FLAG_GRABBED) pr_cont(" grabbed"); if (ctrl->flags & V4L2_CTRL_FLAG_VOLATILE) pr_cont(" volatile"); } pr_cont("\n"); } /* Log all controls owned by the handler */ void v4l2_ctrl_handler_log_status(struct v4l2_ctrl_handler *hdl, const char *prefix) { struct v4l2_ctrl *ctrl; const char *colon = ""; int len; if (!hdl) return; if (!prefix) prefix = ""; len = strlen(prefix); if (len && prefix[len - 1] != ' ') colon = ": "; mutex_lock(hdl->lock); list_for_each_entry(ctrl, &hdl->ctrls, node) if (!(ctrl->flags & V4L2_CTRL_FLAG_DISABLED)) log_ctrl(ctrl, prefix, colon); mutex_unlock(hdl->lock); } EXPORT_SYMBOL(v4l2_ctrl_handler_log_status); int v4l2_ctrl_new_fwnode_properties(struct v4l2_ctrl_handler *hdl, const struct v4l2_ctrl_ops *ctrl_ops, const struct v4l2_fwnode_device_properties *p) { if (hdl->error) return hdl->error; if (p->orientation != V4L2_FWNODE_PROPERTY_UNSET) { u32 orientation_ctrl; switch (p->orientation) { case V4L2_FWNODE_ORIENTATION_FRONT: orientation_ctrl = V4L2_CAMERA_ORIENTATION_FRONT; break; case V4L2_FWNODE_ORIENTATION_BACK: orientation_ctrl = V4L2_CAMERA_ORIENTATION_BACK; break; case V4L2_FWNODE_ORIENTATION_EXTERNAL: orientation_ctrl = V4L2_CAMERA_ORIENTATION_EXTERNAL; break; default: return -EINVAL; } if (!v4l2_ctrl_new_std_menu(hdl, ctrl_ops, V4L2_CID_CAMERA_ORIENTATION, V4L2_CAMERA_ORIENTATION_EXTERNAL, 0, orientation_ctrl)) return hdl->error; } if (p->rotation != V4L2_FWNODE_PROPERTY_UNSET) { if (!v4l2_ctrl_new_std(hdl, ctrl_ops, V4L2_CID_CAMERA_SENSOR_ROTATION, p->rotation, p->rotation, 1, p->rotation)) return hdl->error; } return hdl->error; } EXPORT_SYMBOL(v4l2_ctrl_new_fwnode_properties); |
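A hedged usage sketch (not part of the control-framework code above): how a hypothetical sensor driver could combine the cluster helpers shown here, building a handler, creating an exposure auto-cluster with v4l2_ctrl_auto_cluster(), and pushing the initial values with v4l2_ctrl_handler_setup(). The "demo_*" names and the exposure range are assumptions for illustration only.

/*
 * Hedged sketch: exercise the cluster helpers above from a driver.
 * demo_sensor, demo_ctrl_ops and the control ranges are illustrative.
 */
#include <media/v4l2-ctrls.h>
#include <media/v4l2-subdev.h>

struct demo_sensor {
	struct v4l2_subdev sd;
	struct v4l2_ctrl_handler hdl;
};

static int demo_s_ctrl(struct v4l2_ctrl *ctrl)
{
	/* program the hardware here; nothing to do in this sketch */
	return 0;
}

static const struct v4l2_ctrl_ops demo_ctrl_ops = {
	.s_ctrl = demo_s_ctrl,
};

static int demo_init_controls(struct demo_sensor *sensor)
{
	struct v4l2_ctrl_handler *hdl = &sensor->hdl;
	struct v4l2_ctrl *cluster[2];
	int ret;

	ret = v4l2_ctrl_handler_init(hdl, 2);
	if (ret)
		return ret;

	/* cluster[0] is the master auto/manual menu control */
	cluster[0] = v4l2_ctrl_new_std_menu(hdl, &demo_ctrl_ops,
					    V4L2_CID_EXPOSURE_AUTO,
					    V4L2_EXPOSURE_MANUAL, 0,
					    V4L2_EXPOSURE_AUTO);
	/* cluster[1] is only active while the master is in manual mode */
	cluster[1] = v4l2_ctrl_new_std(hdl, &demo_ctrl_ops, V4L2_CID_EXPOSURE,
				       1, 65535, 1, 1000);
	if (hdl->error) {
		ret = hdl->error;
		v4l2_ctrl_handler_free(hdl);
		return ret;
	}

	/* pass set_volatile=true only if the ops also implement g_volatile_ctrl */
	v4l2_ctrl_auto_cluster(2, cluster, V4L2_EXPOSURE_MANUAL, false);
	sensor->sd.ctrl_handler = hdl;

	/* push the initial values to the device via demo_s_ctrl() */
	return v4l2_ctrl_handler_setup(hdl);
}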
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Force feedback support for Logitech Flight System G940
 *
 * Copyright (c) 2009 Gary Stein <LordCnidarian@gmail.com>
 */

/*
 */

#include <linux/input.h>
#include <linux/hid.h>

#include "hid-lg.h"

/*
 * G940 Theory of Operation (from experimentation)
 *
 * There are 63 fields (only 3 of them currently used)
 * 0 - seems to be the command field
 * 1 - 30 deal with the x axis
 * 31 - 60 deal with the y axis
 *
 * Field 1 is x axis constant force
 * Field 31 is y axis constant force
 *
 * other interesting fields 1,2,3,4 on x axis
 * (same for 31,32,33,34 on y axis)
 *
 * 0 0 127 127 makes the joystick autocenter hard
 *
 * 127 0 127 127 makes the joystick loose on the right,
 * but stops all movement left
 *
 * -127 0 -127 -127 makes the joystick loose on the left,
 * but stops all movement right
 *
 * 0 0 -127 -127 makes the joystick rattle very hard
 *
 * I'm sure these are effects that I don't know enough about
 */

static int hid_lg3ff_play(struct input_dev *dev, void *data,
			  struct ff_effect *effect)
{
	struct hid_device *hid = input_get_drvdata(dev);
	struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
	struct hid_report *report = list_entry(report_list->next, struct hid_report, list);
	int x, y;

	/*
	 * Available values in the field should always be 63, but we only use up to
	 * 35. Instead, clear the entire area, however big it is.
*/ memset(report->field[0]->value, 0, sizeof(__s32) * report->field[0]->report_count); switch (effect->type) { case FF_CONSTANT: /* * Already clamped in ff_memless * 0 is center (different then other logitech) */ x = effect->u.ramp.start_level; y = effect->u.ramp.end_level; /* send command byte */ report->field[0]->value[0] = 0x51; /* * Sign backwards from other Force3d pro * which get recast here in two's complement 8 bits */ report->field[0]->value[1] = (unsigned char)(-x); report->field[0]->value[31] = (unsigned char)(-y); hid_hw_request(hid, report, HID_REQ_SET_REPORT); break; } return 0; } static void hid_lg3ff_set_autocenter(struct input_dev *dev, u16 magnitude) { struct hid_device *hid = input_get_drvdata(dev); struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; struct hid_report *report = list_entry(report_list->next, struct hid_report, list); /* * Auto Centering probed from device * NOTE: deadman's switch on G940 must be covered * for effects to work */ report->field[0]->value[0] = 0x51; report->field[0]->value[1] = 0x00; report->field[0]->value[2] = 0x00; report->field[0]->value[3] = 0x7F; report->field[0]->value[4] = 0x7F; report->field[0]->value[31] = 0x00; report->field[0]->value[32] = 0x00; report->field[0]->value[33] = 0x7F; report->field[0]->value[34] = 0x7F; hid_hw_request(hid, report, HID_REQ_SET_REPORT); } static const signed short ff3_joystick_ac[] = { FF_CONSTANT, FF_AUTOCENTER, -1 }; int lg3ff_init(struct hid_device *hid) { struct hid_input *hidinput; struct input_dev *dev; const signed short *ff_bits = ff3_joystick_ac; int error; int i; if (list_empty(&hid->inputs)) { hid_err(hid, "no inputs found\n"); return -ENODEV; } hidinput = list_entry(hid->inputs.next, struct hid_input, list); dev = hidinput->input; /* Check that the report looks ok */ if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 35)) return -ENODEV; /* Assume single fixed device G940 */ for (i = 0; ff_bits[i] >= 0; i++) set_bit(ff_bits[i], dev->ffbit); error = input_ff_create_memless(dev, NULL, hid_lg3ff_play); if (error) return error; if (test_bit(FF_AUTOCENTER, dev->ffbit)) dev->ff->set_autocenter = hid_lg3ff_set_autocenter; hid_info(hid, "Force feedback for Logitech Flight System G940 by Gary Stein <LordCnidarian@gmail.com>\n"); return 0; } |
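A hedged user-space sketch (not part of hid-lg3ff.c): uploading and playing an FF_CONSTANT effect through the standard input force-feedback interface, which is what eventually reaches hid_lg3ff_play() above via ff-memless. The device node path is an assumption; a real program would discover it at run time.

/* Hedged sketch: user-space side of the force-feedback path. */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/input.h>

int main(void)
{
	struct ff_effect effect;
	struct input_event play;
	int fd = open("/dev/input/event0", O_RDWR);	/* assumed node */

	if (fd < 0)
		return 1;

	memset(&effect, 0, sizeof(effect));
	effect.type = FF_CONSTANT;
	effect.id = -1;				/* let the kernel assign an id */
	effect.u.constant.level = 0x4000;	/* roughly half strength */
	effect.replay.length = 1000;		/* ms */

	if (ioctl(fd, EVIOCSFF, &effect) < 0)	/* upload the effect */
		return 1;

	memset(&play, 0, sizeof(play));
	play.type = EV_FF;
	play.code = effect.id;
	play.value = 1;				/* start playback */
	if (write(fd, &play, sizeof(play)) != sizeof(play))
		return 1;

	sleep(2);
	close(fd);
	return 0;
}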
39 1 1 2 6 29 28 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 | // SPDX-License-Identifier: GPL-2.0-only /* String matching match for iptables * * (C) 2005 Pablo Neira Ayuso <pablo@eurodev.net> */ #include <linux/gfp.h> #include <linux/init.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/skbuff.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter/xt_string.h> #include <linux/textsearch.h> MODULE_AUTHOR("Pablo Neira Ayuso <pablo@eurodev.net>"); MODULE_DESCRIPTION("Xtables: string-based matching"); MODULE_LICENSE("GPL"); MODULE_ALIAS("ipt_string"); MODULE_ALIAS("ip6t_string"); MODULE_ALIAS("ebt_string"); static bool string_mt(const struct sk_buff *skb, struct xt_action_param *par) { const struct xt_string_info *conf = par->matchinfo; bool invert; invert = conf->u.v1.flags & XT_STRING_FLAG_INVERT; return (skb_find_text((struct sk_buff *)skb, conf->from_offset, conf->to_offset, conf->config) != UINT_MAX) ^ invert; } #define STRING_TEXT_PRIV(m) ((struct xt_string_info *)(m)) static int string_mt_check(const struct xt_mtchk_param *par) { struct xt_string_info *conf = par->matchinfo; struct ts_config *ts_conf; int flags = TS_AUTOLOAD; /* Damn, can't handle this case properly with iptables... */ if (conf->from_offset > conf->to_offset) return -EINVAL; if (conf->algo[XT_STRING_MAX_ALGO_NAME_SIZE - 1] != '\0') return -EINVAL; if (conf->patlen > XT_STRING_MAX_PATTERN_SIZE) return -EINVAL; if (conf->u.v1.flags & ~(XT_STRING_FLAG_IGNORECASE | XT_STRING_FLAG_INVERT)) return -EINVAL; if (conf->u.v1.flags & XT_STRING_FLAG_IGNORECASE) flags |= TS_IGNORECASE; ts_conf = textsearch_prepare(conf->algo, conf->pattern, conf->patlen, GFP_KERNEL, flags); if (IS_ERR(ts_conf)) return PTR_ERR(ts_conf); conf->config = ts_conf; return 0; } static void string_mt_destroy(const struct xt_mtdtor_param *par) { textsearch_destroy(STRING_TEXT_PRIV(par->matchinfo)->config); } static struct xt_match xt_string_mt_reg __read_mostly = { .name = "string", .revision = 1, .family = NFPROTO_UNSPEC, .checkentry = string_mt_check, .match = string_mt, .destroy = string_mt_destroy, .matchsize = sizeof(struct xt_string_info), .usersize = offsetof(struct xt_string_info, config), .me = THIS_MODULE, }; static int __init string_mt_init(void) { return xt_register_match(&xt_string_mt_reg); } static void __exit string_mt_exit(void) { xt_unregister_match(&xt_string_mt_reg); } module_init(string_mt_init); module_exit(string_mt_exit); |
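/*
 * Hedged usage note (not part of xt_string.c): the match registered above is
 * normally exercised from user space via iptables, for example
 *
 *   iptables -A INPUT -p tcp --dport 80 \
 *            -m string --algo bm --to 1500 --string "GET /secret" -j DROP
 *
 * "--algo" selects the textsearch implementation handed to
 * textsearch_prepare() (bm or kmp), and "--from"/"--to" become
 * conf->from_offset/conf->to_offset validated in string_mt_check().
 */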
205 2392 2406 146 146 147 146 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 | /* SPDX-License-Identifier: GPL-2.0 */ /* * include/linux/backing-dev.h * * low-level device information and state which is propagated up through * to high-level code. */ #ifndef _LINUX_BACKING_DEV_H #define _LINUX_BACKING_DEV_H #include <linux/kernel.h> #include <linux/fs.h> #include <linux/sched.h> #include <linux/device.h> #include <linux/writeback.h> #include <linux/backing-dev-defs.h> #include <linux/slab.h> static inline struct backing_dev_info *bdi_get(struct backing_dev_info *bdi) { kref_get(&bdi->refcnt); return bdi; } struct backing_dev_info *bdi_get_by_id(u64 id); void bdi_put(struct backing_dev_info *bdi); __printf(2, 3) int bdi_register(struct backing_dev_info *bdi, const char *fmt, ...); __printf(2, 0) int bdi_register_va(struct backing_dev_info *bdi, const char *fmt, va_list args); void bdi_set_owner(struct backing_dev_info *bdi, struct device *owner); void bdi_unregister(struct backing_dev_info *bdi); struct backing_dev_info *bdi_alloc(int node_id); void wb_start_background_writeback(struct bdi_writeback *wb); void wb_workfn(struct work_struct *work); void wb_wait_for_completion(struct wb_completion *done); extern spinlock_t bdi_lock; extern struct list_head bdi_list; extern struct workqueue_struct *bdi_wq; static inline bool wb_has_dirty_io(struct bdi_writeback *wb) { return test_bit(WB_has_dirty_io, &wb->state); } static inline bool bdi_has_dirty_io(struct backing_dev_info *bdi) { /* * @bdi->tot_write_bandwidth is guaranteed to be > 0 if there are * any dirty wbs. See wb_update_write_bandwidth(). 
*/ return atomic_long_read(&bdi->tot_write_bandwidth); } static inline void wb_stat_mod(struct bdi_writeback *wb, enum wb_stat_item item, s64 amount) { percpu_counter_add_batch(&wb->stat[item], amount, WB_STAT_BATCH); } static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item) { wb_stat_mod(wb, item, 1); } static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item) { wb_stat_mod(wb, item, -1); } static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item) { return percpu_counter_read_positive(&wb->stat[item]); } static inline s64 wb_stat_sum(struct bdi_writeback *wb, enum wb_stat_item item) { return percpu_counter_sum_positive(&wb->stat[item]); } extern void wb_writeout_inc(struct bdi_writeback *wb); /* * maximal error of a stat counter. */ static inline unsigned long wb_stat_error(void) { #ifdef CONFIG_SMP return nr_cpu_ids * WB_STAT_BATCH; #else return 1; #endif } /* BDI ratio is expressed as part per 1000000 for finer granularity. */ #define BDI_RATIO_SCALE 10000 u64 bdi_get_min_bytes(struct backing_dev_info *bdi); u64 bdi_get_max_bytes(struct backing_dev_info *bdi); int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio); int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio); int bdi_set_min_ratio_no_scale(struct backing_dev_info *bdi, unsigned int min_ratio); int bdi_set_max_ratio_no_scale(struct backing_dev_info *bdi, unsigned int max_ratio); int bdi_set_min_bytes(struct backing_dev_info *bdi, u64 min_bytes); int bdi_set_max_bytes(struct backing_dev_info *bdi, u64 max_bytes); int bdi_set_strict_limit(struct backing_dev_info *bdi, unsigned int strict_limit); /* * Flags in backing_dev_info::capability * * BDI_CAP_WRITEBACK: Supports dirty page writeback, and dirty pages * should contribute to accounting * BDI_CAP_WRITEBACK_ACCT: Automatically account writeback pages * BDI_CAP_STRICTLIMIT: Keep number of dirty pages below bdi threshold */ #define BDI_CAP_WRITEBACK (1 << 0) #define BDI_CAP_WRITEBACK_ACCT (1 << 1) #define BDI_CAP_STRICTLIMIT (1 << 2) extern struct backing_dev_info noop_backing_dev_info; int bdi_init(struct backing_dev_info *bdi); /** * writeback_in_progress - determine whether there is writeback in progress * @wb: bdi_writeback of interest * * Determine whether there is writeback waiting to be handled against a * bdi_writeback. */ static inline bool writeback_in_progress(struct bdi_writeback *wb) { return test_bit(WB_writeback_running, &wb->state); } struct backing_dev_info *inode_to_bdi(struct inode *inode); static inline bool mapping_can_writeback(struct address_space *mapping) { return inode_to_bdi(mapping->host)->capabilities & BDI_CAP_WRITEBACK; } #ifdef CONFIG_CGROUP_WRITEBACK struct bdi_writeback *wb_get_lookup(struct backing_dev_info *bdi, struct cgroup_subsys_state *memcg_css); struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi, struct cgroup_subsys_state *memcg_css, gfp_t gfp); void wb_memcg_offline(struct mem_cgroup *memcg); void wb_blkcg_offline(struct cgroup_subsys_state *css); /** * inode_cgwb_enabled - test whether cgroup writeback is enabled on an inode * @inode: inode of interest * * Cgroup writeback requires support from the filesystem. Also, both memcg and * iocg have to be on the default hierarchy. Test whether all conditions are * met. * * Note that the test result may change dynamically on the same inode * depending on how memcg and iocg are configured. 
*/ static inline bool inode_cgwb_enabled(struct inode *inode) { struct backing_dev_info *bdi = inode_to_bdi(inode); return cgroup_subsys_on_dfl(memory_cgrp_subsys) && cgroup_subsys_on_dfl(io_cgrp_subsys) && (bdi->capabilities & BDI_CAP_WRITEBACK) && (inode->i_sb->s_iflags & SB_I_CGROUPWB); } /** * wb_find_current - find wb for %current on a bdi * @bdi: bdi of interest * * Find the wb of @bdi which matches both the memcg and blkcg of %current. * Must be called under rcu_read_lock() which protects the returend wb. * NULL if not found. */ static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi) { struct cgroup_subsys_state *memcg_css; struct bdi_writeback *wb; memcg_css = task_css(current, memory_cgrp_id); if (!memcg_css->parent) return &bdi->wb; wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id); /* * %current's blkcg equals the effective blkcg of its memcg. No * need to use the relatively expensive cgroup_get_e_css(). */ if (likely(wb && wb->blkcg_css == task_css(current, io_cgrp_id))) return wb; return NULL; } /** * wb_get_create_current - get or create wb for %current on a bdi * @bdi: bdi of interest * @gfp: allocation mask * * Equivalent to wb_get_create() on %current's memcg. This function is * called from a relatively hot path and optimizes the common cases using * wb_find_current(). */ static inline struct bdi_writeback * wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp) { struct bdi_writeback *wb; rcu_read_lock(); wb = wb_find_current(bdi); if (wb && unlikely(!wb_tryget(wb))) wb = NULL; rcu_read_unlock(); if (unlikely(!wb)) { struct cgroup_subsys_state *memcg_css; memcg_css = task_get_css(current, memory_cgrp_id); wb = wb_get_create(bdi, memcg_css, gfp); css_put(memcg_css); } return wb; } /** * inode_to_wb - determine the wb of an inode * @inode: inode of interest * * Returns the wb @inode is currently associated with. The caller must be * holding either @inode->i_lock, the i_pages lock, or the * associated wb's list_lock. */ static inline struct bdi_writeback *inode_to_wb(const struct inode *inode) { #ifdef CONFIG_LOCKDEP WARN_ON_ONCE(debug_locks && (!lockdep_is_held(&inode->i_lock) && !lockdep_is_held(&inode->i_mapping->i_pages.xa_lock) && !lockdep_is_held(&inode->i_wb->list_lock))); #endif return inode->i_wb; } static inline struct bdi_writeback *inode_to_wb_wbc( struct inode *inode, struct writeback_control *wbc) { /* * If wbc does not have inode attached, it means cgroup writeback was * disabled when wbc started. Just use the default wb in that case. */ return wbc->wb ? wbc->wb : &inode_to_bdi(inode)->wb; } /** * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction * @inode: target inode * @cookie: output param, to be passed to the end function * * The caller wants to access the wb associated with @inode but isn't * holding inode->i_lock, the i_pages lock or wb->list_lock. This * function determines the wb associated with @inode and ensures that the * association doesn't change until the transaction is finished with * unlocked_inode_to_wb_end(). * * The caller must call unlocked_inode_to_wb_end() with *@cookie afterwards and * can't sleep during the transaction. IRQs may or may not be disabled on * return. */ static inline struct bdi_writeback * unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie) { rcu_read_lock(); /* * Paired with store_release in inode_switch_wbs_work_fn() and * ensures that we see the new wb if we see cleared I_WB_SWITCH. 
*/ cookie->locked = smp_load_acquire(&inode->i_state) & I_WB_SWITCH; if (unlikely(cookie->locked)) xa_lock_irqsave(&inode->i_mapping->i_pages, cookie->flags); /* * Protected by either !I_WB_SWITCH + rcu_read_lock() or the i_pages * lock. inode_to_wb() will bark. Deref directly. */ return inode->i_wb; } /** * unlocked_inode_to_wb_end - end inode wb access transaction * @inode: target inode * @cookie: @cookie from unlocked_inode_to_wb_begin() */ static inline void unlocked_inode_to_wb_end(struct inode *inode, struct wb_lock_cookie *cookie) { if (unlikely(cookie->locked)) xa_unlock_irqrestore(&inode->i_mapping->i_pages, cookie->flags); rcu_read_unlock(); } #else /* CONFIG_CGROUP_WRITEBACK */ static inline bool inode_cgwb_enabled(struct inode *inode) { return false; } static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi) { return &bdi->wb; } static inline struct bdi_writeback * wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp) { return &bdi->wb; } static inline struct bdi_writeback *inode_to_wb(struct inode *inode) { return &inode_to_bdi(inode)->wb; } static inline struct bdi_writeback *inode_to_wb_wbc( struct inode *inode, struct writeback_control *wbc) { return inode_to_wb(inode); } static inline struct bdi_writeback * unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie) { return inode_to_wb(inode); } static inline void unlocked_inode_to_wb_end(struct inode *inode, struct wb_lock_cookie *cookie) { } static inline void wb_memcg_offline(struct mem_cgroup *memcg) { } static inline void wb_blkcg_offline(struct cgroup_subsys_state *css) { } #endif /* CONFIG_CGROUP_WRITEBACK */ const char *bdi_dev_name(struct backing_dev_info *bdi); #endif /* _LINUX_BACKING_DEV_H */ |
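A hedged sketch (not part of backing-dev.h): the typical lifecycle of a backing_dev_info using the interfaces declared above, i.e. bdi_alloc(), bdi_register(), bdi_unregister() and bdi_put(). The "demo%d" name, the NUMA node choice, and the demo_* helpers are assumptions for illustration.

/* Hedged sketch: allocate, register and tear down a BDI. */
#include <linux/backing-dev.h>
#include <linux/numa.h>
#include <linux/err.h>

static struct backing_dev_info *demo_bdi_setup(int minor)
{
	struct backing_dev_info *bdi;
	int err;

	bdi = bdi_alloc(NUMA_NO_NODE);
	if (!bdi)
		return ERR_PTR(-ENOMEM);

	/* dirty pages on this device are written back and accounted */
	bdi->capabilities = BDI_CAP_WRITEBACK | BDI_CAP_WRITEBACK_ACCT;

	err = bdi_register(bdi, "demo%d", minor);
	if (err) {
		bdi_put(bdi);		/* drops the initial reference */
		return ERR_PTR(err);
	}
	return bdi;
}

static void demo_bdi_teardown(struct backing_dev_info *bdi)
{
	bdi_unregister(bdi);
	bdi_put(bdi);
}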
916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 | /* SPDX-License-Identifier: GPL-2.0 */ /* * fs/f2fs/segment.h * * Copyright (c) 2012 Samsung Electronics Co., Ltd. * http://www.samsung.com/ */ #include <linux/blkdev.h> #include <linux/backing-dev.h> /* constant macro */ #define NULL_SEGNO ((unsigned int)(~0)) #define NULL_SECNO ((unsigned int)(~0)) #define DEF_RECLAIM_PREFREE_SEGMENTS 5 /* 5% over total segments */ #define DEF_MAX_RECLAIM_PREFREE_SEGMENTS 4096 /* 8GB in maximum */ #define F2FS_MIN_SEGMENTS 9 /* SB + 2 (CP + SIT + NAT) + SSA + MAIN */ #define F2FS_MIN_META_SEGMENTS 8 /* SB + 2 (CP + SIT + NAT) + SSA */ #define INVALID_MTIME ULLONG_MAX /* no valid blocks in a segment/section */ /* L: Logical segment # in volume, R: Relative segment # in main area */ #define GET_L2R_SEGNO(free_i, segno) ((segno) - (free_i)->start_segno) #define GET_R2L_SEGNO(free_i, segno) ((segno) + (free_i)->start_segno) #define IS_DATASEG(t) ((t) <= CURSEG_COLD_DATA) #define IS_NODESEG(t) ((t) >= CURSEG_HOT_NODE && (t) <= CURSEG_COLD_NODE) #define SE_PAGETYPE(se) ((IS_NODESEG((se)->type) ? NODE : DATA)) static inline void sanity_check_seg_type(struct f2fs_sb_info *sbi, unsigned short seg_type) { f2fs_bug_on(sbi, seg_type >= NR_PERSISTENT_LOG); } #define IS_CURSEG(sbi, seg) \ (((seg) == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno) || \ ((seg) == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno) || \ ((seg) == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno) || \ ((seg) == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno) || \ ((seg) == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno) || \ ((seg) == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno) || \ ((seg) == CURSEG_I(sbi, CURSEG_COLD_DATA_PINNED)->segno) || \ ((seg) == CURSEG_I(sbi, CURSEG_ALL_DATA_ATGC)->segno)) #define IS_CURSEC(sbi, secno) \ (((secno) == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno / \ SEGS_PER_SEC(sbi)) || \ ((secno) == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno / \ SEGS_PER_SEC(sbi)) || \ ((secno) == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno / \ SEGS_PER_SEC(sbi)) || \ ((secno) == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno / \ SEGS_PER_SEC(sbi)) || \ ((secno) == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno / \ SEGS_PER_SEC(sbi)) || \ ((secno) == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno / \ SEGS_PER_SEC(sbi)) || \ ((secno) == CURSEG_I(sbi, CURSEG_COLD_DATA_PINNED)->segno / \ SEGS_PER_SEC(sbi)) || \ ((secno) == CURSEG_I(sbi, CURSEG_ALL_DATA_ATGC)->segno / \ SEGS_PER_SEC(sbi))) #define MAIN_BLKADDR(sbi) \ (SM_I(sbi) ? SM_I(sbi)->main_blkaddr : \ le32_to_cpu(F2FS_RAW_SUPER(sbi)->main_blkaddr)) #define SEG0_BLKADDR(sbi) \ (SM_I(sbi) ? SM_I(sbi)->seg0_blkaddr : \ le32_to_cpu(F2FS_RAW_SUPER(sbi)->segment0_blkaddr)) #define MAIN_SEGS(sbi) (SM_I(sbi)->main_segments) #define MAIN_SECS(sbi) ((sbi)->total_sections) #define TOTAL_SEGS(sbi) \ (SM_I(sbi) ? 
SM_I(sbi)->segment_count : \ le32_to_cpu(F2FS_RAW_SUPER(sbi)->segment_count)) #define TOTAL_BLKS(sbi) (SEGS_TO_BLKS(sbi, TOTAL_SEGS(sbi))) #define MAX_BLKADDR(sbi) (SEG0_BLKADDR(sbi) + TOTAL_BLKS(sbi)) #define SEGMENT_SIZE(sbi) (1ULL << ((sbi)->log_blocksize + \ (sbi)->log_blocks_per_seg)) #define START_BLOCK(sbi, segno) (SEG0_BLKADDR(sbi) + \ (SEGS_TO_BLKS(sbi, GET_R2L_SEGNO(FREE_I(sbi), segno)))) #define NEXT_FREE_BLKADDR(sbi, curseg) \ (START_BLOCK(sbi, (curseg)->segno) + (curseg)->next_blkoff) #define GET_SEGOFF_FROM_SEG0(sbi, blk_addr) ((blk_addr) - SEG0_BLKADDR(sbi)) #define GET_SEGNO_FROM_SEG0(sbi, blk_addr) \ (BLKS_TO_SEGS(sbi, GET_SEGOFF_FROM_SEG0(sbi, blk_addr))) #define GET_BLKOFF_FROM_SEG0(sbi, blk_addr) \ (GET_SEGOFF_FROM_SEG0(sbi, blk_addr) & (BLKS_PER_SEG(sbi) - 1)) #define GET_SEGNO(sbi, blk_addr) \ ((!__is_valid_data_blkaddr(blk_addr)) ? \ NULL_SEGNO : GET_L2R_SEGNO(FREE_I(sbi), \ GET_SEGNO_FROM_SEG0(sbi, blk_addr))) #define CAP_BLKS_PER_SEC(sbi) \ (BLKS_PER_SEC(sbi) - (sbi)->unusable_blocks_per_sec) #define CAP_SEGS_PER_SEC(sbi) \ (SEGS_PER_SEC(sbi) - \ BLKS_TO_SEGS(sbi, (sbi)->unusable_blocks_per_sec)) #define GET_SEC_FROM_SEG(sbi, segno) \ (((segno) == -1) ? -1 : (segno) / SEGS_PER_SEC(sbi)) #define GET_SEG_FROM_SEC(sbi, secno) \ ((secno) * SEGS_PER_SEC(sbi)) #define GET_ZONE_FROM_SEC(sbi, secno) \ (((secno) == -1) ? -1 : (secno) / (sbi)->secs_per_zone) #define GET_ZONE_FROM_SEG(sbi, segno) \ GET_ZONE_FROM_SEC(sbi, GET_SEC_FROM_SEG(sbi, segno)) #define GET_SUM_BLOCK(sbi, segno) \ ((sbi)->sm_info->ssa_blkaddr + (segno)) #define GET_SUM_TYPE(footer) ((footer)->entry_type) #define SET_SUM_TYPE(footer, type) ((footer)->entry_type = (type)) #define SIT_ENTRY_OFFSET(sit_i, segno) \ ((segno) % (sit_i)->sents_per_block) #define SIT_BLOCK_OFFSET(segno) \ ((segno) / SIT_ENTRY_PER_BLOCK) #define START_SEGNO(segno) \ (SIT_BLOCK_OFFSET(segno) * SIT_ENTRY_PER_BLOCK) #define SIT_BLK_CNT(sbi) \ DIV_ROUND_UP(MAIN_SEGS(sbi), SIT_ENTRY_PER_BLOCK) #define f2fs_bitmap_size(nr) \ (BITS_TO_LONGS(nr) * sizeof(unsigned long)) #define SECTOR_FROM_BLOCK(blk_addr) \ (((sector_t)blk_addr) << F2FS_LOG_SECTORS_PER_BLOCK) #define SECTOR_TO_BLOCK(sectors) \ ((sectors) >> F2FS_LOG_SECTORS_PER_BLOCK) /* * In the victim_sel_policy->alloc_mode, there are three block allocation modes. * LFS writes data sequentially with cleaning operations. * SSR (Slack Space Recycle) reuses obsolete space without cleaning operations. * AT_SSR (Age Threshold based Slack Space Recycle) merges fragments into * fragmented segment which has similar aging degree. */ enum { LFS = 0, SSR, AT_SSR, }; /* * In the victim_sel_policy->gc_mode, there are three gc, aka cleaning, modes. * GC_CB is based on cost-benefit algorithm. * GC_GREEDY is based on greedy algorithm. * GC_AT is based on age-threshold algorithm. */ enum { GC_CB = 0, GC_GREEDY, GC_AT, ALLOC_NEXT, FLUSH_DEVICE, MAX_GC_POLICY, }; /* * BG_GC means the background cleaning job. * FG_GC means the on-demand cleaning job. 
*/ enum { BG_GC = 0, FG_GC, }; /* for a function parameter to select a victim segment */ struct victim_sel_policy { int alloc_mode; /* LFS or SSR */ int gc_mode; /* GC_CB or GC_GREEDY */ unsigned long *dirty_bitmap; /* dirty segment/section bitmap */ unsigned int max_search; /* * maximum # of segments/sections * to search */ unsigned int offset; /* last scanned bitmap offset */ unsigned int ofs_unit; /* bitmap search unit */ unsigned int min_cost; /* minimum cost */ unsigned long long oldest_age; /* oldest age of segments having the same min cost */ unsigned int min_segno; /* segment # having min. cost */ unsigned long long age; /* mtime of GCed section*/ unsigned long long age_threshold;/* age threshold */ bool one_time_gc; /* one time GC */ }; struct seg_entry { unsigned int type:6; /* segment type like CURSEG_XXX_TYPE */ unsigned int valid_blocks:10; /* # of valid blocks */ unsigned int ckpt_valid_blocks:10; /* # of valid blocks last cp */ unsigned int padding:6; /* padding */ unsigned char *cur_valid_map; /* validity bitmap of blocks */ #ifdef CONFIG_F2FS_CHECK_FS unsigned char *cur_valid_map_mir; /* mirror of current valid bitmap */ #endif /* * # of valid blocks and the validity bitmap stored in the last * checkpoint pack. This information is used by the SSR mode. */ unsigned char *ckpt_valid_map; /* validity bitmap of blocks last cp */ unsigned char *discard_map; unsigned long long mtime; /* modification time of the segment */ }; struct sec_entry { unsigned int valid_blocks; /* # of valid blocks in a section */ }; #define MAX_SKIP_GC_COUNT 16 struct revoke_entry { struct list_head list; block_t old_addr; /* for revoking when fail to commit */ pgoff_t index; }; struct sit_info { block_t sit_base_addr; /* start block address of SIT area */ block_t sit_blocks; /* # of blocks used by SIT area */ block_t written_valid_blocks; /* # of valid blocks in main area */ char *bitmap; /* all bitmaps pointer */ char *sit_bitmap; /* SIT bitmap pointer */ #ifdef CONFIG_F2FS_CHECK_FS char *sit_bitmap_mir; /* SIT bitmap mirror */ /* bitmap of segments to be ignored by GC in case of errors */ unsigned long *invalid_segmap; #endif unsigned int bitmap_size; /* SIT bitmap size */ unsigned long *tmp_map; /* bitmap for temporal use */ unsigned long *dirty_sentries_bitmap; /* bitmap for dirty sentries */ unsigned int dirty_sentries; /* # of dirty sentries */ unsigned int sents_per_block; /* # of SIT entries per block */ struct rw_semaphore sentry_lock; /* to protect SIT cache */ struct seg_entry *sentries; /* SIT segment-level cache */ struct sec_entry *sec_entries; /* SIT section-level cache */ /* for cost-benefit algorithm in cleaning procedure */ unsigned long long elapsed_time; /* elapsed time after mount */ unsigned long long mounted_time; /* mount time */ unsigned long long min_mtime; /* min. modification time */ unsigned long long max_mtime; /* max. 
modification time */ unsigned long long dirty_min_mtime; /* rerange candidates in GC_AT */ unsigned long long dirty_max_mtime; /* rerange candidates in GC_AT */ unsigned int last_victim[MAX_GC_POLICY]; /* last victim segment # */ }; struct free_segmap_info { unsigned int start_segno; /* start segment number logically */ unsigned int free_segments; /* # of free segments */ unsigned int free_sections; /* # of free sections */ spinlock_t segmap_lock; /* free segmap lock */ unsigned long *free_segmap; /* free segment bitmap */ unsigned long *free_secmap; /* free section bitmap */ }; /* Notice: The order of dirty type is same with CURSEG_XXX in f2fs.h */ enum dirty_type { DIRTY_HOT_DATA, /* dirty segments assigned as hot data logs */ DIRTY_WARM_DATA, /* dirty segments assigned as warm data logs */ DIRTY_COLD_DATA, /* dirty segments assigned as cold data logs */ DIRTY_HOT_NODE, /* dirty segments assigned as hot node logs */ DIRTY_WARM_NODE, /* dirty segments assigned as warm node logs */ DIRTY_COLD_NODE, /* dirty segments assigned as cold node logs */ DIRTY, /* to count # of dirty segments */ PRE, /* to count # of entirely obsolete segments */ NR_DIRTY_TYPE }; struct dirty_seglist_info { unsigned long *dirty_segmap[NR_DIRTY_TYPE]; unsigned long *dirty_secmap; struct mutex seglist_lock; /* lock for segment bitmaps */ int nr_dirty[NR_DIRTY_TYPE]; /* # of dirty segments */ unsigned long *victim_secmap; /* background GC victims */ unsigned long *pinned_secmap; /* pinned victims from foreground GC */ unsigned int pinned_secmap_cnt; /* count of victims which has pinned data */ bool enable_pin_section; /* enable pinning section */ }; /* for active log information */ struct curseg_info { struct mutex curseg_mutex; /* lock for consistency */ struct f2fs_summary_block *sum_blk; /* cached summary block */ struct rw_semaphore journal_rwsem; /* protect journal area */ struct f2fs_journal *journal; /* cached journal info */ unsigned char alloc_type; /* current allocation type */ unsigned short seg_type; /* segment type like CURSEG_XXX_TYPE */ unsigned int segno; /* current segment number */ unsigned short next_blkoff; /* next block offset to write */ unsigned int zone; /* current zone number */ unsigned int next_segno; /* preallocated segment */ int fragment_remained_chunk; /* remained block size in a chunk for block fragmentation mode */ bool inited; /* indicate inmem log is inited */ }; struct sit_entry_set { struct list_head set_list; /* link with all sit sets */ unsigned int start_segno; /* start segno of sits in set */ unsigned int entry_cnt; /* the # of sit entries in set */ }; /* * inline functions */ static inline struct curseg_info *CURSEG_I(struct f2fs_sb_info *sbi, int type) { return (struct curseg_info *)(SM_I(sbi)->curseg_array + type); } static inline struct seg_entry *get_seg_entry(struct f2fs_sb_info *sbi, unsigned int segno) { struct sit_info *sit_i = SIT_I(sbi); return &sit_i->sentries[segno]; } static inline struct sec_entry *get_sec_entry(struct f2fs_sb_info *sbi, unsigned int segno) { struct sit_info *sit_i = SIT_I(sbi); return &sit_i->sec_entries[GET_SEC_FROM_SEG(sbi, segno)]; } static inline unsigned int get_valid_blocks(struct f2fs_sb_info *sbi, unsigned int segno, bool use_section) { /* * In order to get # of valid blocks in a section instantly from many * segments, f2fs manages two counting structures separately. 
*/ if (use_section && __is_large_section(sbi)) return get_sec_entry(sbi, segno)->valid_blocks; else return get_seg_entry(sbi, segno)->valid_blocks; } static inline unsigned int get_ckpt_valid_blocks(struct f2fs_sb_info *sbi, unsigned int segno, bool use_section) { if (use_section && __is_large_section(sbi)) { unsigned int secno = GET_SEC_FROM_SEG(sbi, segno); unsigned int start_segno = GET_SEG_FROM_SEC(sbi, secno); unsigned int blocks = 0; int i; for (i = 0; i < SEGS_PER_SEC(sbi); i++, start_segno++) { struct seg_entry *se = get_seg_entry(sbi, start_segno); blocks += se->ckpt_valid_blocks; } return blocks; } return get_seg_entry(sbi, segno)->ckpt_valid_blocks; } static inline void seg_info_from_raw_sit(struct seg_entry *se, struct f2fs_sit_entry *rs) { se->valid_blocks = GET_SIT_VBLOCKS(rs); se->ckpt_valid_blocks = GET_SIT_VBLOCKS(rs); memcpy(se->cur_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE); memcpy(se->ckpt_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE); #ifdef CONFIG_F2FS_CHECK_FS memcpy(se->cur_valid_map_mir, rs->valid_map, SIT_VBLOCK_MAP_SIZE); #endif se->type = GET_SIT_TYPE(rs); se->mtime = le64_to_cpu(rs->mtime); } static inline void __seg_info_to_raw_sit(struct seg_entry *se, struct f2fs_sit_entry *rs) { unsigned short raw_vblocks = (se->type << SIT_VBLOCKS_SHIFT) | se->valid_blocks; rs->vblocks = cpu_to_le16(raw_vblocks); memcpy(rs->valid_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE); rs->mtime = cpu_to_le64(se->mtime); } static inline void seg_info_to_sit_page(struct f2fs_sb_info *sbi, struct page *page, unsigned int start) { struct f2fs_sit_block *raw_sit; struct seg_entry *se; struct f2fs_sit_entry *rs; unsigned int end = min(start + SIT_ENTRY_PER_BLOCK, (unsigned long)MAIN_SEGS(sbi)); int i; raw_sit = (struct f2fs_sit_block *)page_address(page); memset(raw_sit, 0, PAGE_SIZE); for (i = 0; i < end - start; i++) { rs = &raw_sit->entries[i]; se = get_seg_entry(sbi, start + i); __seg_info_to_raw_sit(se, rs); } } static inline void seg_info_to_raw_sit(struct seg_entry *se, struct f2fs_sit_entry *rs) { __seg_info_to_raw_sit(se, rs); memcpy(se->ckpt_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE); se->ckpt_valid_blocks = se->valid_blocks; } static inline unsigned int find_next_inuse(struct free_segmap_info *free_i, unsigned int max, unsigned int segno) { unsigned int ret; spin_lock(&free_i->segmap_lock); ret = find_next_bit(free_i->free_segmap, max, segno); spin_unlock(&free_i->segmap_lock); return ret; } static inline void __set_free(struct f2fs_sb_info *sbi, unsigned int segno) { struct free_segmap_info *free_i = FREE_I(sbi); unsigned int secno = GET_SEC_FROM_SEG(sbi, segno); unsigned int start_segno = GET_SEG_FROM_SEC(sbi, secno); unsigned int next; unsigned int usable_segs = f2fs_usable_segs_in_sec(sbi); spin_lock(&free_i->segmap_lock); clear_bit(segno, free_i->free_segmap); free_i->free_segments++; next = find_next_bit(free_i->free_segmap, start_segno + SEGS_PER_SEC(sbi), start_segno); if (next >= start_segno + usable_segs) { clear_bit(secno, free_i->free_secmap); free_i->free_sections++; } spin_unlock(&free_i->segmap_lock); } static inline void __set_inuse(struct f2fs_sb_info *sbi, unsigned int segno) { struct free_segmap_info *free_i = FREE_I(sbi); unsigned int secno = GET_SEC_FROM_SEG(sbi, segno); set_bit(segno, free_i->free_segmap); free_i->free_segments--; if (!test_and_set_bit(secno, free_i->free_secmap)) free_i->free_sections--; } static inline void __set_test_and_free(struct f2fs_sb_info *sbi, unsigned int segno, bool inmem) { struct free_segmap_info *free_i = 
FREE_I(sbi); unsigned int secno = GET_SEC_FROM_SEG(sbi, segno); unsigned int start_segno = GET_SEG_FROM_SEC(sbi, secno); unsigned int next; unsigned int usable_segs = f2fs_usable_segs_in_sec(sbi); spin_lock(&free_i->segmap_lock); if (test_and_clear_bit(segno, free_i->free_segmap)) { free_i->free_segments++; if (!inmem && IS_CURSEC(sbi, secno)) goto skip_free; next = find_next_bit(free_i->free_segmap, start_segno + SEGS_PER_SEC(sbi), start_segno); if (next >= start_segno + usable_segs) { if (test_and_clear_bit(secno, free_i->free_secmap)) free_i->free_sections++; } } skip_free: spin_unlock(&free_i->segmap_lock); } static inline void __set_test_and_inuse(struct f2fs_sb_info *sbi, unsigned int segno) { struct free_segmap_info *free_i = FREE_I(sbi); unsigned int secno = GET_SEC_FROM_SEG(sbi, segno); spin_lock(&free_i->segmap_lock); if (!test_and_set_bit(segno, free_i->free_segmap)) { free_i->free_segments--; if (!test_and_set_bit(secno, free_i->free_secmap)) free_i->free_sections--; } spin_unlock(&free_i->segmap_lock); } static inline void get_sit_bitmap(struct f2fs_sb_info *sbi, void *dst_addr) { struct sit_info *sit_i = SIT_I(sbi); #ifdef CONFIG_F2FS_CHECK_FS if (memcmp(sit_i->sit_bitmap, sit_i->sit_bitmap_mir, sit_i->bitmap_size)) f2fs_bug_on(sbi, 1); #endif memcpy(dst_addr, sit_i->sit_bitmap, sit_i->bitmap_size); } static inline block_t written_block_count(struct f2fs_sb_info *sbi) { return SIT_I(sbi)->written_valid_blocks; } static inline unsigned int free_segments(struct f2fs_sb_info *sbi) { return FREE_I(sbi)->free_segments; } static inline unsigned int reserved_segments(struct f2fs_sb_info *sbi) { return SM_I(sbi)->reserved_segments; } static inline unsigned int free_sections(struct f2fs_sb_info *sbi) { return FREE_I(sbi)->free_sections; } static inline unsigned int prefree_segments(struct f2fs_sb_info *sbi) { return DIRTY_I(sbi)->nr_dirty[PRE]; } static inline unsigned int dirty_segments(struct f2fs_sb_info *sbi) { return DIRTY_I(sbi)->nr_dirty[DIRTY_HOT_DATA] + DIRTY_I(sbi)->nr_dirty[DIRTY_WARM_DATA] + DIRTY_I(sbi)->nr_dirty[DIRTY_COLD_DATA] + DIRTY_I(sbi)->nr_dirty[DIRTY_HOT_NODE] + DIRTY_I(sbi)->nr_dirty[DIRTY_WARM_NODE] + DIRTY_I(sbi)->nr_dirty[DIRTY_COLD_NODE]; } static inline int overprovision_segments(struct f2fs_sb_info *sbi) { return SM_I(sbi)->ovp_segments; } static inline int reserved_sections(struct f2fs_sb_info *sbi) { return GET_SEC_FROM_SEG(sbi, reserved_segments(sbi)); } static inline bool has_curseg_enough_space(struct f2fs_sb_info *sbi, unsigned int node_blocks, unsigned int data_blocks, unsigned int dent_blocks) { unsigned int segno, left_blocks, blocks; int i; /* check current data/node sections in the worst case. */ for (i = CURSEG_HOT_DATA; i < NR_PERSISTENT_LOG; i++) { segno = CURSEG_I(sbi, i)->segno; left_blocks = CAP_BLKS_PER_SEC(sbi) - get_ckpt_valid_blocks(sbi, segno, true); blocks = i <= CURSEG_COLD_DATA ? data_blocks : node_blocks; if (blocks > left_blocks) return false; } /* check current data section for dentry blocks. */ segno = CURSEG_I(sbi, CURSEG_HOT_DATA)->segno; left_blocks = CAP_BLKS_PER_SEC(sbi) - get_ckpt_valid_blocks(sbi, segno, true); if (dent_blocks > left_blocks) return false; return true; } /* * calculate needed sections for dirty node/dentry and call * has_curseg_enough_space, please note that, it needs to account * dirty data as well in lfs mode when checkpoint is disabled. 
*/ static inline void __get_secs_required(struct f2fs_sb_info *sbi, unsigned int *lower_p, unsigned int *upper_p, bool *curseg_p) { unsigned int total_node_blocks = get_pages(sbi, F2FS_DIRTY_NODES) + get_pages(sbi, F2FS_DIRTY_DENTS) + get_pages(sbi, F2FS_DIRTY_IMETA); unsigned int total_dent_blocks = get_pages(sbi, F2FS_DIRTY_DENTS); unsigned int total_data_blocks = 0; unsigned int node_secs = total_node_blocks / CAP_BLKS_PER_SEC(sbi); unsigned int dent_secs = total_dent_blocks / CAP_BLKS_PER_SEC(sbi); unsigned int data_secs = 0; unsigned int node_blocks = total_node_blocks % CAP_BLKS_PER_SEC(sbi); unsigned int dent_blocks = total_dent_blocks % CAP_BLKS_PER_SEC(sbi); unsigned int data_blocks = 0; if (f2fs_lfs_mode(sbi) && unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) { total_data_blocks = get_pages(sbi, F2FS_DIRTY_DATA); data_secs = total_data_blocks / CAP_BLKS_PER_SEC(sbi); data_blocks = total_data_blocks % CAP_BLKS_PER_SEC(sbi); } if (lower_p) *lower_p = node_secs + dent_secs + data_secs; if (upper_p) *upper_p = node_secs + dent_secs + (node_blocks ? 1 : 0) + (dent_blocks ? 1 : 0) + (data_blocks ? 1 : 0); if (curseg_p) *curseg_p = has_curseg_enough_space(sbi, node_blocks, data_blocks, dent_blocks); } static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi, int freed, int needed) { unsigned int free_secs, lower_secs, upper_secs; bool curseg_space; if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) return false; __get_secs_required(sbi, &lower_secs, &upper_secs, &curseg_space); free_secs = free_sections(sbi) + freed; lower_secs += needed + reserved_sections(sbi); upper_secs += needed + reserved_sections(sbi); if (free_secs > upper_secs) return false; if (free_secs <= lower_secs) return true; return !curseg_space; } static inline bool has_enough_free_secs(struct f2fs_sb_info *sbi, int freed, int needed) { return !has_not_enough_free_secs(sbi, freed, needed); } static inline bool has_enough_free_blks(struct f2fs_sb_info *sbi) { unsigned int total_free_blocks = 0; unsigned int avail_user_block_count; spin_lock(&sbi->stat_lock); avail_user_block_count = get_available_block_count(sbi, NULL, true); total_free_blocks = avail_user_block_count - (unsigned int)valid_user_blocks(sbi); spin_unlock(&sbi->stat_lock); return total_free_blocks > 0; } static inline bool f2fs_is_checkpoint_ready(struct f2fs_sb_info *sbi) { if (likely(!is_sbi_flag_set(sbi, SBI_CP_DISABLED))) return true; if (likely(has_enough_free_secs(sbi, 0, 0))) return true; if (!f2fs_lfs_mode(sbi) && likely(has_enough_free_blks(sbi))) return true; return false; } static inline bool excess_prefree_segs(struct f2fs_sb_info *sbi) { return prefree_segments(sbi) > SM_I(sbi)->rec_prefree_segments; } static inline int utilization(struct f2fs_sb_info *sbi) { return div_u64((u64)valid_user_blocks(sbi) * 100, sbi->user_block_count); } /* * Sometimes f2fs may be better to drop out-of-place update policy. * And, users can control the policy through sysfs entries. * There are five policies with triggering conditions as follows. * F2FS_IPU_FORCE - all the time, * F2FS_IPU_SSR - if SSR mode is activated, * F2FS_IPU_UTIL - if FS utilization is over threashold, * F2FS_IPU_SSR_UTIL - if SSR mode is activated and FS utilization is over * threashold, * F2FS_IPU_FSYNC - activated in fsync path only for high performance flash * storages. IPU will be triggered only if the # of dirty * pages over min_fsync_blocks. (=default option) * F2FS_IPU_ASYNC - do IPU given by asynchronous write requests. * F2FS_IPU_NOCACHE - disable IPU bio cache. 
* F2FS_IPU_HONOR_OPU_WRITE - use OPU write prior to IPU write if inode has * FI_OPU_WRITE flag. * F2FS_IPU_DISABLE - disable IPU. (=default option in LFS mode) */ #define DEF_MIN_IPU_UTIL 70 #define DEF_MIN_FSYNC_BLOCKS 8 #define DEF_MIN_HOT_BLOCKS 16 #define SMALL_VOLUME_SEGMENTS (16 * 512) /* 16GB */ #define F2FS_IPU_DISABLE 0 /* Modification on enum should be synchronized with ipu_mode_names array */ enum { F2FS_IPU_FORCE, F2FS_IPU_SSR, F2FS_IPU_UTIL, F2FS_IPU_SSR_UTIL, F2FS_IPU_FSYNC, F2FS_IPU_ASYNC, F2FS_IPU_NOCACHE, F2FS_IPU_HONOR_OPU_WRITE, F2FS_IPU_MAX, }; static inline bool IS_F2FS_IPU_DISABLE(struct f2fs_sb_info *sbi) { return SM_I(sbi)->ipu_policy == F2FS_IPU_DISABLE; } #define F2FS_IPU_POLICY(name) \ static inline bool IS_##name(struct f2fs_sb_info *sbi) \ { \ return SM_I(sbi)->ipu_policy & BIT(name); \ } F2FS_IPU_POLICY(F2FS_IPU_FORCE); F2FS_IPU_POLICY(F2FS_IPU_SSR); F2FS_IPU_POLICY(F2FS_IPU_UTIL); F2FS_IPU_POLICY(F2FS_IPU_SSR_UTIL); F2FS_IPU_POLICY(F2FS_IPU_FSYNC); F2FS_IPU_POLICY(F2FS_IPU_ASYNC); F2FS_IPU_POLICY(F2FS_IPU_NOCACHE); F2FS_IPU_POLICY(F2FS_IPU_HONOR_OPU_WRITE); static inline unsigned int curseg_segno(struct f2fs_sb_info *sbi, int type) { struct curseg_info *curseg = CURSEG_I(sbi, type); return curseg->segno; } static inline unsigned char curseg_alloc_type(struct f2fs_sb_info *sbi, int type) { struct curseg_info *curseg = CURSEG_I(sbi, type); return curseg->alloc_type; } static inline bool valid_main_segno(struct f2fs_sb_info *sbi, unsigned int segno) { return segno <= (MAIN_SEGS(sbi) - 1); } static inline void verify_fio_blkaddr(struct f2fs_io_info *fio) { struct f2fs_sb_info *sbi = fio->sbi; if (__is_valid_data_blkaddr(fio->old_blkaddr)) verify_blkaddr(sbi, fio->old_blkaddr, __is_meta_io(fio) ? META_GENERIC : DATA_GENERIC); verify_blkaddr(sbi, fio->new_blkaddr, __is_meta_io(fio) ? META_GENERIC : DATA_GENERIC_ENHANCE); } /* * Summary block is always treated as an invalid block */ static inline int check_block_count(struct f2fs_sb_info *sbi, int segno, struct f2fs_sit_entry *raw_sit) { bool is_valid = test_bit_le(0, raw_sit->valid_map) ? true : false; int valid_blocks = 0; int cur_pos = 0, next_pos; unsigned int usable_blks_per_seg = f2fs_usable_blks_in_seg(sbi, segno); /* check bitmap with valid block count */ do { if (is_valid) { next_pos = find_next_zero_bit_le(&raw_sit->valid_map, usable_blks_per_seg, cur_pos); valid_blocks += next_pos - cur_pos; } else next_pos = find_next_bit_le(&raw_sit->valid_map, usable_blks_per_seg, cur_pos); cur_pos = next_pos; is_valid = !is_valid; } while (cur_pos < usable_blks_per_seg); if (unlikely(GET_SIT_VBLOCKS(raw_sit) != valid_blocks)) { f2fs_err(sbi, "Mismatch valid blocks %d vs. 
%d", GET_SIT_VBLOCKS(raw_sit), valid_blocks); set_sbi_flag(sbi, SBI_NEED_FSCK); f2fs_handle_error(sbi, ERROR_INCONSISTENT_SIT); return -EFSCORRUPTED; } if (usable_blks_per_seg < BLKS_PER_SEG(sbi)) f2fs_bug_on(sbi, find_next_bit_le(&raw_sit->valid_map, BLKS_PER_SEG(sbi), usable_blks_per_seg) != BLKS_PER_SEG(sbi)); /* check segment usage, and check boundary of a given segment number */ if (unlikely(GET_SIT_VBLOCKS(raw_sit) > usable_blks_per_seg || !valid_main_segno(sbi, segno))) { f2fs_err(sbi, "Wrong valid blocks %d or segno %u", GET_SIT_VBLOCKS(raw_sit), segno); set_sbi_flag(sbi, SBI_NEED_FSCK); f2fs_handle_error(sbi, ERROR_INCONSISTENT_SIT); return -EFSCORRUPTED; } return 0; } static inline pgoff_t current_sit_addr(struct f2fs_sb_info *sbi, unsigned int start) { struct sit_info *sit_i = SIT_I(sbi); unsigned int offset = SIT_BLOCK_OFFSET(start); block_t blk_addr = sit_i->sit_base_addr + offset; f2fs_bug_on(sbi, !valid_main_segno(sbi, start)); #ifdef CONFIG_F2FS_CHECK_FS if (f2fs_test_bit(offset, sit_i->sit_bitmap) != f2fs_test_bit(offset, sit_i->sit_bitmap_mir)) f2fs_bug_on(sbi, 1); #endif /* calculate sit block address */ if (f2fs_test_bit(offset, sit_i->sit_bitmap)) blk_addr += sit_i->sit_blocks; return blk_addr; } static inline pgoff_t next_sit_addr(struct f2fs_sb_info *sbi, pgoff_t block_addr) { struct sit_info *sit_i = SIT_I(sbi); block_addr -= sit_i->sit_base_addr; if (block_addr < sit_i->sit_blocks) block_addr += sit_i->sit_blocks; else block_addr -= sit_i->sit_blocks; return block_addr + sit_i->sit_base_addr; } static inline void set_to_next_sit(struct sit_info *sit_i, unsigned int start) { unsigned int block_off = SIT_BLOCK_OFFSET(start); f2fs_change_bit(block_off, sit_i->sit_bitmap); #ifdef CONFIG_F2FS_CHECK_FS f2fs_change_bit(block_off, sit_i->sit_bitmap_mir); #endif } static inline unsigned long long get_mtime(struct f2fs_sb_info *sbi, bool base_time) { struct sit_info *sit_i = SIT_I(sbi); time64_t diff, now = ktime_get_boottime_seconds(); if (now >= sit_i->mounted_time) return sit_i->elapsed_time + now - sit_i->mounted_time; /* system time is set to the past */ if (!base_time) { diff = sit_i->mounted_time - now; if (sit_i->elapsed_time >= diff) return sit_i->elapsed_time - diff; return 0; } return sit_i->elapsed_time; } static inline void set_summary(struct f2fs_summary *sum, nid_t nid, unsigned int ofs_in_node, unsigned char version) { sum->nid = cpu_to_le32(nid); sum->ofs_in_node = cpu_to_le16(ofs_in_node); sum->version = version; } static inline block_t start_sum_block(struct f2fs_sb_info *sbi) { return __start_cp_addr(sbi) + le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum); } static inline block_t sum_blk_addr(struct f2fs_sb_info *sbi, int base, int type) { return __start_cp_addr(sbi) + le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_total_block_count) - (base + 1) + type; } static inline bool sec_usage_check(struct f2fs_sb_info *sbi, unsigned int secno) { if (IS_CURSEC(sbi, secno) || (sbi->cur_victim_sec == secno)) return true; return false; } /* * It is very important to gather dirty pages and write at once, so that we can * submit a big bio without interfering other data writes. * By default, 512 pages for directory data, * 512 pages (2MB) * 8 for nodes, and * 256 pages * 8 for meta are set. 
*/ static inline int nr_pages_to_skip(struct f2fs_sb_info *sbi, int type) { if (sbi->sb->s_bdi->wb.dirty_exceeded) return 0; if (type == DATA) return BLKS_PER_SEG(sbi); else if (type == NODE) return SEGS_TO_BLKS(sbi, 8); else if (type == META) return 8 * BIO_MAX_VECS; else return 0; } /* * When writing pages, it'd better align nr_to_write for segment size. */ static inline long nr_pages_to_write(struct f2fs_sb_info *sbi, int type, struct writeback_control *wbc) { long nr_to_write, desired; if (wbc->sync_mode != WB_SYNC_NONE) return 0; nr_to_write = wbc->nr_to_write; desired = BIO_MAX_VECS; if (type == NODE) desired <<= 1; wbc->nr_to_write = desired; return desired - nr_to_write; } static inline void wake_up_discard_thread(struct f2fs_sb_info *sbi, bool force) { struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; bool wakeup = false; int i; if (force) goto wake_up; mutex_lock(&dcc->cmd_lock); for (i = MAX_PLIST_NUM - 1; i >= 0; i--) { if (i + 1 < dcc->discard_granularity) break; if (!list_empty(&dcc->pend_list[i])) { wakeup = true; break; } } mutex_unlock(&dcc->cmd_lock); if (!wakeup || !is_idle(sbi, DISCARD_TIME)) return; wake_up: dcc->discard_wake = true; wake_up_interruptible_all(&dcc->discard_wait_queue); } |
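/*
 * Hedged illustration (not part of segment.h): how a write path might consult
 * the per-policy helpers generated by F2FS_IPU_POLICY() above. The real
 * decision in f2fs (f2fs_should_update_inplace()) considers more conditions;
 * this only demonstrates how the bit-per-policy encoding is queried. It
 * assumes the usual f2fs.h/segment.h context (SM_I, utilization, min_ipu_util).
 */
static inline bool demo_want_inplace_update(struct f2fs_sb_info *sbi)
{
	if (IS_F2FS_IPU_DISABLE(sbi))
		return false;
	if (IS_F2FS_IPU_FORCE(sbi))
		return true;
	/* e.g. policy bit F2FS_IPU_UTIL: IPU only when utilization is high */
	if (IS_F2FS_IPU_UTIL(sbi) &&
	    utilization(sbi) > SM_I(sbi)->min_ipu_util)
		return true;
	return false;
}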
119 19 19 19 767 651 120 119 119 119 9 3 3 2 2 13 1 12 12 12 1 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 | // SPDX-License-Identifier: GPL-2.0 #include "cgroup-internal.h" #include <linux/sched/task.h> #include <linux/slab.h> #include <linux/nsproxy.h> #include <linux/proc_ns.h> /* cgroup namespaces */ static struct ucounts *inc_cgroup_namespaces(struct user_namespace *ns) { return inc_ucount(ns, current_euid(), UCOUNT_CGROUP_NAMESPACES); } static void dec_cgroup_namespaces(struct ucounts *ucounts) { dec_ucount(ucounts, UCOUNT_CGROUP_NAMESPACES); } static struct cgroup_namespace *alloc_cgroup_ns(void) { struct cgroup_namespace *new_ns; int ret; new_ns = kzalloc(sizeof(struct cgroup_namespace), GFP_KERNEL_ACCOUNT); if (!new_ns) return ERR_PTR(-ENOMEM); ret = ns_alloc_inum(&new_ns->ns); if (ret) { kfree(new_ns); return ERR_PTR(ret); } refcount_set(&new_ns->ns.count, 1); new_ns->ns.ops = &cgroupns_operations; return new_ns; } void free_cgroup_ns(struct cgroup_namespace *ns) { put_css_set(ns->root_cset); dec_cgroup_namespaces(ns->ucounts); put_user_ns(ns->user_ns); ns_free_inum(&ns->ns); kfree(ns); } EXPORT_SYMBOL(free_cgroup_ns); struct cgroup_namespace *copy_cgroup_ns(unsigned long flags, struct user_namespace *user_ns, struct cgroup_namespace *old_ns) { struct cgroup_namespace *new_ns; struct ucounts *ucounts; struct css_set *cset; BUG_ON(!old_ns); if (!(flags & CLONE_NEWCGROUP)) { get_cgroup_ns(old_ns); return old_ns; } /* Allow only sysadmin to create cgroup namespace. */ if (!ns_capable(user_ns, CAP_SYS_ADMIN)) return ERR_PTR(-EPERM); ucounts = inc_cgroup_namespaces(user_ns); if (!ucounts) return ERR_PTR(-ENOSPC); /* It is not safe to take cgroup_mutex here */ spin_lock_irq(&css_set_lock); cset = task_css_set(current); get_css_set(cset); spin_unlock_irq(&css_set_lock); new_ns = alloc_cgroup_ns(); if (IS_ERR(new_ns)) { put_css_set(cset); dec_cgroup_namespaces(ucounts); return new_ns; } new_ns->user_ns = get_user_ns(user_ns); new_ns->ucounts = ucounts; new_ns->root_cset = cset; return new_ns; } static inline struct cgroup_namespace *to_cg_ns(struct ns_common *ns) { return container_of(ns, struct cgroup_namespace, ns); } static int cgroupns_install(struct nsset *nsset, struct ns_common *ns) { struct nsproxy *nsproxy = nsset->nsproxy; struct cgroup_namespace *cgroup_ns = to_cg_ns(ns); if (!ns_capable(nsset->cred->user_ns, CAP_SYS_ADMIN) || !ns_capable(cgroup_ns->user_ns, CAP_SYS_ADMIN)) return -EPERM; /* Don't need to do anything if we are attaching to our own cgroupns. */ if (cgroup_ns == nsproxy->cgroup_ns) return 0; get_cgroup_ns(cgroup_ns); put_cgroup_ns(nsproxy->cgroup_ns); nsproxy->cgroup_ns = cgroup_ns; return 0; } static struct ns_common *cgroupns_get(struct task_struct *task) { struct cgroup_namespace *ns = NULL; struct nsproxy *nsproxy; task_lock(task); nsproxy = task->nsproxy; if (nsproxy) { ns = nsproxy->cgroup_ns; get_cgroup_ns(ns); } task_unlock(task); return ns ? 
&ns->ns : NULL; } static void cgroupns_put(struct ns_common *ns) { put_cgroup_ns(to_cg_ns(ns)); } static struct user_namespace *cgroupns_owner(struct ns_common *ns) { return to_cg_ns(ns)->user_ns; } const struct proc_ns_operations cgroupns_operations = { .name = "cgroup", .type = CLONE_NEWCGROUP, .get = cgroupns_get, .put = cgroupns_put, .install = cgroupns_install, .owner = cgroupns_owner, }; |
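/*
 * Hedged userspace sketch, not part of the kernel sources: exercising
 * copy_cgroup_ns() above via unshare(CLONE_NEWCGROUP). As the code enforces,
 * this needs CAP_SYS_ADMIN in the owning user namespace, and the task's
 * css_set at unshare time becomes the namespace root, so /proc/self/cgroup
 * is reported relative to it afterwards.
 */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	if (unshare(CLONE_NEWCGROUP) < 0) {	/* EPERM without CAP_SYS_ADMIN */
		perror("unshare(CLONE_NEWCGROUP)");
		return EXIT_FAILURE;
	}
	/* Paths are now shown relative to the new namespace's root cgroup. */
	execlp("cat", "cat", "/proc/self/cgroup", (char *)NULL);
	perror("execlp");
	return EXIT_FAILURE;
}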
/* SPDX-License-Identifier: GPL-2.0 */ /* * Macros for manipulating and testing page->flags */ #ifndef PAGE_FLAGS_H #define PAGE_FLAGS_H #include <linux/types.h> #include <linux/bug.h> #include <linux/mmdebug.h> #ifndef __GENERATING_BOUNDS_H #include <linux/mm_types.h> #include <generated/bounds.h> #endif /* !__GENERATING_BOUNDS_H */ /* * Various page->flags bits: * * PG_reserved is set for special pages. The "struct page" of such a page * should in general not be touched (e.g. set dirty) except by its owner. * Pages marked as PG_reserved include: * - Pages part of the kernel image (including vDSO) and similar (e.g. BIOS, * initrd, HW tables) * - Pages reserved or allocated early during boot (before the page allocator * was initialized). This includes (depending on the architecture) the * initial vmemmap, initial page tables, crashkernel, elfcorehdr, and much * much more. Once (if ever) freed, PG_reserved is cleared and they will * be given to the page allocator. * - Pages falling into physical memory gaps - not IORESOURCE_SYSRAM. Trying * to read/write these pages might end badly. Don't touch! * - The zero page(s) * - Pages allocated in the context of kexec/kdump (loaded kernel image, * control pages, vmcoreinfo) * - MMIO/DMA pages. Some architectures don't allow to ioremap pages that are * not marked PG_reserved (as they might be in use by somebody else who does * not respect the caching strategy). * - MCA pages on ia64 * - Pages holding CPU notes for POWER Firmware Assisted Dump * - Device memory (e.g. PMEM, DAX, HMM) * Some PG_reserved pages will be excluded from the hibernation image. * PG_reserved does in general not hinder anybody from dumping or swapping * and is no longer required for remap_pfn_range(). ioremap might require it.
* Consequently, PG_reserved for a page mapped into user space can indicate * the zero page, the vDSO, MMIO pages or device memory. * * The PG_private bitflag is set on pagecache pages if they contain filesystem * specific data (which is normally at page->private). It can be used by * private allocations for its own usage. * * During initiation of disk I/O, PG_locked is set. This bit is set before I/O * and cleared when writeback _starts_ or when read _completes_. PG_writeback * is set before writeback starts and cleared when it finishes. * * PG_locked also pins a page in pagecache, and blocks truncation of the file * while it is held. * * page_waitqueue(page) is a wait queue of all tasks waiting for the page * to become unlocked. * * PG_swapbacked is set when a page uses swap as a backing storage. This are * usually PageAnon or shmem pages but please note that even anonymous pages * might lose their PG_swapbacked flag when they simply can be dropped (e.g. as * a result of MADV_FREE). * * PG_referenced, PG_reclaim are used for page reclaim for anonymous and * file-backed pagecache (see mm/vmscan.c). * * PG_arch_1 is an architecture specific page state bit. The generic code * guarantees that this bit is cleared for a page when it first is entered into * the page cache. * * PG_hwpoison indicates that a page got corrupted in hardware and contains * data with incorrect ECC bits that triggered a machine check. Accessing is * not safe since it may cause another machine check. Don't touch! */ /* * Don't use the pageflags directly. Use the PageFoo macros. * * The page flags field is split into two parts, the main flags area * which extends from the low bits upwards, and the fields area which * extends from the high bits downwards. * * | FIELD | ... | FLAGS | * N-1 ^ 0 * (NR_PAGEFLAGS) * * The fields area is reserved for fields mapping zone, node (for NUMA) and * SPARSEMEM section (for variants of SPARSEMEM that require section ids like * SPARSEMEM_EXTREME with !SPARSEMEM_VMEMMAP). */ enum pageflags { PG_locked, /* Page is locked. Don't touch. */ PG_writeback, /* Page is under writeback */ PG_referenced, PG_uptodate, PG_dirty, PG_lru, PG_head, /* Must be in bit 6 */ PG_waiters, /* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */ PG_active, PG_workingset, PG_owner_priv_1, /* Owner use. If pagecache, fs may use */ PG_owner_2, /* Owner use. If pagecache, fs may use */ PG_arch_1, PG_reserved, PG_private, /* If pagecache, has fs-private data */ PG_private_2, /* If pagecache, has fs aux data */ PG_reclaim, /* To be reclaimed asap */ PG_swapbacked, /* Page is backed by RAM/swap */ PG_unevictable, /* Page is "unevictable" */ #ifdef CONFIG_MMU PG_mlocked, /* Page is vma mlocked */ #endif #ifdef CONFIG_MEMORY_FAILURE PG_hwpoison, /* hardware poisoned page. Don't touch */ #endif #if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT) PG_young, PG_idle, #endif #ifdef CONFIG_ARCH_USES_PG_ARCH_2 PG_arch_2, #endif #ifdef CONFIG_ARCH_USES_PG_ARCH_3 PG_arch_3, #endif __NR_PAGEFLAGS, PG_readahead = PG_reclaim, /* Anonymous memory (and shmem) */ PG_swapcache = PG_owner_priv_1, /* Swap page: swp_entry_t in private */ /* Some filesystems */ PG_checked = PG_owner_priv_1, /* * Depending on the way an anonymous folio can be mapped into a page * table (e.g., single PMD/PUD/CONT of the head page vs. PTE-mapped * THP), PG_anon_exclusive may be set only for the head page or for * tail pages of an anonymous folio. 
For now, we only expect it to be * set on tail pages for PTE-mapped THP. */ PG_anon_exclusive = PG_owner_2, /* * Set if all buffer heads in the folio are mapped. * Filesystems which do not use BHs can use it for their own purpose. */ PG_mappedtodisk = PG_owner_2, /* Two page bits are conscripted by FS-Cache to maintain local caching * state. These bits are set on pages belonging to the netfs's inodes * when those inodes are being locally cached. */ PG_fscache = PG_private_2, /* page backed by cache */ /* XEN */ /* Pinned in Xen as a read-only pagetable page. */ PG_pinned = PG_owner_priv_1, /* Pinned as part of domain save (see xen_mm_pin_all()). */ PG_savepinned = PG_dirty, /* Has a grant mapping of another (foreign) domain's page. */ PG_foreign = PG_owner_priv_1, /* Remapped by swiotlb-xen. */ PG_xen_remapped = PG_owner_priv_1, /* non-lru isolated movable page */ PG_isolated = PG_reclaim, /* Only valid for buddy pages. Used to track pages that are reported */ PG_reported = PG_uptodate, #ifdef CONFIG_MEMORY_HOTPLUG /* For self-hosted memmap pages */ PG_vmemmap_self_hosted = PG_owner_priv_1, #endif /* * Flags only valid for compound pages. Stored in first tail page's * flags word. Cannot use the first 8 flags or any flag marked as * PF_ANY. */ /* At least one page in this folio has the hwpoison flag set */ PG_has_hwpoisoned = PG_active, PG_large_rmappable = PG_workingset, /* anon or file-backed */ PG_partially_mapped = PG_reclaim, /* was identified to be partially mapped */ }; #define PAGEFLAGS_MASK ((1UL << NR_PAGEFLAGS) - 1) #ifndef __GENERATING_BOUNDS_H #ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP DECLARE_STATIC_KEY_FALSE(hugetlb_optimize_vmemmap_key); /* * Return the real head page struct iff the @page is a fake head page, otherwise * return the @page itself. See Documentation/mm/vmemmap_dedup.rst. */ static __always_inline const struct page *page_fixed_fake_head(const struct page *page) { if (!static_branch_unlikely(&hugetlb_optimize_vmemmap_key)) return page; /* * Only addresses aligned with PAGE_SIZE of struct page may be fake head * struct page. The alignment check aims to avoid access the fields ( * e.g. compound_head) of the @page[1]. It can avoid touch a (possibly) * cold cacheline in some cases. */ if (IS_ALIGNED((unsigned long)page, PAGE_SIZE) && test_bit(PG_head, &page->flags)) { /* * We can safely access the field of the @page[1] with PG_head * because the @page is a compound page composed with at least * two contiguous pages. */ unsigned long head = READ_ONCE(page[1].compound_head); if (likely(head & 1)) return (const struct page *)(head - 1); } return page; } #else static inline const struct page *page_fixed_fake_head(const struct page *page) { return page; } #endif static __always_inline int page_is_fake_head(const struct page *page) { return page_fixed_fake_head(page) != page; } static __always_inline unsigned long _compound_head(const struct page *page) { unsigned long head = READ_ONCE(page->compound_head); if (unlikely(head & 1)) return head - 1; return (unsigned long)page_fixed_fake_head(page); } #define compound_head(page) ((typeof(page))_compound_head(page)) /** * page_folio - Converts from page to folio. * @p: The page. * * Every page is part of a folio. This function cannot be called on a * NULL pointer. * * Context: No reference, nor lock is required on @page. If the caller * does not hold a reference, this call may race with a folio split, so * it should re-check the folio still contains this page after gaining * a reference on the folio. 
* Return: The folio which contains this page. */ #define page_folio(p) (_Generic((p), \ const struct page *: (const struct folio *)_compound_head(p), \ struct page *: (struct folio *)_compound_head(p))) /** * folio_page - Return a page from a folio. * @folio: The folio. * @n: The page number to return. * * @n is relative to the start of the folio. This function does not * check that the page number lies within @folio; the caller is presumed * to have a reference to the page. */ #define folio_page(folio, n) nth_page(&(folio)->page, n) static __always_inline int PageTail(const struct page *page) { return READ_ONCE(page->compound_head) & 1 || page_is_fake_head(page); } static __always_inline int PageCompound(const struct page *page) { return test_bit(PG_head, &page->flags) || READ_ONCE(page->compound_head) & 1; } #define PAGE_POISON_PATTERN -1l static inline int PagePoisoned(const struct page *page) { return READ_ONCE(page->flags) == PAGE_POISON_PATTERN; } #ifdef CONFIG_DEBUG_VM void page_init_poison(struct page *page, size_t size); #else static inline void page_init_poison(struct page *page, size_t size) { } #endif static const unsigned long *const_folio_flags(const struct folio *folio, unsigned n) { const struct page *page = &folio->page; VM_BUG_ON_PGFLAGS(page->compound_head & 1, page); VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags), page); return &page[n].flags; } static unsigned long *folio_flags(struct folio *folio, unsigned n) { struct page *page = &folio->page; VM_BUG_ON_PGFLAGS(page->compound_head & 1, page); VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags), page); return &page[n].flags; } /* * Page flags policies wrt compound pages * * PF_POISONED_CHECK * check if this struct page poisoned/uninitialized * * PF_ANY: * the page flag is relevant for small, head and tail pages. * * PF_HEAD: * for compound page all operations related to the page flag applied to * head page. * * PF_NO_TAIL: * modifications of the page flag must be done on small or head pages, * checks can be done on tail pages too. * * PF_NO_COMPOUND: * the page flag is not relevant for compound pages. * * PF_SECOND: * the page flag is stored in the first tail page. 
*/ #define PF_POISONED_CHECK(page) ({ \ VM_BUG_ON_PGFLAGS(PagePoisoned(page), page); \ page; }) #define PF_ANY(page, enforce) PF_POISONED_CHECK(page) #define PF_HEAD(page, enforce) PF_POISONED_CHECK(compound_head(page)) #define PF_NO_TAIL(page, enforce) ({ \ VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page); \ PF_POISONED_CHECK(compound_head(page)); }) #define PF_NO_COMPOUND(page, enforce) ({ \ VM_BUG_ON_PGFLAGS(enforce && PageCompound(page), page); \ PF_POISONED_CHECK(page); }) #define PF_SECOND(page, enforce) ({ \ VM_BUG_ON_PGFLAGS(!PageHead(page), page); \ PF_POISONED_CHECK(&page[1]); }) /* Which page is the flag stored in */ #define FOLIO_PF_ANY 0 #define FOLIO_PF_HEAD 0 #define FOLIO_PF_NO_TAIL 0 #define FOLIO_PF_NO_COMPOUND 0 #define FOLIO_PF_SECOND 1 #define FOLIO_HEAD_PAGE 0 #define FOLIO_SECOND_PAGE 1 /* * Macros to create function definitions for page flags */ #define FOLIO_TEST_FLAG(name, page) \ static __always_inline bool folio_test_##name(const struct folio *folio) \ { return test_bit(PG_##name, const_folio_flags(folio, page)); } #define FOLIO_SET_FLAG(name, page) \ static __always_inline void folio_set_##name(struct folio *folio) \ { set_bit(PG_##name, folio_flags(folio, page)); } #define FOLIO_CLEAR_FLAG(name, page) \ static __always_inline void folio_clear_##name(struct folio *folio) \ { clear_bit(PG_##name, folio_flags(folio, page)); } #define __FOLIO_SET_FLAG(name, page) \ static __always_inline void __folio_set_##name(struct folio *folio) \ { __set_bit(PG_##name, folio_flags(folio, page)); } #define __FOLIO_CLEAR_FLAG(name, page) \ static __always_inline void __folio_clear_##name(struct folio *folio) \ { __clear_bit(PG_##name, folio_flags(folio, page)); } #define FOLIO_TEST_SET_FLAG(name, page) \ static __always_inline bool folio_test_set_##name(struct folio *folio) \ { return test_and_set_bit(PG_##name, folio_flags(folio, page)); } #define FOLIO_TEST_CLEAR_FLAG(name, page) \ static __always_inline bool folio_test_clear_##name(struct folio *folio) \ { return test_and_clear_bit(PG_##name, folio_flags(folio, page)); } #define FOLIO_FLAG(name, page) \ FOLIO_TEST_FLAG(name, page) \ FOLIO_SET_FLAG(name, page) \ FOLIO_CLEAR_FLAG(name, page) #define TESTPAGEFLAG(uname, lname, policy) \ FOLIO_TEST_FLAG(lname, FOLIO_##policy) \ static __always_inline int Page##uname(const struct page *page) \ { return test_bit(PG_##lname, &policy(page, 0)->flags); } #define SETPAGEFLAG(uname, lname, policy) \ FOLIO_SET_FLAG(lname, FOLIO_##policy) \ static __always_inline void SetPage##uname(struct page *page) \ { set_bit(PG_##lname, &policy(page, 1)->flags); } #define CLEARPAGEFLAG(uname, lname, policy) \ FOLIO_CLEAR_FLAG(lname, FOLIO_##policy) \ static __always_inline void ClearPage##uname(struct page *page) \ { clear_bit(PG_##lname, &policy(page, 1)->flags); } #define __SETPAGEFLAG(uname, lname, policy) \ __FOLIO_SET_FLAG(lname, FOLIO_##policy) \ static __always_inline void __SetPage##uname(struct page *page) \ { __set_bit(PG_##lname, &policy(page, 1)->flags); } #define __CLEARPAGEFLAG(uname, lname, policy) \ __FOLIO_CLEAR_FLAG(lname, FOLIO_##policy) \ static __always_inline void __ClearPage##uname(struct page *page) \ { __clear_bit(PG_##lname, &policy(page, 1)->flags); } #define TESTSETFLAG(uname, lname, policy) \ FOLIO_TEST_SET_FLAG(lname, FOLIO_##policy) \ static __always_inline int TestSetPage##uname(struct page *page) \ { return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); } #define TESTCLEARFLAG(uname, lname, policy) \ FOLIO_TEST_CLEAR_FLAG(lname, FOLIO_##policy) \ static 
__always_inline int TestClearPage##uname(struct page *page) \ { return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); } #define PAGEFLAG(uname, lname, policy) \ TESTPAGEFLAG(uname, lname, policy) \ SETPAGEFLAG(uname, lname, policy) \ CLEARPAGEFLAG(uname, lname, policy) #define __PAGEFLAG(uname, lname, policy) \ TESTPAGEFLAG(uname, lname, policy) \ __SETPAGEFLAG(uname, lname, policy) \ __CLEARPAGEFLAG(uname, lname, policy) #define TESTSCFLAG(uname, lname, policy) \ TESTSETFLAG(uname, lname, policy) \ TESTCLEARFLAG(uname, lname, policy) #define FOLIO_TEST_FLAG_FALSE(name) \ static inline bool folio_test_##name(const struct folio *folio) \ { return false; } #define FOLIO_SET_FLAG_NOOP(name) \ static inline void folio_set_##name(struct folio *folio) { } #define FOLIO_CLEAR_FLAG_NOOP(name) \ static inline void folio_clear_##name(struct folio *folio) { } #define __FOLIO_SET_FLAG_NOOP(name) \ static inline void __folio_set_##name(struct folio *folio) { } #define __FOLIO_CLEAR_FLAG_NOOP(name) \ static inline void __folio_clear_##name(struct folio *folio) { } #define FOLIO_TEST_SET_FLAG_FALSE(name) \ static inline bool folio_test_set_##name(struct folio *folio) \ { return false; } #define FOLIO_TEST_CLEAR_FLAG_FALSE(name) \ static inline bool folio_test_clear_##name(struct folio *folio) \ { return false; } #define FOLIO_FLAG_FALSE(name) \ FOLIO_TEST_FLAG_FALSE(name) \ FOLIO_SET_FLAG_NOOP(name) \ FOLIO_CLEAR_FLAG_NOOP(name) #define TESTPAGEFLAG_FALSE(uname, lname) \ FOLIO_TEST_FLAG_FALSE(lname) \ static inline int Page##uname(const struct page *page) { return 0; } #define SETPAGEFLAG_NOOP(uname, lname) \ FOLIO_SET_FLAG_NOOP(lname) \ static inline void SetPage##uname(struct page *page) { } #define CLEARPAGEFLAG_NOOP(uname, lname) \ FOLIO_CLEAR_FLAG_NOOP(lname) \ static inline void ClearPage##uname(struct page *page) { } #define __CLEARPAGEFLAG_NOOP(uname, lname) \ __FOLIO_CLEAR_FLAG_NOOP(lname) \ static inline void __ClearPage##uname(struct page *page) { } #define TESTSETFLAG_FALSE(uname, lname) \ FOLIO_TEST_SET_FLAG_FALSE(lname) \ static inline int TestSetPage##uname(struct page *page) { return 0; } #define TESTCLEARFLAG_FALSE(uname, lname) \ FOLIO_TEST_CLEAR_FLAG_FALSE(lname) \ static inline int TestClearPage##uname(struct page *page) { return 0; } #define PAGEFLAG_FALSE(uname, lname) TESTPAGEFLAG_FALSE(uname, lname) \ SETPAGEFLAG_NOOP(uname, lname) CLEARPAGEFLAG_NOOP(uname, lname) #define TESTSCFLAG_FALSE(uname, lname) \ TESTSETFLAG_FALSE(uname, lname) TESTCLEARFLAG_FALSE(uname, lname) __PAGEFLAG(Locked, locked, PF_NO_TAIL) FOLIO_FLAG(waiters, FOLIO_HEAD_PAGE) FOLIO_FLAG(referenced, FOLIO_HEAD_PAGE) FOLIO_TEST_CLEAR_FLAG(referenced, FOLIO_HEAD_PAGE) __FOLIO_SET_FLAG(referenced, FOLIO_HEAD_PAGE) PAGEFLAG(Dirty, dirty, PF_HEAD) TESTSCFLAG(Dirty, dirty, PF_HEAD) __CLEARPAGEFLAG(Dirty, dirty, PF_HEAD) PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD) TESTCLEARFLAG(LRU, lru, PF_HEAD) FOLIO_FLAG(active, FOLIO_HEAD_PAGE) __FOLIO_CLEAR_FLAG(active, FOLIO_HEAD_PAGE) FOLIO_TEST_CLEAR_FLAG(active, FOLIO_HEAD_PAGE) PAGEFLAG(Workingset, workingset, PF_HEAD) TESTCLEARFLAG(Workingset, workingset, PF_HEAD) PAGEFLAG(Checked, checked, PF_NO_COMPOUND) /* Used by some filesystems */ /* Xen */ PAGEFLAG(Pinned, pinned, PF_NO_COMPOUND) TESTSCFLAG(Pinned, pinned, PF_NO_COMPOUND) PAGEFLAG(SavePinned, savepinned, PF_NO_COMPOUND); PAGEFLAG(Foreign, foreign, PF_NO_COMPOUND); PAGEFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND) TESTCLEARFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND) PAGEFLAG(Reserved, 
reserved, PF_NO_COMPOUND) __CLEARPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND) __SETPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND) FOLIO_FLAG(swapbacked, FOLIO_HEAD_PAGE) __FOLIO_CLEAR_FLAG(swapbacked, FOLIO_HEAD_PAGE) __FOLIO_SET_FLAG(swapbacked, FOLIO_HEAD_PAGE) /* * Private page markings that may be used by the filesystem that owns the page * for its own purposes. * - PG_private and PG_private_2 cause release_folio() and co to be invoked */ PAGEFLAG(Private, private, PF_ANY) FOLIO_FLAG(private_2, FOLIO_HEAD_PAGE) /* owner_2 can be set on tail pages for anon memory */ FOLIO_FLAG(owner_2, FOLIO_HEAD_PAGE) /* * Only test-and-set exist for PG_writeback. The unconditional operators are * risky: they bypass page accounting. */ TESTPAGEFLAG(Writeback, writeback, PF_NO_TAIL) TESTSCFLAG(Writeback, writeback, PF_NO_TAIL) FOLIO_FLAG(mappedtodisk, FOLIO_HEAD_PAGE) /* PG_readahead is only used for reads; PG_reclaim is only for writes */ PAGEFLAG(Reclaim, reclaim, PF_NO_TAIL) TESTCLEARFLAG(Reclaim, reclaim, PF_NO_TAIL) FOLIO_FLAG(readahead, FOLIO_HEAD_PAGE) FOLIO_TEST_CLEAR_FLAG(readahead, FOLIO_HEAD_PAGE) #ifdef CONFIG_HIGHMEM /* * Must use a macro here due to header dependency issues. page_zone() is not * available at this point. */ #define PageHighMem(__p) is_highmem_idx(page_zonenum(__p)) #define folio_test_highmem(__f) is_highmem_idx(folio_zonenum(__f)) #else PAGEFLAG_FALSE(HighMem, highmem) #endif #ifdef CONFIG_SWAP static __always_inline bool folio_test_swapcache(const struct folio *folio) { return folio_test_swapbacked(folio) && test_bit(PG_swapcache, const_folio_flags(folio, 0)); } FOLIO_SET_FLAG(swapcache, FOLIO_HEAD_PAGE) FOLIO_CLEAR_FLAG(swapcache, FOLIO_HEAD_PAGE) #else FOLIO_FLAG_FALSE(swapcache) #endif FOLIO_FLAG(unevictable, FOLIO_HEAD_PAGE) __FOLIO_CLEAR_FLAG(unevictable, FOLIO_HEAD_PAGE) FOLIO_TEST_CLEAR_FLAG(unevictable, FOLIO_HEAD_PAGE) #ifdef CONFIG_MMU FOLIO_FLAG(mlocked, FOLIO_HEAD_PAGE) __FOLIO_CLEAR_FLAG(mlocked, FOLIO_HEAD_PAGE) FOLIO_TEST_CLEAR_FLAG(mlocked, FOLIO_HEAD_PAGE) FOLIO_TEST_SET_FLAG(mlocked, FOLIO_HEAD_PAGE) #else FOLIO_FLAG_FALSE(mlocked) __FOLIO_CLEAR_FLAG_NOOP(mlocked) FOLIO_TEST_CLEAR_FLAG_FALSE(mlocked) FOLIO_TEST_SET_FLAG_FALSE(mlocked) #endif #ifdef CONFIG_MEMORY_FAILURE PAGEFLAG(HWPoison, hwpoison, PF_ANY) TESTSCFLAG(HWPoison, hwpoison, PF_ANY) #define __PG_HWPOISON (1UL << PG_hwpoison) #else PAGEFLAG_FALSE(HWPoison, hwpoison) #define __PG_HWPOISON 0 #endif #ifdef CONFIG_PAGE_IDLE_FLAG #ifdef CONFIG_64BIT FOLIO_TEST_FLAG(young, FOLIO_HEAD_PAGE) FOLIO_SET_FLAG(young, FOLIO_HEAD_PAGE) FOLIO_TEST_CLEAR_FLAG(young, FOLIO_HEAD_PAGE) FOLIO_FLAG(idle, FOLIO_HEAD_PAGE) #endif /* See page_idle.h for !64BIT workaround */ #else /* !CONFIG_PAGE_IDLE_FLAG */ FOLIO_FLAG_FALSE(young) FOLIO_TEST_CLEAR_FLAG_FALSE(young) FOLIO_FLAG_FALSE(idle) #endif /* * PageReported() is used to track reported free pages within the Buddy * allocator. We can use the non-atomic version of the test and set * operations as both should be shielded with the zone lock to prevent * any possible races on the setting or clearing of the bit. */ __PAGEFLAG(Reported, reported, PF_NO_COMPOUND) #ifdef CONFIG_MEMORY_HOTPLUG PAGEFLAG(VmemmapSelfHosted, vmemmap_self_hosted, PF_ANY) #else PAGEFLAG_FALSE(VmemmapSelfHosted, vmemmap_self_hosted) #endif /* * On an anonymous folio mapped into a user virtual memory area, * folio->mapping points to its anon_vma, not to a struct address_space; * with the PAGE_MAPPING_ANON bit set to distinguish it. See rmap.h. 
* * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled, * the PAGE_MAPPING_MOVABLE bit may be set along with the PAGE_MAPPING_ANON * bit; and then folio->mapping points, not to an anon_vma, but to a private * structure which KSM associates with that merged page. See ksm.h. * * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is used for non-lru movable * page and then folio->mapping points to a struct movable_operations. * * Please note that, confusingly, "folio_mapping" refers to the inode * address_space which maps the folio from disk; whereas "folio_mapped" * refers to user virtual address space into which the folio is mapped. * * For slab pages, since slab reuses the bits in struct page to store its * internal states, the folio->mapping does not exist as such, nor do * these flags below. So in order to avoid testing non-existent bits, * please make sure that folio_test_slab(folio) actually evaluates to * false before calling the following functions (e.g., folio_test_anon). * See mm/slab.h. */ #define PAGE_MAPPING_ANON 0x1 #define PAGE_MAPPING_MOVABLE 0x2 #define PAGE_MAPPING_KSM (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE) #define PAGE_MAPPING_FLAGS (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE) /* * Different with flags above, this flag is used only for fsdax mode. It * indicates that this page->mapping is now under reflink case. */ #define PAGE_MAPPING_DAX_SHARED ((void *)0x1) static __always_inline bool folio_mapping_flags(const struct folio *folio) { return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) != 0; } static __always_inline bool PageMappingFlags(const struct page *page) { return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0; } static __always_inline bool folio_test_anon(const struct folio *folio) { return ((unsigned long)folio->mapping & PAGE_MAPPING_ANON) != 0; } static __always_inline bool PageAnonNotKsm(const struct page *page) { unsigned long flags = (unsigned long)page_folio(page)->mapping; return (flags & PAGE_MAPPING_FLAGS) == PAGE_MAPPING_ANON; } static __always_inline bool PageAnon(const struct page *page) { return folio_test_anon(page_folio(page)); } static __always_inline bool __folio_test_movable(const struct folio *folio) { return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) == PAGE_MAPPING_MOVABLE; } static __always_inline bool __PageMovable(const struct page *page) { return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) == PAGE_MAPPING_MOVABLE; } #ifdef CONFIG_KSM /* * A KSM page is one of those write-protected "shared pages" or "merged pages" * which KSM maps into multiple mms, wherever identical anonymous page content * is found in VM_MERGEABLE vmas. It's a PageAnon page, pointing not to any * anon_vma, but to that page's node of the stable tree. */ static __always_inline bool folio_test_ksm(const struct folio *folio) { return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) == PAGE_MAPPING_KSM; } #else FOLIO_TEST_FLAG_FALSE(ksm) #endif u64 stable_page_flags(const struct page *page); /** * folio_xor_flags_has_waiters - Change some folio flags. * @folio: The folio. * @mask: Bits set in this word will be changed. * * This must only be used for flags which are changed with the folio * lock held. For example, it is unsafe to use for PG_dirty as that * can be set without the folio lock held. It can also only be used * on flags which are in the range 0-6 as some of the implementations * only affect those bits. * * Return: Whether there are tasks waiting on the folio. 
*/ static inline bool folio_xor_flags_has_waiters(struct folio *folio, unsigned long mask) { return xor_unlock_is_negative_byte(mask, folio_flags(folio, 0)); } /** * folio_test_uptodate - Is this folio up to date? * @folio: The folio. * * The uptodate flag is set on a folio when every byte in the folio is * at least as new as the corresponding bytes on storage. Anonymous * and CoW folios are always uptodate. If the folio is not uptodate, * some of the bytes in it may be; see the is_partially_uptodate() * address_space operation. */ static inline bool folio_test_uptodate(const struct folio *folio) { bool ret = test_bit(PG_uptodate, const_folio_flags(folio, 0)); /* * Must ensure that the data we read out of the folio is loaded * _after_ we've loaded folio->flags to check the uptodate bit. * We can skip the barrier if the folio is not uptodate, because * we wouldn't be reading anything from it. * * See folio_mark_uptodate() for the other side of the story. */ if (ret) smp_rmb(); return ret; } static inline bool PageUptodate(const struct page *page) { return folio_test_uptodate(page_folio(page)); } static __always_inline void __folio_mark_uptodate(struct folio *folio) { smp_wmb(); __set_bit(PG_uptodate, folio_flags(folio, 0)); } static __always_inline void folio_mark_uptodate(struct folio *folio) { /* * Memory barrier must be issued before setting the PG_uptodate bit, * so that all previous stores issued in order to bring the folio * uptodate are actually visible before folio_test_uptodate becomes true. */ smp_wmb(); set_bit(PG_uptodate, folio_flags(folio, 0)); } static __always_inline void __SetPageUptodate(struct page *page) { __folio_mark_uptodate((struct folio *)page); } static __always_inline void SetPageUptodate(struct page *page) { folio_mark_uptodate((struct folio *)page); } CLEARPAGEFLAG(Uptodate, uptodate, PF_NO_TAIL) void __folio_start_writeback(struct folio *folio, bool keep_write); void set_page_writeback(struct page *page); #define folio_start_writeback(folio) \ __folio_start_writeback(folio, false) #define folio_start_writeback_keepwrite(folio) \ __folio_start_writeback(folio, true) static __always_inline bool folio_test_head(const struct folio *folio) { return test_bit(PG_head, const_folio_flags(folio, FOLIO_PF_ANY)); } static __always_inline int PageHead(const struct page *page) { PF_POISONED_CHECK(page); return test_bit(PG_head, &page->flags) && !page_is_fake_head(page); } __SETPAGEFLAG(Head, head, PF_ANY) __CLEARPAGEFLAG(Head, head, PF_ANY) CLEARPAGEFLAG(Head, head, PF_ANY) /** * folio_test_large() - Does this folio contain more than one page? * @folio: The folio to test. * * Return: True if the folio is larger than one page. */ static inline bool folio_test_large(const struct folio *folio) { return folio_test_head(folio); } static __always_inline void set_compound_head(struct page *page, struct page *head) { WRITE_ONCE(page->compound_head, (unsigned long)head + 1); } static __always_inline void clear_compound_head(struct page *page) { WRITE_ONCE(page->compound_head, 0); } #ifdef CONFIG_TRANSPARENT_HUGEPAGE static inline void ClearPageCompound(struct page *page) { BUG_ON(!PageHead(page)); ClearPageHead(page); } FOLIO_FLAG(large_rmappable, FOLIO_SECOND_PAGE) FOLIO_FLAG(partially_mapped, FOLIO_SECOND_PAGE) #else FOLIO_FLAG_FALSE(large_rmappable) FOLIO_FLAG_FALSE(partially_mapped) #endif #define PG_head_mask ((1UL << PG_head)) #ifdef CONFIG_TRANSPARENT_HUGEPAGE /* * PageHuge() only returns true for hugetlbfs pages, but not for * normal or transparent huge pages. 
* * PageTransHuge() returns true for both transparent huge and * hugetlbfs pages, but not normal pages. PageTransHuge() can only be * called only in the core VM paths where hugetlbfs pages can't exist. */ static inline int PageTransHuge(const struct page *page) { VM_BUG_ON_PAGE(PageTail(page), page); return PageHead(page); } /* * PageTransCompound returns true for both transparent huge pages * and hugetlbfs pages, so it should only be called when it's known * that hugetlbfs pages aren't involved. */ static inline int PageTransCompound(const struct page *page) { return PageCompound(page); } /* * PageTransTail returns true for both transparent huge pages * and hugetlbfs pages, so it should only be called when it's known * that hugetlbfs pages aren't involved. */ static inline int PageTransTail(const struct page *page) { return PageTail(page); } #else TESTPAGEFLAG_FALSE(TransHuge, transhuge) TESTPAGEFLAG_FALSE(TransCompound, transcompound) TESTPAGEFLAG_FALSE(TransCompoundMap, transcompoundmap) TESTPAGEFLAG_FALSE(TransTail, transtail) #endif #if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_TRANSPARENT_HUGEPAGE) /* * PageHasHWPoisoned indicates that at least one subpage is hwpoisoned in the * compound page. * * This flag is set by hwpoison handler. Cleared by THP split or free page. */ PAGEFLAG(HasHWPoisoned, has_hwpoisoned, PF_SECOND) TESTSCFLAG(HasHWPoisoned, has_hwpoisoned, PF_SECOND) #else PAGEFLAG_FALSE(HasHWPoisoned, has_hwpoisoned) TESTSCFLAG_FALSE(HasHWPoisoned, has_hwpoisoned) #endif /* * For pages that do not use mapcount, page_type may be used. * The low 24 bits of pagetype may be used for your own purposes, as long * as you are careful to not affect the top 8 bits. The low bits of * pagetype will be overwritten when you clear the page_type from the page. */ enum pagetype { /* 0x00-0x7f are positive numbers, ie mapcount */ /* Reserve 0x80-0xef for mapcount overflow. 
*/ PGTY_buddy = 0xf0, PGTY_offline = 0xf1, PGTY_table = 0xf2, PGTY_guard = 0xf3, PGTY_hugetlb = 0xf4, PGTY_slab = 0xf5, PGTY_zsmalloc = 0xf6, PGTY_unaccepted = 0xf7, PGTY_mapcount_underflow = 0xff }; static inline bool page_type_has_type(int page_type) { return page_type < (PGTY_mapcount_underflow << 24); } /* This takes a mapcount which is one more than page->_mapcount */ static inline bool page_mapcount_is_type(unsigned int mapcount) { return page_type_has_type(mapcount - 1); } static inline bool page_has_type(const struct page *page) { return page_mapcount_is_type(data_race(page->page_type)); } #define FOLIO_TYPE_OPS(lname, fname) \ static __always_inline bool folio_test_##fname(const struct folio *folio) \ { \ return data_race(folio->page.page_type >> 24) == PGTY_##lname; \ } \ static __always_inline void __folio_set_##fname(struct folio *folio) \ { \ if (folio_test_##fname(folio)) \ return; \ VM_BUG_ON_FOLIO(data_race(folio->page.page_type) != UINT_MAX, \ folio); \ folio->page.page_type = (unsigned int)PGTY_##lname << 24; \ } \ static __always_inline void __folio_clear_##fname(struct folio *folio) \ { \ if (folio->page.page_type == UINT_MAX) \ return; \ VM_BUG_ON_FOLIO(!folio_test_##fname(folio), folio); \ folio->page.page_type = UINT_MAX; \ } #define PAGE_TYPE_OPS(uname, lname, fname) \ FOLIO_TYPE_OPS(lname, fname) \ static __always_inline int Page##uname(const struct page *page) \ { \ return data_race(page->page_type >> 24) == PGTY_##lname; \ } \ static __always_inline void __SetPage##uname(struct page *page) \ { \ if (Page##uname(page)) \ return; \ VM_BUG_ON_PAGE(data_race(page->page_type) != UINT_MAX, page); \ page->page_type = (unsigned int)PGTY_##lname << 24; \ } \ static __always_inline void __ClearPage##uname(struct page *page) \ { \ if (page->page_type == UINT_MAX) \ return; \ VM_BUG_ON_PAGE(!Page##uname(page), page); \ page->page_type = UINT_MAX; \ } /* * PageBuddy() indicates that the page is free and in the buddy system * (see mm/page_alloc.c). */ PAGE_TYPE_OPS(Buddy, buddy, buddy) /* * PageOffline() indicates that the page is logically offline although the * containing section is online. (e.g. inflated in a balloon driver or * not onlined when onlining the section). * The content of these pages is effectively stale. Such pages should not * be touched (read/write/dump/save) except by their owner. * * When a memory block gets onlined, all pages are initialized with a * refcount of 1 and PageOffline(). generic_online_page() will * take care of clearing PageOffline(). * * If a driver wants to allow to offline unmovable PageOffline() pages without * putting them back to the buddy, it can do so via the memory notifier by * decrementing the reference count in MEM_GOING_OFFLINE and incrementing the * reference count in MEM_CANCEL_OFFLINE. When offlining, the PageOffline() * pages (now with a reference count of zero) are treated like free (unmanaged) * pages, allowing the containing memory block to get offlined. A driver that * relies on this feature is aware that re-onlining the memory block will * require not giving them to the buddy via generic_online_page(). * * Memory offlining code will not adjust the managed page count for any * PageOffline() pages, treating them like they were never exposed to the * buddy using generic_online_page(). * * There are drivers that mark a page PageOffline() and expect there won't be * any further access to page content. 
PFN walkers that read content of random * pages should check PageOffline() and synchronize with such drivers using * page_offline_freeze()/page_offline_thaw(). */ PAGE_TYPE_OPS(Offline, offline, offline) extern void page_offline_freeze(void); extern void page_offline_thaw(void); extern void page_offline_begin(void); extern void page_offline_end(void); /* * Marks pages in use as page tables. */ PAGE_TYPE_OPS(Table, table, pgtable) /* * Marks guardpages used with debug_pagealloc. */ PAGE_TYPE_OPS(Guard, guard, guard) FOLIO_TYPE_OPS(slab, slab) /** * PageSlab - Determine if the page belongs to the slab allocator * @page: The page to test. * * Context: Any context. * Return: True for slab pages, false for any other kind of page. */ static inline bool PageSlab(const struct page *page) { return folio_test_slab(page_folio(page)); } #ifdef CONFIG_HUGETLB_PAGE FOLIO_TYPE_OPS(hugetlb, hugetlb) #else FOLIO_TEST_FLAG_FALSE(hugetlb) #endif PAGE_TYPE_OPS(Zsmalloc, zsmalloc, zsmalloc) /* * Mark pages that has to be accepted before touched for the first time. * * Serialized with zone lock. */ PAGE_TYPE_OPS(Unaccepted, unaccepted, unaccepted) /** * PageHuge - Determine if the page belongs to hugetlbfs * @page: The page to test. * * Context: Any context. * Return: True for hugetlbfs pages, false for anon pages or pages * belonging to other filesystems. */ static inline bool PageHuge(const struct page *page) { return folio_test_hugetlb(page_folio(page)); } /* * Check if a page is currently marked HWPoisoned. Note that this check is * best effort only and inherently racy: there is no way to synchronize with * failing hardware. */ static inline bool is_page_hwpoison(const struct page *page) { const struct folio *folio; if (PageHWPoison(page)) return true; folio = page_folio(page); return folio_test_hugetlb(folio) && PageHWPoison(&folio->page); } bool is_free_buddy_page(const struct page *page); PAGEFLAG(Isolated, isolated, PF_ANY); static __always_inline int PageAnonExclusive(const struct page *page) { VM_BUG_ON_PGFLAGS(!PageAnon(page), page); /* * HugeTLB stores this information on the head page; THP keeps it per * page */ if (PageHuge(page)) page = compound_head(page); return test_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags); } static __always_inline void SetPageAnonExclusive(struct page *page) { VM_BUG_ON_PGFLAGS(!PageAnonNotKsm(page), page); VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page); set_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags); } static __always_inline void ClearPageAnonExclusive(struct page *page) { VM_BUG_ON_PGFLAGS(!PageAnonNotKsm(page), page); VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page); clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags); } static __always_inline void __ClearPageAnonExclusive(struct page *page) { VM_BUG_ON_PGFLAGS(!PageAnon(page), page); VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page); __clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags); } #ifdef CONFIG_MMU #define __PG_MLOCKED (1UL << PG_mlocked) #else #define __PG_MLOCKED 0 #endif /* * Flags checked when a page is freed. Pages being freed should not have * these flags set. If they are, there is a problem. */ #define PAGE_FLAGS_CHECK_AT_FREE \ (1UL << PG_lru | 1UL << PG_locked | \ 1UL << PG_private | 1UL << PG_private_2 | \ 1UL << PG_writeback | 1UL << PG_reserved | \ 1UL << PG_active | \ 1UL << PG_unevictable | __PG_MLOCKED | LRU_GEN_MASK) /* * Flags checked when a page is prepped for return by the page allocator. * Pages being prepped should not have these flags set. 
If they are set, * there has been a kernel bug or struct page corruption. * * __PG_HWPOISON is exceptional because it needs to be kept beyond page's * alloc-free cycle to prevent from reusing the page. */ #define PAGE_FLAGS_CHECK_AT_PREP \ ((PAGEFLAGS_MASK & ~__PG_HWPOISON) | LRU_GEN_MASK | LRU_REFS_MASK) /* * Flags stored in the second page of a compound page. They may overlap * the CHECK_AT_FREE flags above, so need to be cleared. */ #define PAGE_FLAGS_SECOND \ (0xffUL /* order */ | 1UL << PG_has_hwpoisoned | \ 1UL << PG_large_rmappable | 1UL << PG_partially_mapped) #define PAGE_FLAGS_PRIVATE \ (1UL << PG_private | 1UL << PG_private_2) /** * folio_has_private - Determine if folio has private stuff * @folio: The folio to be checked * * Determine if a folio has private stuff, indicating that release routines * should be invoked upon it. */ static inline int folio_has_private(const struct folio *folio) { return !!(folio->flags & PAGE_FLAGS_PRIVATE); } #undef PF_ANY #undef PF_HEAD #undef PF_NO_TAIL #undef PF_NO_COMPOUND #undef PF_SECOND #endif /* !__GENERATING_BOUNDS_H */ #endif /* PAGE_FLAGS_H */ |
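/*
 * Standalone userspace model, not part of the kernel sources, of the tagged
 * pointer trick behind compound_head()/PageTail() above: a tail page stores
 * the head page's address with bit 0 set in its compound_head word, so bit 0
 * doubles as the "this is a tail" marker and clearing it recovers the head.
 * struct toy_page and toy_compound_head() are hypothetical names.
 */
#include <stdio.h>
#include <stdint.h>

struct toy_page {
	uintptr_t compound_head;	/* (head | 1) on tail pages, 0 otherwise */
};

static struct toy_page *toy_compound_head(struct toy_page *page)
{
	uintptr_t head = page->compound_head;

	if (head & 1)			/* tail page: strip the tag bit */
		return (struct toy_page *)(head - 1);
	return page;			/* head page or order-0 page */
}

int main(void)
{
	struct toy_page pages[4] = { { 0 } };	/* pages[0] plays the head */
	int i;

	for (i = 1; i < 4; i++)
		pages[i].compound_head = (uintptr_t)&pages[0] | 1;

	printf("tail resolves to head: %d\n",
	       toy_compound_head(&pages[2]) == &pages[0]);
	printf("head resolves to itself: %d\n",
	       toy_compound_head(&pages[0]) == &pages[0]);
	return 0;
}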
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc. * * This file contains power management functions related to interrupts. */ #include <linux/irq.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/suspend.h> #include <linux/syscore_ops.h> #include "internals.h" bool irq_pm_check_wakeup(struct irq_desc *desc) { if (irqd_is_wakeup_armed(&desc->irq_data)) { irqd_clear(&desc->irq_data, IRQD_WAKEUP_ARMED); desc->istate |= IRQS_SUSPENDED | IRQS_PENDING; desc->depth++; irq_disable(desc); pm_system_irq_wakeup(irq_desc_get_irq(desc)); return true; } return false; } /* * Called from __setup_irq() with desc->lock held after @action has * been installed in the action chain. */ void irq_pm_install_action(struct irq_desc *desc, struct irqaction *action) { desc->nr_actions++; if (action->flags & IRQF_FORCE_RESUME) desc->force_resume_depth++; WARN_ON_ONCE(desc->force_resume_depth && desc->force_resume_depth != desc->nr_actions); if (action->flags & IRQF_NO_SUSPEND) desc->no_suspend_depth++; else if (action->flags & IRQF_COND_SUSPEND) desc->cond_suspend_depth++; WARN_ON_ONCE(desc->no_suspend_depth && (desc->no_suspend_depth + desc->cond_suspend_depth) != desc->nr_actions); } /* * Called from __free_irq() with desc->lock held after @action has * been removed from the action chain. */ void irq_pm_remove_action(struct irq_desc *desc, struct irqaction *action) { desc->nr_actions--; if (action->flags & IRQF_FORCE_RESUME) desc->force_resume_depth--; if (action->flags & IRQF_NO_SUSPEND) desc->no_suspend_depth--; else if (action->flags & IRQF_COND_SUSPEND) desc->cond_suspend_depth--; } static bool suspend_device_irq(struct irq_desc *desc) { unsigned long chipflags = irq_desc_get_chip(desc)->flags; struct irq_data *irqd = &desc->irq_data; if (!desc->action || irq_desc_is_chained(desc) || desc->no_suspend_depth) return false; if (irqd_is_wakeup_set(irqd)) { irqd_set(irqd, IRQD_WAKEUP_ARMED); if ((chipflags & IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND) && irqd_irq_disabled(irqd)) { /* * Interrupt marked for wakeup is in disabled state. * Enable interrupt here to unmask/enable in irqchip * to be able to resume with such interrupts. */ __enable_irq(desc); irqd_set(irqd, IRQD_IRQ_ENABLED_ON_SUSPEND); } /* * We return true here to force the caller to issue * synchronize_irq(). We need to make sure that the * IRQD_WAKEUP_ARMED is visible before we return from * suspend_device_irqs().
*/ return true; } desc->istate |= IRQS_SUSPENDED; __disable_irq(desc); /* * Hardware which has no wakeup source configuration facility * requires that the non wakeup interrupts are masked at the * chip level. The chip implementation indicates that with * IRQCHIP_MASK_ON_SUSPEND. */ if (chipflags & IRQCHIP_MASK_ON_SUSPEND) mask_irq(desc); return true; } /** * suspend_device_irqs - disable all currently enabled interrupt lines * * During system-wide suspend or hibernation device drivers need to be * prevented from receiving interrupts and this function is provided * for this purpose. * * So we disable all interrupts and mark them IRQS_SUSPENDED except * for those which are unused, those which are marked as not * suspendable via an interrupt request with the flag IRQF_NO_SUSPEND * set and those which are marked as active wakeup sources. * * The active wakeup sources are handled by the flow handler entry * code which checks for the IRQD_WAKEUP_ARMED flag, suspends the * interrupt and notifies the pm core about the wakeup. */ void suspend_device_irqs(void) { struct irq_desc *desc; int irq; for_each_irq_desc(irq, desc) { unsigned long flags; bool sync; if (irq_settings_is_nested_thread(desc)) continue; raw_spin_lock_irqsave(&desc->lock, flags); sync = suspend_device_irq(desc); raw_spin_unlock_irqrestore(&desc->lock, flags); if (sync) synchronize_irq(irq); } } static void resume_irq(struct irq_desc *desc) { struct irq_data *irqd = &desc->irq_data; irqd_clear(irqd, IRQD_WAKEUP_ARMED); if (irqd_is_enabled_on_suspend(irqd)) { /* * Interrupt marked for wakeup was enabled during suspend * entry. Disable such interrupts to restore them back to * original state. */ __disable_irq(desc); irqd_clear(irqd, IRQD_IRQ_ENABLED_ON_SUSPEND); } if (desc->istate & IRQS_SUSPENDED) goto resume; /* Force resume the interrupt? */ if (!desc->force_resume_depth) return; /* Pretend that it got disabled ! */ desc->depth++; irq_state_set_disabled(desc); irq_state_set_masked(desc); resume: desc->istate &= ~IRQS_SUSPENDED; __enable_irq(desc); } static void resume_irqs(bool want_early) { struct irq_desc *desc; int irq; for_each_irq_desc(irq, desc) { unsigned long flags; bool is_early = desc->action && desc->action->flags & IRQF_EARLY_RESUME; if (!is_early && want_early) continue; if (irq_settings_is_nested_thread(desc)) continue; raw_spin_lock_irqsave(&desc->lock, flags); resume_irq(desc); raw_spin_unlock_irqrestore(&desc->lock, flags); } } /** * rearm_wake_irq - rearm a wakeup interrupt line after signaling wakeup * @irq: Interrupt to rearm */ void rearm_wake_irq(unsigned int irq) { unsigned long flags; struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); if (!desc) return; if (!(desc->istate & IRQS_SUSPENDED) || !irqd_is_wakeup_set(&desc->irq_data)) goto unlock; desc->istate &= ~IRQS_SUSPENDED; irqd_set(&desc->irq_data, IRQD_WAKEUP_ARMED); __enable_irq(desc); unlock: irq_put_desc_busunlock(desc, flags); } /** * irq_pm_syscore_resume - enable interrupt lines early * * Enable all interrupt lines with %IRQF_EARLY_RESUME set. 
*/ static void irq_pm_syscore_resume(void) { resume_irqs(true); } static struct syscore_ops irq_pm_syscore_ops = { .resume = irq_pm_syscore_resume, }; static int __init irq_pm_init_ops(void) { register_syscore_ops(&irq_pm_syscore_ops); return 0; } device_initcall(irq_pm_init_ops); /** * resume_device_irqs - enable interrupt lines disabled by suspend_device_irqs() * * Enable all non-%IRQF_EARLY_RESUME interrupt lines previously * disabled by suspend_device_irqs() that have the IRQS_SUSPENDED flag * set as well as those with %IRQF_FORCE_RESUME. */ void resume_device_irqs(void) { resume_irqs(false); } |
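/*
 * Userspace model, not part of the kernel sources, of the decision that
 * suspend_device_irq() makes above: lines without a handler or requested
 * with an IRQF_NO_SUSPEND-like flag are skipped, lines configured as wakeup
 * sources are armed rather than disabled, and everything else is suspended.
 * struct toy_desc and toy_suspend_decision() are hypothetical names.
 */
#include <stdio.h>
#include <stdbool.h>

enum toy_action { TOY_SKIP, TOY_ARM_WAKEUP, TOY_SUSPEND };

struct toy_desc {
	bool has_action;	/* a handler is installed */
	bool no_suspend;	/* registered as not suspendable */
	bool wakeup_set;	/* configured as a system wakeup source */
};

static enum toy_action toy_suspend_decision(const struct toy_desc *d)
{
	if (!d->has_action || d->no_suspend)
		return TOY_SKIP;
	if (d->wakeup_set)
		return TOY_ARM_WAKEUP;	/* left enabled, flagged for wakeup */
	return TOY_SUSPEND;		/* marked suspended and disabled */
}

int main(void)
{
	const struct toy_desc timer    = { true, true,  false };
	const struct toy_desc keyboard = { true, false, true  };
	const struct toy_desc nic      = { true, false, false };

	/* Expected: timer skipped (0), keyboard armed (1), nic suspended (2). */
	printf("timer=%d keyboard=%d nic=%d\n",
	       toy_suspend_decision(&timer),
	       toy_suspend_decision(&keyboard),
	       toy_suspend_decision(&nic));
	return 0;
}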
// SPDX-License-Identifier: GPL-2.0-or-later /* * geniv: Shared IV generator code * * This file provides common code to IV generators such as seqiv. * * Copyright (c) 2007-2019 Herbert Xu <herbert@gondor.apana.org.au> */ #include <crypto/internal/geniv.h> #include <crypto/internal/rng.h> #include <crypto/null.h> #include <linux/err.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/rtnetlink.h> #include <linux/slab.h> static int aead_geniv_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen) { struct aead_geniv_ctx *ctx = crypto_aead_ctx(tfm); return crypto_aead_setkey(ctx->child, key, keylen); } static int aead_geniv_setauthsize(struct crypto_aead *tfm, unsigned int authsize) { struct aead_geniv_ctx *ctx = crypto_aead_ctx(tfm); return crypto_aead_setauthsize(ctx->child, authsize); } static void aead_geniv_free(struct aead_instance *inst) { crypto_drop_aead(aead_instance_ctx(inst)); kfree(inst); } struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl, struct rtattr **tb) { struct crypto_aead_spawn *spawn; struct aead_instance *inst; struct aead_alg *alg; unsigned int ivsize; unsigned int maxauthsize; u32 mask; int err; err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask); if (err) return ERR_PTR(err); inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); if (!inst) return ERR_PTR(-ENOMEM); spawn = aead_instance_ctx(inst); err = crypto_grab_aead(spawn, aead_crypto_instance(inst), crypto_attr_alg_name(tb[1]), 0, mask); if (err) goto err_free_inst; alg = crypto_spawn_aead_alg(spawn); ivsize = crypto_aead_alg_ivsize(alg); maxauthsize = crypto_aead_alg_maxauthsize(alg); err = -EINVAL; if (ivsize < sizeof(u64)) goto err_free_inst; err = -ENAMETOOLONG; if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, "%s(%s)", tmpl->name, alg->base.cra_name) >= CRYPTO_MAX_ALG_NAME) goto err_free_inst; if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s(%s)", tmpl->name, alg->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) goto err_free_inst; inst->alg.base.cra_priority = alg->base.cra_priority; inst->alg.base.cra_blocksize = alg->base.cra_blocksize; inst->alg.base.cra_alignmask = alg->base.cra_alignmask; inst->alg.base.cra_ctxsize = sizeof(struct aead_geniv_ctx); inst->alg.setkey = aead_geniv_setkey; inst->alg.setauthsize = aead_geniv_setauthsize; inst->alg.ivsize = ivsize; inst->alg.maxauthsize = maxauthsize; inst->free = aead_geniv_free; out: return inst; err_free_inst: aead_geniv_free(inst); inst = ERR_PTR(err); goto out; } EXPORT_SYMBOL_GPL(aead_geniv_alloc); int aead_init_geniv(struct crypto_aead *aead) { struct aead_geniv_ctx *ctx = crypto_aead_ctx(aead); struct aead_instance *inst = aead_alg_instance(aead); struct crypto_aead *child; int err; spin_lock_init(&ctx->lock); err = crypto_get_default_rng(); if (err) goto out; err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt, crypto_aead_ivsize(aead)); crypto_put_default_rng(); if (err) goto out; ctx->sknull =
crypto_get_default_null_skcipher(); err = PTR_ERR(ctx->sknull); if (IS_ERR(ctx->sknull)) goto out; child = crypto_spawn_aead(aead_instance_ctx(inst)); err = PTR_ERR(child); if (IS_ERR(child)) goto drop_null; ctx->child = child; crypto_aead_set_reqsize(aead, crypto_aead_reqsize(child) + sizeof(struct aead_request)); err = 0; out: return err; drop_null: crypto_put_default_null_skcipher(); goto out; } EXPORT_SYMBOL_GPL(aead_init_geniv); void aead_exit_geniv(struct crypto_aead *tfm) { struct aead_geniv_ctx *ctx = crypto_aead_ctx(tfm); crypto_free_aead(ctx->child); crypto_put_default_null_skcipher(); } EXPORT_SYMBOL_GPL(aead_exit_geniv); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Shared IV generator code"); |
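/*
 * Userspace sketch, not part of the kernel sources, of two checks performed
 * by aead_geniv_alloc() above: the generated instance name is
 * "template(algorithm)" and must fit within the name limit, and the wrapped
 * AEAD must provide at least a 64-bit IV so a sequence-number based
 * generator such as seqiv has room to work. toy_geniv_name() and
 * TOY_MAX_ALG_NAME are hypothetical stand-ins for the kernel helpers.
 */
#include <stdio.h>
#include <stdint.h>

#define TOY_MAX_ALG_NAME 128	/* stands in for CRYPTO_MAX_ALG_NAME */

static int toy_geniv_name(char *out, const char *tmpl, const char *alg,
			  unsigned int ivsize)
{
	if (ivsize < sizeof(uint64_t))
		return -1;	/* IV too small for a 64-bit sequence number */
	if (snprintf(out, TOY_MAX_ALG_NAME, "%s(%s)", tmpl, alg) >=
	    TOY_MAX_ALG_NAME)
		return -2;	/* name would be truncated */
	return 0;
}

int main(void)
{
	char name[TOY_MAX_ALG_NAME];

	/* An AEAD like "gcm(aes)" carries a 12-byte IV, which passes. */
	if (toy_geniv_name(name, "seqiv", "gcm(aes)", 12) == 0)
		printf("instance name: %s\n", name);	/* seqiv(gcm(aes)) */
	return 0;
}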
/* SPDX-License-Identifier: GPL-2.0 */ #include <linux/kernel.h> #include <linux/slab.h> #include <net/act_api.h> #include <net/flow_offload.h> #include <linux/rtnetlink.h> #include <linux/mutex.h> #include <linux/rhashtable.h> struct flow_rule *flow_rule_alloc(unsigned int num_actions) { struct flow_rule *rule; int i; rule = kzalloc(struct_size(rule, action.entries, num_actions), GFP_KERNEL); if (!rule) return NULL; rule->action.num_entries = num_actions; /* Pre-fill each action hw_stats with DONT_CARE. * Caller can override this if it wants stats for a given action.
*/ for (i = 0; i < num_actions; i++) rule->action.entries[i].hw_stats = FLOW_ACTION_HW_STATS_DONT_CARE; return rule; } EXPORT_SYMBOL(flow_rule_alloc); struct flow_offload_action *offload_action_alloc(unsigned int num_actions) { struct flow_offload_action *fl_action; int i; fl_action = kzalloc(struct_size(fl_action, action.entries, num_actions), GFP_KERNEL); if (!fl_action) return NULL; fl_action->action.num_entries = num_actions; /* Pre-fill each action hw_stats with DONT_CARE. * Caller can override this if it wants stats for a given action. */ for (i = 0; i < num_actions; i++) fl_action->action.entries[i].hw_stats = FLOW_ACTION_HW_STATS_DONT_CARE; return fl_action; } #define FLOW_DISSECTOR_MATCH(__rule, __type, __out) \ const struct flow_match *__m = &(__rule)->match; \ struct flow_dissector *__d = (__m)->dissector; \ \ (__out)->key = skb_flow_dissector_target(__d, __type, (__m)->key); \ (__out)->mask = skb_flow_dissector_target(__d, __type, (__m)->mask); \ void flow_rule_match_meta(const struct flow_rule *rule, struct flow_match_meta *out) { FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_META, out); } EXPORT_SYMBOL(flow_rule_match_meta); void flow_rule_match_basic(const struct flow_rule *rule, struct flow_match_basic *out) { FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_BASIC, out); } EXPORT_SYMBOL(flow_rule_match_basic); void flow_rule_match_control(const struct flow_rule *rule, struct flow_match_control *out) { FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CONTROL, out); } EXPORT_SYMBOL(flow_rule_match_control); void flow_rule_match_eth_addrs(const struct flow_rule *rule, struct flow_match_eth_addrs *out) { FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS, out); } EXPORT_SYMBOL(flow_rule_match_eth_addrs); void flow_rule_match_vlan(const struct flow_rule *rule, struct flow_match_vlan *out) { FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_VLAN, out); } EXPORT_SYMBOL(flow_rule_match_vlan); void flow_rule_match_cvlan(const struct flow_rule *rule, struct flow_match_vlan *out) { FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CVLAN, out); } EXPORT_SYMBOL(flow_rule_match_cvlan); void flow_rule_match_arp(const struct flow_rule *rule, struct flow_match_arp *out) { FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ARP, out); } EXPORT_SYMBOL(flow_rule_match_arp); void flow_rule_match_ipv4_addrs(const struct flow_rule *rule, struct flow_match_ipv4_addrs *out) { FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS, out); } EXPORT_SYMBOL(flow_rule_match_ipv4_addrs); void flow_rule_match_ipv6_addrs(const struct flow_rule *rule, struct flow_match_ipv6_addrs *out) { FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS, out); } EXPORT_SYMBOL(flow_rule_match_ipv6_addrs); void flow_rule_match_ip(const struct flow_rule *rule, struct flow_match_ip *out) { FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IP, out); } EXPORT_SYMBOL(flow_rule_match_ip); void flow_rule_match_ports(const struct flow_rule *rule, struct flow_match_ports *out) { FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PORTS, out); } EXPORT_SYMBOL(flow_rule_match_ports); void flow_rule_match_ports_range(const struct flow_rule *rule, struct flow_match_ports_range *out) { FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PORTS_RANGE, out); } EXPORT_SYMBOL(flow_rule_match_ports_range); void flow_rule_match_tcp(const struct flow_rule *rule, struct flow_match_tcp *out) { FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_TCP, out); } EXPORT_SYMBOL(flow_rule_match_tcp); void flow_rule_match_ipsec(const struct flow_rule *rule, struct flow_match_ipsec *out) 
{ FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPSEC, out); } EXPORT_SYMBOL(flow_rule_match_ipsec); void flow_rule_match_icmp(const struct flow_rule *rule, struct flow_match_icmp *out) { FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ICMP, out); } EXPORT_SYMBOL(flow_rule_match_icmp); void flow_rule_match_mpls(const struct flow_rule *rule, struct flow_match_mpls *out) { FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_MPLS, out); } EXPORT_SYMBOL(flow_rule_match_mpls); void flow_rule_match_enc_control(const struct flow_rule *rule, struct flow_match_control *out) { FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL, out); } EXPORT_SYMBOL(flow_rule_match_enc_control); void flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule, struct flow_match_ipv4_addrs *out) { FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, out); } EXPORT_SYMBOL(flow_rule_match_enc_ipv4_addrs); void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule, struct flow_match_ipv6_addrs *out) { FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, out); } EXPORT_SYMBOL(flow_rule_match_enc_ipv6_addrs); void flow_rule_match_enc_ip(const struct flow_rule *rule, struct flow_match_ip *out) { FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IP, out); } EXPORT_SYMBOL(flow_rule_match_enc_ip); void flow_rule_match_enc_ports(const struct flow_rule *rule, struct flow_match_ports *out) { FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_PORTS, out); } EXPORT_SYMBOL(flow_rule_match_enc_ports); void flow_rule_match_enc_keyid(const struct flow_rule *rule, struct flow_match_enc_keyid *out) { FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_KEYID, out); } EXPORT_SYMBOL(flow_rule_match_enc_keyid); void flow_rule_match_enc_opts(const struct flow_rule *rule, struct flow_match_enc_opts *out) { FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_OPTS, out); } EXPORT_SYMBOL(flow_rule_match_enc_opts); struct flow_action_cookie *flow_action_cookie_create(void *data, unsigned int len, gfp_t gfp) { struct flow_action_cookie *cookie; cookie = kmalloc(sizeof(*cookie) + len, gfp); if (!cookie) return NULL; cookie->cookie_len = len; memcpy(cookie->cookie, data, len); return cookie; } EXPORT_SYMBOL(flow_action_cookie_create); void flow_action_cookie_destroy(struct flow_action_cookie *cookie) { kfree(cookie); } EXPORT_SYMBOL(flow_action_cookie_destroy); void flow_rule_match_ct(const struct flow_rule *rule, struct flow_match_ct *out) { FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CT, out); } EXPORT_SYMBOL(flow_rule_match_ct); void flow_rule_match_pppoe(const struct flow_rule *rule, struct flow_match_pppoe *out) { FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PPPOE, out); } EXPORT_SYMBOL(flow_rule_match_pppoe); void flow_rule_match_l2tpv3(const struct flow_rule *rule, struct flow_match_l2tpv3 *out) { FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_L2TPV3, out); } EXPORT_SYMBOL(flow_rule_match_l2tpv3); struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb, void *cb_ident, void *cb_priv, void (*release)(void *cb_priv)) { struct flow_block_cb *block_cb; block_cb = kzalloc(sizeof(*block_cb), GFP_KERNEL); if (!block_cb) return ERR_PTR(-ENOMEM); block_cb->cb = cb; block_cb->cb_ident = cb_ident; block_cb->cb_priv = cb_priv; block_cb->release = release; return block_cb; } EXPORT_SYMBOL(flow_block_cb_alloc); void flow_block_cb_free(struct flow_block_cb *block_cb) { if (block_cb->release) block_cb->release(block_cb->cb_priv); kfree(block_cb); } EXPORT_SYMBOL(flow_block_cb_free); struct flow_block_cb *flow_block_cb_lookup(struct 
flow_block *block, flow_setup_cb_t *cb, void *cb_ident) { struct flow_block_cb *block_cb; list_for_each_entry(block_cb, &block->cb_list, list) { if (block_cb->cb == cb && block_cb->cb_ident == cb_ident) return block_cb; } return NULL; } EXPORT_SYMBOL(flow_block_cb_lookup); void *flow_block_cb_priv(struct flow_block_cb *block_cb) { return block_cb->cb_priv; } EXPORT_SYMBOL(flow_block_cb_priv); void flow_block_cb_incref(struct flow_block_cb *block_cb) { block_cb->refcnt++; } EXPORT_SYMBOL(flow_block_cb_incref); unsigned int flow_block_cb_decref(struct flow_block_cb *block_cb) { return --block_cb->refcnt; } EXPORT_SYMBOL(flow_block_cb_decref); bool flow_block_cb_is_busy(flow_setup_cb_t *cb, void *cb_ident, struct list_head *driver_block_list) { struct flow_block_cb *block_cb; list_for_each_entry(block_cb, driver_block_list, driver_list) { if (block_cb->cb == cb && block_cb->cb_ident == cb_ident) return true; } return false; } EXPORT_SYMBOL(flow_block_cb_is_busy); int flow_block_cb_setup_simple(struct flow_block_offload *f, struct list_head *driver_block_list, flow_setup_cb_t *cb, void *cb_ident, void *cb_priv, bool ingress_only) { struct flow_block_cb *block_cb; if (ingress_only && f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) return -EOPNOTSUPP; f->driver_block_list = driver_block_list; switch (f->command) { case FLOW_BLOCK_BIND: if (flow_block_cb_is_busy(cb, cb_ident, driver_block_list)) return -EBUSY; block_cb = flow_block_cb_alloc(cb, cb_ident, cb_priv, NULL); if (IS_ERR(block_cb)) return PTR_ERR(block_cb); flow_block_cb_add(block_cb, f); list_add_tail(&block_cb->driver_list, driver_block_list); return 0; case FLOW_BLOCK_UNBIND: block_cb = flow_block_cb_lookup(f->block, cb, cb_ident); if (!block_cb) return -ENOENT; flow_block_cb_remove(block_cb, f); list_del(&block_cb->driver_list); return 0; default: return -EOPNOTSUPP; } } EXPORT_SYMBOL(flow_block_cb_setup_simple); static DEFINE_MUTEX(flow_indr_block_lock); static LIST_HEAD(flow_block_indr_list); static LIST_HEAD(flow_block_indr_dev_list); static LIST_HEAD(flow_indir_dev_list); struct flow_indr_dev { struct list_head list; flow_indr_block_bind_cb_t *cb; void *cb_priv; refcount_t refcnt; }; static struct flow_indr_dev *flow_indr_dev_alloc(flow_indr_block_bind_cb_t *cb, void *cb_priv) { struct flow_indr_dev *indr_dev; indr_dev = kmalloc(sizeof(*indr_dev), GFP_KERNEL); if (!indr_dev) return NULL; indr_dev->cb = cb; indr_dev->cb_priv = cb_priv; refcount_set(&indr_dev->refcnt, 1); return indr_dev; } struct flow_indir_dev_info { void *data; struct net_device *dev; struct Qdisc *sch; enum tc_setup_type type; void (*cleanup)(struct flow_block_cb *block_cb); struct list_head list; enum flow_block_command command; enum flow_block_binder_type binder_type; struct list_head *cb_list; }; static void existing_qdiscs_register(flow_indr_block_bind_cb_t *cb, void *cb_priv) { struct flow_block_offload bo; struct flow_indir_dev_info *cur; list_for_each_entry(cur, &flow_indir_dev_list, list) { memset(&bo, 0, sizeof(bo)); bo.command = cur->command; bo.binder_type = cur->binder_type; INIT_LIST_HEAD(&bo.cb_list); cb(cur->dev, cur->sch, cb_priv, cur->type, &bo, cur->data, cur->cleanup); list_splice(&bo.cb_list, cur->cb_list); } } int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv) { struct flow_indr_dev *indr_dev; mutex_lock(&flow_indr_block_lock); list_for_each_entry(indr_dev, &flow_block_indr_dev_list, list) { if (indr_dev->cb == cb && indr_dev->cb_priv == cb_priv) { refcount_inc(&indr_dev->refcnt); 
mutex_unlock(&flow_indr_block_lock); return 0; } } indr_dev = flow_indr_dev_alloc(cb, cb_priv); if (!indr_dev) { mutex_unlock(&flow_indr_block_lock); return -ENOMEM; } list_add(&indr_dev->list, &flow_block_indr_dev_list); existing_qdiscs_register(cb, cb_priv); mutex_unlock(&flow_indr_block_lock); tcf_action_reoffload_cb(cb, cb_priv, true); return 0; } EXPORT_SYMBOL(flow_indr_dev_register); static void __flow_block_indr_cleanup(void (*release)(void *cb_priv), void *cb_priv, struct list_head *cleanup_list) { struct flow_block_cb *this, *next; list_for_each_entry_safe(this, next, &flow_block_indr_list, indr.list) { if (this->release == release && this->indr.cb_priv == cb_priv) list_move(&this->indr.list, cleanup_list); } } static void flow_block_indr_notify(struct list_head *cleanup_list) { struct flow_block_cb *this, *next; list_for_each_entry_safe(this, next, cleanup_list, indr.list) { list_del(&this->indr.list); this->indr.cleanup(this); } } void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv, void (*release)(void *cb_priv)) { struct flow_indr_dev *this, *next, *indr_dev = NULL; LIST_HEAD(cleanup_list); mutex_lock(&flow_indr_block_lock); list_for_each_entry_safe(this, next, &flow_block_indr_dev_list, list) { if (this->cb == cb && this->cb_priv == cb_priv && refcount_dec_and_test(&this->refcnt)) { indr_dev = this; list_del(&indr_dev->list); break; } } if (!indr_dev) { mutex_unlock(&flow_indr_block_lock); return; } __flow_block_indr_cleanup(release, cb_priv, &cleanup_list); mutex_unlock(&flow_indr_block_lock); tcf_action_reoffload_cb(cb, cb_priv, false); flow_block_indr_notify(&cleanup_list); kfree(indr_dev); } EXPORT_SYMBOL(flow_indr_dev_unregister); static void flow_block_indr_init(struct flow_block_cb *flow_block, struct flow_block_offload *bo, struct net_device *dev, struct Qdisc *sch, void *data, void *cb_priv, void (*cleanup)(struct flow_block_cb *block_cb)) { flow_block->indr.binder_type = bo->binder_type; flow_block->indr.data = data; flow_block->indr.cb_priv = cb_priv; flow_block->indr.dev = dev; flow_block->indr.sch = sch; flow_block->indr.cleanup = cleanup; } struct flow_block_cb *flow_indr_block_cb_alloc(flow_setup_cb_t *cb, void *cb_ident, void *cb_priv, void (*release)(void *cb_priv), struct flow_block_offload *bo, struct net_device *dev, struct Qdisc *sch, void *data, void *indr_cb_priv, void (*cleanup)(struct flow_block_cb *block_cb)) { struct flow_block_cb *block_cb; block_cb = flow_block_cb_alloc(cb, cb_ident, cb_priv, release); if (IS_ERR(block_cb)) goto out; flow_block_indr_init(block_cb, bo, dev, sch, data, indr_cb_priv, cleanup); list_add(&block_cb->indr.list, &flow_block_indr_list); out: return block_cb; } EXPORT_SYMBOL(flow_indr_block_cb_alloc); static struct flow_indir_dev_info *find_indir_dev(void *data) { struct flow_indir_dev_info *cur; list_for_each_entry(cur, &flow_indir_dev_list, list) { if (cur->data == data) return cur; } return NULL; } static int indir_dev_add(void *data, struct net_device *dev, struct Qdisc *sch, enum tc_setup_type type, void (*cleanup)(struct flow_block_cb *block_cb), struct flow_block_offload *bo) { struct flow_indir_dev_info *info; info = find_indir_dev(data); if (info) return -EEXIST; info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) return -ENOMEM; info->data = data; info->dev = dev; info->sch = sch; info->type = type; info->cleanup = cleanup; info->command = bo->command; info->binder_type = bo->binder_type; info->cb_list = bo->cb_list_head; list_add(&info->list, &flow_indir_dev_list); return 0; } static int 
indir_dev_remove(void *data) { struct flow_indir_dev_info *info; info = find_indir_dev(data); if (!info) return -ENOENT; list_del(&info->list); kfree(info); return 0; } int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch, enum tc_setup_type type, void *data, struct flow_block_offload *bo, void (*cleanup)(struct flow_block_cb *block_cb)) { struct flow_indr_dev *this; u32 count = 0; int err; mutex_lock(&flow_indr_block_lock); if (bo) { if (bo->command == FLOW_BLOCK_BIND) indir_dev_add(data, dev, sch, type, cleanup, bo); else if (bo->command == FLOW_BLOCK_UNBIND) indir_dev_remove(data); } list_for_each_entry(this, &flow_block_indr_dev_list, list) { err = this->cb(dev, sch, this->cb_priv, type, bo, data, cleanup); if (!err) count++; } mutex_unlock(&flow_indr_block_lock); return (bo && list_empty(&bo->cb_list)) ? -EOPNOTSUPP : count; } EXPORT_SYMBOL(flow_indr_dev_setup_offload); bool flow_indr_dev_exists(void) { return !list_empty(&flow_block_indr_dev_list); } EXPORT_SYMBOL(flow_indr_dev_exists);
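/*
 * Usage sketch (illustrative; the example_* names are hypothetical):
 * a driver typically answers a block bind/unbind request by delegating
 * to flow_block_cb_setup_simple() with its own callback and driver
 * list, and reads match keys out of an offloaded rule with the
 * flow_rule_match_*() helpers above. flow_rule_match_key() is the
 * <net/flow_offload.h> helper that tests whether a key was dissected.
 */
static LIST_HEAD(example_driver_block_list);

static int example_block_cb(enum tc_setup_type type, void *type_data,
			    void *cb_priv)
{
	/* a real driver would translate the rule to hardware here */
	return -EOPNOTSUPP;
}

static int example_setup_block(struct net_device *dev,
			       struct flow_block_offload *f)
{
	/* ingress_only = true: reject egress binder types */
	return flow_block_cb_setup_simple(f, &example_driver_block_list,
					  example_block_cb, dev, dev, true);
}

static void example_read_basic(const struct flow_rule *rule)
{
	struct flow_match_basic match;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		flow_rule_match_basic(rule, &match);
		/* match.key/match.mask now point into the rule's dissector */
	}
}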
// SPDX-License-Identifier: GPL-2.0-only #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <linux/proc_fs.h> #include <linux/skbuff.h> #include <linux/netfilter.h> #include <linux/seq_file.h> #include <net/protocol.h> #include <net/netfilter/nf_log.h> #include "nf_internals.h" /* Internal logging interface, which relies on the real LOG target modules */ #define NFLOGGER_NAME_LEN 64 int sysctl_nf_log_all_netns __read_mostly; EXPORT_SYMBOL(sysctl_nf_log_all_netns); static struct nf_logger __rcu *loggers[NFPROTO_NUMPROTO][NF_LOG_TYPE_MAX] __read_mostly; static DEFINE_MUTEX(nf_log_mutex); #define nft_log_dereference(logger) \ rcu_dereference_protected(logger, lockdep_is_held(&nf_log_mutex)) static struct nf_logger *__find_logger(int pf, const char *str_logger) { struct nf_logger *log; int i; for (i = 0; i < NF_LOG_TYPE_MAX; i++) { log = nft_log_dereference(loggers[pf][i]); if (!log) continue; if (!strncasecmp(str_logger, log->name, strlen(log->name))) return log; } return NULL; } int nf_log_set(struct net *net, u_int8_t pf, const struct nf_logger *logger) { const struct nf_logger *log; if (pf == NFPROTO_UNSPEC || pf >= ARRAY_SIZE(net->nf.nf_loggers)) return -EOPNOTSUPP; mutex_lock(&nf_log_mutex); log =
nft_log_dereference(net->nf.nf_loggers[pf]); if (log == NULL) rcu_assign_pointer(net->nf.nf_loggers[pf], logger); mutex_unlock(&nf_log_mutex); return 0; } EXPORT_SYMBOL(nf_log_set); void nf_log_unset(struct net *net, const struct nf_logger *logger) { int i; const struct nf_logger *log; mutex_lock(&nf_log_mutex); for (i = 0; i < NFPROTO_NUMPROTO; i++) { log = nft_log_dereference(net->nf.nf_loggers[i]); if (log == logger) RCU_INIT_POINTER(net->nf.nf_loggers[i], NULL); } mutex_unlock(&nf_log_mutex); } EXPORT_SYMBOL(nf_log_unset); /* return EEXIST if the same logger is registered, 0 on success. */ int nf_log_register(u_int8_t pf, struct nf_logger *logger) { int i; int ret = 0; if (pf >= ARRAY_SIZE(init_net.nf.nf_loggers)) return -EINVAL; mutex_lock(&nf_log_mutex); if (pf == NFPROTO_UNSPEC) { for (i = NFPROTO_UNSPEC; i < NFPROTO_NUMPROTO; i++) { if (rcu_access_pointer(loggers[i][logger->type])) { ret = -EEXIST; goto unlock; } } for (i = NFPROTO_UNSPEC; i < NFPROTO_NUMPROTO; i++) rcu_assign_pointer(loggers[i][logger->type], logger); } else { if (rcu_access_pointer(loggers[pf][logger->type])) { ret = -EEXIST; goto unlock; } rcu_assign_pointer(loggers[pf][logger->type], logger); } unlock: mutex_unlock(&nf_log_mutex); return ret; } EXPORT_SYMBOL(nf_log_register); void nf_log_unregister(struct nf_logger *logger) { const struct nf_logger *log; int i; mutex_lock(&nf_log_mutex); for (i = 0; i < NFPROTO_NUMPROTO; i++) { log = nft_log_dereference(loggers[i][logger->type]); if (log == logger) RCU_INIT_POINTER(loggers[i][logger->type], NULL); } mutex_unlock(&nf_log_mutex); synchronize_rcu(); } EXPORT_SYMBOL(nf_log_unregister); int nf_log_bind_pf(struct net *net, u_int8_t pf, const struct nf_logger *logger) { if (pf >= ARRAY_SIZE(net->nf.nf_loggers)) return -EINVAL; mutex_lock(&nf_log_mutex); if (__find_logger(pf, logger->name) == NULL) { mutex_unlock(&nf_log_mutex); return -ENOENT; } rcu_assign_pointer(net->nf.nf_loggers[pf], logger); mutex_unlock(&nf_log_mutex); return 0; } EXPORT_SYMBOL(nf_log_bind_pf); void nf_log_unbind_pf(struct net *net, u_int8_t pf) { if (pf >= ARRAY_SIZE(net->nf.nf_loggers)) return; mutex_lock(&nf_log_mutex); RCU_INIT_POINTER(net->nf.nf_loggers[pf], NULL); mutex_unlock(&nf_log_mutex); } EXPORT_SYMBOL(nf_log_unbind_pf); int nf_logger_find_get(int pf, enum nf_log_type type) { struct nf_logger *logger; int ret = -ENOENT; if (pf >= ARRAY_SIZE(loggers)) return -EINVAL; if (type >= NF_LOG_TYPE_MAX) return -EINVAL; if (pf == NFPROTO_INET) { ret = nf_logger_find_get(NFPROTO_IPV4, type); if (ret < 0) return ret; ret = nf_logger_find_get(NFPROTO_IPV6, type); if (ret < 0) { nf_logger_put(NFPROTO_IPV4, type); return ret; } return 0; } rcu_read_lock(); logger = rcu_dereference(loggers[pf][type]); if (logger == NULL) goto out; if (try_module_get(logger->me)) ret = 0; out: rcu_read_unlock(); return ret; } EXPORT_SYMBOL_GPL(nf_logger_find_get); void nf_logger_put(int pf, enum nf_log_type type) { struct nf_logger *logger; if (pf == NFPROTO_INET) { nf_logger_put(NFPROTO_IPV4, type); nf_logger_put(NFPROTO_IPV6, type); return; } rcu_read_lock(); logger = rcu_dereference(loggers[pf][type]); if (!logger) WARN_ON_ONCE(1); else module_put(logger->me); rcu_read_unlock(); } EXPORT_SYMBOL_GPL(nf_logger_put); void nf_log_packet(struct net *net, u_int8_t pf, unsigned int hooknum, const struct sk_buff *skb, const struct net_device *in, const struct net_device *out, const struct nf_loginfo *loginfo, const char *fmt, ...) 
{ va_list args; char prefix[NF_LOG_PREFIXLEN]; const struct nf_logger *logger; rcu_read_lock(); if (loginfo != NULL) logger = rcu_dereference(loggers[pf][loginfo->type]); else logger = rcu_dereference(net->nf.nf_loggers[pf]); if (logger) { va_start(args, fmt); vsnprintf(prefix, sizeof(prefix), fmt, args); va_end(args); logger->logfn(net, pf, hooknum, skb, in, out, loginfo, prefix); } rcu_read_unlock(); } EXPORT_SYMBOL(nf_log_packet); void nf_log_trace(struct net *net, u_int8_t pf, unsigned int hooknum, const struct sk_buff *skb, const struct net_device *in, const struct net_device *out, const struct nf_loginfo *loginfo, const char *fmt, ...) { va_list args; char prefix[NF_LOG_PREFIXLEN]; const struct nf_logger *logger; rcu_read_lock(); logger = rcu_dereference(net->nf.nf_loggers[pf]); if (logger) { va_start(args, fmt); vsnprintf(prefix, sizeof(prefix), fmt, args); va_end(args); logger->logfn(net, pf, hooknum, skb, in, out, loginfo, prefix); } rcu_read_unlock(); } EXPORT_SYMBOL(nf_log_trace); #define S_SIZE (1024 - (sizeof(unsigned int) + 1)) struct nf_log_buf { unsigned int count; char buf[S_SIZE + 1]; }; static struct nf_log_buf emergency, *emergency_ptr = &emergency; __printf(2, 3) int nf_log_buf_add(struct nf_log_buf *m, const char *f, ...) { va_list args; int len; if (likely(m->count < S_SIZE)) { va_start(args, f); len = vsnprintf(m->buf + m->count, S_SIZE - m->count, f, args); va_end(args); if (likely(m->count + len < S_SIZE)) { m->count += len; return 0; } } m->count = S_SIZE; printk_once(KERN_ERR KBUILD_MODNAME " please increase S_SIZE\n"); return -1; } EXPORT_SYMBOL_GPL(nf_log_buf_add); struct nf_log_buf *nf_log_buf_open(void) { struct nf_log_buf *m = kmalloc(sizeof(*m), GFP_ATOMIC); if (unlikely(!m)) { local_bh_disable(); do { m = xchg(&emergency_ptr, NULL); } while (!m); } m->count = 0; return m; } EXPORT_SYMBOL_GPL(nf_log_buf_open); void nf_log_buf_close(struct nf_log_buf *m) { m->buf[m->count] = 0; printk("%s\n", m->buf); if (likely(m != &emergency)) kfree(m); else { emergency_ptr = m; local_bh_enable(); } } EXPORT_SYMBOL_GPL(nf_log_buf_close); #ifdef CONFIG_PROC_FS static void *seq_start(struct seq_file *seq, loff_t *pos) { struct net *net = seq_file_net(seq); mutex_lock(&nf_log_mutex); if (*pos >= ARRAY_SIZE(net->nf.nf_loggers)) return NULL; return pos; } static void *seq_next(struct seq_file *s, void *v, loff_t *pos) { struct net *net = seq_file_net(s); (*pos)++; if (*pos >= ARRAY_SIZE(net->nf.nf_loggers)) return NULL; return pos; } static void seq_stop(struct seq_file *s, void *v) { mutex_unlock(&nf_log_mutex); } static int seq_show(struct seq_file *s, void *v) { loff_t *pos = v; const struct nf_logger *logger; int i; struct net *net = seq_file_net(s); logger = nft_log_dereference(net->nf.nf_loggers[*pos]); if (!logger) seq_printf(s, "%2lld NONE (", *pos); else seq_printf(s, "%2lld %s (", *pos, logger->name); if (seq_has_overflowed(s)) return -ENOSPC; for (i = 0; i < NF_LOG_TYPE_MAX; i++) { if (loggers[*pos][i] == NULL) continue; logger = nft_log_dereference(loggers[*pos][i]); seq_puts(s, logger->name); if (i == 0 && loggers[*pos][i + 1] != NULL) seq_puts(s, ","); if (seq_has_overflowed(s)) return -ENOSPC; } seq_puts(s, ")\n"); if (seq_has_overflowed(s)) return -ENOSPC; return 0; } static const struct seq_operations nflog_seq_ops = { .start = seq_start, .next = seq_next, .stop = seq_stop, .show = seq_show, }; #endif /* PROC_FS */ #ifdef CONFIG_SYSCTL static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3]; static struct ctl_table 
nf_log_sysctl_table[NFPROTO_NUMPROTO]; static struct ctl_table_header *nf_log_sysctl_fhdr; static struct ctl_table nf_log_sysctl_ftable[] = { { .procname = "nf_log_all_netns", .data = &sysctl_nf_log_all_netns, .maxlen = sizeof(sysctl_nf_log_all_netns), .mode = 0644, .proc_handler = proc_dointvec, }, }; static int nf_log_proc_dostring(const struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { const struct nf_logger *logger; char buf[NFLOGGER_NAME_LEN]; int r = 0; int tindex = (unsigned long)table->extra1; struct net *net = table->extra2; if (write) { struct ctl_table tmp = *table; /* proc_dostring() can append to existing strings, so we need to * initialize it as an empty string. */ buf[0] = '\0'; tmp.data = buf; r = proc_dostring(&tmp, write, buffer, lenp, ppos); if (r) return r; if (!strcmp(buf, "NONE")) { nf_log_unbind_pf(net, tindex); return 0; } mutex_lock(&nf_log_mutex); logger = __find_logger(tindex, buf); if (logger == NULL) { mutex_unlock(&nf_log_mutex); return -ENOENT; } rcu_assign_pointer(net->nf.nf_loggers[tindex], logger); mutex_unlock(&nf_log_mutex); } else { struct ctl_table tmp = *table; tmp.data = buf; mutex_lock(&nf_log_mutex); logger = nft_log_dereference(net->nf.nf_loggers[tindex]); if (!logger) strscpy(buf, "NONE", sizeof(buf)); else strscpy(buf, logger->name, sizeof(buf)); mutex_unlock(&nf_log_mutex); r = proc_dostring(&tmp, write, buffer, lenp, ppos); } return r; } static int netfilter_log_sysctl_init(struct net *net) { int i; struct ctl_table *table; table = nf_log_sysctl_table; if (!net_eq(net, &init_net)) { table = kmemdup(nf_log_sysctl_table, sizeof(nf_log_sysctl_table), GFP_KERNEL); if (!table) goto err_alloc; } else { for (i = NFPROTO_UNSPEC; i < NFPROTO_NUMPROTO; i++) { snprintf(nf_log_sysctl_fnames[i], 3, "%d", i); nf_log_sysctl_table[i].procname = nf_log_sysctl_fnames[i]; nf_log_sysctl_table[i].maxlen = NFLOGGER_NAME_LEN; nf_log_sysctl_table[i].mode = 0644; nf_log_sysctl_table[i].proc_handler = nf_log_proc_dostring; nf_log_sysctl_table[i].extra1 = (void *)(unsigned long) i; } nf_log_sysctl_fhdr = register_net_sysctl(net, "net/netfilter", nf_log_sysctl_ftable); if (!nf_log_sysctl_fhdr) goto err_freg; } for (i = NFPROTO_UNSPEC; i < NFPROTO_NUMPROTO; i++) table[i].extra2 = net; net->nf.nf_log_dir_header = register_net_sysctl_sz(net, "net/netfilter/nf_log", table, ARRAY_SIZE(nf_log_sysctl_table)); if (!net->nf.nf_log_dir_header) goto err_reg; return 0; err_reg: if (!net_eq(net, &init_net)) kfree(table); else unregister_net_sysctl_table(nf_log_sysctl_fhdr); err_freg: err_alloc: return -ENOMEM; } static void netfilter_log_sysctl_exit(struct net *net) { const struct ctl_table *table; table = net->nf.nf_log_dir_header->ctl_table_arg; unregister_net_sysctl_table(net->nf.nf_log_dir_header); if (!net_eq(net, &init_net)) kfree(table); else unregister_net_sysctl_table(nf_log_sysctl_fhdr); } #else static int netfilter_log_sysctl_init(struct net *net) { return 0; } static void netfilter_log_sysctl_exit(struct net *net) { } #endif /* CONFIG_SYSCTL */ static int __net_init nf_log_net_init(struct net *net) { int ret = -ENOMEM; #ifdef CONFIG_PROC_FS if (!proc_create_net("nf_log", 0444, net->nf.proc_netfilter, &nflog_seq_ops, sizeof(struct seq_net_private))) return ret; #endif ret = netfilter_log_sysctl_init(net); if (ret < 0) goto out_sysctl; return 0; out_sysctl: #ifdef CONFIG_PROC_FS remove_proc_entry("nf_log", net->nf.proc_netfilter); #endif return ret; } static void __net_exit nf_log_net_exit(struct net *net) { netfilter_log_sysctl_exit(net); 
#ifdef CONFIG_PROC_FS remove_proc_entry("nf_log", net->nf.proc_netfilter); #endif } static struct pernet_operations nf_log_net_ops = { .init = nf_log_net_init, .exit = nf_log_net_exit, }; int __init netfilter_log_init(void) { return register_pernet_subsys(&nf_log_net_ops); }
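/*
 * Usage sketch (illustrative; the example_* names are hypothetical):
 * a logging backend fills in a struct nf_logger and registers it with
 * nf_log_register(); hooks can then emit through nf_log_packet().
 * Registering for NFPROTO_UNSPEC claims the slot for every family and
 * fails with -EEXIST if another backend of the same type is present.
 */
static void example_logfn(struct net *net, u_int8_t pf,
			  unsigned int hooknum, const struct sk_buff *skb,
			  const struct net_device *in,
			  const struct net_device *out,
			  const struct nf_loginfo *loginfo,
			  const char *prefix)
{
	pr_info("%shook %u dev %s\n", prefix, hooknum,
		out ? out->name : (in ? in->name : "?"));
}

static struct nf_logger example_logger = {
	.name	= "example-log",
	.type	= NF_LOG_TYPE_LOG,
	.logfn	= example_logfn,
	.me	= THIS_MODULE,
};

/* in the backend's init path: nf_log_register(NFPROTO_UNSPEC, &example_logger); */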
// SPDX-License-Identifier: GPL-2.0 #include <linux/types.h> #include <linux/errno.h> #include <linux/tty.h> #include <linux/module.h> /* * n_null.c - Null line discipline used in the failure path * * Copyright (C) Intel 2017 */ static ssize_t n_null_read(struct tty_struct *tty, struct file *file, u8 *buf, size_t nr, void **cookie, unsigned long offset) { return -EOPNOTSUPP; } static ssize_t n_null_write(struct tty_struct *tty, struct file *file, const u8 *buf, size_t nr) { return -EOPNOTSUPP; } static struct tty_ldisc_ops null_ldisc = { .owner = THIS_MODULE, .num = N_NULL, .name = "n_null", .read = n_null_read, .write = n_null_write, }; static int __init n_null_init(void) { BUG_ON(tty_register_ldisc(&null_ldisc)); return 0; } static void __exit n_null_exit(void) { tty_unregister_ldisc(&null_ldisc); } module_init(n_null_init); module_exit(n_null_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Alan Cox"); MODULE_ALIAS_LDISC(N_NULL); MODULE_DESCRIPTION("Null ldisc driver");
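/*
 * Usage sketch (illustrative): the kernel switches a tty to N_NULL
 * itself on the ldisc failure path, but userspace can also park a tty
 * on it with the standard TIOCSETD ioctl (N_NULL comes from the uapi
 * <linux/tty.h>). A minimal userspace snippet:
 *
 *	int ldisc = N_NULL;
 *	if (ioctl(tty_fd, TIOCSETD, &ldisc) < 0)
 *		perror("TIOCSETD");
 *
 * Every subsequent read/write on the tty then fails with EOPNOTSUPP,
 * as implemented above.
 */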
// SPDX-License-Identifier: GPL-2.0-or-later /* * Virtio SCSI HBA driver * * Copyright IBM Corp. 2010 * Copyright Red Hat, Inc. 2011 * * Authors: * Stefan Hajnoczi <stefanha@linux.vnet.ibm.com> * Paolo Bonzini <pbonzini@redhat.com> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/slab.h> #include <linux/mempool.h> #include <linux/interrupt.h> #include <linux/virtio.h> #include <linux/virtio_ids.h> #include <linux/virtio_config.h> #include <linux/virtio_scsi.h> #include <linux/cpu.h> #include <linux/blkdev.h> #include <linux/blk-integrity.h> #include <scsi/scsi_host.h> #include <scsi/scsi_device.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_tcq.h> #include <scsi/scsi_devinfo.h> #include <linux/seqlock.h> #include <linux/blk-mq-virtio.h> #include "sd.h" #define VIRTIO_SCSI_MEMPOOL_SZ 64 #define VIRTIO_SCSI_EVENT_LEN 8 #define VIRTIO_SCSI_VQ_BASE 2 static unsigned int virtscsi_poll_queues; module_param(virtscsi_poll_queues, uint, 0644); MODULE_PARM_DESC(virtscsi_poll_queues, "The number of dedicated virtqueues for polling I/O"); /* Command queue element */ struct virtio_scsi_cmd { struct scsi_cmnd *sc; struct completion *comp; union { struct virtio_scsi_cmd_req cmd; struct virtio_scsi_cmd_req_pi cmd_pi; struct virtio_scsi_ctrl_tmf_req tmf; struct virtio_scsi_ctrl_an_req an; } req; union { struct virtio_scsi_cmd_resp cmd; struct virtio_scsi_ctrl_tmf_resp tmf; struct virtio_scsi_ctrl_an_resp an; struct virtio_scsi_event evt; } resp; } ____cacheline_aligned_in_smp; struct virtio_scsi_event_node { struct virtio_scsi *vscsi; struct virtio_scsi_event event; struct work_struct work; }; struct virtio_scsi_vq { /* Protects vq */ spinlock_t vq_lock; struct virtqueue *vq; }; /* Driver instance state */ struct virtio_scsi { struct virtio_device *vdev; /* Get some buffers ready for event vq */ struct virtio_scsi_event_node event_list[VIRTIO_SCSI_EVENT_LEN]; u32 num_queues; int io_queues[HCTX_MAX_TYPES]; struct hlist_node node; /* Protected by event_vq lock */ bool stop_events; struct virtio_scsi_vq ctrl_vq; struct virtio_scsi_vq event_vq; struct virtio_scsi_vq req_vqs[]; }; static struct kmem_cache *virtscsi_cmd_cache; static mempool_t *virtscsi_cmd_pool; static inline struct Scsi_Host *virtio_scsi_host(struct virtio_device *vdev) { return vdev->priv; } static void virtscsi_compute_resid(struct scsi_cmnd *sc, u32 resid) { if (resid) scsi_set_resid(sc, min(resid, scsi_bufflen(sc))); } /* * virtscsi_complete_cmd - finish a scsi_cmd and invoke scsi_done * * Called with vq_lock held.
*/ static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf) { struct virtio_scsi_cmd *cmd = buf; struct scsi_cmnd *sc = cmd->sc; struct virtio_scsi_cmd_resp *resp = &cmd->resp.cmd; dev_dbg(&sc->device->sdev_gendev, "cmd %p response %u status %#02x sense_len %u\n", sc, resp->response, resp->status, resp->sense_len); sc->result = resp->status; virtscsi_compute_resid(sc, virtio32_to_cpu(vscsi->vdev, resp->resid)); switch (resp->response) { case VIRTIO_SCSI_S_OK: set_host_byte(sc, DID_OK); break; case VIRTIO_SCSI_S_OVERRUN: set_host_byte(sc, DID_ERROR); break; case VIRTIO_SCSI_S_ABORTED: set_host_byte(sc, DID_ABORT); break; case VIRTIO_SCSI_S_BAD_TARGET: set_host_byte(sc, DID_BAD_TARGET); break; case VIRTIO_SCSI_S_RESET: set_host_byte(sc, DID_RESET); break; case VIRTIO_SCSI_S_BUSY: set_host_byte(sc, DID_BUS_BUSY); break; case VIRTIO_SCSI_S_TRANSPORT_FAILURE: set_host_byte(sc, DID_TRANSPORT_DISRUPTED); break; case VIRTIO_SCSI_S_TARGET_FAILURE: set_host_byte(sc, DID_BAD_TARGET); break; case VIRTIO_SCSI_S_NEXUS_FAILURE: set_status_byte(sc, SAM_STAT_RESERVATION_CONFLICT); break; default: scmd_printk(KERN_WARNING, sc, "Unknown response %d", resp->response); fallthrough; case VIRTIO_SCSI_S_FAILURE: set_host_byte(sc, DID_ERROR); break; } WARN_ON(virtio32_to_cpu(vscsi->vdev, resp->sense_len) > VIRTIO_SCSI_SENSE_SIZE); if (resp->sense_len) { memcpy(sc->sense_buffer, resp->sense, min_t(u32, virtio32_to_cpu(vscsi->vdev, resp->sense_len), VIRTIO_SCSI_SENSE_SIZE)); } scsi_done(sc); } static void virtscsi_vq_done(struct virtio_scsi *vscsi, struct virtio_scsi_vq *virtscsi_vq, void (*fn)(struct virtio_scsi *vscsi, void *buf)) { void *buf; unsigned int len; unsigned long flags; struct virtqueue *vq = virtscsi_vq->vq; spin_lock_irqsave(&virtscsi_vq->vq_lock, flags); do { virtqueue_disable_cb(vq); while ((buf = virtqueue_get_buf(vq, &len)) != NULL) fn(vscsi, buf); } while (!virtqueue_enable_cb(vq)); spin_unlock_irqrestore(&virtscsi_vq->vq_lock, flags); } static void virtscsi_req_done(struct virtqueue *vq) { struct Scsi_Host *sh = virtio_scsi_host(vq->vdev); struct virtio_scsi *vscsi = shost_priv(sh); int index = vq->index - VIRTIO_SCSI_VQ_BASE; struct virtio_scsi_vq *req_vq = &vscsi->req_vqs[index]; virtscsi_vq_done(vscsi, req_vq, virtscsi_complete_cmd); }; static void virtscsi_poll_requests(struct virtio_scsi *vscsi) { int i, num_vqs; num_vqs = vscsi->num_queues; for (i = 0; i < num_vqs; i++) virtscsi_vq_done(vscsi, &vscsi->req_vqs[i], virtscsi_complete_cmd); } static void virtscsi_complete_free(struct virtio_scsi *vscsi, void *buf) { struct virtio_scsi_cmd *cmd = buf; if (cmd->comp) complete(cmd->comp); } static void virtscsi_ctrl_done(struct virtqueue *vq) { struct Scsi_Host *sh = virtio_scsi_host(vq->vdev); struct virtio_scsi *vscsi = shost_priv(sh); virtscsi_vq_done(vscsi, &vscsi->ctrl_vq, virtscsi_complete_free); }; static void virtscsi_handle_event(struct work_struct *work); static int virtscsi_kick_event(struct virtio_scsi *vscsi, struct virtio_scsi_event_node *event_node) { int err; struct scatterlist sg; unsigned long flags; INIT_WORK(&event_node->work, virtscsi_handle_event); sg_init_one(&sg, &event_node->event, sizeof(struct virtio_scsi_event)); spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags); err = virtqueue_add_inbuf(vscsi->event_vq.vq, &sg, 1, event_node, GFP_ATOMIC); if (!err) virtqueue_kick(vscsi->event_vq.vq); spin_unlock_irqrestore(&vscsi->event_vq.vq_lock, flags); return err; } static int virtscsi_kick_event_all(struct virtio_scsi *vscsi) { int i; for (i = 0; i < 
VIRTIO_SCSI_EVENT_LEN; i++) { vscsi->event_list[i].vscsi = vscsi; virtscsi_kick_event(vscsi, &vscsi->event_list[i]); } return 0; } static void virtscsi_cancel_event_work(struct virtio_scsi *vscsi) { int i; /* Stop scheduling work before calling cancel_work_sync. */ spin_lock_irq(&vscsi->event_vq.vq_lock); vscsi->stop_events = true; spin_unlock_irq(&vscsi->event_vq.vq_lock); for (i = 0; i < VIRTIO_SCSI_EVENT_LEN; i++) cancel_work_sync(&vscsi->event_list[i].work); } static void virtscsi_handle_transport_reset(struct virtio_scsi *vscsi, struct virtio_scsi_event *event) { struct scsi_device *sdev; struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev); unsigned int target = event->lun[1]; unsigned int lun = (event->lun[2] << 8) | event->lun[3]; switch (virtio32_to_cpu(vscsi->vdev, event->reason)) { case VIRTIO_SCSI_EVT_RESET_RESCAN: if (lun == 0) { scsi_scan_target(&shost->shost_gendev, 0, target, SCAN_WILD_CARD, SCSI_SCAN_INITIAL); } else { scsi_add_device(shost, 0, target, lun); } break; case VIRTIO_SCSI_EVT_RESET_REMOVED: sdev = scsi_device_lookup(shost, 0, target, lun); if (sdev) { scsi_remove_device(sdev); scsi_device_put(sdev); } else { pr_err("SCSI device %d 0 %d %d not found\n", shost->host_no, target, lun); } break; default: pr_info("Unsupported virtio scsi event reason %x\n", event->reason); } } static void virtscsi_handle_param_change(struct virtio_scsi *vscsi, struct virtio_scsi_event *event) { struct scsi_device *sdev; struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev); unsigned int target = event->lun[1]; unsigned int lun = (event->lun[2] << 8) | event->lun[3]; u8 asc = virtio32_to_cpu(vscsi->vdev, event->reason) & 255; u8 ascq = virtio32_to_cpu(vscsi->vdev, event->reason) >> 8; sdev = scsi_device_lookup(shost, 0, target, lun); if (!sdev) { pr_err("SCSI device %d 0 %d %d not found\n", shost->host_no, target, lun); return; } /* Handle "Parameters changed", "Mode parameters changed", and "Capacity data has changed". */ if (asc == 0x2a && (ascq == 0x00 || ascq == 0x01 || ascq == 0x09)) scsi_rescan_device(sdev); scsi_device_put(sdev); } static int virtscsi_rescan_hotunplug(struct virtio_scsi *vscsi) { struct scsi_device *sdev; struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev); unsigned char scsi_cmd[MAX_COMMAND_SIZE]; int result, inquiry_len, inq_result_len = 256; char *inq_result = kmalloc(inq_result_len, GFP_KERNEL); if (!inq_result) return -ENOMEM; shost_for_each_device(sdev, shost) { inquiry_len = sdev->inquiry_len ? sdev->inquiry_len : 36; memset(scsi_cmd, 0, sizeof(scsi_cmd)); scsi_cmd[0] = INQUIRY; scsi_cmd[4] = (unsigned char) inquiry_len; memset(inq_result, 0, inq_result_len); result = scsi_execute_cmd(sdev, scsi_cmd, REQ_OP_DRV_IN, inq_result, inquiry_len, SD_TIMEOUT, SD_MAX_RETRIES, NULL); if (result == 0 && inq_result[0] >> 5) { /* PQ indicates the LUN is not attached */ scsi_remove_device(sdev); } else if (result > 0 && host_byte(result) == DID_BAD_TARGET) { /* * If all LUNs of a virtio-scsi device are unplugged * it will respond with BAD TARGET on any INQUIRY * command. * Remove the device in this case as well. 
*/ scsi_remove_device(sdev); } } kfree(inq_result); return 0; } static void virtscsi_handle_event(struct work_struct *work) { struct virtio_scsi_event_node *event_node = container_of(work, struct virtio_scsi_event_node, work); struct virtio_scsi *vscsi = event_node->vscsi; struct virtio_scsi_event *event = &event_node->event; if (event->event & cpu_to_virtio32(vscsi->vdev, VIRTIO_SCSI_T_EVENTS_MISSED)) { int ret; event->event &= ~cpu_to_virtio32(vscsi->vdev, VIRTIO_SCSI_T_EVENTS_MISSED); ret = virtscsi_rescan_hotunplug(vscsi); if (ret) return; scsi_scan_host(virtio_scsi_host(vscsi->vdev)); } switch (virtio32_to_cpu(vscsi->vdev, event->event)) { case VIRTIO_SCSI_T_NO_EVENT: break; case VIRTIO_SCSI_T_TRANSPORT_RESET: virtscsi_handle_transport_reset(vscsi, event); break; case VIRTIO_SCSI_T_PARAM_CHANGE: virtscsi_handle_param_change(vscsi, event); break; default: pr_err("Unsupported virtio scsi event %x\n", event->event); } virtscsi_kick_event(vscsi, event_node); } static void virtscsi_complete_event(struct virtio_scsi *vscsi, void *buf) { struct virtio_scsi_event_node *event_node = buf; if (!vscsi->stop_events) queue_work(system_freezable_wq, &event_node->work); } static void virtscsi_event_done(struct virtqueue *vq) { struct Scsi_Host *sh = virtio_scsi_host(vq->vdev); struct virtio_scsi *vscsi = shost_priv(sh); virtscsi_vq_done(vscsi, &vscsi->event_vq, virtscsi_complete_event); }; static int __virtscsi_add_cmd(struct virtqueue *vq, struct virtio_scsi_cmd *cmd, size_t req_size, size_t resp_size) { struct scsi_cmnd *sc = cmd->sc; struct scatterlist *sgs[6], req, resp; struct sg_table *out, *in; unsigned out_num = 0, in_num = 0; out = in = NULL; if (sc && sc->sc_data_direction != DMA_NONE) { if (sc->sc_data_direction != DMA_FROM_DEVICE) out = &sc->sdb.table; if (sc->sc_data_direction != DMA_TO_DEVICE) in = &sc->sdb.table; } /* Request header. */ sg_init_one(&req, &cmd->req, req_size); sgs[out_num++] = &req; /* Data-out buffer. */ if (out) { /* Place WRITE protection SGLs before Data OUT payload */ if (scsi_prot_sg_count(sc)) sgs[out_num++] = scsi_prot_sglist(sc); sgs[out_num++] = out->sgl; } /* Response header. 
*/ sg_init_one(&resp, &cmd->resp, resp_size); sgs[out_num + in_num++] = &resp; /* Data-in buffer */ if (in) { /* Place READ protection SGLs before Data IN payload */ if (scsi_prot_sg_count(sc)) sgs[out_num + in_num++] = scsi_prot_sglist(sc); sgs[out_num + in_num++] = in->sgl; } return virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_ATOMIC); } static void virtscsi_kick_vq(struct virtio_scsi_vq *vq) { bool needs_kick; unsigned long flags; spin_lock_irqsave(&vq->vq_lock, flags); needs_kick = virtqueue_kick_prepare(vq->vq); spin_unlock_irqrestore(&vq->vq_lock, flags); if (needs_kick) virtqueue_notify(vq->vq); } /** * virtscsi_add_cmd - add a virtio_scsi_cmd to a virtqueue, optionally kick it * @vq : the struct virtqueue we're talking about * @cmd : command structure * @req_size : size of the request buffer * @resp_size : size of the response buffer * @kick : whether to kick the virtqueue immediately */ static int virtscsi_add_cmd(struct virtio_scsi_vq *vq, struct virtio_scsi_cmd *cmd, size_t req_size, size_t resp_size, bool kick) { unsigned long flags; int err; bool needs_kick = false; spin_lock_irqsave(&vq->vq_lock, flags); err = __virtscsi_add_cmd(vq->vq, cmd, req_size, resp_size); if (!err && kick) needs_kick = virtqueue_kick_prepare(vq->vq); spin_unlock_irqrestore(&vq->vq_lock, flags); if (needs_kick) virtqueue_notify(vq->vq); return err; } static void virtio_scsi_init_hdr(struct virtio_device *vdev, struct virtio_scsi_cmd_req *cmd, struct scsi_cmnd *sc) { cmd->lun[0] = 1; cmd->lun[1] = sc->device->id; cmd->lun[2] = (sc->device->lun >> 8) | 0x40; cmd->lun[3] = sc->device->lun & 0xff; cmd->tag = cpu_to_virtio64(vdev, (unsigned long)sc); cmd->task_attr = VIRTIO_SCSI_S_SIMPLE; cmd->prio = 0; cmd->crn = 0; } #ifdef CONFIG_BLK_DEV_INTEGRITY static void virtio_scsi_init_hdr_pi(struct virtio_device *vdev, struct virtio_scsi_cmd_req_pi *cmd_pi, struct scsi_cmnd *sc) { struct request *rq = scsi_cmd_to_rq(sc); struct blk_integrity *bi; virtio_scsi_init_hdr(vdev, (struct virtio_scsi_cmd_req *)cmd_pi, sc); if (!rq || !scsi_prot_sg_count(sc)) return; bi = blk_get_integrity(rq->q->disk); if (sc->sc_data_direction == DMA_TO_DEVICE) cmd_pi->pi_bytesout = cpu_to_virtio32(vdev, bio_integrity_bytes(bi, blk_rq_sectors(rq))); else if (sc->sc_data_direction == DMA_FROM_DEVICE) cmd_pi->pi_bytesin = cpu_to_virtio32(vdev, bio_integrity_bytes(bi, blk_rq_sectors(rq))); } #endif static struct virtio_scsi_vq *virtscsi_pick_vq_mq(struct virtio_scsi *vscsi, struct scsi_cmnd *sc) { u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(sc)); u16 hwq = blk_mq_unique_tag_to_hwq(tag); return &vscsi->req_vqs[hwq]; } static int virtscsi_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc) { struct virtio_scsi *vscsi = shost_priv(shost); struct virtio_scsi_vq *req_vq = virtscsi_pick_vq_mq(vscsi, sc); struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc); bool kick; unsigned long flags; int req_size; int ret; BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize); /* TODO: check feature bit and fail if unsupported? 
*/ BUG_ON(sc->sc_data_direction == DMA_BIDIRECTIONAL); dev_dbg(&sc->device->sdev_gendev, "cmd %p CDB: %#02x\n", sc, sc->cmnd[0]); cmd->sc = sc; BUG_ON(sc->cmd_len > VIRTIO_SCSI_CDB_SIZE); #ifdef CONFIG_BLK_DEV_INTEGRITY if (virtio_has_feature(vscsi->vdev, VIRTIO_SCSI_F_T10_PI)) { virtio_scsi_init_hdr_pi(vscsi->vdev, &cmd->req.cmd_pi, sc); memcpy(cmd->req.cmd_pi.cdb, sc->cmnd, sc->cmd_len); req_size = sizeof(cmd->req.cmd_pi); } else #endif { virtio_scsi_init_hdr(vscsi->vdev, &cmd->req.cmd, sc); memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len); req_size = sizeof(cmd->req.cmd); } kick = (sc->flags & SCMD_LAST) != 0; ret = virtscsi_add_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd), kick); if (ret == -EIO) { cmd->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET; spin_lock_irqsave(&req_vq->vq_lock, flags); virtscsi_complete_cmd(vscsi, cmd); spin_unlock_irqrestore(&req_vq->vq_lock, flags); } else if (ret != 0) { return SCSI_MLQUEUE_HOST_BUSY; } return 0; } static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd) { DECLARE_COMPLETION_ONSTACK(comp); int ret = FAILED; cmd->comp = &comp; if (virtscsi_add_cmd(&vscsi->ctrl_vq, cmd, sizeof cmd->req.tmf, sizeof cmd->resp.tmf, true) < 0) goto out; wait_for_completion(&comp); if (cmd->resp.tmf.response == VIRTIO_SCSI_S_OK || cmd->resp.tmf.response == VIRTIO_SCSI_S_FUNCTION_SUCCEEDED) ret = SUCCESS; /* * The spec guarantees that all requests related to the TMF have * been completed, but the callback might not have run yet if * we're using independent interrupts (e.g. MSI). Poll the * virtqueues once. * * In the abort case, scsi_done() will do nothing, because the * command timed out and hence SCMD_STATE_COMPLETE has been set. */ virtscsi_poll_requests(vscsi); out: mempool_free(cmd, virtscsi_cmd_pool); return ret; } static int virtscsi_device_reset(struct scsi_cmnd *sc) { struct virtio_scsi *vscsi = shost_priv(sc->device->host); struct virtio_scsi_cmd *cmd; sdev_printk(KERN_INFO, sc->device, "device reset\n"); cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO); if (!cmd) return FAILED; memset(cmd, 0, sizeof(*cmd)); cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){ .type = VIRTIO_SCSI_T_TMF, .subtype = cpu_to_virtio32(vscsi->vdev, VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET), .lun[0] = 1, .lun[1] = sc->device->id, .lun[2] = (sc->device->lun >> 8) | 0x40, .lun[3] = sc->device->lun & 0xff, }; return virtscsi_tmf(vscsi, cmd); } static int virtscsi_device_alloc(struct scsi_device *sdevice) { /* * Passed through SCSI targets (e.g. with qemu's 'scsi-block') * may have transfer limits which come from the host SCSI * controller or something on the host side other than the * target itself. * * To make this work properly, the hypervisor can adjust the * target's VPD information to advertise these limits. But * for that to work, the guest has to look at the VPD pages, * which we won't do by default if it is an SPC-2 device, even * if it does actually support it. * * So, set the blist to always try to read the VPD pages.
*/ sdevice->sdev_bflags = BLIST_TRY_VPD_PAGES; return 0; } /** * virtscsi_change_queue_depth() - Change a virtscsi target's queue depth * @sdev: Virtscsi target whose queue depth to change * @qdepth: New queue depth */ static int virtscsi_change_queue_depth(struct scsi_device *sdev, int qdepth) { struct Scsi_Host *shost = sdev->host; int max_depth = shost->cmd_per_lun; return scsi_change_queue_depth(sdev, min(max_depth, qdepth)); } static int virtscsi_abort(struct scsi_cmnd *sc) { struct virtio_scsi *vscsi = shost_priv(sc->device->host); struct virtio_scsi_cmd *cmd; scmd_printk(KERN_INFO, sc, "abort\n"); cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO); if (!cmd) return FAILED; memset(cmd, 0, sizeof(*cmd)); cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){ .type = VIRTIO_SCSI_T_TMF, .subtype = VIRTIO_SCSI_T_TMF_ABORT_TASK, .lun[0] = 1, .lun[1] = sc->device->id, .lun[2] = (sc->device->lun >> 8) | 0x40, .lun[3] = sc->device->lun & 0xff, .tag = cpu_to_virtio64(vscsi->vdev, (unsigned long)sc), }; return virtscsi_tmf(vscsi, cmd); } static void virtscsi_map_queues(struct Scsi_Host *shost) { struct virtio_scsi *vscsi = shost_priv(shost); int i, qoff; for (i = 0, qoff = 0; i < shost->nr_maps; i++) { struct blk_mq_queue_map *map = &shost->tag_set.map[i]; map->nr_queues = vscsi->io_queues[i]; map->queue_offset = qoff; qoff += map->nr_queues; if (map->nr_queues == 0) continue; /* * Regular queues have interrupts and hence CPU affinity is * defined by the core virtio code, but polling queues have * no interrupts so we let the block layer assign CPU affinity. */ if (i == HCTX_TYPE_POLL) blk_mq_map_queues(map); else blk_mq_virtio_map_queues(map, vscsi->vdev, 2); } } static int virtscsi_mq_poll(struct Scsi_Host *shost, unsigned int queue_num) { struct virtio_scsi *vscsi = shost_priv(shost); struct virtio_scsi_vq *virtscsi_vq = &vscsi->req_vqs[queue_num]; unsigned long flags; unsigned int len; int found = 0; void *buf; spin_lock_irqsave(&virtscsi_vq->vq_lock, flags); while ((buf = virtqueue_get_buf(virtscsi_vq->vq, &len)) != NULL) { virtscsi_complete_cmd(vscsi, buf); found++; } spin_unlock_irqrestore(&virtscsi_vq->vq_lock, flags); return found; } static void virtscsi_commit_rqs(struct Scsi_Host *shost, u16 hwq) { struct virtio_scsi *vscsi = shost_priv(shost); virtscsi_kick_vq(&vscsi->req_vqs[hwq]); } /* * The host guarantees to respond to each command, although I/O * latencies might be higher than on bare metal. Reset the timer * unconditionally to give the host a chance to perform EH. 
*/ static enum scsi_timeout_action virtscsi_eh_timed_out(struct scsi_cmnd *scmnd) { return SCSI_EH_RESET_TIMER; } static const struct scsi_host_template virtscsi_host_template = { .module = THIS_MODULE, .name = "Virtio SCSI HBA", .proc_name = "virtio_scsi", .this_id = -1, .cmd_size = sizeof(struct virtio_scsi_cmd), .queuecommand = virtscsi_queuecommand, .mq_poll = virtscsi_mq_poll, .commit_rqs = virtscsi_commit_rqs, .change_queue_depth = virtscsi_change_queue_depth, .eh_abort_handler = virtscsi_abort, .eh_device_reset_handler = virtscsi_device_reset, .eh_timed_out = virtscsi_eh_timed_out, .slave_alloc = virtscsi_device_alloc, .dma_boundary = UINT_MAX, .map_queues = virtscsi_map_queues, .track_queue_depth = 1, }; #define virtscsi_config_get(vdev, fld) \ ({ \ __virtio_native_type(struct virtio_scsi_config, fld) __val; \ virtio_cread(vdev, struct virtio_scsi_config, fld, &__val); \ __val; \ }) #define virtscsi_config_set(vdev, fld, val) \ do { \ __virtio_native_type(struct virtio_scsi_config, fld) __val = (val); \ virtio_cwrite(vdev, struct virtio_scsi_config, fld, &__val); \ } while(0) static void virtscsi_init_vq(struct virtio_scsi_vq *virtscsi_vq, struct virtqueue *vq) { spin_lock_init(&virtscsi_vq->vq_lock); virtscsi_vq->vq = vq; } static void virtscsi_remove_vqs(struct virtio_device *vdev) { /* Stop all the virtqueues. */ virtio_reset_device(vdev); vdev->config->del_vqs(vdev); } static int virtscsi_init(struct virtio_device *vdev, struct virtio_scsi *vscsi) { int err; u32 i; u32 num_vqs, num_poll_vqs, num_req_vqs; struct virtqueue_info *vqs_info; struct virtqueue **vqs; struct irq_affinity desc = { .pre_vectors = 2 }; num_req_vqs = vscsi->num_queues; num_vqs = num_req_vqs + VIRTIO_SCSI_VQ_BASE; vqs = kmalloc_array(num_vqs, sizeof(struct virtqueue *), GFP_KERNEL); vqs_info = kcalloc(num_vqs, sizeof(*vqs_info), GFP_KERNEL); if (!vqs || !vqs_info) { err = -ENOMEM; goto out; } num_poll_vqs = min_t(unsigned int, virtscsi_poll_queues, num_req_vqs - 1); vscsi->io_queues[HCTX_TYPE_DEFAULT] = num_req_vqs - num_poll_vqs; vscsi->io_queues[HCTX_TYPE_READ] = 0; vscsi->io_queues[HCTX_TYPE_POLL] = num_poll_vqs; dev_info(&vdev->dev, "%d/%d/%d default/read/poll queues\n", vscsi->io_queues[HCTX_TYPE_DEFAULT], vscsi->io_queues[HCTX_TYPE_READ], vscsi->io_queues[HCTX_TYPE_POLL]); vqs_info[0].callback = virtscsi_ctrl_done; vqs_info[0].name = "control"; vqs_info[1].callback = virtscsi_event_done; vqs_info[1].name = "event"; for (i = VIRTIO_SCSI_VQ_BASE; i < num_vqs - num_poll_vqs; i++) { vqs_info[i].callback = virtscsi_req_done; vqs_info[i].name = "request"; } for (; i < num_vqs; i++) vqs_info[i].name = "request_poll"; /* Discover virtqueues and write information to configuration. 
*/ err = virtio_find_vqs(vdev, num_vqs, vqs, vqs_info, &desc); if (err) goto out; virtscsi_init_vq(&vscsi->ctrl_vq, vqs[0]); virtscsi_init_vq(&vscsi->event_vq, vqs[1]); for (i = VIRTIO_SCSI_VQ_BASE; i < num_vqs; i++) virtscsi_init_vq(&vscsi->req_vqs[i - VIRTIO_SCSI_VQ_BASE], vqs[i]); virtscsi_config_set(vdev, cdb_size, VIRTIO_SCSI_CDB_SIZE); virtscsi_config_set(vdev, sense_size, VIRTIO_SCSI_SENSE_SIZE); err = 0; out: kfree(vqs_info); kfree(vqs); if (err) virtscsi_remove_vqs(vdev); return err; } static int virtscsi_probe(struct virtio_device *vdev) { struct Scsi_Host *shost; struct virtio_scsi *vscsi; int err; u32 sg_elems, num_targets; u32 cmd_per_lun; u32 num_queues; if (!vdev->config->get) { dev_err(&vdev->dev, "%s failure: config access disabled\n", __func__); return -EINVAL; } /* We need to know how many queues before we allocate. */ num_queues = virtscsi_config_get(vdev, num_queues) ? : 1; num_queues = min_t(unsigned int, nr_cpu_ids, num_queues); num_targets = virtscsi_config_get(vdev, max_target) + 1; shost = scsi_host_alloc(&virtscsi_host_template, struct_size(vscsi, req_vqs, num_queues)); if (!shost) return -ENOMEM; sg_elems = virtscsi_config_get(vdev, seg_max) ?: 1; shost->sg_tablesize = sg_elems; shost->nr_maps = 1; vscsi = shost_priv(shost); vscsi->vdev = vdev; vscsi->num_queues = num_queues; vdev->priv = shost; err = virtscsi_init(vdev, vscsi); if (err) goto virtscsi_init_failed; if (vscsi->io_queues[HCTX_TYPE_POLL]) shost->nr_maps = HCTX_TYPE_POLL + 1; shost->can_queue = virtqueue_get_vring_size(vscsi->req_vqs[0].vq); cmd_per_lun = virtscsi_config_get(vdev, cmd_per_lun) ?: 1; shost->cmd_per_lun = min_t(u32, cmd_per_lun, shost->can_queue); shost->max_sectors = virtscsi_config_get(vdev, max_sectors) ?: 0xFFFF; /* LUNs > 256 are reported with format 1, so they go in the range * 16640-32767. 
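 *
 * (Editor's arithmetic, for clarity: SAM "flat space" (format 1) LUN
 * addressing encodes LUN n in the top 16 bits as 0x4000 | n, hence
 *
 *	0x4000 + 256    = 16640
 *	0x4000 + 0x3fff = 0x7fff = 32767
 *
 * which is why max_lun below adds 0x4000 on top of the device's
 * advertised max_lun.)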
*/ shost->max_lun = virtscsi_config_get(vdev, max_lun) + 1 + 0x4000; shost->max_id = num_targets; shost->max_channel = 0; shost->max_cmd_len = VIRTIO_SCSI_CDB_SIZE; shost->nr_hw_queues = num_queues; #ifdef CONFIG_BLK_DEV_INTEGRITY if (virtio_has_feature(vdev, VIRTIO_SCSI_F_T10_PI)) { int host_prot; host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION | SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION | SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION; scsi_host_set_prot(shost, host_prot); scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC); } #endif err = scsi_add_host(shost, &vdev->dev); if (err) goto scsi_add_host_failed; virtio_device_ready(vdev); if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) virtscsi_kick_event_all(vscsi); scsi_scan_host(shost); return 0; scsi_add_host_failed: vdev->config->del_vqs(vdev); virtscsi_init_failed: scsi_host_put(shost); return err; } static void virtscsi_remove(struct virtio_device *vdev) { struct Scsi_Host *shost = virtio_scsi_host(vdev); struct virtio_scsi *vscsi = shost_priv(shost); if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) virtscsi_cancel_event_work(vscsi); scsi_remove_host(shost); virtscsi_remove_vqs(vdev); scsi_host_put(shost); } #ifdef CONFIG_PM_SLEEP static int virtscsi_freeze(struct virtio_device *vdev) { virtscsi_remove_vqs(vdev); return 0; } static int virtscsi_restore(struct virtio_device *vdev) { struct Scsi_Host *sh = virtio_scsi_host(vdev); struct virtio_scsi *vscsi = shost_priv(sh); int err; err = virtscsi_init(vdev, vscsi); if (err) return err; virtio_device_ready(vdev); if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) virtscsi_kick_event_all(vscsi); return err; } #endif static struct virtio_device_id id_table[] = { { VIRTIO_ID_SCSI, VIRTIO_DEV_ANY_ID }, { 0 }, }; static unsigned int features[] = { VIRTIO_SCSI_F_HOTPLUG, VIRTIO_SCSI_F_CHANGE, #ifdef CONFIG_BLK_DEV_INTEGRITY VIRTIO_SCSI_F_T10_PI, #endif }; static struct virtio_driver virtio_scsi_driver = { .feature_table = features, .feature_table_size = ARRAY_SIZE(features), .driver.name = KBUILD_MODNAME, .id_table = id_table, .probe = virtscsi_probe, #ifdef CONFIG_PM_SLEEP .freeze = virtscsi_freeze, .restore = virtscsi_restore, #endif .remove = virtscsi_remove, }; static int __init virtio_scsi_init(void) { int ret = -ENOMEM; virtscsi_cmd_cache = KMEM_CACHE(virtio_scsi_cmd, 0); if (!virtscsi_cmd_cache) { pr_err("kmem_cache_create() for virtscsi_cmd_cache failed\n"); goto error; } virtscsi_cmd_pool = mempool_create_slab_pool(VIRTIO_SCSI_MEMPOOL_SZ, virtscsi_cmd_cache); if (!virtscsi_cmd_pool) { pr_err("mempool_create() for virtscsi_cmd_pool failed\n"); goto error; } ret = register_virtio_driver(&virtio_scsi_driver); if (ret < 0) goto error; return 0; error: mempool_destroy(virtscsi_cmd_pool); virtscsi_cmd_pool = NULL; kmem_cache_destroy(virtscsi_cmd_cache); virtscsi_cmd_cache = NULL; return ret; } static void __exit virtio_scsi_fini(void) { unregister_virtio_driver(&virtio_scsi_driver); mempool_destroy(virtscsi_cmd_pool); kmem_cache_destroy(virtscsi_cmd_cache); } module_init(virtio_scsi_init); module_exit(virtio_scsi_fini); MODULE_DEVICE_TABLE(virtio, id_table); MODULE_DESCRIPTION("Virtio SCSI HBA driver"); MODULE_LICENSE("GPL");
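/*
 * Editor's illustration (not part of either driver): the slab-cache plus
 * mempool pattern used by virtio_scsi_init() above. The mempool keeps a
 * small reserve of pre-allocated objects so allocations on the I/O or
 * error-handling path (see the GFP_NOIO mempool_alloc() in
 * virtscsi_abort()) can make forward progress under memory pressure.
 * All names below ("my_cmd", "MY_POOL_MIN") are hypothetical.
 */
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>

struct my_cmd { int opaque; };		/* stand-in payload */

#define MY_POOL_MIN 2			/* objects reserved for progress */

static struct kmem_cache *my_cmd_cache;
static mempool_t *my_cmd_pool;

static int __init my_pool_init(void)
{
	my_cmd_cache = KMEM_CACHE(my_cmd, 0);
	if (!my_cmd_cache)
		return -ENOMEM;

	/* the pool draws from the cache, guaranteeing MY_POOL_MIN objects */
	my_cmd_pool = mempool_create_slab_pool(MY_POOL_MIN, my_cmd_cache);
	if (!my_cmd_pool) {
		kmem_cache_destroy(my_cmd_cache);
		return -ENOMEM;
	}
	return 0;
}

static void __exit my_pool_exit(void)
{
	/* both helpers tolerate NULL, so teardown simply mirrors init */
	mempool_destroy(my_cmd_pool);
	kmem_cache_destroy(my_cmd_cache);
}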
// SPDX-License-Identifier: GPL-2.0-or-later /* * imon.c: input and display driver for SoundGraph iMON IR/VFD/LCD * * Copyright(C) 2010 Jarod Wilson <jarod@wilsonet.com> * Portions based on the original lirc_imon driver, * Copyright(C) 2004 Venky Raju(dev@venky.ws) * * Huge thanks to R. Geoff Newbury for invaluable debugging on the * 0xffdc iMON devices, and for sending me one to hack on, without * which the support for them wouldn't be nearly as good. Thanks * also to the numerous 0xffdc device owners that tested auto-config * support for me and provided debug dumps from their devices.
*/ #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__ #include <linux/errno.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/ktime.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/uaccess.h> #include <linux/ratelimit.h> #include <linux/input.h> #include <linux/usb.h> #include <linux/usb/input.h> #include <media/rc-core.h> #include <linux/timer.h> #define MOD_AUTHOR "Jarod Wilson <jarod@wilsonet.com>" #define MOD_DESC "Driver for SoundGraph iMON MultiMedia IR/Display" #define MOD_NAME "imon" #define MOD_VERSION "0.9.4" #define DISPLAY_MINOR_BASE 144 #define DEVICE_NAME "lcd%d" #define BUF_CHUNK_SIZE 8 #define BUF_SIZE 128 #define BIT_DURATION 250 /* each bit received is 250us */ #define IMON_CLOCK_ENABLE_PACKETS 2 /*** P R O T O T Y P E S ***/ /* USB Callback prototypes */ static int imon_probe(struct usb_interface *interface, const struct usb_device_id *id); static void imon_disconnect(struct usb_interface *interface); static void usb_rx_callback_intf0(struct urb *urb); static void usb_rx_callback_intf1(struct urb *urb); static void usb_tx_callback(struct urb *urb); /* suspend/resume support */ static int imon_resume(struct usb_interface *intf); static int imon_suspend(struct usb_interface *intf, pm_message_t message); /* Display file_operations function prototypes */ static int display_open(struct inode *inode, struct file *file); static int display_close(struct inode *inode, struct file *file); /* VFD write operation */ static ssize_t vfd_write(struct file *file, const char __user *buf, size_t n_bytes, loff_t *pos); /* LCD file_operations override function prototypes */ static ssize_t lcd_write(struct file *file, const char __user *buf, size_t n_bytes, loff_t *pos); /*** G L O B A L S ***/ struct imon_panel_key_table { u64 hw_code; u32 keycode; }; struct imon_usb_dev_descr { __u16 flags; #define IMON_NO_FLAGS 0 #define IMON_NEED_20MS_PKT_DELAY 1 #define IMON_SUPPRESS_REPEATED_KEYS 2 struct imon_panel_key_table key_table[]; }; struct imon_context { struct device *dev; /* Newer devices have two interfaces */ struct usb_device *usbdev_intf0; struct usb_device *usbdev_intf1; bool display_supported; /* not all controllers do */ bool display_isopen; /* display port has been opened */ bool rf_device; /* true if iMON 2.4G LT/DT RF device */ bool rf_isassociating; /* RF remote associating */ bool dev_present_intf0; /* USB device presence, interface 0 */ bool dev_present_intf1; /* USB device presence, interface 1 */ struct mutex lock; /* to lock this object */ wait_queue_head_t remove_ok; /* For unexpected USB disconnects */ struct usb_endpoint_descriptor *rx_endpoint_intf0; struct usb_endpoint_descriptor *rx_endpoint_intf1; struct usb_endpoint_descriptor *tx_endpoint; struct urb *rx_urb_intf0; struct urb *rx_urb_intf1; struct urb *tx_urb; bool tx_control; unsigned char usb_rx_buf[8]; unsigned char usb_tx_buf[8]; unsigned int send_packet_delay; struct tx_t { unsigned char data_buf[35]; /* user data buffer */ struct completion finished; /* wait for write to finish */ bool busy; /* write in progress */ int status; /* status of tx completion */ } tx; u16 vendor; /* usb vendor ID */ u16 product; /* usb product ID */ struct rc_dev *rdev; /* rc-core device for remote */ struct input_dev *idev; /* input device for panel & IR mouse */ struct input_dev *touch; /* input device for touchscreen */ spinlock_t kc_lock; /* make sure we get keycodes right */ u32 kc; /* current input keycode */ u32 last_keycode; /* last reported input keycode */ u32 rc_scancode; /* the 
computed remote scancode */ u8 rc_toggle; /* the computed remote toggle bit */ u64 rc_proto; /* iMON or MCE (RC6) IR protocol? */ bool release_code; /* some keys send a release code */ u8 display_type; /* store the display type */ bool pad_mouse; /* toggle kbd(0)/mouse(1) mode */ char name_rdev[128]; /* rc input device name */ char phys_rdev[64]; /* rc input device phys path */ char name_idev[128]; /* input device name */ char phys_idev[64]; /* input device phys path */ char name_touch[128]; /* touch screen name */ char phys_touch[64]; /* touch screen phys path */ struct timer_list ttimer; /* touch screen timer */ int touch_x; /* x coordinate on touchscreen */ int touch_y; /* y coordinate on touchscreen */ const struct imon_usb_dev_descr *dev_descr; /* device description with key */ /* table for front panels */ /* * Fields for deferring free_imon_context(). * * Since reference to "struct imon_context" is stored into * "struct file"->private_data, we need to remember * how many file descriptors might access this "struct imon_context". */ refcount_t users; /* * Use a flag for telling display_open()/vfd_write()/lcd_write() that * imon_disconnect() was already called. */ bool disconnected; /* * We need to wait for RCU grace period in order to allow * display_open() to safely check ->disconnected and increment ->users. */ struct rcu_head rcu; }; #define TOUCH_TIMEOUT (HZ/30) /* vfd character device file operations */ static const struct file_operations vfd_fops = { .owner = THIS_MODULE, .open = display_open, .write = vfd_write, .release = display_close, .llseek = noop_llseek, }; /* lcd character device file operations */ static const struct file_operations lcd_fops = { .owner = THIS_MODULE, .open = display_open, .write = lcd_write, .release = display_close, .llseek = noop_llseek, }; enum { IMON_DISPLAY_TYPE_AUTO = 0, IMON_DISPLAY_TYPE_VFD = 1, IMON_DISPLAY_TYPE_LCD = 2, IMON_DISPLAY_TYPE_VGA = 3, IMON_DISPLAY_TYPE_NONE = 4, }; enum { IMON_KEY_IMON = 0, IMON_KEY_MCE = 1, IMON_KEY_PANEL = 2, }; static struct usb_class_driver imon_vfd_class = { .name = DEVICE_NAME, .fops = &vfd_fops, .minor_base = DISPLAY_MINOR_BASE, }; static struct usb_class_driver imon_lcd_class = { .name = DEVICE_NAME, .fops = &lcd_fops, .minor_base = DISPLAY_MINOR_BASE, }; /* imon receiver front panel/knob key table */ static const struct imon_usb_dev_descr imon_default_table = { .flags = IMON_NO_FLAGS, .key_table = { { 0x000000000f00ffeell, KEY_MEDIA }, /* Go */ { 0x000000001200ffeell, KEY_UP }, { 0x000000001300ffeell, KEY_DOWN }, { 0x000000001400ffeell, KEY_LEFT }, { 0x000000001500ffeell, KEY_RIGHT }, { 0x000000001600ffeell, KEY_ENTER }, { 0x000000001700ffeell, KEY_ESC }, { 0x000000001f00ffeell, KEY_AUDIO }, { 0x000000002000ffeell, KEY_VIDEO }, { 0x000000002100ffeell, KEY_CAMERA }, { 0x000000002700ffeell, KEY_DVD }, { 0x000000002300ffeell, KEY_TV }, { 0x000000002b00ffeell, KEY_EXIT }, { 0x000000002c00ffeell, KEY_SELECT }, { 0x000000002d00ffeell, KEY_MENU }, { 0x000000000500ffeell, KEY_PREVIOUS }, { 0x000000000700ffeell, KEY_REWIND }, { 0x000000000400ffeell, KEY_STOP }, { 0x000000003c00ffeell, KEY_PLAYPAUSE }, { 0x000000000800ffeell, KEY_FASTFORWARD }, { 0x000000000600ffeell, KEY_NEXT }, { 0x000000010000ffeell, KEY_RIGHT }, { 0x000001000000ffeell, KEY_LEFT }, { 0x000000003d00ffeell, KEY_SELECT }, { 0x000100000000ffeell, KEY_VOLUMEUP }, { 0x010000000000ffeell, KEY_VOLUMEDOWN }, { 0x000000000100ffeell, KEY_MUTE }, /* 0xffdc iMON MCE VFD */ { 0x00010000ffffffeell, KEY_VOLUMEUP }, { 0x01000000ffffffeell, KEY_VOLUMEDOWN }, { 
0x00000001ffffffeell, KEY_MUTE }, { 0x0000000fffffffeell, KEY_MEDIA }, { 0x00000012ffffffeell, KEY_UP }, { 0x00000013ffffffeell, KEY_DOWN }, { 0x00000014ffffffeell, KEY_LEFT }, { 0x00000015ffffffeell, KEY_RIGHT }, { 0x00000016ffffffeell, KEY_ENTER }, { 0x00000017ffffffeell, KEY_ESC }, /* iMON Knob values */ { 0x000100ffffffffeell, KEY_VOLUMEUP }, { 0x010000ffffffffeell, KEY_VOLUMEDOWN }, { 0x000008ffffffffeell, KEY_MUTE }, { 0, KEY_RESERVED }, } }; static const struct imon_usb_dev_descr imon_OEM_VFD = { .flags = IMON_NEED_20MS_PKT_DELAY, .key_table = { { 0x000000000f00ffeell, KEY_MEDIA }, /* Go */ { 0x000000001200ffeell, KEY_UP }, { 0x000000001300ffeell, KEY_DOWN }, { 0x000000001400ffeell, KEY_LEFT }, { 0x000000001500ffeell, KEY_RIGHT }, { 0x000000001600ffeell, KEY_ENTER }, { 0x000000001700ffeell, KEY_ESC }, { 0x000000001f00ffeell, KEY_AUDIO }, { 0x000000002b00ffeell, KEY_EXIT }, { 0x000000002c00ffeell, KEY_SELECT }, { 0x000000002d00ffeell, KEY_MENU }, { 0x000000000500ffeell, KEY_PREVIOUS }, { 0x000000000700ffeell, KEY_REWIND }, { 0x000000000400ffeell, KEY_STOP }, { 0x000000003c00ffeell, KEY_PLAYPAUSE }, { 0x000000000800ffeell, KEY_FASTFORWARD }, { 0x000000000600ffeell, KEY_NEXT }, { 0x000000010000ffeell, KEY_RIGHT }, { 0x000001000000ffeell, KEY_LEFT }, { 0x000000003d00ffeell, KEY_SELECT }, { 0x000100000000ffeell, KEY_VOLUMEUP }, { 0x010000000000ffeell, KEY_VOLUMEDOWN }, { 0x000000000100ffeell, KEY_MUTE }, /* 0xffdc iMON MCE VFD */ { 0x00010000ffffffeell, KEY_VOLUMEUP }, { 0x01000000ffffffeell, KEY_VOLUMEDOWN }, { 0x00000001ffffffeell, KEY_MUTE }, { 0x0000000fffffffeell, KEY_MEDIA }, { 0x00000012ffffffeell, KEY_UP }, { 0x00000013ffffffeell, KEY_DOWN }, { 0x00000014ffffffeell, KEY_LEFT }, { 0x00000015ffffffeell, KEY_RIGHT }, { 0x00000016ffffffeell, KEY_ENTER }, { 0x00000017ffffffeell, KEY_ESC }, /* iMON Knob values */ { 0x000100ffffffffeell, KEY_VOLUMEUP }, { 0x010000ffffffffeell, KEY_VOLUMEDOWN }, { 0x000008ffffffffeell, KEY_MUTE }, { 0, KEY_RESERVED }, } }; /* imon receiver front panel/knob key table for DH102*/ static const struct imon_usb_dev_descr imon_DH102 = { .flags = IMON_NO_FLAGS, .key_table = { { 0x000100000000ffeell, KEY_VOLUMEUP }, { 0x010000000000ffeell, KEY_VOLUMEDOWN }, { 0x000000010000ffeell, KEY_MUTE }, { 0x0000000f0000ffeell, KEY_MEDIA }, { 0x000000120000ffeell, KEY_UP }, { 0x000000130000ffeell, KEY_DOWN }, { 0x000000140000ffeell, KEY_LEFT }, { 0x000000150000ffeell, KEY_RIGHT }, { 0x000000160000ffeell, KEY_ENTER }, { 0x000000170000ffeell, KEY_ESC }, { 0x0000002b0000ffeell, KEY_EXIT }, { 0x0000002c0000ffeell, KEY_SELECT }, { 0x0000002d0000ffeell, KEY_MENU }, { 0, KEY_RESERVED } } }; /* imon ultrabay front panel key table */ static const struct imon_usb_dev_descr ultrabay_table = { .flags = IMON_SUPPRESS_REPEATED_KEYS, .key_table = { { 0x0000000f0000ffeell, KEY_MEDIA }, /* Go */ { 0x000000000100ffeell, KEY_UP }, { 0x000000000001ffeell, KEY_DOWN }, { 0x000000160000ffeell, KEY_ENTER }, { 0x0000001f0000ffeell, KEY_AUDIO }, /* Music */ { 0x000000200000ffeell, KEY_VIDEO }, /* Movie */ { 0x000000210000ffeell, KEY_CAMERA }, /* Photo */ { 0x000000270000ffeell, KEY_DVD }, /* DVD */ { 0x000000230000ffeell, KEY_TV }, /* TV */ { 0x000000050000ffeell, KEY_PREVIOUS }, /* Previous */ { 0x000000070000ffeell, KEY_REWIND }, { 0x000000040000ffeell, KEY_STOP }, { 0x000000020000ffeell, KEY_PLAYPAUSE }, { 0x000000080000ffeell, KEY_FASTFORWARD }, { 0x000000060000ffeell, KEY_NEXT }, /* Next */ { 0x000100000000ffeell, KEY_VOLUMEUP }, { 0x010000000000ffeell, KEY_VOLUMEDOWN }, { 0x000000010000ffeell, 
KEY_MUTE }, { 0, KEY_RESERVED }, } }; /* * USB Device ID for iMON USB Control Boards * * The Windows drivers contain 6 different inf files, more or less one for * each new device until the 0x0034-0x0046 devices, which all use the same * driver. Some of the devices in the 34-46 range haven't been definitively * identified yet. Early devices have either a TriGem Computer, Inc. or a * Samsung vendor ID (0x0aa8 and 0x04e8 respectively), while all later * devices use the SoundGraph vendor ID (0x15c2). This driver only supports * the ffdc and later devices, which do onboard decoding. */ static const struct usb_device_id imon_usb_id_table[] = { /* * Several devices with this same device ID, all use iMON_PAD.inf * SoundGraph iMON PAD (IR & VFD) * SoundGraph iMON PAD (IR & LCD) * SoundGraph iMON Knob (IR only) */ { USB_DEVICE(0x15c2, 0xffdc), .driver_info = (unsigned long)&imon_default_table }, /* * Newer devices, all driven by the latest iMON Windows driver, full * list of device IDs extracted via 'strings Setup/data1.hdr |grep 15c2' * Need user input to fill in details on unknown devices. */ /* SoundGraph iMON OEM Touch LCD (IR & 7" VGA LCD) */ { USB_DEVICE(0x15c2, 0x0034), .driver_info = (unsigned long)&imon_DH102 }, /* SoundGraph iMON OEM Touch LCD (IR & 4.3" VGA LCD) */ { USB_DEVICE(0x15c2, 0x0035), .driver_info = (unsigned long)&imon_default_table}, /* SoundGraph iMON OEM VFD (IR & VFD) */ { USB_DEVICE(0x15c2, 0x0036), .driver_info = (unsigned long)&imon_OEM_VFD }, /* device specifics unknown */ { USB_DEVICE(0x15c2, 0x0037), .driver_info = (unsigned long)&imon_default_table}, /* SoundGraph iMON OEM LCD (IR & LCD) */ { USB_DEVICE(0x15c2, 0x0038), .driver_info = (unsigned long)&imon_default_table}, /* SoundGraph iMON UltraBay (IR & LCD) */ { USB_DEVICE(0x15c2, 0x0039), .driver_info = (unsigned long)&imon_default_table}, /* device specifics unknown */ { USB_DEVICE(0x15c2, 0x003a), .driver_info = (unsigned long)&imon_default_table}, /* device specifics unknown */ { USB_DEVICE(0x15c2, 0x003b), .driver_info = (unsigned long)&imon_default_table}, /* SoundGraph iMON OEM Inside (IR only) */ { USB_DEVICE(0x15c2, 0x003c), .driver_info = (unsigned long)&imon_default_table}, /* device specifics unknown */ { USB_DEVICE(0x15c2, 0x003d), .driver_info = (unsigned long)&imon_default_table}, /* device specifics unknown */ { USB_DEVICE(0x15c2, 0x003e), .driver_info = (unsigned long)&imon_default_table}, /* device specifics unknown */ { USB_DEVICE(0x15c2, 0x003f), .driver_info = (unsigned long)&imon_default_table}, /* device specifics unknown */ { USB_DEVICE(0x15c2, 0x0040), .driver_info = (unsigned long)&imon_default_table}, /* SoundGraph iMON MINI (IR only) */ { USB_DEVICE(0x15c2, 0x0041), .driver_info = (unsigned long)&imon_default_table}, /* Antec Veris Multimedia Station EZ External (IR only) */ { USB_DEVICE(0x15c2, 0x0042), .driver_info = (unsigned long)&imon_default_table}, /* Antec Veris Multimedia Station Basic Internal (IR only) */ { USB_DEVICE(0x15c2, 0x0043), .driver_info = (unsigned long)&imon_default_table}, /* Antec Veris Multimedia Station Elite (IR & VFD) */ { USB_DEVICE(0x15c2, 0x0044), .driver_info = (unsigned long)&imon_default_table}, /* Antec Veris Multimedia Station Premiere (IR & LCD) */ { USB_DEVICE(0x15c2, 0x0045), .driver_info = (unsigned long)&imon_default_table}, /* device specifics unknown */ { USB_DEVICE(0x15c2, 0x0046), .driver_info = (unsigned long)&imon_default_table}, {} }; /* USB Device data */ static struct usb_driver imon_driver = { .name = MOD_NAME, .probe = imon_probe, 
.disconnect = imon_disconnect, .suspend = imon_suspend, .resume = imon_resume, .id_table = imon_usb_id_table, }; /* Module bookkeeping bits */ MODULE_AUTHOR(MOD_AUTHOR); MODULE_DESCRIPTION(MOD_DESC); MODULE_VERSION(MOD_VERSION); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(usb, imon_usb_id_table); static bool debug; module_param(debug, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(debug, "Debug messages: 0=no, 1=yes (default: no)"); /* lcd, vfd, vga or none? should be auto-detected, but can be overridden... */ static int display_type; module_param(display_type, int, S_IRUGO); MODULE_PARM_DESC(display_type, "Type of attached display. 0=autodetect, 1=vfd, 2=lcd, 3=vga, 4=none (default: autodetect)"); static int pad_stabilize = 1; module_param(pad_stabilize, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(pad_stabilize, "Apply stabilization algorithm to iMON PAD presses in arrow key mode. 0=disable, 1=enable (default)."); /* * In certain use cases, mouse mode isn't really helpful, and could actually * cause confusion, so allow disabling it when the IR device is open. */ static bool nomouse; module_param(nomouse, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(nomouse, "Disable mouse input device mode when IR device is open. 0=don't disable, 1=disable. (default: don't disable)"); /* threshold at which a pad push registers as an arrow key in kbd mode */ static int pad_thresh; module_param(pad_thresh, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(pad_thresh, "Threshold at which a pad push registers as an arrow key in kbd mode (default: 28)"); static void free_imon_context(struct imon_context *ictx) { struct device *dev = ictx->dev; usb_free_urb(ictx->tx_urb); WARN_ON(ictx->dev_present_intf0); usb_free_urb(ictx->rx_urb_intf0); WARN_ON(ictx->dev_present_intf1); usb_free_urb(ictx->rx_urb_intf1); kfree_rcu(ictx, rcu); dev_dbg(dev, "%s: iMON context freed\n", __func__); } /* * Called when the Display device (e.g. /dev/lcd0) * is opened by the application. */ static int display_open(struct inode *inode, struct file *file) { struct usb_interface *interface; struct imon_context *ictx = NULL; int subminor; int retval = 0; subminor = iminor(inode); interface = usb_find_interface(&imon_driver, subminor); if (!interface) { pr_err("could not find interface for minor %d\n", subminor); retval = -ENODEV; goto exit; } rcu_read_lock(); ictx = usb_get_intfdata(interface); if (!ictx || ictx->disconnected || !refcount_inc_not_zero(&ictx->users)) { rcu_read_unlock(); pr_err("no context found for minor %d\n", subminor); retval = -ENODEV; goto exit; } rcu_read_unlock(); mutex_lock(&ictx->lock); if (!ictx->display_supported) { pr_err("display not supported by device\n"); retval = -ENODEV; } else if (ictx->display_isopen) { pr_err("display port is already open\n"); retval = -EBUSY; } else { ictx->display_isopen = true; file->private_data = ictx; dev_dbg(ictx->dev, "display port opened\n"); } mutex_unlock(&ictx->lock); if (retval && refcount_dec_and_test(&ictx->users)) free_imon_context(ictx); exit: return retval; } /* * Called when the display device (e.g. /dev/lcd0) * is closed by the application. 
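 *
 * (Editor's note: this is the release side of the reference taken in
 * display_open() via refcount_inc_not_zero(); both paths end with the
 * same idiom, so whichever user drops ->users to zero -- close() or
 * disconnect -- frees the context:
 *
 *	if (refcount_dec_and_test(&ictx->users))
 *		free_imon_context(ictx);
 * )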
*/ static int display_close(struct inode *inode, struct file *file) { struct imon_context *ictx = file->private_data; int retval = 0; mutex_lock(&ictx->lock); if (!ictx->display_supported) { pr_err("display not supported by device\n"); retval = -ENODEV; } else if (!ictx->display_isopen) { pr_err("display is not open\n"); retval = -EIO; } else { ictx->display_isopen = false; dev_dbg(ictx->dev, "display port closed\n"); } mutex_unlock(&ictx->lock); if (refcount_dec_and_test(&ictx->users)) free_imon_context(ictx); return retval; } /* * Sends a packet to the device -- this function must be called with * ictx->lock held, or its unlock/lock sequence while waiting for tx * to complete can/will lead to a deadlock. */ static int send_packet(struct imon_context *ictx) { unsigned int pipe; unsigned long timeout; int interval = 0; int retval = 0; struct usb_ctrlrequest *control_req = NULL; /* Check if we need to use control or interrupt urb */ if (!ictx->tx_control) { pipe = usb_sndintpipe(ictx->usbdev_intf0, ictx->tx_endpoint->bEndpointAddress); interval = ictx->tx_endpoint->bInterval; usb_fill_int_urb(ictx->tx_urb, ictx->usbdev_intf0, pipe, ictx->usb_tx_buf, sizeof(ictx->usb_tx_buf), usb_tx_callback, ictx, interval); ictx->tx_urb->actual_length = 0; } else { /* fill request into kmalloc'ed space: */ control_req = kmalloc(sizeof(*control_req), GFP_KERNEL); if (control_req == NULL) return -ENOMEM; /* setup packet is '21 09 0200 0001 0008' */ control_req->bRequestType = 0x21; control_req->bRequest = 0x09; control_req->wValue = cpu_to_le16(0x0200); control_req->wIndex = cpu_to_le16(0x0001); control_req->wLength = cpu_to_le16(0x0008); /* control pipe is endpoint 0x00 */ pipe = usb_sndctrlpipe(ictx->usbdev_intf0, 0); /* build the control urb */ usb_fill_control_urb(ictx->tx_urb, ictx->usbdev_intf0, pipe, (unsigned char *)control_req, ictx->usb_tx_buf, sizeof(ictx->usb_tx_buf), usb_tx_callback, ictx); ictx->tx_urb->actual_length = 0; } reinit_completion(&ictx->tx.finished); ictx->tx.busy = true; smp_rmb(); /* ensure later readers know we're busy */ retval = usb_submit_urb(ictx->tx_urb, GFP_KERNEL); if (retval) { ictx->tx.busy = false; smp_rmb(); /* ensure later readers know we're not busy */ pr_err_ratelimited("error submitting urb(%d)\n", retval); } else { /* Wait for transmission to complete (or abort) */ retval = wait_for_completion_interruptible( &ictx->tx.finished); if (retval) { usb_kill_urb(ictx->tx_urb); pr_err_ratelimited("task interrupted\n"); } ictx->tx.busy = false; retval = ictx->tx.status; if (retval) pr_err_ratelimited("packet tx failed (%d)\n", retval); } kfree(control_req); /* * Induce a mandatory delay before returning, as otherwise, * send_packet can get called so rapidly as to overwhelm the device, * particularly on faster systems and/or those with quirky usb. */ timeout = msecs_to_jiffies(ictx->send_packet_delay); set_current_state(TASK_INTERRUPTIBLE); schedule_timeout(timeout); return retval; } /* * Sends an associate packet to the iMON 2.4G. * * This might not be such a good idea, since it has an id collision with * some versions of the "IR & VFD" combo. The only way to determine if it * is an RF version is to look at the product description string. (Which * we currently do not fetch). 
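 *
 * (Editor's sketch of what such a check could look like -- unverified,
 * and the "RF" substring is a guess, not a known marker:
 *
 *	char prod[64];
 *	if (usb_string(ictx->usbdev_intf0,
 *		       ictx->usbdev_intf0->descriptor.iProduct,
 *		       prod, sizeof(prod)) > 0 && strstr(prod, "RF"))
 *		ictx->rf_device = true;
 * )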
*/ static int send_associate_24g(struct imon_context *ictx) { const unsigned char packet[8] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20 }; if (!ictx) { pr_err("no context for device\n"); return -ENODEV; } if (!ictx->dev_present_intf0) { pr_err("no iMON device present\n"); return -ENODEV; } memcpy(ictx->usb_tx_buf, packet, sizeof(packet)); return send_packet(ictx); } /* * Sends packets to setup and show clock on iMON display * * Arguments: year - last 2 digits of year, month - 1..12, * day - 1..31, dow - day of the week (0-Sun...6-Sat), * hour - 0..23, minute - 0..59, second - 0..59 */ static int send_set_imon_clock(struct imon_context *ictx, unsigned int year, unsigned int month, unsigned int day, unsigned int dow, unsigned int hour, unsigned int minute, unsigned int second) { unsigned char clock_enable_pkt[IMON_CLOCK_ENABLE_PACKETS][8]; int retval = 0; int i; if (!ictx) { pr_err("no context for device\n"); return -ENODEV; } switch (ictx->display_type) { case IMON_DISPLAY_TYPE_LCD: clock_enable_pkt[0][0] = 0x80; clock_enable_pkt[0][1] = year; clock_enable_pkt[0][2] = month-1; clock_enable_pkt[0][3] = day; clock_enable_pkt[0][4] = hour; clock_enable_pkt[0][5] = minute; clock_enable_pkt[0][6] = second; clock_enable_pkt[1][0] = 0x80; clock_enable_pkt[1][1] = 0; clock_enable_pkt[1][2] = 0; clock_enable_pkt[1][3] = 0; clock_enable_pkt[1][4] = 0; clock_enable_pkt[1][5] = 0; clock_enable_pkt[1][6] = 0; if (ictx->product == 0xffdc) { clock_enable_pkt[0][7] = 0x50; clock_enable_pkt[1][7] = 0x51; } else { clock_enable_pkt[0][7] = 0x88; clock_enable_pkt[1][7] = 0x8a; } break; case IMON_DISPLAY_TYPE_VFD: clock_enable_pkt[0][0] = year; clock_enable_pkt[0][1] = month-1; clock_enable_pkt[0][2] = day; clock_enable_pkt[0][3] = dow; clock_enable_pkt[0][4] = hour; clock_enable_pkt[0][5] = minute; clock_enable_pkt[0][6] = second; clock_enable_pkt[0][7] = 0x40; clock_enable_pkt[1][0] = 0; clock_enable_pkt[1][1] = 0; clock_enable_pkt[1][2] = 1; clock_enable_pkt[1][3] = 0; clock_enable_pkt[1][4] = 0; clock_enable_pkt[1][5] = 0; clock_enable_pkt[1][6] = 0; clock_enable_pkt[1][7] = 0x42; break; default: return -ENODEV; } for (i = 0; i < IMON_CLOCK_ENABLE_PACKETS; i++) { memcpy(ictx->usb_tx_buf, clock_enable_pkt[i], 8); retval = send_packet(ictx); if (retval) { pr_err("send_packet failed for packet %d\n", i); break; } } return retval; } /* * These are the sysfs functions to handle the association on the iMON 2.4G LT. 
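 *
 * (Usage sketch, device path abbreviated: reading the attribute reports
 * "associating" or "closed"; writing anything starts association, e.g.
 *
 *	echo 1 > /sys/bus/usb/devices/.../associate_remote
 *
 * then press the button on the remote to pair it.)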
*/ static ssize_t associate_remote_show(struct device *d, struct device_attribute *attr, char *buf) { struct imon_context *ictx = dev_get_drvdata(d); if (!ictx) return -ENODEV; mutex_lock(&ictx->lock); if (ictx->rf_isassociating) strscpy(buf, "associating\n", PAGE_SIZE); else strscpy(buf, "closed\n", PAGE_SIZE); dev_info(d, "Visit https://www.lirc.org/html/imon-24g.html for instructions on how to associate your iMON 2.4G DT/LT remote\n"); mutex_unlock(&ictx->lock); return strlen(buf); } static ssize_t associate_remote_store(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { struct imon_context *ictx; ictx = dev_get_drvdata(d); if (!ictx) return -ENODEV; mutex_lock(&ictx->lock); ictx->rf_isassociating = true; send_associate_24g(ictx); mutex_unlock(&ictx->lock); return count; } /* * sysfs functions to control internal imon clock */ static ssize_t imon_clock_show(struct device *d, struct device_attribute *attr, char *buf) { struct imon_context *ictx = dev_get_drvdata(d); size_t len; if (!ictx) return -ENODEV; mutex_lock(&ictx->lock); if (!ictx->display_supported) { len = sysfs_emit(buf, "Not supported."); } else { len = sysfs_emit(buf, "To set the clock on your iMON display:\n" "# date \"+%%y %%m %%d %%w %%H %%M %%S\" > imon_clock\n" "%s", ictx->display_isopen ? "\nNOTE: imon device must be closed\n" : ""); } mutex_unlock(&ictx->lock); return len; } static ssize_t imon_clock_store(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { struct imon_context *ictx = dev_get_drvdata(d); ssize_t retval; unsigned int year, month, day, dow, hour, minute, second; if (!ictx) return -ENODEV; mutex_lock(&ictx->lock); if (!ictx->display_supported) { retval = -ENODEV; goto exit; } else if (ictx->display_isopen) { retval = -EBUSY; goto exit; } if (sscanf(buf, "%u %u %u %u %u %u %u", &year, &month, &day, &dow, &hour, &minute, &second) != 7) { retval = -EINVAL; goto exit; } if ((month < 1 || month > 12) || (day < 1 || day > 31) || (dow > 6) || (hour > 23) || (minute > 59) || (second > 59)) { retval = -EINVAL; goto exit; } retval = send_set_imon_clock(ictx, year, month, day, dow, hour, minute, second); if (retval) goto exit; retval = count; exit: mutex_unlock(&ictx->lock); return retval; } static DEVICE_ATTR_RW(imon_clock); static DEVICE_ATTR_RW(associate_remote); static struct attribute *imon_display_sysfs_entries[] = { &dev_attr_imon_clock.attr, NULL }; static const struct attribute_group imon_display_attr_group = { .attrs = imon_display_sysfs_entries }; static struct attribute *imon_rf_sysfs_entries[] = { &dev_attr_associate_remote.attr, NULL }; static const struct attribute_group imon_rf_attr_group = { .attrs = imon_rf_sysfs_entries }; /* * Writes data to the VFD. The iMON VFD is 2x16 characters * and requires data in 5 consecutive USB interrupt packets, * each packet but the last carrying 7 bytes. * * I don't know if the VFD board supports features such as * scrolling, clearing rows, blanking, etc. so at * the caller must provide a full screen of data. If fewer * than 32 bytes are provided spaces will be appended to * generate a full screen. 
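 *
 * (Editor's arithmetic: the 32 screen bytes are space-padded, then three
 * 0xFF bytes bring the buffer to 35 = 5 x 7, so the loop below sends
 * five interrupt packets of 7 payload bytes whose 8th byte carries an
 * even sequence number 0, 2, 4, 6, 8, followed by a sixth packet with
 * sequence 10 to commit the screen.)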
*/ static ssize_t vfd_write(struct file *file, const char __user *buf, size_t n_bytes, loff_t *pos) { int i; int offset; int seq; int retval = 0; struct imon_context *ictx = file->private_data; static const unsigned char vfd_packet6[] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF }; if (ictx->disconnected) return -ENODEV; if (mutex_lock_interruptible(&ictx->lock)) return -ERESTARTSYS; if (!ictx->dev_present_intf0) { pr_err_ratelimited("no iMON device present\n"); retval = -ENODEV; goto exit; } if (n_bytes <= 0 || n_bytes > 32) { pr_err_ratelimited("invalid payload size\n"); retval = -EINVAL; goto exit; } if (copy_from_user(ictx->tx.data_buf, buf, n_bytes)) { retval = -EFAULT; goto exit; } /* Pad with spaces */ for (i = n_bytes; i < 32; ++i) ictx->tx.data_buf[i] = ' '; for (i = 32; i < 35; ++i) ictx->tx.data_buf[i] = 0xFF; offset = 0; seq = 0; do { memcpy(ictx->usb_tx_buf, ictx->tx.data_buf + offset, 7); ictx->usb_tx_buf[7] = (unsigned char) seq; retval = send_packet(ictx); if (retval) { pr_err_ratelimited("send packet #%d failed\n", seq / 2); goto exit; } else { seq += 2; offset += 7; } } while (offset < 35); /* Send packet #6 */ memcpy(ictx->usb_tx_buf, &vfd_packet6, sizeof(vfd_packet6)); ictx->usb_tx_buf[7] = (unsigned char) seq; retval = send_packet(ictx); if (retval) pr_err_ratelimited("send packet #%d failed\n", seq / 2); exit: mutex_unlock(&ictx->lock); return (!retval) ? n_bytes : retval; } /* * Writes data to the LCD. The iMON OEM LCD screen expects 8-byte * packets. We accept data as 16 hexadecimal digits, followed by a * newline (to make it easy to drive the device from a command-line * -- even though the actual binary data is a bit complicated). * * The device itself is not a "traditional" text-mode display. It's * actually a 16x96 pixel bitmap display. That means if you want to * display text, you've got to have your own "font" and translate the * text into bitmaps for display. This is really flexible (you can * display whatever diacritics you need, and so on), but it's also * a lot more complicated than most LCDs... */ static ssize_t lcd_write(struct file *file, const char __user *buf, size_t n_bytes, loff_t *pos) { int retval = 0; struct imon_context *ictx = file->private_data; if (ictx->disconnected) return -ENODEV; mutex_lock(&ictx->lock); if (!ictx->display_supported) { pr_err_ratelimited("no iMON display present\n"); retval = -ENODEV; goto exit; } if (n_bytes != 8) { pr_err_ratelimited("invalid payload size: %d (expected 8)\n", (int)n_bytes); retval = -EINVAL; goto exit; } if (copy_from_user(ictx->usb_tx_buf, buf, 8)) { retval = -EFAULT; goto exit; } retval = send_packet(ictx); if (retval) { pr_err_ratelimited("send packet failed!\n"); goto exit; } else { dev_dbg(ictx->dev, "%s: write %d bytes to LCD\n", __func__, (int) n_bytes); } exit: mutex_unlock(&ictx->lock); return (!retval) ? 
n_bytes : retval; } /* * Callback function for USB core API: transmit data */ static void usb_tx_callback(struct urb *urb) { struct imon_context *ictx; if (!urb) return; ictx = (struct imon_context *)urb->context; if (!ictx) return; ictx->tx.status = urb->status; /* notify waiters that write has finished */ ictx->tx.busy = false; smp_rmb(); /* ensure later readers know we're not busy */ complete(&ictx->tx.finished); } /* * report touchscreen input */ static void imon_touch_display_timeout(struct timer_list *t) { struct imon_context *ictx = from_timer(ictx, t, ttimer); if (ictx->display_type != IMON_DISPLAY_TYPE_VGA) return; input_report_abs(ictx->touch, ABS_X, ictx->touch_x); input_report_abs(ictx->touch, ABS_Y, ictx->touch_y); input_report_key(ictx->touch, BTN_TOUCH, 0x00); input_sync(ictx->touch); } /* * iMON IR receivers support two different signal sets -- those used by * the iMON remotes, and those used by the Windows MCE remotes (which is * really just RC-6), but only one or the other at a time, as the signals * are decoded onboard the receiver. * * This function gets called two different ways, one way is from * rc_register_device, for initial protocol selection/setup, and the other is * via a userspace-initiated protocol change request, either by direct sysfs * prodding or by something like ir-keytable. In the rc_register_device case, * the imon context lock is already held, but when initiated from userspace, * it is not, so we must acquire it prior to calling send_packet, which * requires that the lock is held. */ static int imon_ir_change_protocol(struct rc_dev *rc, u64 *rc_proto) { int retval; struct imon_context *ictx = rc->priv; struct device *dev = ictx->dev; bool unlock = false; unsigned char ir_proto_packet[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x86 }; if (*rc_proto && !(*rc_proto & rc->allowed_protocols)) dev_warn(dev, "Looks like you're trying to use an IR protocol this device does not support\n"); if (*rc_proto & RC_PROTO_BIT_RC6_MCE) { dev_dbg(dev, "Configuring IR receiver for MCE protocol\n"); ir_proto_packet[0] = 0x01; *rc_proto = RC_PROTO_BIT_RC6_MCE; } else if (*rc_proto & RC_PROTO_BIT_IMON) { dev_dbg(dev, "Configuring IR receiver for iMON protocol\n"); if (!pad_stabilize) dev_dbg(dev, "PAD stabilize functionality disabled\n"); /* ir_proto_packet[0] = 0x00; // already the default */ *rc_proto = RC_PROTO_BIT_IMON; } else { dev_warn(dev, "Unsupported IR protocol specified, overriding to iMON IR protocol\n"); if (!pad_stabilize) dev_dbg(dev, "PAD stabilize functionality disabled\n"); /* ir_proto_packet[0] = 0x00; // already the default */ *rc_proto = RC_PROTO_BIT_IMON; } memcpy(ictx->usb_tx_buf, &ir_proto_packet, sizeof(ir_proto_packet)); unlock = mutex_trylock(&ictx->lock); retval = send_packet(ictx); if (retval) goto out; ictx->rc_proto = *rc_proto; ictx->pad_mouse = false; out: if (unlock) mutex_unlock(&ictx->lock); return retval; } /* * The directional pad behaves a bit differently, depending on whether this is * one of the older ffdc devices or a newer device. Newer devices appear to * have a higher resolution matrix for more precise mouse movement, but it * makes things overly sensitive in keyboard mode, so we do some interesting * contortions to make it less touchy. Older devices run through the same * routine with shorter timeout and a smaller threshold. 
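 *
 * (Editor's summary of stabilize() below: pad deltas are accumulated
 * into a static (x, y); once |x| or |y| exceeds the threshold it emits
 * a direction code -- 0x7F/0x80 on the y axis (down/up), 0x7F00/0x8000
 * on the x axis (right/left) -- and a second identical hit inside
 * 'timeout' msec is swallowed to damp jitter near the diagonals.)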
*/ static int stabilize(int a, int b, u16 timeout, u16 threshold) { ktime_t ct; static ktime_t prev_time; static ktime_t hit_time; static int x, y, prev_result, hits; int result = 0; long msec, msec_hit; ct = ktime_get(); msec = ktime_ms_delta(ct, prev_time); msec_hit = ktime_ms_delta(ct, hit_time); if (msec > 100) { x = 0; y = 0; hits = 0; } x += a; y += b; prev_time = ct; if (abs(x) > threshold || abs(y) > threshold) { if (abs(y) > abs(x)) result = (y > 0) ? 0x7F : 0x80; else result = (x > 0) ? 0x7F00 : 0x8000; x = 0; y = 0; if (result == prev_result) { hits++; if (hits > 3) { switch (result) { case 0x7F: y = 17 * threshold / 30; break; case 0x80: y -= 17 * threshold / 30; break; case 0x7F00: x = 17 * threshold / 30; break; case 0x8000: x -= 17 * threshold / 30; break; } } if (hits == 2 && msec_hit < timeout) { result = 0; hits = 1; } } else { prev_result = result; hits = 1; hit_time = ct; } } return result; } static u32 imon_remote_key_lookup(struct imon_context *ictx, u32 scancode) { u32 keycode; u32 release; bool is_release_code = false; /* Look for the initial press of a button */ keycode = rc_g_keycode_from_table(ictx->rdev, scancode); ictx->rc_toggle = 0x0; ictx->rc_scancode = scancode; /* Look for the release of a button */ if (keycode == KEY_RESERVED) { release = scancode & ~0x4000; keycode = rc_g_keycode_from_table(ictx->rdev, release); if (keycode != KEY_RESERVED) is_release_code = true; } ictx->release_code = is_release_code; return keycode; } static u32 imon_mce_key_lookup(struct imon_context *ictx, u32 scancode) { u32 keycode; #define MCE_KEY_MASK 0x7000 #define MCE_TOGGLE_BIT 0x8000 /* * On some receivers, mce keys decode to 0x8000f04xx and 0x8000f84xx * (the toggle bit flipping between alternating key presses), while * on other receivers, we see 0x8000f74xx and 0x8000ff4xx. To keep * the table trim, we always or in the bits to look up 0x8000ff4xx, * but we can't or them into all codes, as some keys are decoded in * a different way w/o the same use of the toggle bit... 
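 *
 * (Editor's worked example of the normalization below: a hypothetical
 * press decoding to 0x800f0415 is OR'ed with MCE_KEY_MASK (0x7000) and
 * MCE_TOGGLE_BIT (0x8000), giving 0x800ff415, so both toggle states of
 * both decode variants hit the same keytable entry.)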
*/ if (scancode & 0x80000000) scancode = scancode | MCE_KEY_MASK | MCE_TOGGLE_BIT; ictx->rc_scancode = scancode; keycode = rc_g_keycode_from_table(ictx->rdev, scancode); /* not used in mce mode, but make sure we know its false */ ictx->release_code = false; return keycode; } static u32 imon_panel_key_lookup(struct imon_context *ictx, u64 code) { const struct imon_panel_key_table *key_table; u32 keycode = KEY_RESERVED; int i; key_table = ictx->dev_descr->key_table; for (i = 0; key_table[i].hw_code != 0; i++) { if (key_table[i].hw_code == (code | 0xffee)) { keycode = key_table[i].keycode; break; } } ictx->release_code = false; return keycode; } static bool imon_mouse_event(struct imon_context *ictx, unsigned char *buf, int len) { signed char rel_x = 0x00, rel_y = 0x00; u8 right_shift = 1; bool mouse_input = true; int dir = 0; unsigned long flags; spin_lock_irqsave(&ictx->kc_lock, flags); /* newer iMON device PAD or mouse button */ if (ictx->product != 0xffdc && (buf[0] & 0x01) && len == 5) { rel_x = buf[2]; rel_y = buf[3]; right_shift = 1; /* 0xffdc iMON PAD or mouse button input */ } else if (ictx->product == 0xffdc && (buf[0] & 0x40) && !((buf[1] & 0x01) || ((buf[1] >> 2) & 0x01))) { rel_x = (buf[1] & 0x08) | (buf[1] & 0x10) >> 2 | (buf[1] & 0x20) >> 4 | (buf[1] & 0x40) >> 6; if (buf[0] & 0x02) rel_x |= ~0x0f; rel_x = rel_x + rel_x / 2; rel_y = (buf[2] & 0x08) | (buf[2] & 0x10) >> 2 | (buf[2] & 0x20) >> 4 | (buf[2] & 0x40) >> 6; if (buf[0] & 0x01) rel_y |= ~0x0f; rel_y = rel_y + rel_y / 2; right_shift = 2; /* some ffdc devices decode mouse buttons differently... */ } else if (ictx->product == 0xffdc && (buf[0] == 0x68)) { right_shift = 2; /* ch+/- buttons, which we use for an emulated scroll wheel */ } else if (ictx->kc == KEY_CHANNELUP && (buf[2] & 0x40) != 0x40) { dir = 1; } else if (ictx->kc == KEY_CHANNELDOWN && (buf[2] & 0x40) != 0x40) { dir = -1; } else mouse_input = false; spin_unlock_irqrestore(&ictx->kc_lock, flags); if (mouse_input) { dev_dbg(ictx->dev, "sending mouse data via input subsystem\n"); if (dir) { input_report_rel(ictx->idev, REL_WHEEL, dir); } else if (rel_x || rel_y) { input_report_rel(ictx->idev, REL_X, rel_x); input_report_rel(ictx->idev, REL_Y, rel_y); } else { input_report_key(ictx->idev, BTN_LEFT, buf[1] & 0x1); input_report_key(ictx->idev, BTN_RIGHT, buf[1] >> right_shift & 0x1); } input_sync(ictx->idev); spin_lock_irqsave(&ictx->kc_lock, flags); ictx->last_keycode = ictx->kc; spin_unlock_irqrestore(&ictx->kc_lock, flags); } return mouse_input; } static void imon_touch_event(struct imon_context *ictx, unsigned char *buf) { mod_timer(&ictx->ttimer, jiffies + TOUCH_TIMEOUT); ictx->touch_x = (buf[0] << 4) | (buf[1] >> 4); ictx->touch_y = 0xfff - ((buf[2] << 4) | (buf[1] & 0xf)); input_report_abs(ictx->touch, ABS_X, ictx->touch_x); input_report_abs(ictx->touch, ABS_Y, ictx->touch_y); input_report_key(ictx->touch, BTN_TOUCH, 0x01); input_sync(ictx->touch); } static void imon_pad_to_keys(struct imon_context *ictx, unsigned char *buf) { int dir = 0; signed char rel_x = 0x00, rel_y = 0x00; u16 timeout, threshold; u32 scancode = KEY_RESERVED; unsigned long flags; /* * The imon directional pad functions more like a touchpad. Bytes 3 & 4 * contain a position coordinate (x,y), with each component ranging * from -14 to 14. We want to down-sample this to only 4 discrete values * for up/down/left/right arrow keys. Also, when you get too close to * diagonals, it has a tendency to jump back and forth, so lets try to * ignore when they get too close. 
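 *
 * (Editor's note on the numbers used below: newer devices feed raw
 * (x, y) in roughly -14..14 through stabilize() with a 500 msec timeout
 * and a default threshold of 28, while the older 0xffdc units use
 * 10 msec and 15; either way the output collapses to one of the four
 * synthetic up/down/left/right scancodes.)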
*/ if (ictx->product != 0xffdc) { /* first, pad to 8 bytes so it conforms with everything else */ buf[5] = buf[6] = buf[7] = 0; timeout = 500; /* in msecs */ /* (2*threshold) x (2*threshold) square */ threshold = pad_thresh ? pad_thresh : 28; rel_x = buf[2]; rel_y = buf[3]; if (ictx->rc_proto == RC_PROTO_BIT_IMON && pad_stabilize) { if ((buf[1] == 0) && ((rel_x != 0) || (rel_y != 0))) { dir = stabilize((int)rel_x, (int)rel_y, timeout, threshold); if (!dir) { spin_lock_irqsave(&ictx->kc_lock, flags); ictx->kc = KEY_UNKNOWN; spin_unlock_irqrestore(&ictx->kc_lock, flags); return; } buf[2] = dir & 0xFF; buf[3] = (dir >> 8) & 0xFF; scancode = be32_to_cpu(*((__be32 *)buf)); } } else { /* * Hack alert: instead of using keycodes, we have * to use hard-coded scancodes here... */ if (abs(rel_y) > abs(rel_x)) { buf[2] = (rel_y > 0) ? 0x7F : 0x80; buf[3] = 0; if (rel_y > 0) scancode = 0x01007f00; /* KEY_DOWN */ else scancode = 0x01008000; /* KEY_UP */ } else { buf[2] = 0; buf[3] = (rel_x > 0) ? 0x7F : 0x80; if (rel_x > 0) scancode = 0x0100007f; /* KEY_RIGHT */ else scancode = 0x01000080; /* KEY_LEFT */ } } /* * Handle on-board decoded pad events for e.g. older VFD/iMON-Pad * device (15c2:ffdc). The remote generates various codes from * 0x68nnnnB7 to 0x6AnnnnB7, the left mouse button generates * 0x688301b7 and the right one 0x688481b7. All other keys generate * 0x2nnnnnnn. Position coordinate is encoded in buf[1] and buf[2] with * reversed endianness. Extract direction from buffer, rotate endianness, * adjust sign and feed the values into stabilize(). The resulting codes * will be 0x01008000, 0x01007F00, which match the newer devices. */ } else { timeout = 10; /* in msecs */ /* (2*threshold) x (2*threshold) square */ threshold = pad_thresh ? pad_thresh : 15; /* buf[1] is x */ rel_x = (buf[1] & 0x08) | (buf[1] & 0x10) >> 2 | (buf[1] & 0x20) >> 4 | (buf[1] & 0x40) >> 6; if (buf[0] & 0x02) rel_x |= ~0x10+1; /* buf[2] is y */ rel_y = (buf[2] & 0x08) | (buf[2] & 0x10) >> 2 | (buf[2] & 0x20) >> 4 | (buf[2] & 0x40) >> 6; if (buf[0] & 0x01) rel_y |= ~0x10+1; buf[0] = 0x01; buf[1] = buf[4] = buf[5] = buf[6] = buf[7] = 0; if (ictx->rc_proto == RC_PROTO_BIT_IMON && pad_stabilize) { dir = stabilize((int)rel_x, (int)rel_y, timeout, threshold); if (!dir) { spin_lock_irqsave(&ictx->kc_lock, flags); ictx->kc = KEY_UNKNOWN; spin_unlock_irqrestore(&ictx->kc_lock, flags); return; } buf[2] = dir & 0xFF; buf[3] = (dir >> 8) & 0xFF; scancode = be32_to_cpu(*((__be32 *)buf)); } else { /* * Hack alert: instead of using keycodes, we have * to use hard-coded scancodes here... */ if (abs(rel_y) > abs(rel_x)) { buf[2] = (rel_y > 0) ? 0x7F : 0x80; buf[3] = 0; if (rel_y > 0) scancode = 0x01007f00; /* KEY_DOWN */ else scancode = 0x01008000; /* KEY_UP */ } else { buf[2] = 0; buf[3] = (rel_x > 0) ? 0x7F : 0x80; if (rel_x > 0) scancode = 0x0100007f; /* KEY_RIGHT */ else scancode = 0x01000080; /* KEY_LEFT */ } } } if (scancode) { spin_lock_irqsave(&ictx->kc_lock, flags); ictx->kc = imon_remote_key_lookup(ictx, scancode); spin_unlock_irqrestore(&ictx->kc_lock, flags); } } /* * figure out if these is a press or a release. We don't actually * care about repeats, as those will be auto-generated within the IR * subsystem for repeating scancodes. 
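 *
 * (Editor's summary of the return convention below: -EINVAL for junk or
 * irrelevant data, 0 for a release, 1 for a press; MCE remotes never
 * send keyup events, so MCE input is always reported as a press and the
 * toggle bit is stashed for rc_keydown().)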
/*
 * figure out if this is a press or a release. We don't actually
 * care about repeats, as those will be auto-generated within the IR
 * subsystem for repeating scancodes.
 */
static int imon_parse_press_type(struct imon_context *ictx,
				 unsigned char *buf, u8 ktype)
{
	int press_type = 0;
	unsigned long flags;

	spin_lock_irqsave(&ictx->kc_lock, flags);

	/* key release of 0x02XXXXXX key */
	if (ictx->kc == KEY_RESERVED && buf[0] == 0x02 && buf[3] == 0x00)
		ictx->kc = ictx->last_keycode;

	/* mouse button release on (some) 0xffdc devices */
	else if (ictx->kc == KEY_RESERVED && buf[0] == 0x68 && buf[1] == 0x82 &&
		 buf[2] == 0x81 && buf[3] == 0xb7)
		ictx->kc = ictx->last_keycode;

	/* mouse button release on (some other) 0xffdc devices */
	else if (ictx->kc == KEY_RESERVED && buf[0] == 0x01 && buf[1] == 0x00 &&
		 buf[2] == 0x81 && buf[3] == 0xb7)
		ictx->kc = ictx->last_keycode;

	/* mce-specific button handling, no keyup events */
	else if (ktype == IMON_KEY_MCE) {
		ictx->rc_toggle = buf[2];
		press_type = 1;

	/* incoherent or irrelevant data */
	} else if (ictx->kc == KEY_RESERVED)
		press_type = -EINVAL;

	/* key release of 0xXXXXXXb7 key */
	else if (ictx->release_code)
		press_type = 0;

	/* this is a button press */
	else
		press_type = 1;

	spin_unlock_irqrestore(&ictx->kc_lock, flags);

	return press_type;
}
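/*
 * Note: the return convention here is 1 for a press, 0 for a release, and
 * -EINVAL when the bytes aren't key data at all; the caller
 * (imon_incoming_packet() below) treats a negative return as non-input
 * data and jumps to its not_input_data handling.
 */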
"mouse" : "keyboard"); spin_unlock_irqrestore(&ictx->kc_lock, flags); return; } else { ictx->pad_mouse = false; dev_dbg(dev, "mouse mode disabled, passing key value\n"); } } ictx->kc = kc; spin_unlock_irqrestore(&ictx->kc_lock, flags); /* send touchscreen events through input subsystem if touchpad data */ if (ictx->touch && len == 8 && buf[7] == 0x86) { imon_touch_event(ictx, buf); return; /* look for mouse events with pad in mouse mode */ } else if (ictx->pad_mouse) { if (imon_mouse_event(ictx, buf, len)) return; } /* Now for some special handling to convert pad input to arrow keys */ if (((len == 5) && (buf[0] == 0x01) && (buf[4] == 0x00)) || ((len == 8) && (buf[0] & 0x40) && !(buf[1] & 0x1 || buf[1] >> 2 & 0x1))) { len = 8; imon_pad_to_keys(ictx, buf); } if (debug) { printk(KERN_INFO "intf%d decoded packet: %*ph\n", intf, len, buf); } press_type = imon_parse_press_type(ictx, buf, ktype); if (press_type < 0) goto not_input_data; if (ktype != IMON_KEY_PANEL) { if (press_type == 0) rc_keyup(ictx->rdev); else { enum rc_proto proto; if (ictx->rc_proto == RC_PROTO_BIT_RC6_MCE) proto = RC_PROTO_RC6_MCE; else if (ictx->rc_proto == RC_PROTO_BIT_IMON) proto = RC_PROTO_IMON; else return; rc_keydown(ictx->rdev, proto, ictx->rc_scancode, ictx->rc_toggle); spin_lock_irqsave(&ictx->kc_lock, flags); ictx->last_keycode = ictx->kc; spin_unlock_irqrestore(&ictx->kc_lock, flags); } return; } /* Only panel type events left to process now */ spin_lock_irqsave(&ictx->kc_lock, flags); t = ktime_get(); /* KEY repeats from knob and panel that need to be suppressed */ if (ictx->kc == KEY_MUTE || ictx->dev_descr->flags & IMON_SUPPRESS_REPEATED_KEYS) { if (ictx->kc == ictx->last_keycode && ktime_ms_delta(t, prev_time) < ictx->idev->rep[REP_DELAY]) { spin_unlock_irqrestore(&ictx->kc_lock, flags); return; } } prev_time = t; kc = ictx->kc; spin_unlock_irqrestore(&ictx->kc_lock, flags); input_report_key(ictx->idev, kc, press_type); input_sync(ictx->idev); /* panel keys don't generate a release */ input_report_key(ictx->idev, kc, 0); input_sync(ictx->idev); spin_lock_irqsave(&ictx->kc_lock, flags); ictx->last_keycode = kc; spin_unlock_irqrestore(&ictx->kc_lock, flags); return; not_input_data: if (len != 8) { dev_warn(dev, "imon %s: invalid incoming packet size (len = %d, intf%d)\n", __func__, len, intf); return; } /* iMON 2.4G associate frame */ if (buf[0] == 0x00 && buf[2] == 0xFF && /* REFID */ buf[3] == 0xFF && buf[4] == 0xFF && buf[5] == 0xFF && /* iMON 2.4G */ ((buf[6] == 0x4E && buf[7] == 0xDF) || /* LT */ (buf[6] == 0x5E && buf[7] == 0xDF))) { /* DT */ dev_warn(dev, "%s: remote associated refid=%02X\n", __func__, buf[1]); ictx->rf_isassociating = false; } } /* * Callback function for USB core API: receive data */ static void usb_rx_callback_intf0(struct urb *urb) { struct imon_context *ictx; int intfnum = 0; if (!urb) return; ictx = (struct imon_context *)urb->context; if (!ictx) return; /* * if we get a callback before we're done configuring the hardware, we * can't yet process the data, as there's nowhere to send it, but we * still need to submit a new rx URB to avoid wedging the hardware */ if (!ictx->dev_present_intf0) goto out; switch (urb->status) { case -ENOENT: /* usbcore unlink successful! 
/*
 * Callback function for USB core API: receive data
 */
static void usb_rx_callback_intf0(struct urb *urb)
{
	struct imon_context *ictx;
	int intfnum = 0;

	if (!urb)
		return;

	ictx = (struct imon_context *)urb->context;
	if (!ictx)
		return;

	/*
	 * if we get a callback before we're done configuring the hardware, we
	 * can't yet process the data, as there's nowhere to send it, but we
	 * still need to submit a new rx URB to avoid wedging the hardware
	 */
	if (!ictx->dev_present_intf0)
		goto out;

	switch (urb->status) {
	case -ENOENT:		/* usbcore unlink successful! */
		return;

	case -ESHUTDOWN:	/* transport endpoint was shut down */
		break;

	case 0:
		imon_incoming_packet(ictx, urb, intfnum);
		break;

	default:
		dev_warn(ictx->dev, "imon %s: status(%d): ignored\n",
			 __func__, urb->status);
		break;
	}

out:
	usb_submit_urb(ictx->rx_urb_intf0, GFP_ATOMIC);
}

static void usb_rx_callback_intf1(struct urb *urb)
{
	struct imon_context *ictx;
	int intfnum = 1;

	if (!urb)
		return;

	ictx = (struct imon_context *)urb->context;
	if (!ictx)
		return;

	/*
	 * if we get a callback before we're done configuring the hardware, we
	 * can't yet process the data, as there's nowhere to send it, but we
	 * still need to submit a new rx URB to avoid wedging the hardware
	 */
	if (!ictx->dev_present_intf1)
		goto out;

	switch (urb->status) {
	case -ENOENT:		/* usbcore unlink successful! */
		return;

	case -ESHUTDOWN:	/* transport endpoint was shut down */
		break;

	case 0:
		imon_incoming_packet(ictx, urb, intfnum);
		break;

	default:
		dev_warn(ictx->dev, "imon %s: status(%d): ignored\n",
			 __func__, urb->status);
		break;
	}

out:
	usb_submit_urb(ictx->rx_urb_intf1, GFP_ATOMIC);
}
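/*
 * Note: -ENOENT means usbcore unlinked the URB (e.g. via usb_kill_urb()),
 * so the callback returns without resubmitting; every other status,
 * including -ESHUTDOWN, falls through to the usb_submit_urb() at out: so
 * the interrupt pipe keeps flowing.
 */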
/*
 * The 0x15c2:0xffdc device ID was used for umpteen different imon
 * devices, and all of them constantly spew interrupts, even when there
 * is no actual data to report. However, byte 6 of this buffer looks like
 * it's unique across device variants, so we're trying to key off that to
 * figure out which display type (if any) and what IR protocol the device
 * actually supports. These devices have their IR protocol hard-coded into
 * their firmware, they can't be changed on the fly like the newer hardware.
 */
static void imon_get_ffdc_type(struct imon_context *ictx)
{
	u8 ffdc_cfg_byte = ictx->usb_rx_buf[6];
	u8 detected_display_type = IMON_DISPLAY_TYPE_NONE;
	u64 allowed_protos = RC_PROTO_BIT_IMON;

	switch (ffdc_cfg_byte) {
	/* iMON Knob, no display, iMON IR + vol knob */
	case 0x21:
		dev_info(ictx->dev, "0xffdc iMON Knob, iMON IR");
		ictx->display_supported = false;
		break;
	/* iMON 2.4G LT (usb stick), no display, iMON RF */
	case 0x4e:
		dev_info(ictx->dev, "0xffdc iMON 2.4G LT, iMON RF");
		ictx->display_supported = false;
		ictx->rf_device = true;
		break;
	/* iMON VFD, no IR (does have vol knob tho) */
	case 0x35:
		dev_info(ictx->dev, "0xffdc iMON VFD + knob, no IR");
		detected_display_type = IMON_DISPLAY_TYPE_VFD;
		break;
	/* iMON VFD, iMON IR */
	case 0x24:
	case 0x30:
	case 0x85:
		dev_info(ictx->dev, "0xffdc iMON VFD, iMON IR");
		detected_display_type = IMON_DISPLAY_TYPE_VFD;
		break;
	/* iMON VFD, MCE IR */
	case 0x46:
	case 0x9e:
		dev_info(ictx->dev, "0xffdc iMON VFD, MCE IR");
		detected_display_type = IMON_DISPLAY_TYPE_VFD;
		allowed_protos = RC_PROTO_BIT_RC6_MCE;
		break;
	/* iMON VFD, iMON or MCE IR */
	case 0x7e:
		dev_info(ictx->dev, "0xffdc iMON VFD, iMON or MCE IR");
		detected_display_type = IMON_DISPLAY_TYPE_VFD;
		allowed_protos |= RC_PROTO_BIT_RC6_MCE;
		break;
	/* iMON LCD, MCE IR */
	case 0x9f:
		dev_info(ictx->dev, "0xffdc iMON LCD, MCE IR");
		detected_display_type = IMON_DISPLAY_TYPE_LCD;
		allowed_protos = RC_PROTO_BIT_RC6_MCE;
		break;
	/* no display, iMON IR */
	case 0x26:
		dev_info(ictx->dev, "0xffdc iMON Inside, iMON IR");
		ictx->display_supported = false;
		break;
	/* Soundgraph iMON UltraBay */
	case 0x98:
		dev_info(ictx->dev, "0xffdc iMON UltraBay, LCD + IR");
		detected_display_type = IMON_DISPLAY_TYPE_LCD;
		allowed_protos = RC_PROTO_BIT_IMON | RC_PROTO_BIT_RC6_MCE;
		ictx->dev_descr = &ultrabay_table;
		break;
	default:
		dev_info(ictx->dev, "Unknown 0xffdc device, defaulting to VFD and iMON IR");
		detected_display_type = IMON_DISPLAY_TYPE_VFD;
		/*
		 * We don't know which one it is, allow user to set the
		 * RC6 one from userspace if IMON wasn't correct.
		 */
		allowed_protos |= RC_PROTO_BIT_RC6_MCE;
		break;
	}

	printk(KERN_CONT " (id 0x%02x)\n", ffdc_cfg_byte);

	ictx->display_type = detected_display_type;
	ictx->rc_proto = allowed_protos;
}

static void imon_set_display_type(struct imon_context *ictx)
{
	u8 configured_display_type = IMON_DISPLAY_TYPE_VFD;

	/*
	 * Try to auto-detect the type of display if the user hasn't set
	 * it by hand via the display_type modparam. Default is VFD.
	 */
	if (display_type == IMON_DISPLAY_TYPE_AUTO) {
		switch (ictx->product) {
		case 0xffdc:
			/* set in imon_get_ffdc_type() */
			configured_display_type = ictx->display_type;
			break;
		case 0x0034:
		case 0x0035:
			configured_display_type = IMON_DISPLAY_TYPE_VGA;
			break;
		case 0x0038:
		case 0x0039:
		case 0x0045:
			configured_display_type = IMON_DISPLAY_TYPE_LCD;
			break;
		case 0x003c:
		case 0x0041:
		case 0x0042:
		case 0x0043:
			configured_display_type = IMON_DISPLAY_TYPE_NONE;
			ictx->display_supported = false;
			break;
		case 0x0036:
		case 0x0044:
		default:
			configured_display_type = IMON_DISPLAY_TYPE_VFD;
			break;
		}
	} else {
		configured_display_type = display_type;
		if (display_type == IMON_DISPLAY_TYPE_NONE)
			ictx->display_supported = false;
		else
			ictx->display_supported = true;
		dev_info(ictx->dev, "%s: overriding display type to %d via modparam\n",
			 __func__, display_type);
	}

	ictx->display_type = configured_display_type;
}
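/*
 * Note: the dev_info() calls in imon_get_ffdc_type() deliberately omit
 * the trailing newline; the printk(KERN_CONT ...) at the end of the
 * function appends the raw config byte to the same log line, e.g.
 * "0xffdc iMON VFD, MCE IR (id 0x9e)".
 */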
static struct rc_dev *imon_init_rdev(struct imon_context *ictx)
{
	struct rc_dev *rdev;
	int ret;
	static const unsigned char fp_packet[] = {
		0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x88 };

	rdev = rc_allocate_device(RC_DRIVER_SCANCODE);
	if (!rdev) {
		dev_err(ictx->dev, "remote control dev allocation failed\n");
		goto out;
	}

	snprintf(ictx->name_rdev, sizeof(ictx->name_rdev),
		 "iMON Remote (%04x:%04x)", ictx->vendor, ictx->product);
	usb_make_path(ictx->usbdev_intf0, ictx->phys_rdev,
		      sizeof(ictx->phys_rdev));
	strlcat(ictx->phys_rdev, "/input0", sizeof(ictx->phys_rdev));

	rdev->device_name = ictx->name_rdev;
	rdev->input_phys = ictx->phys_rdev;
	usb_to_input_id(ictx->usbdev_intf0, &rdev->input_id);
	rdev->dev.parent = ictx->dev;

	rdev->priv = ictx;
	/* iMON PAD or MCE */
	rdev->allowed_protocols = RC_PROTO_BIT_IMON | RC_PROTO_BIT_RC6_MCE;
	rdev->change_protocol = imon_ir_change_protocol;
	rdev->driver_name = MOD_NAME;

	/* Enable front-panel buttons and/or knobs */
	memcpy(ictx->usb_tx_buf, &fp_packet, sizeof(fp_packet));
	ret = send_packet(ictx);
	/* Not fatal, but warn about it */
	if (ret)
		dev_info(ictx->dev, "panel buttons/knobs setup failed\n");

	if (ictx->product == 0xffdc) {
		imon_get_ffdc_type(ictx);
		rdev->allowed_protocols = ictx->rc_proto;
	}

	imon_set_display_type(ictx);

	if (ictx->rc_proto == RC_PROTO_BIT_RC6_MCE)
		rdev->map_name = RC_MAP_IMON_MCE;
	else
		rdev->map_name = RC_MAP_IMON_PAD;

	ret = rc_register_device(rdev);
	if (ret < 0) {
		dev_err(ictx->dev, "remote input dev register failed\n");
		goto out;
	}

	return rdev;

out:
	rc_free_device(rdev);
	return NULL;
}

static struct input_dev *imon_init_idev(struct imon_context *ictx)
{
	const struct imon_panel_key_table *key_table;
	struct input_dev *idev;
	int ret, i;

	key_table = ictx->dev_descr->key_table;

	idev = input_allocate_device();
	if (!idev)
		goto out;

	snprintf(ictx->name_idev, sizeof(ictx->name_idev),
		 "iMON Panel, Knob and Mouse(%04x:%04x)",
		 ictx->vendor, ictx->product);
	idev->name = ictx->name_idev;

	usb_make_path(ictx->usbdev_intf0, ictx->phys_idev,
		      sizeof(ictx->phys_idev));
	strlcat(ictx->phys_idev, "/input1", sizeof(ictx->phys_idev));
	idev->phys = ictx->phys_idev;

	idev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP) | BIT_MASK(EV_REL);

	idev->keybit[BIT_WORD(BTN_MOUSE)] =
		BIT_MASK(BTN_LEFT) | BIT_MASK(BTN_RIGHT);
	idev->relbit[0] = BIT_MASK(REL_X) | BIT_MASK(REL_Y) |
		BIT_MASK(REL_WHEEL);

	/* panel and/or knob code support */
	for (i = 0; key_table[i].hw_code != 0; i++) {
		u32 kc = key_table[i].keycode;
		__set_bit(kc, idev->keybit);
	}

	usb_to_input_id(ictx->usbdev_intf0, &idev->id);
	idev->dev.parent = ictx->dev;
	input_set_drvdata(idev, ictx);

	ret = input_register_device(idev);
	if (ret < 0) {
		dev_err(ictx->dev, "input dev register failed\n");
		goto out;
	}

	return idev;

out:
	input_free_device(idev);
	return NULL;
}
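/*
 * Note: for 0xffdc hardware the protocol/keymap choice cannot be made
 * from the USB IDs alone, so imon_init_rdev() defers to
 * imon_get_ffdc_type(), which keys off byte 6 of the most recent rx
 * buffer, before picking RC_MAP_IMON_MCE vs RC_MAP_IMON_PAD.
 */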
static struct input_dev *imon_init_touch(struct imon_context *ictx)
{
	struct input_dev *touch;
	int ret;

	touch = input_allocate_device();
	if (!touch)
		goto touch_alloc_failed;

	snprintf(ictx->name_touch, sizeof(ictx->name_touch),
		 "iMON USB Touchscreen (%04x:%04x)",
		 ictx->vendor, ictx->product);
	touch->name = ictx->name_touch;

	usb_make_path(ictx->usbdev_intf1, ictx->phys_touch,
		      sizeof(ictx->phys_touch));
	strlcat(ictx->phys_touch, "/input2", sizeof(ictx->phys_touch));
	touch->phys = ictx->phys_touch;

	touch->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
	touch->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
	input_set_abs_params(touch, ABS_X, 0x00, 0xfff, 0, 0);
	input_set_abs_params(touch, ABS_Y, 0x00, 0xfff, 0, 0);

	input_set_drvdata(touch, ictx);

	usb_to_input_id(ictx->usbdev_intf1, &touch->id);
	touch->dev.parent = ictx->dev;
	ret = input_register_device(touch);
	if (ret < 0) {
		dev_info(ictx->dev, "touchscreen input dev register failed\n");
		goto touch_register_failed;
	}

	return touch;

touch_register_failed:
	input_free_device(touch);

touch_alloc_failed:
	return NULL;
}

static bool imon_find_endpoints(struct imon_context *ictx,
				struct usb_host_interface *iface_desc)
{
	struct usb_endpoint_descriptor *ep;
	struct usb_endpoint_descriptor *rx_endpoint = NULL;
	struct usb_endpoint_descriptor *tx_endpoint = NULL;
	int ifnum = iface_desc->desc.bInterfaceNumber;
	int num_endpts = iface_desc->desc.bNumEndpoints;
	int i, ep_dir, ep_type;
	bool ir_ep_found = false;
	bool display_ep_found = false;
	bool tx_control = false;

	/*
	 * Scan the endpoint list and set:
	 *	first input endpoint = IR endpoint
	 *	first output endpoint = display endpoint
	 */
	for (i = 0; i < num_endpts && !(ir_ep_found && display_ep_found); ++i) {
		ep = &iface_desc->endpoint[i].desc;
		ep_dir = ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK;
		ep_type = usb_endpoint_type(ep);

		if (!ir_ep_found && ep_dir == USB_DIR_IN &&
		    ep_type == USB_ENDPOINT_XFER_INT) {
			rx_endpoint = ep;
			ir_ep_found = true;
			dev_dbg(ictx->dev, "%s: found IR endpoint\n", __func__);
		} else if (!display_ep_found && ep_dir == USB_DIR_OUT &&
			   ep_type == USB_ENDPOINT_XFER_INT) {
			tx_endpoint = ep;
			display_ep_found = true;
			dev_dbg(ictx->dev, "%s: found display endpoint\n",
				__func__);
		}
	}

	if (ifnum == 0) {
		ictx->rx_endpoint_intf0 = rx_endpoint;
		/*
		 * tx is used to send characters to lcd/vfd, associate RF
		 * remotes, set IR protocol, and maybe more...
		 */
		ictx->tx_endpoint = tx_endpoint;
	} else {
		ictx->rx_endpoint_intf1 = rx_endpoint;
	}

	/*
	 * If we didn't find a display endpoint, this is probably one of the
	 * newer iMON devices that use control urb instead of interrupt
	 */
	if (!display_ep_found) {
		tx_control = true;
		display_ep_found = true;
		dev_dbg(ictx->dev, "%s: device uses control endpoint, not interface OUT endpoint\n",
			__func__);
	}

	/*
	 * Some iMON receivers have no display. Unfortunately, it seems
	 * that SoundGraph recycles device IDs between devices both with
	 * and without... :\
	 */
	if (ictx->display_type == IMON_DISPLAY_TYPE_NONE) {
		display_ep_found = false;
		dev_dbg(ictx->dev, "%s: device has no display\n", __func__);
	}

	/*
	 * iMON Touch devices have a VGA touchscreen, but no "display", as
	 * that refers to e.g. /dev/lcd0 (a character device LCD or VFD).
	 */
	if (ictx->display_type == IMON_DISPLAY_TYPE_VGA) {
		display_ep_found = false;
		dev_dbg(ictx->dev, "%s: iMON Touch device found\n", __func__);
	}

	/* Input endpoint is mandatory */
	if (!ir_ep_found)
		pr_err("no valid input (IR) endpoint found\n");

	ictx->tx_control = tx_control;

	if (display_ep_found)
		ictx->display_supported = true;

	return ir_ep_found;
}
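/*
 * Note: the scan policy above is strictly "first match wins": the first
 * interrupt-IN endpoint becomes the IR endpoint and the first
 * interrupt-OUT endpoint becomes the display endpoint. The display flag
 * is then corrected after the fact for devices that write to the display
 * over a control URB, have no display at all, or expose a VGA
 * touchscreen instead of a character LCD/VFD.
 */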
static struct imon_context *imon_init_intf0(struct usb_interface *intf,
					    const struct usb_device_id *id)
{
	struct imon_context *ictx;
	struct urb *rx_urb;
	struct urb *tx_urb;
	struct device *dev = &intf->dev;
	struct usb_host_interface *iface_desc;
	int ret = -ENOMEM;

	ictx = kzalloc(sizeof(*ictx), GFP_KERNEL);
	if (!ictx)
		goto exit;

	rx_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!rx_urb)
		goto rx_urb_alloc_failed;
	tx_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!tx_urb)
		goto tx_urb_alloc_failed;

	mutex_init(&ictx->lock);
	spin_lock_init(&ictx->kc_lock);

	mutex_lock(&ictx->lock);

	ictx->dev = dev;
	ictx->usbdev_intf0 = usb_get_dev(interface_to_usbdev(intf));
	ictx->rx_urb_intf0 = rx_urb;
	ictx->tx_urb = tx_urb;
	ictx->rf_device = false;

	init_completion(&ictx->tx.finished);

	ictx->vendor = le16_to_cpu(ictx->usbdev_intf0->descriptor.idVendor);
	ictx->product = le16_to_cpu(ictx->usbdev_intf0->descriptor.idProduct);

	/* save driver info for later accessing the panel/knob key table */
	ictx->dev_descr = (struct imon_usb_dev_descr *)id->driver_info;

	/* default send_packet delay is 5ms but some devices need more */
	ictx->send_packet_delay = ictx->dev_descr->flags &
				  IMON_NEED_20MS_PKT_DELAY ? 20 : 5;

	ret = -ENODEV;
	iface_desc = intf->cur_altsetting;
	if (!imon_find_endpoints(ictx, iface_desc))
		goto find_endpoint_failed;

	usb_fill_int_urb(ictx->rx_urb_intf0, ictx->usbdev_intf0,
			 usb_rcvintpipe(ictx->usbdev_intf0,
					ictx->rx_endpoint_intf0->bEndpointAddress),
			 ictx->usb_rx_buf, sizeof(ictx->usb_rx_buf),
			 usb_rx_callback_intf0, ictx,
			 ictx->rx_endpoint_intf0->bInterval);

	ret = usb_submit_urb(ictx->rx_urb_intf0, GFP_KERNEL);
	if (ret) {
		pr_err("usb_submit_urb failed for intf0 (%d)\n", ret);
		goto urb_submit_failed;
	}

	ictx->idev = imon_init_idev(ictx);
	if (!ictx->idev) {
		dev_err(dev, "%s: input device setup failed\n", __func__);
		goto idev_setup_failed;
	}

	ictx->rdev = imon_init_rdev(ictx);
	if (!ictx->rdev) {
		dev_err(dev, "%s: rc device setup failed\n", __func__);
		goto rdev_setup_failed;
	}

	ictx->dev_present_intf0 = true;

	mutex_unlock(&ictx->lock);
	return ictx;

rdev_setup_failed:
	input_unregister_device(ictx->idev);
idev_setup_failed:
	usb_kill_urb(ictx->rx_urb_intf0);
urb_submit_failed:
find_endpoint_failed:
	usb_put_dev(ictx->usbdev_intf0);
	mutex_unlock(&ictx->lock);
	usb_free_urb(tx_urb);
tx_urb_alloc_failed:
	usb_free_urb(rx_urb);
rx_urb_alloc_failed:
	kfree(ictx);
exit:
	dev_err(dev, "unable to initialize intf0, err %d\n", ret);

	return NULL;
}

static struct imon_context *imon_init_intf1(struct usb_interface *intf,
					    struct imon_context *ictx)
{
	struct urb *rx_urb;
	struct usb_host_interface *iface_desc;
	int ret = -ENOMEM;

	rx_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!rx_urb)
		goto rx_urb_alloc_failed;

	mutex_lock(&ictx->lock);

	if (ictx->display_type == IMON_DISPLAY_TYPE_VGA)
		timer_setup(&ictx->ttimer, imon_touch_display_timeout, 0);

	ictx->usbdev_intf1 = usb_get_dev(interface_to_usbdev(intf));
	ictx->rx_urb_intf1 = rx_urb;

	ret = -ENODEV;
	iface_desc = intf->cur_altsetting;
	if (!imon_find_endpoints(ictx, iface_desc))
		goto find_endpoint_failed;

	if (ictx->display_type == IMON_DISPLAY_TYPE_VGA) {
		ictx->touch = imon_init_touch(ictx);
		if (!ictx->touch)
			goto touch_setup_failed;
	} else
		ictx->touch = NULL;

	usb_fill_int_urb(ictx->rx_urb_intf1, ictx->usbdev_intf1,
			 usb_rcvintpipe(ictx->usbdev_intf1,
					ictx->rx_endpoint_intf1->bEndpointAddress),
			 ictx->usb_rx_buf, sizeof(ictx->usb_rx_buf),
			 usb_rx_callback_intf1, ictx,
			 ictx->rx_endpoint_intf1->bInterval);

	ret = usb_submit_urb(ictx->rx_urb_intf1, GFP_KERNEL);
	if (ret) {
		pr_err("usb_submit_urb failed for intf1 (%d)\n", ret);
		goto urb_submit_failed;
	}

	ictx->dev_present_intf1 = true;

	mutex_unlock(&ictx->lock);
	return ictx;

urb_submit_failed:
	if (ictx->touch)
		input_unregister_device(ictx->touch);
touch_setup_failed:
find_endpoint_failed:
	usb_put_dev(ictx->usbdev_intf1);
	ictx->usbdev_intf1 = NULL;
	mutex_unlock(&ictx->lock);
	usb_free_urb(rx_urb);
	ictx->rx_urb_intf1 = NULL;
rx_urb_alloc_failed:
	dev_err(ictx->dev, "unable to initialize intf1, err %d\n", ret);

	return NULL;
}

static void imon_init_display(struct imon_context *ictx,
			      struct usb_interface *intf)
{
	int ret;

	dev_dbg(ictx->dev, "Registering iMON display with sysfs\n");

	/* set up sysfs entry for built-in clock */
	ret = sysfs_create_group(&intf->dev.kobj, &imon_display_attr_group);
	if (ret)
		dev_err(ictx->dev, "Could not create display sysfs entries(%d)",
			ret);

	if (ictx->display_type == IMON_DISPLAY_TYPE_LCD)
		ret = usb_register_dev(intf, &imon_lcd_class);
	else
		ret = usb_register_dev(intf, &imon_vfd_class);
	if (ret)
		/* Not a fatal error, so ignore */
		dev_info(ictx->dev, "could not get a minor number for display\n");
}
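/*
 * Note: the error-unwind labels in imon_init_intf0() and imon_init_intf1()
 * run in the reverse of setup order (unregister input device, kill the rx
 * URB, drop the usb_dev reference, free the URBs, free the context), so
 * each label only undoes the steps that had already succeeded when the
 * failure occurred.
 */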
/*
 * Callback function for USB core API: Probe
 */
static int imon_probe(struct usb_interface *interface,
		      const struct usb_device_id *id)
{
	struct usb_device *usbdev = NULL;
	struct usb_host_interface *iface_desc = NULL;
	struct usb_interface *first_if;
	struct device *dev = &interface->dev;
	int ifnum, sysfs_err;
	int ret = 0;
	struct imon_context *ictx = NULL;
	u16 vendor, product;

	usbdev = usb_get_dev(interface_to_usbdev(interface));
	iface_desc = interface->cur_altsetting;
	ifnum = iface_desc->desc.bInterfaceNumber;
	vendor = le16_to_cpu(usbdev->descriptor.idVendor);
	product = le16_to_cpu(usbdev->descriptor.idProduct);

	dev_dbg(dev, "%s: found iMON device (%04x:%04x, intf%d)\n",
		__func__, vendor, product, ifnum);

	first_if = usb_ifnum_to_if(usbdev, 0);
	if (!first_if) {
		ret = -ENODEV;
		goto fail;
	}

	if (first_if->dev.driver != interface->dev.driver) {
		dev_err(&interface->dev, "inconsistent driver matching\n");
		ret = -EINVAL;
		goto fail;
	}

	if (ifnum == 0) {
		ictx = imon_init_intf0(interface, id);
		if (!ictx) {
			pr_err("failed to initialize context!\n");
			ret = -ENODEV;
			goto fail;
		}
		refcount_set(&ictx->users, 1);

	} else {
		/* this is the secondary interface on the device */
		struct imon_context *first_if_ctx = usb_get_intfdata(first_if);

		/* fail early if first intf failed to register */
		if (!first_if_ctx) {
			ret = -ENODEV;
			goto fail;
		}

		ictx = imon_init_intf1(interface, first_if_ctx);
		if (!ictx) {
			pr_err("failed to attach to context!\n");
			ret = -ENODEV;
			goto fail;
		}
		refcount_inc(&ictx->users);
	}

	usb_set_intfdata(interface, ictx);

	if (ifnum == 0) {
		if (product == 0xffdc && ictx->rf_device) {
			sysfs_err = sysfs_create_group(&interface->dev.kobj,
						       &imon_rf_attr_group);
			if (sysfs_err)
				pr_err("Could not create RF sysfs entries(%d)\n",
				       sysfs_err);
		}

		if (ictx->display_supported)
			imon_init_display(ictx, interface);
	}

	dev_info(dev, "iMON device (%04x:%04x, intf%d) on usb<%d:%d> initialized\n",
		 vendor, product, ifnum, usbdev->bus->busnum, usbdev->devnum);

	usb_put_dev(usbdev);

	return 0;

fail:
	usb_put_dev(usbdev);
	dev_err(dev, "unable to register, err %d\n", ret);

	return ret;
}

/*
 * Callback function for USB core API: disconnect
 */
static void imon_disconnect(struct usb_interface *interface)
{
	struct imon_context *ictx;
	struct device *dev;
	int ifnum;

	ictx = usb_get_intfdata(interface);
	ictx->disconnected = true;
	dev = ictx->dev;
	ifnum = interface->cur_altsetting->desc.bInterfaceNumber;

	/*
	 * sysfs_remove_group is safe to call even if sysfs_create_group
	 * hasn't been called
	 */
	sysfs_remove_group(&interface->dev.kobj, &imon_display_attr_group);
	sysfs_remove_group(&interface->dev.kobj, &imon_rf_attr_group);

	usb_set_intfdata(interface, NULL);

	/* Abort ongoing write */
	if (ictx->tx.busy) {
		usb_kill_urb(ictx->tx_urb);
		complete(&ictx->tx.finished);
	}

	if (ifnum == 0) {
		ictx->dev_present_intf0 = false;
		usb_kill_urb(ictx->rx_urb_intf0);
		input_unregister_device(ictx->idev);
		rc_unregister_device(ictx->rdev);
		if (ictx->display_supported) {
			if (ictx->display_type == IMON_DISPLAY_TYPE_LCD)
				usb_deregister_dev(interface, &imon_lcd_class);
			else if (ictx->display_type == IMON_DISPLAY_TYPE_VFD)
				usb_deregister_dev(interface, &imon_vfd_class);
		}
		usb_put_dev(ictx->usbdev_intf0);
	} else {
		ictx->dev_present_intf1 = false;
		usb_kill_urb(ictx->rx_urb_intf1);
		if (ictx->display_type == IMON_DISPLAY_TYPE_VGA) {
			del_timer_sync(&ictx->ttimer);
			input_unregister_device(ictx->touch);
		}
		usb_put_dev(ictx->usbdev_intf1);
	}

	if (refcount_dec_and_test(&ictx->users))
		free_imon_context(ictx);

	dev_dbg(dev, "%s: iMON device (intf%d) disconnected\n",
		__func__, ifnum);
}
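/*
 * Note: the context is shared between the two USB interfaces, so its
 * lifetime is tracked with a refcount: probe of intf0 does
 * refcount_set(&ictx->users, 1), probe of intf1 does refcount_inc(), and
 * whichever interface disconnects last frees the context via
 * free_imon_context() (defined earlier in the file, not shown in this
 * excerpt).
 */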
static int imon_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct imon_context *ictx = usb_get_intfdata(intf);
	int ifnum = intf->cur_altsetting->desc.bInterfaceNumber;

	if (ifnum == 0)
		usb_kill_urb(ictx->rx_urb_intf0);
	else
		usb_kill_urb(ictx->rx_urb_intf1);

	return 0;
}

static int imon_resume(struct usb_interface *intf)
{
	int rc = 0;
	struct imon_context *ictx = usb_get_intfdata(intf);
	int ifnum = intf->cur_altsetting->desc.bInterfaceNumber;

	if (ifnum == 0) {
		usb_fill_int_urb(ictx->rx_urb_intf0, ictx->usbdev_intf0,
				 usb_rcvintpipe(ictx->usbdev_intf0,
						ictx->rx_endpoint_intf0->bEndpointAddress),
				 ictx->usb_rx_buf, sizeof(ictx->usb_rx_buf),
				 usb_rx_callback_intf0, ictx,
				 ictx->rx_endpoint_intf0->bInterval);

		rc = usb_submit_urb(ictx->rx_urb_intf0, GFP_NOIO);

	} else {
		usb_fill_int_urb(ictx->rx_urb_intf1, ictx->usbdev_intf1,
				 usb_rcvintpipe(ictx->usbdev_intf1,
						ictx->rx_endpoint_intf1->bEndpointAddress),
				 ictx->usb_rx_buf, sizeof(ictx->usb_rx_buf),
				 usb_rx_callback_intf1, ictx,
				 ictx->rx_endpoint_intf1->bInterval);

		rc = usb_submit_urb(ictx->rx_urb_intf1, GFP_NOIO);
	}

	return rc;
}

module_usb_driver(imon_driver);
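/*
 * Note: this portion of the driver references several module parameters
 * (debug, nomouse, pad_stabilize, pad_thresh, display_type) whose
 * declarations live earlier in the file and are not shown in this
 * excerpt; display_type == IMON_DISPLAY_TYPE_AUTO is what enables the
 * auto-detection path in imon_set_display_type().
 */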
// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/mm/memory.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 */

/*
 * demand-loading started 01.12.91 - seems it is high on the list of
 * things wanted, and it should be easy to implement. - Linus
 */

/*
 * Ok, demand-loading was easy, shared pages a little bit trickier. Shared
 * pages started 02.12.91, seems to work. - Linus.
 *
 * Tested sharing by executing about 30 /bin/sh: under the old kernel it
 * would have taken more than the 6M I have free, but it worked well as
 * far as I could see.
 *
 * Also corrected some "invalidate()"s - I wasn't doing enough of them.
 */

/*
 * Real VM (paging to/from disk) started 18.12.91. Much more work and
 * thought has to go into this. Oh, well..
 * 19.12.91 - works, somewhat. Sometimes I get faults, don't know why.
 *		Found it. Everything seems to work now.
 * 20.12.91 - Ok, making the swap-device changeable like the root.
 */

/*
 * 05.04.94 - Multi-page memory management added for v1.1.
 *		Idea by Alex Bligh (alex@cconcepts.co.uk)
 *
 * 16.07.99 - Support of BIGMEM added by Gerhard Wichert, Siemens AG
 *		(Gerhard.Wichert@pdb.siemens.de)
 *
 * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
 */

#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/sched/mm.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/task.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/memremap.h>
#include <linux/kmsan.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/export.h>
#include <linux/delayacct.h>
#include <linux/init.h>
#include <linux/pfn_t.h>
#include <linux/writeback.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/swapops.h>
#include <linux/elf.h>
#include <linux/gfp.h>
#include <linux/migrate.h>
#include <linux/string.h>
#include <linux/memory-tiers.h>
#include <linux/debugfs.h>
#include <linux/userfaultfd_k.h>
#include <linux/dax.h>
#include <linux/oom.h>
#include <linux/numa.h>
#include <linux/perf_event.h>
#include <linux/ptrace.h>
#include <linux/vmalloc.h>
#include <linux/sched/sysctl.h>

#include <trace/events/kmem.h>

#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <linux/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

#include "pgalloc-track.h"
#include "internal.h"
#include "swap.h"

#if defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS) && !defined(CONFIG_COMPILE_TEST)
#warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid.
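/*
 * (Editorial note, not upstream text: in this configuration the last_cpupid
 * value used by NUMA balancing does not fit into page flags, so it has to
 * be stored in the page frame itself - that is the growth the warning
 * above refers to.)
 */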
#endif

#ifndef CONFIG_NUMA
unsigned long max_mapnr;
EXPORT_SYMBOL(max_mapnr);

struct page *mem_map;
EXPORT_SYMBOL(mem_map);
#endif

static vm_fault_t do_fault(struct vm_fault *vmf);
static vm_fault_t do_anonymous_page(struct vm_fault *vmf);
static bool vmf_pte_changed(struct vm_fault *vmf);

/*
 * Return true if the original pte was a uffd-wp pte marker (so the pte was
 * wr-protected).
 */
static __always_inline bool vmf_orig_pte_uffd_wp(struct vm_fault *vmf)
{
	if (!userfaultfd_wp(vmf->vma))
		return false;
	if (!(vmf->flags & FAULT_FLAG_ORIG_PTE_VALID))
		return false;

	return pte_marker_uffd_wp(vmf->orig_pte);
}

/*
 * A number of key systems in x86 including ioremap() rely on the assumption
 * that high_memory defines the upper bound on direct map memory, the end
 * of ZONE_NORMAL.
 */
void *high_memory;
EXPORT_SYMBOL(high_memory);

/*
 * Randomize the address space (stacks, mmaps, brk, etc.).
 *
 * ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization,
 *   as ancient (libc5 based) binaries can segfault. )
 */
int randomize_va_space __read_mostly =
#ifdef CONFIG_COMPAT_BRK
					1;
#else
					2;
#endif

#ifndef arch_wants_old_prefaulted_pte
static inline bool arch_wants_old_prefaulted_pte(void)
{
	/*
	 * Transitioning a PTE from 'old' to 'young' can be expensive on
	 * some architectures, even if it's performed in hardware. By
	 * default, "false" means prefaulted entries will be 'young'.
	 */
	return false;
}
#endif

static int __init disable_randmaps(char *s)
{
	randomize_va_space = 0;
	return 1;
}
__setup("norandmaps", disable_randmaps);

unsigned long zero_pfn __read_mostly;
EXPORT_SYMBOL(zero_pfn);

unsigned long highest_memmap_pfn __read_mostly;

/*
 * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
 */
static int __init init_zero_pfn(void)
{
	zero_pfn = page_to_pfn(ZERO_PAGE(0));
	return 0;
}
early_initcall(init_zero_pfn);

void mm_trace_rss_stat(struct mm_struct *mm, int member)
{
	trace_rss_stat(mm, member);
}

/*
 * Note: this doesn't free the actual pages themselves. That
 * has been handled earlier when unmapping all the memory regions.
*/ static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd, unsigned long addr) { pgtable_t token = pmd_pgtable(*pmd); pmd_clear(pmd); pte_free_tlb(tlb, token, addr); mm_dec_nr_ptes(tlb->mm); } static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud, unsigned long addr, unsigned long end, unsigned long floor, unsigned long ceiling) { pmd_t *pmd; unsigned long next; unsigned long start; start = addr; pmd = pmd_offset(pud, addr); do { next = pmd_addr_end(addr, end); if (pmd_none_or_clear_bad(pmd)) continue; free_pte_range(tlb, pmd, addr); } while (pmd++, addr = next, addr != end); start &= PUD_MASK; if (start < floor) return; if (ceiling) { ceiling &= PUD_MASK; if (!ceiling) return; } if (end - 1 > ceiling - 1) return; pmd = pmd_offset(pud, start); pud_clear(pud); pmd_free_tlb(tlb, pmd, start); mm_dec_nr_pmds(tlb->mm); } static inline void free_pud_range(struct mmu_gather *tlb, p4d_t *p4d, unsigned long addr, unsigned long end, unsigned long floor, unsigned long ceiling) { pud_t *pud; unsigned long next; unsigned long start; start = addr; pud = pud_offset(p4d, addr); do { next = pud_addr_end(addr, end); if (pud_none_or_clear_bad(pud)) continue; free_pmd_range(tlb, pud, addr, next, floor, ceiling); } while (pud++, addr = next, addr != end); start &= P4D_MASK; if (start < floor) return; if (ceiling) { ceiling &= P4D_MASK; if (!ceiling) return; } if (end - 1 > ceiling - 1) return; pud = pud_offset(p4d, start); p4d_clear(p4d); pud_free_tlb(tlb, pud, start); mm_dec_nr_puds(tlb->mm); } static inline void free_p4d_range(struct mmu_gather *tlb, pgd_t *pgd, unsigned long addr, unsigned long end, unsigned long floor, unsigned long ceiling) { p4d_t *p4d; unsigned long next; unsigned long start; start = addr; p4d = p4d_offset(pgd, addr); do { next = p4d_addr_end(addr, end); if (p4d_none_or_clear_bad(p4d)) continue; free_pud_range(tlb, p4d, addr, next, floor, ceiling); } while (p4d++, addr = next, addr != end); start &= PGDIR_MASK; if (start < floor) return; if (ceiling) { ceiling &= PGDIR_MASK; if (!ceiling) return; } if (end - 1 > ceiling - 1) return; p4d = p4d_offset(pgd, start); pgd_clear(pgd); p4d_free_tlb(tlb, p4d, start); } /* * This function frees user-level page tables of a process. */ void free_pgd_range(struct mmu_gather *tlb, unsigned long addr, unsigned long end, unsigned long floor, unsigned long ceiling) { pgd_t *pgd; unsigned long next; /* * The next few lines have given us lots of grief... * * Why are we testing PMD* at this top level? Because often * there will be no work to do at all, and we'd prefer not to * go all the way down to the bottom just to discover that. * * Why all these "- 1"s? Because 0 represents both the bottom * of the address space and the top of it (using -1 for the * top wouldn't help much: the masks would do the wrong thing). * The rule is that addr 0 and floor 0 refer to the bottom of * the address space, but end 0 and ceiling 0 refer to the top * Comparisons need to use "end - 1" and "ceiling - 1" (though * that end 0 case should be mythical). * * Wherever addr is brought up or ceiling brought down, we must * be careful to reject "the opposite 0" before it confuses the * subsequent tests. But what about where end is brought down * by PMD_SIZE below? no, end can't go down to 0 there. * * Whereas we round start (addr) and ceiling down, by different * masks at different levels, in order to test whether a table * now has no other vmas using it, so can be freed, we don't * bother to round floor or end up - the tests don't need that. 
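	 *
	 * (Editorial worked example, not upstream text, assuming a 2MiB
	 * PMD_SIZE: addr = 0x201000 is first rounded down to 0x200000.
	 * If floor = 0x202000, then addr < floor, so addr is bumped by
	 * PMD_SIZE to 0x400000: the page table page that still covers
	 * floor is kept rather than freed.)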
*/ addr &= PMD_MASK; if (addr < floor) { addr += PMD_SIZE; if (!addr) return; } if (ceiling) { ceiling &= PMD_MASK; if (!ceiling) return; } if (end - 1 > ceiling - 1) end -= PMD_SIZE; if (addr > end - 1) return; /* * We add page table cache pages with PAGE_SIZE, * (see pte_free_tlb()), flush the tlb if we need */ tlb_change_page_size(tlb, PAGE_SIZE); pgd = pgd_offset(tlb->mm, addr); do { next = pgd_addr_end(addr, end); if (pgd_none_or_clear_bad(pgd)) continue; free_p4d_range(tlb, pgd, addr, next, floor, ceiling); } while (pgd++, addr = next, addr != end); } void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas, struct vm_area_struct *vma, unsigned long floor, unsigned long ceiling, bool mm_wr_locked) { struct unlink_vma_file_batch vb; do { unsigned long addr = vma->vm_start; struct vm_area_struct *next; /* * Note: USER_PGTABLES_CEILING may be passed as ceiling and may * be 0. This will underflow and is okay. */ next = mas_find(mas, ceiling - 1); if (unlikely(xa_is_zero(next))) next = NULL; /* * Hide vma from rmap and truncate_pagecache before freeing * pgtables */ if (mm_wr_locked) vma_start_write(vma); unlink_anon_vmas(vma); if (is_vm_hugetlb_page(vma)) { unlink_file_vma(vma); hugetlb_free_pgd_range(tlb, addr, vma->vm_end, floor, next ? next->vm_start : ceiling); } else { unlink_file_vma_batch_init(&vb); unlink_file_vma_batch_add(&vb, vma); /* * Optimization: gather nearby vmas into one call down */ while (next && next->vm_start <= vma->vm_end + PMD_SIZE && !is_vm_hugetlb_page(next)) { vma = next; next = mas_find(mas, ceiling - 1); if (unlikely(xa_is_zero(next))) next = NULL; if (mm_wr_locked) vma_start_write(vma); unlink_anon_vmas(vma); unlink_file_vma_batch_add(&vb, vma); } unlink_file_vma_batch_final(&vb); free_pgd_range(tlb, addr, vma->vm_end, floor, next ? next->vm_start : ceiling); } vma = next; } while (vma); } void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte) { spinlock_t *ptl = pmd_lock(mm, pmd); if (likely(pmd_none(*pmd))) { /* Has another populated it ? */ mm_inc_nr_ptes(mm); /* * Ensure all pte setup (eg. pte page lock and page clearing) are * visible before the pte is made visible to other CPUs by being * put into page tables. * * The other side of the story is the pointer chasing in the page * table walking code (when walking the page table without locking; * ie. most of the time). Fortunately, these data accesses consist * of a chain of data-dependent loads, meaning most CPUs (alpha * being the notable exception) will already guarantee loads are * seen in-order. See the alpha page table accessors for the * smp_rmb() barriers in page table walking code. */ smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */ pmd_populate(mm, pmd, *pte); *pte = NULL; } spin_unlock(ptl); } int __pte_alloc(struct mm_struct *mm, pmd_t *pmd) { pgtable_t new = pte_alloc_one(mm); if (!new) return -ENOMEM; pmd_install(mm, pmd, &new); if (new) pte_free(mm, new); return 0; } int __pte_alloc_kernel(pmd_t *pmd) { pte_t *new = pte_alloc_one_kernel(&init_mm); if (!new) return -ENOMEM; spin_lock(&init_mm.page_table_lock); if (likely(pmd_none(*pmd))) { /* Has another populated it ? 
*/ smp_wmb(); /* See comment in pmd_install() */ pmd_populate_kernel(&init_mm, pmd, new); new = NULL; } spin_unlock(&init_mm.page_table_lock); if (new) pte_free_kernel(&init_mm, new); return 0; } static inline void init_rss_vec(int *rss) { memset(rss, 0, sizeof(int) * NR_MM_COUNTERS); } static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss) { int i; for (i = 0; i < NR_MM_COUNTERS; i++) if (rss[i]) add_mm_counter(mm, i, rss[i]); } /* * This function is called to print an error when a bad pte * is found. For example, we might have a PFN-mapped pte in * a region that doesn't allow it. * * The calling function must still handle the error. */ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr, pte_t pte, struct page *page) { pgd_t *pgd = pgd_offset(vma->vm_mm, addr); p4d_t *p4d = p4d_offset(pgd, addr); pud_t *pud = pud_offset(p4d, addr); pmd_t *pmd = pmd_offset(pud, addr); struct address_space *mapping; pgoff_t index; static unsigned long resume; static unsigned long nr_shown; static unsigned long nr_unshown; /* * Allow a burst of 60 reports, then keep quiet for that minute; * or allow a steady drip of one report per second. */ if (nr_shown == 60) { if (time_before(jiffies, resume)) { nr_unshown++; return; } if (nr_unshown) { pr_alert("BUG: Bad page map: %lu messages suppressed\n", nr_unshown); nr_unshown = 0; } nr_shown = 0; } if (nr_shown++ == 0) resume = jiffies + 60 * HZ; mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL; index = linear_page_index(vma, addr); pr_alert("BUG: Bad page map in process %s pte:%08llx pmd:%08llx\n", current->comm, (long long)pte_val(pte), (long long)pmd_val(*pmd)); if (page) dump_page(page, "bad pte"); pr_alert("addr:%px vm_flags:%08lx anon_vma:%px mapping:%px index:%lx\n", (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index); pr_alert("file:%pD fault:%ps mmap:%ps read_folio:%ps\n", vma->vm_file, vma->vm_ops ? vma->vm_ops->fault : NULL, vma->vm_file ? vma->vm_file->f_op->mmap : NULL, mapping ? mapping->a_ops->read_folio : NULL); dump_stack(); add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE); } /* * vm_normal_page -- This function gets the "struct page" associated with a pte. * * "Special" mappings do not wish to be associated with a "struct page" (either * it doesn't exist, or it exists but they don't want to touch it). In this * case, NULL is returned here. "Normal" mappings do have a struct page. * * There are 2 broad cases. Firstly, an architecture may define a pte_special() * pte bit, in which case this function is trivial. Secondly, an architecture * may not have a spare pte bit, which requires a more complicated scheme, * described below. * * A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a * special mapping (even if there are underlying and valid "struct pages"). * COWed pages of a VM_PFNMAP are always normal. * * The way we recognize COWed pages within VM_PFNMAP mappings is through the * rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit * set, and the vm_pgoff will point to the first PFN mapped: thus every special * mapping will always honor the rule * * pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT) * * And for normal mappings this is false. * * This restricts such mappings to be a linear translation from virtual address * to pfn. To get around this restriction, we allow arbitrary mappings so long * as the vma is not a COW mapping; in that case, we know that all ptes are * special (because none can have been COWed). 
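 *
 * (Editorial example, not upstream text: in a remap_pfn_range() vma with
 * vm_pgoff = 0x100, the pte for vm_start + PAGE_SIZE obeys the rule only
 * if its pfn is 0x101; a page installed there later by COW will normally
 * break that identity, which is how it is recognized as a normal page.)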
* * * In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP. * * VM_MIXEDMAP mappings can likewise contain memory with or without "struct * page" backing, however the difference is that _all_ pages with a struct * page (that is, those where pfn_valid is true) are refcounted and considered * normal pages by the VM. The only exception are zeropages, which are * *never* refcounted. * * The disadvantage is that pages are refcounted (which can be slower and * simply not an option for some PFNMAP users). The advantage is that we * don't have to follow the strict linearity rule of PFNMAP mappings in * order to support COWable mappings. * */ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, pte_t pte) { unsigned long pfn = pte_pfn(pte); if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL)) { if (likely(!pte_special(pte))) goto check_pfn; if (vma->vm_ops && vma->vm_ops->find_special_page) return vma->vm_ops->find_special_page(vma, addr); if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)) return NULL; if (is_zero_pfn(pfn)) return NULL; if (pte_devmap(pte)) /* * NOTE: New users of ZONE_DEVICE will not set pte_devmap() * and will have refcounts incremented on their struct pages * when they are inserted into PTEs, thus they are safe to * return here. Legacy ZONE_DEVICE pages that set pte_devmap() * do not have refcounts. Example of legacy ZONE_DEVICE is * MEMORY_DEVICE_FS_DAX type in pmem or virtio_fs drivers. */ return NULL; print_bad_pte(vma, addr, pte, NULL); return NULL; } /* !CONFIG_ARCH_HAS_PTE_SPECIAL case follows: */ if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) { if (vma->vm_flags & VM_MIXEDMAP) { if (!pfn_valid(pfn)) return NULL; if (is_zero_pfn(pfn)) return NULL; goto out; } else { unsigned long off; off = (addr - vma->vm_start) >> PAGE_SHIFT; if (pfn == vma->vm_pgoff + off) return NULL; if (!is_cow_mapping(vma->vm_flags)) return NULL; } } if (is_zero_pfn(pfn)) return NULL; check_pfn: if (unlikely(pfn > highest_memmap_pfn)) { print_bad_pte(vma, addr, pte, NULL); return NULL; } /* * NOTE! We still have PageReserved() pages in the page tables. * eg. VDSO mappings can cause them to exist. */ out: VM_WARN_ON_ONCE(is_zero_pfn(pfn)); return pfn_to_page(pfn); } struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr, pte_t pte) { struct page *page = vm_normal_page(vma, addr, pte); if (page) return page_folio(page); return NULL; } #ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t pmd) { unsigned long pfn = pmd_pfn(pmd); /* Currently it's only used for huge pfnmaps */ if (unlikely(pmd_special(pmd))) return NULL; if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) { if (vma->vm_flags & VM_MIXEDMAP) { if (!pfn_valid(pfn)) return NULL; goto out; } else { unsigned long off; off = (addr - vma->vm_start) >> PAGE_SHIFT; if (pfn == vma->vm_pgoff + off) return NULL; if (!is_cow_mapping(vma->vm_flags)) return NULL; } } if (pmd_devmap(pmd)) return NULL; if (is_huge_zero_pmd(pmd)) return NULL; if (unlikely(pfn > highest_memmap_pfn)) return NULL; /* * NOTE! We still have PageReserved() pages in the page tables. * eg. VDSO mappings can cause them to exist. 
*/ out: return pfn_to_page(pfn); } struct folio *vm_normal_folio_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t pmd) { struct page *page = vm_normal_page_pmd(vma, addr, pmd); if (page) return page_folio(page); return NULL; } #endif static void restore_exclusive_pte(struct vm_area_struct *vma, struct page *page, unsigned long address, pte_t *ptep) { struct folio *folio = page_folio(page); pte_t orig_pte; pte_t pte; swp_entry_t entry; orig_pte = ptep_get(ptep); pte = pte_mkold(mk_pte(page, READ_ONCE(vma->vm_page_prot))); if (pte_swp_soft_dirty(orig_pte)) pte = pte_mksoft_dirty(pte); entry = pte_to_swp_entry(orig_pte); if (pte_swp_uffd_wp(orig_pte)) pte = pte_mkuffd_wp(pte); else if (is_writable_device_exclusive_entry(entry)) pte = maybe_mkwrite(pte_mkdirty(pte), vma); VM_BUG_ON_FOLIO(pte_write(pte) && (!folio_test_anon(folio) && PageAnonExclusive(page)), folio); /* * No need to take a page reference as one was already * created when the swap entry was made. */ if (folio_test_anon(folio)) folio_add_anon_rmap_pte(folio, page, vma, address, RMAP_NONE); else /* * Currently device exclusive access only supports anonymous * memory so the entry shouldn't point to a filebacked page. */ WARN_ON_ONCE(1); set_pte_at(vma->vm_mm, address, ptep, pte); /* * No need to invalidate - it was non-present before. However * secondary CPUs may have mappings that need invalidating. */ update_mmu_cache(vma, address, ptep); } /* * Tries to restore an exclusive pte if the page lock can be acquired without * sleeping. */ static int try_restore_exclusive_pte(pte_t *src_pte, struct vm_area_struct *vma, unsigned long addr) { swp_entry_t entry = pte_to_swp_entry(ptep_get(src_pte)); struct page *page = pfn_swap_entry_to_page(entry); if (trylock_page(page)) { restore_exclusive_pte(vma, page, addr, src_pte); unlock_page(page); return 0; } return -EBUSY; } /* * copy one vm_area from one task to the other. Assumes the page tables * already present in the new task to be cleared in the whole range * covered by this vma. */ static unsigned long copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm, pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma, unsigned long addr, int *rss) { unsigned long vm_flags = dst_vma->vm_flags; pte_t orig_pte = ptep_get(src_pte); pte_t pte = orig_pte; struct folio *folio; struct page *page; swp_entry_t entry = pte_to_swp_entry(orig_pte); if (likely(!non_swap_entry(entry))) { if (swap_duplicate(entry) < 0) return -EIO; /* make sure dst_mm is on swapoff's mmlist. */ if (unlikely(list_empty(&dst_mm->mmlist))) { spin_lock(&mmlist_lock); if (list_empty(&dst_mm->mmlist)) list_add(&dst_mm->mmlist, &src_mm->mmlist); spin_unlock(&mmlist_lock); } /* Mark the swap entry as shared. */ if (pte_swp_exclusive(orig_pte)) { pte = pte_swp_clear_exclusive(orig_pte); set_pte_at(src_mm, addr, src_pte, pte); } rss[MM_SWAPENTS]++; } else if (is_migration_entry(entry)) { folio = pfn_swap_entry_folio(entry); rss[mm_counter(folio)]++; if (!is_readable_migration_entry(entry) && is_cow_mapping(vm_flags)) { /* * COW mappings require pages in both parent and child * to be set to read. A previously exclusive entry is * now shared. 
			 */
			entry = make_readable_migration_entry(
							swp_offset(entry));
			pte = swp_entry_to_pte(entry);
			if (pte_swp_soft_dirty(orig_pte))
				pte = pte_swp_mksoft_dirty(pte);
			if (pte_swp_uffd_wp(orig_pte))
				pte = pte_swp_mkuffd_wp(pte);
			set_pte_at(src_mm, addr, src_pte, pte);
		}
	} else if (is_device_private_entry(entry)) {
		page = pfn_swap_entry_to_page(entry);
		folio = page_folio(page);

		/*
		 * Update rss count even for unaddressable pages, as
		 * they should be treated just like normal pages in this
		 * respect.
		 *
		 * We will likely want to have some new rss counters
		 * for unaddressable pages, at some point. But for now
		 * keep things as they are.
		 */
		folio_get(folio);
		rss[mm_counter(folio)]++;
		/* Cannot fail as these pages cannot get pinned. */
		folio_try_dup_anon_rmap_pte(folio, page, src_vma);

		/*
		 * We do not preserve soft-dirty information, because so
		 * far, checkpoint/restore is the only feature that
		 * requires that. And checkpoint/restore does not work
		 * when a device driver is involved (you cannot easily
		 * save and restore device driver state).
		 */
		if (is_writable_device_private_entry(entry) &&
		    is_cow_mapping(vm_flags)) {
			entry = make_readable_device_private_entry(
							swp_offset(entry));
			pte = swp_entry_to_pte(entry);
			if (pte_swp_uffd_wp(orig_pte))
				pte = pte_swp_mkuffd_wp(pte);
			set_pte_at(src_mm, addr, src_pte, pte);
		}
	} else if (is_device_exclusive_entry(entry)) {
		/*
		 * Make device exclusive entries present by restoring the
		 * original entry then copying as for a present pte. Device
		 * exclusive entries currently only support private writable
		 * (ie. COW) mappings.
		 */
		VM_BUG_ON(!is_cow_mapping(src_vma->vm_flags));
		if (try_restore_exclusive_pte(src_pte, src_vma, addr))
			return -EBUSY;
		return -ENOENT;
	} else if (is_pte_marker_entry(entry)) {
		pte_marker marker = copy_pte_marker(entry, dst_vma);

		if (marker)
			set_pte_at(dst_mm, addr, dst_pte,
				   make_pte_marker(marker));
		return 0;
	}
	if (!userfaultfd_wp(dst_vma))
		pte = pte_swp_clear_uffd_wp(pte);
	set_pte_at(dst_mm, addr, dst_pte, pte);
	return 0;
}

/*
 * Copy a present and normal page.
 *
 * NOTE! The usual case is that this isn't required;
 * instead, the caller can just increase the page refcount
 * and re-use the pte the traditional way.
 *
 * And if we need a pre-allocated page but don't yet have
 * one, return a negative error to let the preallocation
 * code know so that it can do so outside the page table
 * lock.
 */
static inline int
copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
		  pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,
		  struct folio **prealloc, struct page *page)
{
	struct folio *new_folio;
	pte_t pte;

	new_folio = *prealloc;
	if (!new_folio)
		return -EAGAIN;

	/*
	 * We have a prealloc page, all good! Take it
	 * over and copy the page & arm it.
	 */
	if (copy_mc_user_highpage(&new_folio->page, page, addr, src_vma))
		return -EHWPOISON;

	*prealloc = NULL;
	__folio_mark_uptodate(new_folio);
	folio_add_new_anon_rmap(new_folio, dst_vma, addr, RMAP_EXCLUSIVE);
	folio_add_lru_vma(new_folio, dst_vma);
	rss[MM_ANONPAGES]++;

	/* All done, just insert the new page copy in the child */
	pte = mk_pte(&new_folio->page, dst_vma->vm_page_prot);
	pte = maybe_mkwrite(pte_mkdirty(pte), dst_vma);
	if (userfaultfd_pte_wp(dst_vma, ptep_get(src_pte)))
		/* Uffd-wp needs to be delivered to dest pte as well */
		pte = pte_mkuffd_wp(pte);
	set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);
	return 0;
}

static __always_inline void __copy_present_ptes(struct vm_area_struct *dst_vma,
		struct vm_area_struct *src_vma, pte_t *dst_pte, pte_t *src_pte,
		pte_t pte, unsigned long addr, int nr)
{
	struct mm_struct *src_mm = src_vma->vm_mm;

	/* If it's a COW mapping, write-protect it in both processes. */
	if (is_cow_mapping(src_vma->vm_flags) && pte_write(pte)) {
		wrprotect_ptes(src_mm, addr, src_pte, nr);
		pte = pte_wrprotect(pte);
	}

	/* If it's a shared mapping, mark it clean in the child. */
	if (src_vma->vm_flags & VM_SHARED)
		pte = pte_mkclean(pte);
	pte = pte_mkold(pte);

	if (!userfaultfd_wp(dst_vma))
		pte = pte_clear_uffd_wp(pte);

	set_ptes(dst_vma->vm_mm, addr, dst_pte, pte, nr);
}

/*
 * Copy one present PTE, trying to batch-process subsequent PTEs that map
 * consecutive pages of the same folio by copying them as well.
 *
 * Returns -EAGAIN if one preallocated page is required to copy the next PTE.
 * Otherwise, returns the number of copied PTEs (at least 1).
 */
static inline int
copy_present_ptes(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
		  pte_t *dst_pte, pte_t *src_pte, pte_t pte, unsigned long addr,
		  int max_nr, int *rss, struct folio **prealloc)
{
	struct page *page;
	struct folio *folio;
	bool any_writable;
	fpb_t flags = 0;
	int err, nr;

	page = vm_normal_page(src_vma, addr, pte);
	if (unlikely(!page))
		goto copy_pte;

	folio = page_folio(page);

	/*
	 * If we likely have to copy, just don't bother with batching. Make
	 * sure that the common "small folio" case is as fast as possible
	 * by keeping the batching logic separate.
	 */
	if (unlikely(!*prealloc && folio_test_large(folio) && max_nr != 1)) {
		if (src_vma->vm_flags & VM_SHARED)
			flags |= FPB_IGNORE_DIRTY;
		if (!vma_soft_dirty_enabled(src_vma))
			flags |= FPB_IGNORE_SOFT_DIRTY;

		nr = folio_pte_batch(folio, addr, src_pte, pte, max_nr, flags,
				     &any_writable, NULL, NULL);
		folio_ref_add(folio, nr);
		if (folio_test_anon(folio)) {
			if (unlikely(folio_try_dup_anon_rmap_ptes(folio, page,
								  nr, src_vma))) {
				folio_ref_sub(folio, nr);
				return -EAGAIN;
			}
			rss[MM_ANONPAGES] += nr;
			VM_WARN_ON_FOLIO(PageAnonExclusive(page), folio);
		} else {
			folio_dup_file_rmap_ptes(folio, page, nr);
			rss[mm_counter_file(folio)] += nr;
		}
		if (any_writable)
			pte = pte_mkwrite(pte, src_vma);
		__copy_present_ptes(dst_vma, src_vma, dst_pte, src_pte, pte,
				    addr, nr);
		return nr;
	}

	folio_get(folio);
	if (folio_test_anon(folio)) {
		/*
		 * If this page may have been pinned by the parent process,
		 * copy the page immediately for the child so that we'll always
		 * guarantee the pinned page won't be randomly replaced in the
		 * future.
		 */
		if (unlikely(folio_try_dup_anon_rmap_pte(folio, page, src_vma))) {
			/* Page may be pinned, we have to copy. */
			folio_put(folio);
			err = copy_present_page(dst_vma, src_vma, dst_pte, src_pte,
						addr, rss, prealloc, page);
			return err ?
err : 1; } rss[MM_ANONPAGES]++; VM_WARN_ON_FOLIO(PageAnonExclusive(page), folio); } else { folio_dup_file_rmap_pte(folio, page); rss[mm_counter_file(folio)]++; } copy_pte: __copy_present_ptes(dst_vma, src_vma, dst_pte, src_pte, pte, addr, 1); return 1; } static inline struct folio *folio_prealloc(struct mm_struct *src_mm, struct vm_area_struct *vma, unsigned long addr, bool need_zero) { struct folio *new_folio; if (need_zero) new_folio = vma_alloc_zeroed_movable_folio(vma, addr); else new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr); if (!new_folio) return NULL; if (mem_cgroup_charge(new_folio, src_mm, GFP_KERNEL)) { folio_put(new_folio); return NULL; } folio_throttle_swaprate(new_folio, GFP_KERNEL); return new_folio; } static int copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma, pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr, unsigned long end) { struct mm_struct *dst_mm = dst_vma->vm_mm; struct mm_struct *src_mm = src_vma->vm_mm; pte_t *orig_src_pte, *orig_dst_pte; pte_t *src_pte, *dst_pte; pmd_t dummy_pmdval; pte_t ptent; spinlock_t *src_ptl, *dst_ptl; int progress, max_nr, ret = 0; int rss[NR_MM_COUNTERS]; swp_entry_t entry = (swp_entry_t){0}; struct folio *prealloc = NULL; int nr; again: progress = 0; init_rss_vec(rss); /* * copy_pmd_range()'s prior pmd_none_or_clear_bad(src_pmd), and the * error handling here, assume that exclusive mmap_lock on dst and src * protects anon from unexpected THP transitions; with shmem and file * protected by mmap_lock-less collapse skipping areas with anon_vma * (whereas vma_needs_copy() skips areas without anon_vma). A rework * can remove such assumptions later, but this is good enough for now. */ dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl); if (!dst_pte) { ret = -ENOMEM; goto out; } /* * We already hold the exclusive mmap_lock, the copy_pte_range() and * retract_page_tables() are using vma->anon_vma to be exclusive, so * the PTE page is stable, and there is no need to get pmdval and do * pmd_same() check. */ src_pte = pte_offset_map_rw_nolock(src_mm, src_pmd, addr, &dummy_pmdval, &src_ptl); if (!src_pte) { pte_unmap_unlock(dst_pte, dst_ptl); /* ret == 0 */ goto out; } spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); orig_src_pte = src_pte; orig_dst_pte = dst_pte; arch_enter_lazy_mmu_mode(); do { nr = 1; /* * We are holding two locks at this point - either of them * could generate latencies in another task on another CPU. */ if (progress >= 32) { progress = 0; if (need_resched() || spin_needbreak(src_ptl) || spin_needbreak(dst_ptl)) break; } ptent = ptep_get(src_pte); if (pte_none(ptent)) { progress++; continue; } if (unlikely(!pte_present(ptent))) { ret = copy_nonpresent_pte(dst_mm, src_mm, dst_pte, src_pte, dst_vma, src_vma, addr, rss); if (ret == -EIO) { entry = pte_to_swp_entry(ptep_get(src_pte)); break; } else if (ret == -EBUSY) { break; } else if (!ret) { progress += 8; continue; } ptent = ptep_get(src_pte); VM_WARN_ON_ONCE(!pte_present(ptent)); /* * Device exclusive entry restored, continue by copying * the now present pte. */ WARN_ON_ONCE(ret != -ENOENT); } /* copy_present_ptes() will clear `*prealloc' if consumed */ max_nr = (end - addr) / PAGE_SIZE; ret = copy_present_ptes(dst_vma, src_vma, dst_pte, src_pte, ptent, addr, max_nr, rss, &prealloc); /* * If we need a pre-allocated page for this pte, drop the * locks, allocate, and try again. * If copy failed due to hwpoison in source page, break out. 
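		 *
		 * (Editorial summary, not upstream text: at this point ret is
		 * either the number of PTEs copied, -EAGAIN when a
		 * preallocated folio is needed, or -EHWPOISON when the source
		 * page is poisoned; -EIO and -EBUSY from the non-present path
		 * already broke out of the loop above.)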
*/ if (unlikely(ret == -EAGAIN || ret == -EHWPOISON)) break; if (unlikely(prealloc)) { /* * pre-alloc page cannot be reused by next time so as * to strictly follow mempolicy (e.g., alloc_page_vma() * will allocate page according to address). This * could only happen if one pinned pte changed. */ folio_put(prealloc); prealloc = NULL; } nr = ret; progress += 8 * nr; } while (dst_pte += nr, src_pte += nr, addr += PAGE_SIZE * nr, addr != end); arch_leave_lazy_mmu_mode(); pte_unmap_unlock(orig_src_pte, src_ptl); add_mm_rss_vec(dst_mm, rss); pte_unmap_unlock(orig_dst_pte, dst_ptl); cond_resched(); if (ret == -EIO) { VM_WARN_ON_ONCE(!entry.val); if (add_swap_count_continuation(entry, GFP_KERNEL) < 0) { ret = -ENOMEM; goto out; } entry.val = 0; } else if (ret == -EBUSY || unlikely(ret == -EHWPOISON)) { goto out; } else if (ret == -EAGAIN) { prealloc = folio_prealloc(src_mm, src_vma, addr, false); if (!prealloc) return -ENOMEM; } else if (ret < 0) { VM_WARN_ON_ONCE(1); } /* We've captured and resolved the error. Reset, try again. */ ret = 0; if (addr != end) goto again; out: if (unlikely(prealloc)) folio_put(prealloc); return ret; } static inline int copy_pmd_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma, pud_t *dst_pud, pud_t *src_pud, unsigned long addr, unsigned long end) { struct mm_struct *dst_mm = dst_vma->vm_mm; struct mm_struct *src_mm = src_vma->vm_mm; pmd_t *src_pmd, *dst_pmd; unsigned long next; dst_pmd = pmd_alloc(dst_mm, dst_pud, addr); if (!dst_pmd) return -ENOMEM; src_pmd = pmd_offset(src_pud, addr); do { next = pmd_addr_end(addr, end); if (is_swap_pmd(*src_pmd) || pmd_trans_huge(*src_pmd) || pmd_devmap(*src_pmd)) { int err; VM_BUG_ON_VMA(next-addr != HPAGE_PMD_SIZE, src_vma); err = copy_huge_pmd(dst_mm, src_mm, dst_pmd, src_pmd, addr, dst_vma, src_vma); if (err == -ENOMEM) return -ENOMEM; if (!err) continue; /* fall through */ } if (pmd_none_or_clear_bad(src_pmd)) continue; if (copy_pte_range(dst_vma, src_vma, dst_pmd, src_pmd, addr, next)) return -ENOMEM; } while (dst_pmd++, src_pmd++, addr = next, addr != end); return 0; } static inline int copy_pud_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma, p4d_t *dst_p4d, p4d_t *src_p4d, unsigned long addr, unsigned long end) { struct mm_struct *dst_mm = dst_vma->vm_mm; struct mm_struct *src_mm = src_vma->vm_mm; pud_t *src_pud, *dst_pud; unsigned long next; dst_pud = pud_alloc(dst_mm, dst_p4d, addr); if (!dst_pud) return -ENOMEM; src_pud = pud_offset(src_p4d, addr); do { next = pud_addr_end(addr, end); if (pud_trans_huge(*src_pud) || pud_devmap(*src_pud)) { int err; VM_BUG_ON_VMA(next-addr != HPAGE_PUD_SIZE, src_vma); err = copy_huge_pud(dst_mm, src_mm, dst_pud, src_pud, addr, src_vma); if (err == -ENOMEM) return -ENOMEM; if (!err) continue; /* fall through */ } if (pud_none_or_clear_bad(src_pud)) continue; if (copy_pmd_range(dst_vma, src_vma, dst_pud, src_pud, addr, next)) return -ENOMEM; } while (dst_pud++, src_pud++, addr = next, addr != end); return 0; } static inline int copy_p4d_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma, pgd_t *dst_pgd, pgd_t *src_pgd, unsigned long addr, unsigned long end) { struct mm_struct *dst_mm = dst_vma->vm_mm; p4d_t *src_p4d, *dst_p4d; unsigned long next; dst_p4d = p4d_alloc(dst_mm, dst_pgd, addr); if (!dst_p4d) return -ENOMEM; src_p4d = p4d_offset(src_pgd, addr); do { next = p4d_addr_end(addr, end); if (p4d_none_or_clear_bad(src_p4d)) continue; if (copy_pud_range(dst_vma, src_vma, dst_p4d, src_p4d, addr, next)) return -ENOMEM; } while 
(dst_p4d++, src_p4d++, addr = next, addr != end); return 0; } /* * Return true if the vma needs to copy the pgtable during this fork(). Return * false when we can speed up fork() by allowing lazy page faults later until * when the child accesses the memory range. */ static bool vma_needs_copy(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma) { /* * Always copy pgtables when dst_vma has uffd-wp enabled even if it's * file-backed (e.g. shmem). Because when uffd-wp is enabled, pgtable * contains uffd-wp protection information, that's something we can't * retrieve from page cache, and skip copying will lose those info. */ if (userfaultfd_wp(dst_vma)) return true; if (src_vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)) return true; if (src_vma->anon_vma) return true; /* * Don't copy ptes where a page fault will fill them correctly. Fork * becomes much lighter when there are big shared or private readonly * mappings. The tradeoff is that copy_page_range is more efficient * than faulting. */ return false; } int copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma) { pgd_t *src_pgd, *dst_pgd; unsigned long next; unsigned long addr = src_vma->vm_start; unsigned long end = src_vma->vm_end; struct mm_struct *dst_mm = dst_vma->vm_mm; struct mm_struct *src_mm = src_vma->vm_mm; struct mmu_notifier_range range; bool is_cow; int ret; if (!vma_needs_copy(dst_vma, src_vma)) return 0; if (is_vm_hugetlb_page(src_vma)) return copy_hugetlb_page_range(dst_mm, src_mm, dst_vma, src_vma); if (unlikely(src_vma->vm_flags & VM_PFNMAP)) { /* * We do not free on error cases below as remove_vma * gets called on error from higher level routine */ ret = track_pfn_copy(src_vma); if (ret) return ret; } /* * We need to invalidate the secondary MMU mappings only when * there could be a permission downgrade on the ptes of the * parent mm. And a permission downgrade will only happen if * is_cow_mapping() returns true. */ is_cow = is_cow_mapping(src_vma->vm_flags); if (is_cow) { mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE, 0, src_mm, addr, end); mmu_notifier_invalidate_range_start(&range); /* * Disabling preemption is not needed for the write side, as * the read side doesn't spin, but goes to the mmap_lock. * * Use the raw variant of the seqcount_t write API to avoid * lockdep complaining about preemptibility. */ vma_assert_write_locked(src_vma); raw_write_seqcount_begin(&src_mm->write_protect_seq); } ret = 0; dst_pgd = pgd_offset(dst_mm, addr); src_pgd = pgd_offset(src_mm, addr); do { next = pgd_addr_end(addr, end); if (pgd_none_or_clear_bad(src_pgd)) continue; if (unlikely(copy_p4d_range(dst_vma, src_vma, dst_pgd, src_pgd, addr, next))) { untrack_pfn_clear(dst_vma); ret = -ENOMEM; break; } } while (dst_pgd++, src_pgd++, addr = next, addr != end); if (is_cow) { raw_write_seqcount_end(&src_mm->write_protect_seq); mmu_notifier_invalidate_range_end(&range); } return ret; } /* Whether we should zap all COWed (private) pages too */ static inline bool should_zap_cows(struct zap_details *details) { /* By default, zap all pages */ if (!details) return true; /* Or, we zap COWed pages only if the caller wants to */ return details->even_cows; } /* Decides whether we should zap this folio with the folio pointer specified */ static inline bool should_zap_folio(struct zap_details *details, struct folio *folio) { /* If we can make a decision without *folio.. 
*/ if (should_zap_cows(details)) return true; /* Otherwise we should only zap non-anon folios */ return !folio_test_anon(folio); } static inline bool zap_drop_markers(struct zap_details *details) { if (!details) return false; return details->zap_flags & ZAP_FLAG_DROP_MARKER; } /* * This function makes sure that we'll replace the none pte with an uffd-wp * swap special pte marker when necessary. Must be with the pgtable lock held. */ static inline void zap_install_uffd_wp_if_needed(struct vm_area_struct *vma, unsigned long addr, pte_t *pte, int nr, struct zap_details *details, pte_t pteval) { /* Zap on anonymous always means dropping everything */ if (vma_is_anonymous(vma)) return; if (zap_drop_markers(details)) return; for (;;) { /* the PFN in the PTE is irrelevant. */ pte_install_uffd_wp_if_needed(vma, addr, pte, pteval); if (--nr == 0) break; pte++; addr += PAGE_SIZE; } } static __always_inline void zap_present_folio_ptes(struct mmu_gather *tlb, struct vm_area_struct *vma, struct folio *folio, struct page *page, pte_t *pte, pte_t ptent, unsigned int nr, unsigned long addr, struct zap_details *details, int *rss, bool *force_flush, bool *force_break) { struct mm_struct *mm = tlb->mm; bool delay_rmap = false; if (!folio_test_anon(folio)) { ptent = get_and_clear_full_ptes(mm, addr, pte, nr, tlb->fullmm); if (pte_dirty(ptent)) { folio_mark_dirty(folio); if (tlb_delay_rmap(tlb)) { delay_rmap = true; *force_flush = true; } } if (pte_young(ptent) && likely(vma_has_recency(vma))) folio_mark_accessed(folio); rss[mm_counter(folio)] -= nr; } else { /* We don't need up-to-date accessed/dirty bits. */ clear_full_ptes(mm, addr, pte, nr, tlb->fullmm); rss[MM_ANONPAGES] -= nr; } /* Checking a single PTE in a batch is sufficient. */ arch_check_zapped_pte(vma, ptent); tlb_remove_tlb_entries(tlb, pte, nr, addr); if (unlikely(userfaultfd_pte_wp(vma, ptent))) zap_install_uffd_wp_if_needed(vma, addr, pte, nr, details, ptent); if (!delay_rmap) { folio_remove_rmap_ptes(folio, page, nr, vma); if (unlikely(folio_mapcount(folio) < 0)) print_bad_pte(vma, addr, ptent, page); } if (unlikely(__tlb_remove_folio_pages(tlb, page, nr, delay_rmap))) { *force_flush = true; *force_break = true; } } /* * Zap or skip at least one present PTE, trying to batch-process subsequent * PTEs that map consecutive pages of the same folio. * * Returns the number of processed (skipped or zapped) PTEs (at least 1). */ static inline int zap_present_ptes(struct mmu_gather *tlb, struct vm_area_struct *vma, pte_t *pte, pte_t ptent, unsigned int max_nr, unsigned long addr, struct zap_details *details, int *rss, bool *force_flush, bool *force_break) { const fpb_t fpb_flags = FPB_IGNORE_DIRTY | FPB_IGNORE_SOFT_DIRTY; struct mm_struct *mm = tlb->mm; struct folio *folio; struct page *page; int nr; page = vm_normal_page(vma, addr, ptent); if (!page) { /* We don't need up-to-date accessed/dirty bits. */ ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm); arch_check_zapped_pte(vma, ptent); tlb_remove_tlb_entry(tlb, pte, addr); if (userfaultfd_pte_wp(vma, ptent)) zap_install_uffd_wp_if_needed(vma, addr, pte, 1, details, ptent); ksm_might_unmap_zero_page(mm, ptent); return 1; } folio = page_folio(page); if (unlikely(!should_zap_folio(details, folio))) return 1; /* * Make sure that the common "small folio" case is as fast as possible * by keeping the batching logic separate. 
	 */
	if (unlikely(folio_test_large(folio) && max_nr != 1)) {
		nr = folio_pte_batch(folio, addr, pte, ptent, max_nr, fpb_flags,
				     NULL, NULL, NULL);

		zap_present_folio_ptes(tlb, vma, folio, page, pte, ptent, nr,
				       addr, details, rss, force_flush,
				       force_break);
		return nr;
	}
	zap_present_folio_ptes(tlb, vma, folio, page, pte, ptent, 1, addr,
			       details, rss, force_flush, force_break);
	return 1;
}

static unsigned long zap_pte_range(struct mmu_gather *tlb,
				struct vm_area_struct *vma, pmd_t *pmd,
				unsigned long addr, unsigned long end,
				struct zap_details *details)
{
	bool force_flush = false, force_break = false;
	struct mm_struct *mm = tlb->mm;
	int rss[NR_MM_COUNTERS];
	spinlock_t *ptl;
	pte_t *start_pte;
	pte_t *pte;
	swp_entry_t entry;
	int nr;

	tlb_change_page_size(tlb, PAGE_SIZE);
	init_rss_vec(rss);
	start_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	if (!pte)
		return addr;

	flush_tlb_batched_pending(mm);
	arch_enter_lazy_mmu_mode();
	do {
		pte_t ptent = ptep_get(pte);
		struct folio *folio;
		struct page *page;
		int max_nr;

		nr = 1;
		if (pte_none(ptent))
			continue;

		if (need_resched())
			break;

		if (pte_present(ptent)) {
			max_nr = (end - addr) / PAGE_SIZE;
			nr = zap_present_ptes(tlb, vma, pte, ptent, max_nr,
					      addr, details, rss, &force_flush,
					      &force_break);
			if (unlikely(force_break)) {
				addr += nr * PAGE_SIZE;
				break;
			}
			continue;
		}

		entry = pte_to_swp_entry(ptent);
		if (is_device_private_entry(entry) ||
		    is_device_exclusive_entry(entry)) {
			page = pfn_swap_entry_to_page(entry);
			folio = page_folio(page);
			if (unlikely(!should_zap_folio(details, folio)))
				continue;
			/*
			 * Both device private/exclusive mappings should only
			 * work with anonymous pages so far, so we don't need
			 * to consider the uffd-wp bit when zapping. For more
			 * information, see zap_install_uffd_wp_if_needed().
			 */
			WARN_ON_ONCE(!vma_is_anonymous(vma));
			rss[mm_counter(folio)]--;
			if (is_device_private_entry(entry))
				folio_remove_rmap_pte(folio, page, vma);
			folio_put(folio);
		} else if (!non_swap_entry(entry)) {
			max_nr = (end - addr) / PAGE_SIZE;
			nr = swap_pte_batch(pte, max_nr, ptent);
			/* Genuine swap entries, hence private anon pages */
			if (!should_zap_cows(details))
				continue;
			rss[MM_SWAPENTS] -= nr;
			free_swap_and_cache_nr(entry, nr);
		} else if (is_migration_entry(entry)) {
			folio = pfn_swap_entry_folio(entry);
			if (!should_zap_folio(details, folio))
				continue;
			rss[mm_counter(folio)]--;
		} else if (pte_marker_entry_uffd_wp(entry)) {
			/*
			 * For anon: always drop the marker; for file: only
			 * drop the marker if explicitly requested.
			 */
			if (!vma_is_anonymous(vma) &&
			    !zap_drop_markers(details))
				continue;
		} else if (is_guard_swp_entry(entry)) {
			/*
			 * Ordinary zapping should not remove guard PTE
			 * markers. Only do so if we should remove PTE markers
			 * in general.
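			 *
			 * (Editorial note, not upstream text: these are the
			 * PTE markers installed for madvise() guard regions,
			 * which must survive ordinary zaps.)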
*/ if (!zap_drop_markers(details)) continue; } else if (is_hwpoison_entry(entry) || is_poisoned_swp_entry(entry)) { if (!should_zap_cows(details)) continue; } else { /* We should have covered all the swap entry types */ pr_alert("unrecognized swap entry 0x%lx\n", entry.val); WARN_ON_ONCE(1); } clear_not_present_full_ptes(mm, addr, pte, nr, tlb->fullmm); zap_install_uffd_wp_if_needed(vma, addr, pte, nr, details, ptent); } while (pte += nr, addr += PAGE_SIZE * nr, addr != end); add_mm_rss_vec(mm, rss); arch_leave_lazy_mmu_mode(); /* Do the actual TLB flush before dropping ptl */ if (force_flush) { tlb_flush_mmu_tlbonly(tlb); tlb_flush_rmaps(tlb, vma); } pte_unmap_unlock(start_pte, ptl); /* * If we forced a TLB flush (either due to running out of * batch buffers or because we needed to flush dirty TLB * entries before releasing the ptl), free the batched * memory too. Come back again if we didn't do everything. */ if (force_flush) tlb_flush_mmu(tlb); return addr; } static inline unsigned long zap_pmd_range(struct mmu_gather *tlb, struct vm_area_struct *vma, pud_t *pud, unsigned long addr, unsigned long end, struct zap_details *details) { pmd_t *pmd; unsigned long next; pmd = pmd_offset(pud, addr); do { next = pmd_addr_end(addr, end); if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) { if (next - addr != HPAGE_PMD_SIZE) __split_huge_pmd(vma, pmd, addr, false, NULL); else if (zap_huge_pmd(tlb, vma, pmd, addr)) { addr = next; continue; } /* fall through */ } else if (details && details->single_folio && folio_test_pmd_mappable(details->single_folio) && next - addr == HPAGE_PMD_SIZE && pmd_none(*pmd)) { spinlock_t *ptl = pmd_lock(tlb->mm, pmd); /* * Take and drop THP pmd lock so that we cannot return * prematurely, while zap_huge_pmd() has cleared *pmd, * but not yet decremented compound_mapcount(). 
*/ spin_unlock(ptl); } if (pmd_none(*pmd)) { addr = next; continue; } addr = zap_pte_range(tlb, vma, pmd, addr, next, details); if (addr != next) pmd--; } while (pmd++, cond_resched(), addr != end); return addr; } static inline unsigned long zap_pud_range(struct mmu_gather *tlb, struct vm_area_struct *vma, p4d_t *p4d, unsigned long addr, unsigned long end, struct zap_details *details) { pud_t *pud; unsigned long next; pud = pud_offset(p4d, addr); do { next = pud_addr_end(addr, end); if (pud_trans_huge(*pud) || pud_devmap(*pud)) { if (next - addr != HPAGE_PUD_SIZE) { mmap_assert_locked(tlb->mm); split_huge_pud(vma, pud, addr); } else if (zap_huge_pud(tlb, vma, pud, addr)) goto next; /* fall through */ } if (pud_none_or_clear_bad(pud)) continue; next = zap_pmd_range(tlb, vma, pud, addr, next, details); next: cond_resched(); } while (pud++, addr = next, addr != end); return addr; } static inline unsigned long zap_p4d_range(struct mmu_gather *tlb, struct vm_area_struct *vma, pgd_t *pgd, unsigned long addr, unsigned long end, struct zap_details *details) { p4d_t *p4d; unsigned long next; p4d = p4d_offset(pgd, addr); do { next = p4d_addr_end(addr, end); if (p4d_none_or_clear_bad(p4d)) continue; next = zap_pud_range(tlb, vma, p4d, addr, next, details); } while (p4d++, addr = next, addr != end); return addr; } void unmap_page_range(struct mmu_gather *tlb, struct vm_area_struct *vma, unsigned long addr, unsigned long end, struct zap_details *details) { pgd_t *pgd; unsigned long next; BUG_ON(addr >= end); tlb_start_vma(tlb, vma); pgd = pgd_offset(vma->vm_mm, addr); do { next = pgd_addr_end(addr, end); if (pgd_none_or_clear_bad(pgd)) continue; next = zap_p4d_range(tlb, vma, pgd, addr, next, details); } while (pgd++, addr = next, addr != end); tlb_end_vma(tlb, vma); } static void unmap_single_vma(struct mmu_gather *tlb, struct vm_area_struct *vma, unsigned long start_addr, unsigned long end_addr, struct zap_details *details, bool mm_wr_locked) { unsigned long start = max(vma->vm_start, start_addr); unsigned long end; if (start >= vma->vm_end) return; end = min(vma->vm_end, end_addr); if (end <= vma->vm_start) return; if (vma->vm_file) uprobe_munmap(vma, start, end); if (unlikely(vma->vm_flags & VM_PFNMAP)) untrack_pfn(vma, 0, 0, mm_wr_locked); if (start != end) { if (unlikely(is_vm_hugetlb_page(vma))) { /* * It is undesirable to test vma->vm_file as it * should be non-null for valid hugetlb area. * However, vm_file will be NULL in the error * cleanup path of mmap_region. When * hugetlbfs ->mmap method fails, * mmap_region() nullifies vma->vm_file * before calling this function to clean up. * Since no pte has actually been setup, it is * safe to do nothing in this case. */ if (vma->vm_file) { zap_flags_t zap_flags = details ? details->zap_flags : 0; __unmap_hugepage_range(tlb, vma, start, end, NULL, zap_flags); } } else unmap_page_range(tlb, vma, start, end, details); } } /** * unmap_vmas - unmap a range of memory covered by a list of vma's * @tlb: address of the caller's struct mmu_gather * @mas: the maple state * @vma: the starting vma * @start_addr: virtual address at which to start unmapping * @end_addr: virtual address at which to end unmapping * @tree_end: The maximum index to check * @mm_wr_locked: lock flag * * Unmap all pages in the vma list. * * Only addresses between `start' and `end' will be unmapped. * * The VMA list must be sorted in ascending virtual address order. * * unmap_vmas() assumes that the caller will flush the whole unmapped address * range after unmap_vmas() returns. 
So the only responsibility here is to * ensure that any thus-far unmapped pages are flushed before unmap_vmas() * drops the lock and schedules. */ void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas, struct vm_area_struct *vma, unsigned long start_addr, unsigned long end_addr, unsigned long tree_end, bool mm_wr_locked) { struct mmu_notifier_range range; struct zap_details details = { .zap_flags = ZAP_FLAG_DROP_MARKER | ZAP_FLAG_UNMAP, /* Careful - we need to zap private pages too! */ .even_cows = true, }; mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma->vm_mm, start_addr, end_addr); mmu_notifier_invalidate_range_start(&range); do { unsigned long start = start_addr; unsigned long end = end_addr; hugetlb_zap_begin(vma, &start, &end); unmap_single_vma(tlb, vma, start, end, &details, mm_wr_locked); hugetlb_zap_end(vma, &details); vma = mas_find(mas, tree_end - 1); } while (vma && likely(!xa_is_zero(vma))); mmu_notifier_invalidate_range_end(&range); } /** * zap_page_range_single - remove user pages in a given range * @vma: vm_area_struct holding the applicable pages * @address: starting address of pages to zap * @size: number of bytes to zap * @details: details of shared cache invalidation * * The range must fit into one VMA. */ void zap_page_range_single(struct vm_area_struct *vma, unsigned long address, unsigned long size, struct zap_details *details) { const unsigned long end = address + size; struct mmu_notifier_range range; struct mmu_gather tlb; lru_add_drain(); mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, address, end); hugetlb_zap_begin(vma, &range.start, &range.end); tlb_gather_mmu(&tlb, vma->vm_mm); update_hiwater_rss(vma->vm_mm); mmu_notifier_invalidate_range_start(&range); /* * unmap 'address-end' not 'range.start-range.end' as range * could have been expanded for hugetlb pmd sharing. */ unmap_single_vma(&tlb, vma, address, end, details, false); mmu_notifier_invalidate_range_end(&range); tlb_finish_mmu(&tlb); hugetlb_zap_end(vma, details); } /** * zap_vma_ptes - remove ptes mapping the vma * @vma: vm_area_struct holding ptes to be zapped * @address: starting address of pages to zap * @size: number of bytes to zap * * This function only unmaps ptes assigned to VM_PFNMAP vmas. * * The entire address range must be fully contained within the vma. * */ void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, unsigned long size) { if (!range_in_vma(vma, address, address + size) || !(vma->vm_flags & VM_PFNMAP)) return; zap_page_range_single(vma, address, size, NULL); } EXPORT_SYMBOL_GPL(zap_vma_ptes); static pmd_t *walk_to_pmd(struct mm_struct *mm, unsigned long addr) { pgd_t *pgd; p4d_t *p4d; pud_t *pud; pmd_t *pmd; pgd = pgd_offset(mm, addr); p4d = p4d_alloc(mm, pgd, addr); if (!p4d) return NULL; pud = pud_alloc(mm, p4d, addr); if (!pud) return NULL; pmd = pmd_alloc(mm, pud, addr); if (!pmd) return NULL; VM_BUG_ON(pmd_trans_huge(*pmd)); return pmd; } pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl) { pmd_t *pmd = walk_to_pmd(mm, addr); if (!pmd) return NULL; return pte_alloc_map_lock(mm, pmd, addr, ptl); } static bool vm_mixed_zeropage_allowed(struct vm_area_struct *vma) { VM_WARN_ON_ONCE(vma->vm_flags & VM_PFNMAP); /* * Whoever wants to forbid the zeropage after some zeropages * might already have been mapped has to scan the page tables and * bail out on any zeropages. Zeropages in COW mappings can * be unshared using FAULT_FLAG_UNSHARE faults. 
*/ if (mm_forbids_zeropage(vma->vm_mm)) return false; /* zeropages in COW mappings are common and unproblematic. */ if (is_cow_mapping(vma->vm_flags)) return true; /* Mappings that do not allow for writable PTEs are unproblematic. */ if (!(vma->vm_flags & (VM_WRITE | VM_MAYWRITE))) return true; /* * Why not allow any VMA that has vm_ops->pfn_mkwrite? GUP could * find the shared zeropage and longterm-pin it, which would * be problematic as soon as the zeropage gets replaced by a different * page due to vma->vm_ops->pfn_mkwrite, because what's mapped would * now differ to what GUP looked up. FSDAX is incompatible to * FOLL_LONGTERM and VM_IO is incompatible to GUP completely (see * check_vma_flags). */ return vma->vm_ops && vma->vm_ops->pfn_mkwrite && (vma_is_fsdax(vma) || vma->vm_flags & VM_IO); } static int validate_page_before_insert(struct vm_area_struct *vma, struct page *page) { struct folio *folio = page_folio(page); if (!folio_ref_count(folio)) return -EINVAL; if (unlikely(is_zero_folio(folio))) { if (!vm_mixed_zeropage_allowed(vma)) return -EINVAL; return 0; } if (folio_test_anon(folio) || folio_test_slab(folio) || page_has_type(page)) return -EINVAL; flush_dcache_folio(folio); return 0; } static int insert_page_into_pte_locked(struct vm_area_struct *vma, pte_t *pte, unsigned long addr, struct page *page, pgprot_t prot) { struct folio *folio = page_folio(page); pte_t pteval; if (!pte_none(ptep_get(pte))) return -EBUSY; /* Ok, finally just insert the thing.. */ pteval = mk_pte(page, prot); if (unlikely(is_zero_folio(folio))) { pteval = pte_mkspecial(pteval); } else { folio_get(folio); inc_mm_counter(vma->vm_mm, mm_counter_file(folio)); folio_add_file_rmap_pte(folio, page, vma); } set_pte_at(vma->vm_mm, addr, pte, pteval); return 0; } static int insert_page(struct vm_area_struct *vma, unsigned long addr, struct page *page, pgprot_t prot) { int retval; pte_t *pte; spinlock_t *ptl; retval = validate_page_before_insert(vma, page); if (retval) goto out; retval = -ENOMEM; pte = get_locked_pte(vma->vm_mm, addr, &ptl); if (!pte) goto out; retval = insert_page_into_pte_locked(vma, pte, addr, page, prot); pte_unmap_unlock(pte, ptl); out: return retval; } static int insert_page_in_batch_locked(struct vm_area_struct *vma, pte_t *pte, unsigned long addr, struct page *page, pgprot_t prot) { int err; err = validate_page_before_insert(vma, page); if (err) return err; return insert_page_into_pte_locked(vma, pte, addr, page, prot); } /* insert_pages() amortizes the cost of spinlock operations * when inserting pages in a loop. */ static int insert_pages(struct vm_area_struct *vma, unsigned long addr, struct page **pages, unsigned long *num, pgprot_t prot) { pmd_t *pmd = NULL; pte_t *start_pte, *pte; spinlock_t *pte_lock; struct mm_struct *const mm = vma->vm_mm; unsigned long curr_page_idx = 0; unsigned long remaining_pages_total = *num; unsigned long pages_to_write_in_pmd; int ret; more: ret = -EFAULT; pmd = walk_to_pmd(mm, addr); if (!pmd) goto out; pages_to_write_in_pmd = min_t(unsigned long, remaining_pages_total, PTRS_PER_PTE - pte_index(addr)); /* Allocate the PTE if necessary; takes PMD lock once only. 
*/ ret = -ENOMEM; if (pte_alloc(mm, pmd)) goto out; while (pages_to_write_in_pmd) { int pte_idx = 0; const int batch_size = min_t(int, pages_to_write_in_pmd, 8); start_pte = pte_offset_map_lock(mm, pmd, addr, &pte_lock); if (!start_pte) { ret = -EFAULT; goto out; } for (pte = start_pte; pte_idx < batch_size; ++pte, ++pte_idx) { int err = insert_page_in_batch_locked(vma, pte, addr, pages[curr_page_idx], prot); if (unlikely(err)) { pte_unmap_unlock(start_pte, pte_lock); ret = err; remaining_pages_total -= pte_idx; goto out; } addr += PAGE_SIZE; ++curr_page_idx; } pte_unmap_unlock(start_pte, pte_lock); pages_to_write_in_pmd -= batch_size; remaining_pages_total -= batch_size; } if (remaining_pages_total) goto more; ret = 0; out: *num = remaining_pages_total; return ret; } /** * vm_insert_pages - insert multiple pages into user vma, batching the pmd lock. * @vma: user vma to map to * @addr: target start user address of these pages * @pages: source kernel pages * @num: in: number of pages to map. out: number of pages that were *not* * mapped. (0 means all pages were successfully mapped). * * Preferred over vm_insert_page() when inserting multiple pages. * * In case of error, we may have mapped a subset of the provided * pages. It is the caller's responsibility to account for this case. * * The same restrictions apply as in vm_insert_page(). */ int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr, struct page **pages, unsigned long *num) { const unsigned long end_addr = addr + (*num * PAGE_SIZE) - 1; if (addr < vma->vm_start || end_addr >= vma->vm_end) return -EFAULT; if (!(vma->vm_flags & VM_MIXEDMAP)) { BUG_ON(mmap_read_trylock(vma->vm_mm)); BUG_ON(vma->vm_flags & VM_PFNMAP); vm_flags_set(vma, VM_MIXEDMAP); } /* Defer page refcount checking till we're about to map that page. */ return insert_pages(vma, addr, pages, num, vma->vm_page_prot); } EXPORT_SYMBOL(vm_insert_pages); /** * vm_insert_page - insert single page into user vma * @vma: user vma to map to * @addr: target user address of this page * @page: source kernel page * * This allows drivers to insert individual pages they've allocated * into a user vma. The zeropage is supported in some VMAs, * see vm_mixed_zeropage_allowed(). * * The page has to be a nice clean _individual_ kernel allocation. * If you allocate a compound page, you need to have marked it as * such (__GFP_COMP), or manually just split the page up yourself * (see split_page()). * * NOTE! Traditionally this was done with "remap_pfn_range()" which * took an arbitrary page protection parameter. This doesn't allow * that. Your vma protection will have to be set up correctly, which * means that if you want a shared writable mapping, you'd better * ask for a shared writable mapping! * * The page does not need to be reserved. * * Usually this function is called from f_op->mmap() handler * under mm->mmap_lock write-lock, so it can change vma->vm_flags. * Caller must set VM_MIXEDMAP on vma if it wants to call this * function from other places, for example from page-fault handler. * * Return: %0 on success, negative error code otherwise. 
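 *
 * A minimal usage sketch (hypothetical driver code; my_get_buffer_page()
 * is an assumed helper and not defined anywhere in this file):
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct page *page = my_get_buffer_page(file);
 *
 *		if (!page)
 *			return -ENOMEM;
 *		return vm_insert_page(vma, vma->vm_start, page);
 *	}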
*/ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, struct page *page) { if (addr < vma->vm_start || addr >= vma->vm_end) return -EFAULT; if (!(vma->vm_flags & VM_MIXEDMAP)) { BUG_ON(mmap_read_trylock(vma->vm_mm)); BUG_ON(vma->vm_flags & VM_PFNMAP); vm_flags_set(vma, VM_MIXEDMAP); } return insert_page(vma, addr, page, vma->vm_page_prot); } EXPORT_SYMBOL(vm_insert_page); /* * __vm_map_pages - maps range of kernel pages into user vma * @vma: user vma to map to * @pages: pointer to array of source kernel pages * @num: number of pages in page array * @offset: user's requested vm_pgoff * * This allows drivers to map range of kernel pages into a user vma. * The zeropage is supported in some VMAs, see * vm_mixed_zeropage_allowed(). * * Return: 0 on success and error code otherwise. */ static int __vm_map_pages(struct vm_area_struct *vma, struct page **pages, unsigned long num, unsigned long offset) { unsigned long count = vma_pages(vma); unsigned long uaddr = vma->vm_start; int ret, i; /* Fail if the user requested offset is beyond the end of the object */ if (offset >= num) return -ENXIO; /* Fail if the user requested size exceeds available object size */ if (count > num - offset) return -ENXIO; for (i = 0; i < count; i++) { ret = vm_insert_page(vma, uaddr, pages[offset + i]); if (ret < 0) return ret; uaddr += PAGE_SIZE; } return 0; } /** * vm_map_pages - maps range of kernel pages starts with non zero offset * @vma: user vma to map to * @pages: pointer to array of source kernel pages * @num: number of pages in page array * * Maps an object consisting of @num pages, catering for the user's * requested vm_pgoff * * If we fail to insert any page into the vma, the function will return * immediately leaving any previously inserted pages present. Callers * from the mmap handler may immediately return the error as their caller * will destroy the vma, removing any successfully inserted pages. Other * callers should make their own arrangements for calling unmap_region(). * * Context: Process context. Called by mmap handlers. * Return: 0 on success and error code otherwise. */ int vm_map_pages(struct vm_area_struct *vma, struct page **pages, unsigned long num) { return __vm_map_pages(vma, pages, num, vma->vm_pgoff); } EXPORT_SYMBOL(vm_map_pages); /** * vm_map_pages_zero - map range of kernel pages starts with zero offset * @vma: user vma to map to * @pages: pointer to array of source kernel pages * @num: number of pages in page array * * Similar to vm_map_pages(), except that it explicitly sets the offset * to 0. This function is intended for the drivers that did not consider * vm_pgoff. * * Context: Process context. Called by mmap handlers. * Return: 0 on success and error code otherwise. */ int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages, unsigned long num) { return __vm_map_pages(vma, pages, num, 0); } EXPORT_SYMBOL(vm_map_pages_zero); static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr, pfn_t pfn, pgprot_t prot, bool mkwrite) { struct mm_struct *mm = vma->vm_mm; pte_t *pte, entry; spinlock_t *ptl; pte = get_locked_pte(mm, addr, &ptl); if (!pte) return VM_FAULT_OOM; entry = ptep_get(pte); if (!pte_none(entry)) { if (mkwrite) { /* * For read faults on private mappings the PFN passed * in may not match the PFN we have mapped if the * mapped PFN is a writeable COW page. In the mkwrite * case we are creating a writable PTE for a shared * mapping and we expect the PFNs to match. 
If they * don't match, we are likely racing with block * allocation and mapping invalidation so just skip the * update. */ if (pte_pfn(entry) != pfn_t_to_pfn(pfn)) { WARN_ON_ONCE(!is_zero_pfn(pte_pfn(entry))); goto out_unlock; } entry = pte_mkyoung(entry); entry = maybe_mkwrite(pte_mkdirty(entry), vma); if (ptep_set_access_flags(vma, addr, pte, entry, 1)) update_mmu_cache(vma, addr, pte); } goto out_unlock; } /* Ok, finally just insert the thing.. */ if (pfn_t_devmap(pfn)) entry = pte_mkdevmap(pfn_t_pte(pfn, prot)); else entry = pte_mkspecial(pfn_t_pte(pfn, prot)); if (mkwrite) { entry = pte_mkyoung(entry); entry = maybe_mkwrite(pte_mkdirty(entry), vma); } set_pte_at(mm, addr, pte, entry); update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */ out_unlock: pte_unmap_unlock(pte, ptl); return VM_FAULT_NOPAGE; } /** * vmf_insert_pfn_prot - insert single pfn into user vma with specified pgprot * @vma: user vma to map to * @addr: target user address of this page * @pfn: source kernel pfn * @pgprot: pgprot flags for the inserted page * * This is exactly like vmf_insert_pfn(), except that it allows drivers * to override pgprot on a per-page basis. * * This only makes sense for IO mappings, and it makes no sense for * COW mappings. In general, using multiple vmas is preferable; * vmf_insert_pfn_prot should only be used if using multiple VMAs is * impractical. * * pgprot typically only differs from @vma->vm_page_prot when drivers set * caching- and encryption bits different than those of @vma->vm_page_prot, * because the caching- or encryption mode may not be known at mmap() time. * * This is ok as long as @vma->vm_page_prot is not used by the core vm * to set caching and encryption bits for those vmas (except for COW pages). * This is ensured by core vm only modifying these page table entries using * functions that don't touch caching- or encryption bits, using pte_modify() * if needed. (See for example mprotect()). * * Also when new page-table entries are created, this is only done using the * fault() callback, and never using the value of vma->vm_page_prot, * except for page-table entries that point to anonymous pages as the result * of COW. * * Context: Process context. May allocate using %GFP_KERNEL. * Return: vm_fault_t value. */ vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn, pgprot_t pgprot) { /* * Technically, architectures with pte_special can avoid all these * restrictions (same for remap_pfn_range). However we would like * consistency in testing and feature parity among all, so we should * try to keep these invariants in place for everybody. */ BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))); BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) == (VM_PFNMAP|VM_MIXEDMAP)); BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags)); BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn)); if (addr < vma->vm_start || addr >= vma->vm_end) return VM_FAULT_SIGBUS; if (!pfn_modify_allowed(pfn, pgprot)) return VM_FAULT_SIGBUS; track_pfn_insert(vma, &pgprot, __pfn_to_pfn_t(pfn, PFN_DEV)); return insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot, false); } EXPORT_SYMBOL(vmf_insert_pfn_prot); /** * vmf_insert_pfn - insert single pfn into user vma * @vma: user vma to map to * @addr: target user address of this page * @pfn: source kernel pfn * * Similar to vm_insert_page, this allows drivers to insert individual pages * they've allocated into a user vma. Same comments apply. 
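 *
 * A minimal ->fault handler sketch (illustrative only; my_vaddr_to_pfn()
 * is an assumed helper, not something provided by this file):
 *
 *	static vm_fault_t my_fault(struct vm_fault *vmf)
 *	{
 *		unsigned long pfn = my_vaddr_to_pfn(vmf->vma, vmf->address);
 *
 *		return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
 *	}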
* * This function should only be called from a vm_ops->fault handler, and * in that case the handler should return the result of this function. * * vma cannot be a COW mapping. * * As this is called only for pages that do not currently exist, we * do not need to flush old virtual caches or the TLB. * * Context: Process context. May allocate using %GFP_KERNEL. * Return: vm_fault_t value. */ vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn) { return vmf_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot); } EXPORT_SYMBOL(vmf_insert_pfn); static bool vm_mixed_ok(struct vm_area_struct *vma, pfn_t pfn, bool mkwrite) { if (unlikely(is_zero_pfn(pfn_t_to_pfn(pfn))) && (mkwrite || !vm_mixed_zeropage_allowed(vma))) return false; /* these checks mirror the abort conditions in vm_normal_page */ if (vma->vm_flags & VM_MIXEDMAP) return true; if (pfn_t_devmap(pfn)) return true; if (pfn_t_special(pfn)) return true; if (is_zero_pfn(pfn_t_to_pfn(pfn))) return true; return false; } static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr, pfn_t pfn, bool mkwrite) { pgprot_t pgprot = vma->vm_page_prot; int err; if (!vm_mixed_ok(vma, pfn, mkwrite)) return VM_FAULT_SIGBUS; if (addr < vma->vm_start || addr >= vma->vm_end) return VM_FAULT_SIGBUS; track_pfn_insert(vma, &pgprot, pfn); if (!pfn_modify_allowed(pfn_t_to_pfn(pfn), pgprot)) return VM_FAULT_SIGBUS; /* * If we don't have pte special, then we have to use the pfn_valid() * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must* * refcount the page if pfn_valid is true (hence insert_page rather * than insert_pfn). If a zero_pfn were inserted into a VM_MIXEDMAP * without pte special, it would there be refcounted as a normal page. */ if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) && !pfn_t_devmap(pfn) && pfn_t_valid(pfn)) { struct page *page; /* * At this point we are committed to insert_page() * regardless of whether the caller specified flags that * result in pfn_t_has_page() == false. */ page = pfn_to_page(pfn_t_to_pfn(pfn)); err = insert_page(vma, addr, page, pgprot); } else { return insert_pfn(vma, addr, pfn, pgprot, mkwrite); } if (err == -ENOMEM) return VM_FAULT_OOM; if (err < 0 && err != -EBUSY) return VM_FAULT_SIGBUS; return VM_FAULT_NOPAGE; } vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr, pfn_t pfn) { return __vm_insert_mixed(vma, addr, pfn, false); } EXPORT_SYMBOL(vmf_insert_mixed); /* * If the insertion of PTE failed because someone else already added a * different entry in the mean time, we treat that as success as we assume * the same entry was actually inserted. */ vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma, unsigned long addr, pfn_t pfn) { return __vm_insert_mixed(vma, addr, pfn, true); } /* * maps a range of physical memory into the requested pages. the old * mappings are removed. 
any references to nonexistent pages results * in null mappings (currently treated as "copy-on-access") */ static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr, unsigned long end, unsigned long pfn, pgprot_t prot) { pte_t *pte, *mapped_pte; spinlock_t *ptl; int err = 0; mapped_pte = pte = pte_alloc_map_lock(mm, pmd, addr, &ptl); if (!pte) return -ENOMEM; arch_enter_lazy_mmu_mode(); do { BUG_ON(!pte_none(ptep_get(pte))); if (!pfn_modify_allowed(pfn, prot)) { err = -EACCES; break; } set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot))); pfn++; } while (pte++, addr += PAGE_SIZE, addr != end); arch_leave_lazy_mmu_mode(); pte_unmap_unlock(mapped_pte, ptl); return err; } static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud, unsigned long addr, unsigned long end, unsigned long pfn, pgprot_t prot) { pmd_t *pmd; unsigned long next; int err; pfn -= addr >> PAGE_SHIFT; pmd = pmd_alloc(mm, pud, addr); if (!pmd) return -ENOMEM; VM_BUG_ON(pmd_trans_huge(*pmd)); do { next = pmd_addr_end(addr, end); err = remap_pte_range(mm, pmd, addr, next, pfn + (addr >> PAGE_SHIFT), prot); if (err) return err; } while (pmd++, addr = next, addr != end); return 0; } static inline int remap_pud_range(struct mm_struct *mm, p4d_t *p4d, unsigned long addr, unsigned long end, unsigned long pfn, pgprot_t prot) { pud_t *pud; unsigned long next; int err; pfn -= addr >> PAGE_SHIFT; pud = pud_alloc(mm, p4d, addr); if (!pud) return -ENOMEM; do { next = pud_addr_end(addr, end); err = remap_pmd_range(mm, pud, addr, next, pfn + (addr >> PAGE_SHIFT), prot); if (err) return err; } while (pud++, addr = next, addr != end); return 0; } static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd, unsigned long addr, unsigned long end, unsigned long pfn, pgprot_t prot) { p4d_t *p4d; unsigned long next; int err; pfn -= addr >> PAGE_SHIFT; p4d = p4d_alloc(mm, pgd, addr); if (!p4d) return -ENOMEM; do { next = p4d_addr_end(addr, end); err = remap_pud_range(mm, p4d, addr, next, pfn + (addr >> PAGE_SHIFT), prot); if (err) return err; } while (p4d++, addr = next, addr != end); return 0; } static int remap_pfn_range_internal(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn, unsigned long size, pgprot_t prot) { pgd_t *pgd; unsigned long next; unsigned long end = addr + PAGE_ALIGN(size); struct mm_struct *mm = vma->vm_mm; int err; if (WARN_ON_ONCE(!PAGE_ALIGNED(addr))) return -EINVAL; /* * Physically remapped pages are special. Tell the * rest of the world about it: * VM_IO tells people not to look at these pages * (accesses can have side effects). * VM_PFNMAP tells the core MM that the base pages are just * raw PFN mappings, and do not have a "struct page" associated * with them. * VM_DONTEXPAND * Disable vma merging and expanding with mremap(). * VM_DONTDUMP * Omit vma from core dump, even when VM_IO turned off. * * There's a horrible special case to handle copy-on-write * behaviour that some programs depend on. We mark the "original" * un-COW'ed pages by matching them up with "vma->vm_pgoff". * See vm_normal_page() for details. 
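	 *
	 * For reference, the usual driver-side call that ends up here maps a
	 * whole VMA from an mmap handler, roughly (sketch only):
	 *
	 *	remap_pfn_range(vma, vma->vm_start, pfn,
	 *			vma->vm_end - vma->vm_start, vma->vm_page_prot);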
*/ if (is_cow_mapping(vma->vm_flags)) { if (addr != vma->vm_start || end != vma->vm_end) return -EINVAL; vma->vm_pgoff = pfn; } vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP); BUG_ON(addr >= end); pfn -= addr >> PAGE_SHIFT; pgd = pgd_offset(mm, addr); flush_cache_range(vma, addr, end); do { next = pgd_addr_end(addr, end); err = remap_p4d_range(mm, pgd, addr, next, pfn + (addr >> PAGE_SHIFT), prot); if (err) return err; } while (pgd++, addr = next, addr != end); return 0; } /* * Variant of remap_pfn_range that does not call track_pfn_remap. The caller * must have pre-validated the caching bits of the pgprot_t. */ int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn, unsigned long size, pgprot_t prot) { int error = remap_pfn_range_internal(vma, addr, pfn, size, prot); if (!error) return 0; /* * A partial pfn range mapping is dangerous: it does not * maintain page reference counts, and callers may free * pages due to the error. So zap it early. */ zap_page_range_single(vma, addr, size, NULL); return error; } /** * remap_pfn_range - remap kernel memory to userspace * @vma: user vma to map to * @addr: target page aligned user address to start at * @pfn: page frame number of kernel physical memory address * @size: size of mapping area * @prot: page protection flags for this mapping * * Note: this is only safe if the mm semaphore is held when called. * * Return: %0 on success, negative error code otherwise. */ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn, unsigned long size, pgprot_t prot) { int err; err = track_pfn_remap(vma, &prot, pfn, addr, PAGE_ALIGN(size)); if (err) return -EINVAL; err = remap_pfn_range_notrack(vma, addr, pfn, size, prot); if (err) untrack_pfn(vma, pfn, PAGE_ALIGN(size), true); return err; } EXPORT_SYMBOL(remap_pfn_range); /** * vm_iomap_memory - remap memory to userspace * @vma: user vma to map to * @start: start of the physical memory to be mapped * @len: size of area * * This is a simplified io_remap_pfn_range() for common driver use. The * driver just needs to give us the physical memory range to be mapped, * we'll figure out the rest from the vma information. * * NOTE! Some drivers might want to tweak vma->vm_page_prot first to get * whatever write-combining details or similar. * * Return: %0 on success, negative error code otherwise. */ int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len) { unsigned long vm_len, pfn, pages; /* Check that the physical memory area passed in looks valid */ if (start + len < start) return -EINVAL; /* * You *really* shouldn't map things that aren't page-aligned, * but we've historically allowed it because IO memory might * just have smaller alignment. */ len += start & ~PAGE_MASK; pfn = start >> PAGE_SHIFT; pages = (len + ~PAGE_MASK) >> PAGE_SHIFT; if (pfn + pages < pfn) return -EINVAL; /* We start the mapping 'vm_pgoff' pages into the area */ if (vma->vm_pgoff > pages) return -EINVAL; pfn += vma->vm_pgoff; pages -= vma->vm_pgoff; /* Can we fit all of the mapping? 
*/ vm_len = vma->vm_end - vma->vm_start; if (vm_len >> PAGE_SHIFT > pages) return -EINVAL; /* Ok, let it rip */ return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot); } EXPORT_SYMBOL(vm_iomap_memory); static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr, unsigned long end, pte_fn_t fn, void *data, bool create, pgtbl_mod_mask *mask) { pte_t *pte, *mapped_pte; int err = 0; spinlock_t *ptl; if (create) { mapped_pte = pte = (mm == &init_mm) ? pte_alloc_kernel_track(pmd, addr, mask) : pte_alloc_map_lock(mm, pmd, addr, &ptl); if (!pte) return -ENOMEM; } else { mapped_pte = pte = (mm == &init_mm) ? pte_offset_kernel(pmd, addr) : pte_offset_map_lock(mm, pmd, addr, &ptl); if (!pte) return -EINVAL; } arch_enter_lazy_mmu_mode(); if (fn) { do { if (create || !pte_none(ptep_get(pte))) { err = fn(pte++, addr, data); if (err) break; } } while (addr += PAGE_SIZE, addr != end); } *mask |= PGTBL_PTE_MODIFIED; arch_leave_lazy_mmu_mode(); if (mm != &init_mm) pte_unmap_unlock(mapped_pte, ptl); return err; } static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud, unsigned long addr, unsigned long end, pte_fn_t fn, void *data, bool create, pgtbl_mod_mask *mask) { pmd_t *pmd; unsigned long next; int err = 0; BUG_ON(pud_leaf(*pud)); if (create) { pmd = pmd_alloc_track(mm, pud, addr, mask); if (!pmd) return -ENOMEM; } else { pmd = pmd_offset(pud, addr); } do { next = pmd_addr_end(addr, end); if (pmd_none(*pmd) && !create) continue; if (WARN_ON_ONCE(pmd_leaf(*pmd))) return -EINVAL; if (!pmd_none(*pmd) && WARN_ON_ONCE(pmd_bad(*pmd))) { if (!create) continue; pmd_clear_bad(pmd); } err = apply_to_pte_range(mm, pmd, addr, next, fn, data, create, mask); if (err) break; } while (pmd++, addr = next, addr != end); return err; } static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d, unsigned long addr, unsigned long end, pte_fn_t fn, void *data, bool create, pgtbl_mod_mask *mask) { pud_t *pud; unsigned long next; int err = 0; if (create) { pud = pud_alloc_track(mm, p4d, addr, mask); if (!pud) return -ENOMEM; } else { pud = pud_offset(p4d, addr); } do { next = pud_addr_end(addr, end); if (pud_none(*pud) && !create) continue; if (WARN_ON_ONCE(pud_leaf(*pud))) return -EINVAL; if (!pud_none(*pud) && WARN_ON_ONCE(pud_bad(*pud))) { if (!create) continue; pud_clear_bad(pud); } err = apply_to_pmd_range(mm, pud, addr, next, fn, data, create, mask); if (err) break; } while (pud++, addr = next, addr != end); return err; } static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd, unsigned long addr, unsigned long end, pte_fn_t fn, void *data, bool create, pgtbl_mod_mask *mask) { p4d_t *p4d; unsigned long next; int err = 0; if (create) { p4d = p4d_alloc_track(mm, pgd, addr, mask); if (!p4d) return -ENOMEM; } else { p4d = p4d_offset(pgd, addr); } do { next = p4d_addr_end(addr, end); if (p4d_none(*p4d) && !create) continue; if (WARN_ON_ONCE(p4d_leaf(*p4d))) return -EINVAL; if (!p4d_none(*p4d) && WARN_ON_ONCE(p4d_bad(*p4d))) { if (!create) continue; p4d_clear_bad(p4d); } err = apply_to_pud_range(mm, p4d, addr, next, fn, data, create, mask); if (err) break; } while (p4d++, addr = next, addr != end); return err; } static int __apply_to_page_range(struct mm_struct *mm, unsigned long addr, unsigned long size, pte_fn_t fn, void *data, bool create) { pgd_t *pgd; unsigned long start = addr, next; unsigned long end = addr + size; pgtbl_mod_mask mask = 0; int err = 0; if (WARN_ON(addr >= end)) return -EINVAL; pgd = pgd_offset(mm, addr); do { next = pgd_addr_end(addr, end); 
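		/*
		 * pgd_addr_end() clamps "next" to the end of the current
		 * top-level entry, so each iteration descends into at most
		 * one pgd entry and never walks past "end".
		 */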
if (pgd_none(*pgd) && !create) continue; if (WARN_ON_ONCE(pgd_leaf(*pgd))) return -EINVAL; if (!pgd_none(*pgd) && WARN_ON_ONCE(pgd_bad(*pgd))) { if (!create) continue; pgd_clear_bad(pgd); } err = apply_to_p4d_range(mm, pgd, addr, next, fn, data, create, &mask); if (err) break; } while (pgd++, addr = next, addr != end); if (mask & ARCH_PAGE_TABLE_SYNC_MASK) arch_sync_kernel_mappings(start, start + size); return err; } /* * Scan a region of virtual memory, filling in page tables as necessary * and calling a provided function on each leaf page table. */ int apply_to_page_range(struct mm_struct *mm, unsigned long addr, unsigned long size, pte_fn_t fn, void *data) { return __apply_to_page_range(mm, addr, size, fn, data, true); } EXPORT_SYMBOL_GPL(apply_to_page_range); /* * Scan a region of virtual memory, calling a provided function on * each leaf page table where it exists. * * Unlike apply_to_page_range, this does _not_ fill in page tables * where they are absent. */ int apply_to_existing_page_range(struct mm_struct *mm, unsigned long addr, unsigned long size, pte_fn_t fn, void *data) { return __apply_to_page_range(mm, addr, size, fn, data, false); } EXPORT_SYMBOL_GPL(apply_to_existing_page_range); /* * handle_pte_fault chooses page fault handler according to an entry which was * read non-atomically. Before making any commitment, on those architectures * or configurations (e.g. i386 with PAE) which might give a mix of unmatched * parts, do_swap_page must check under lock before unmapping the pte and * proceeding (but do_wp_page is only called after already making such a check; * and do_anonymous_page can safely check later on). */ static inline int pte_unmap_same(struct vm_fault *vmf) { int same = 1; #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPTION) if (sizeof(pte_t) > sizeof(unsigned long)) { spin_lock(vmf->ptl); same = pte_same(ptep_get(vmf->pte), vmf->orig_pte); spin_unlock(vmf->ptl); } #endif pte_unmap(vmf->pte); vmf->pte = NULL; return same; } /* * Return: * 0: copied succeeded * -EHWPOISON: copy failed due to hwpoison in source page * -EAGAIN: copied failed (some other reason) */ static inline int __wp_page_copy_user(struct page *dst, struct page *src, struct vm_fault *vmf) { int ret; void *kaddr; void __user *uaddr; struct vm_area_struct *vma = vmf->vma; struct mm_struct *mm = vma->vm_mm; unsigned long addr = vmf->address; if (likely(src)) { if (copy_mc_user_highpage(dst, src, addr, vma)) return -EHWPOISON; return 0; } /* * If the source page was a PFN mapping, we don't have * a "struct page" for it. We do a best-effort copy by * just copying from the original user address. If that * fails, we just zero-fill it. Live with it. */ kaddr = kmap_local_page(dst); pagefault_disable(); uaddr = (void __user *)(addr & PAGE_MASK); /* * On architectures with software "accessed" bits, we would * take a double page fault, so mark it accessed here. */ vmf->pte = NULL; if (!arch_has_hw_pte_young() && !pte_young(vmf->orig_pte)) { pte_t entry; vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl); if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte))) { /* * Other thread has already handled the fault * and update local tlb only */ if (vmf->pte) update_mmu_tlb(vma, addr, vmf->pte); ret = -EAGAIN; goto pte_unlock; } entry = pte_mkyoung(vmf->orig_pte); if (ptep_set_access_flags(vma, addr, vmf->pte, entry, 0)) update_mmu_cache_range(vmf, vma, addr, vmf->pte, 1); } /* * This really shouldn't fail, because the page is there * in the page tables. 
But it might just be unreadable, * in which case we just give up and fill the result with * zeroes. */ if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) { if (vmf->pte) goto warn; /* Re-validate under PTL if the page is still mapped */ vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl); if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte))) { /* The PTE changed under us, update local tlb */ if (vmf->pte) update_mmu_tlb(vma, addr, vmf->pte); ret = -EAGAIN; goto pte_unlock; } /* * The same page can be mapped back since last copy attempt. * Try to copy again under PTL. */ if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) { /* * Give a warn in case there can be some obscure * use-case */ warn: WARN_ON_ONCE(1); clear_page(kaddr); } } ret = 0; pte_unlock: if (vmf->pte) pte_unmap_unlock(vmf->pte, vmf->ptl); pagefault_enable(); kunmap_local(kaddr); flush_dcache_page(dst); return ret; } static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma) { struct file *vm_file = vma->vm_file; if (vm_file) return mapping_gfp_mask(vm_file->f_mapping) | __GFP_FS | __GFP_IO; /* * Special mappings (e.g. VDSO) do not have any file so fake * a default GFP_KERNEL for them. */ return GFP_KERNEL; } /* * Notify the address space that the page is about to become writable so that * it can prohibit this or wait for the page to get into an appropriate state. * * We do this without the lock held, so that it can sleep if it needs to. */ static vm_fault_t do_page_mkwrite(struct vm_fault *vmf, struct folio *folio) { vm_fault_t ret; unsigned int old_flags = vmf->flags; vmf->flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE; if (vmf->vma->vm_file && IS_SWAPFILE(vmf->vma->vm_file->f_mapping->host)) return VM_FAULT_SIGBUS; ret = vmf->vma->vm_ops->page_mkwrite(vmf); /* Restore original flags so that caller is not surprised */ vmf->flags = old_flags; if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) return ret; if (unlikely(!(ret & VM_FAULT_LOCKED))) { folio_lock(folio); if (!folio->mapping) { folio_unlock(folio); return 0; /* retry */ } ret |= VM_FAULT_LOCKED; } else VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); return ret; } /* * Handle dirtying of a page in shared file mapping on a write fault. * * The function expects the page to be locked and unlocks it. */ static vm_fault_t fault_dirty_shared_page(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; struct address_space *mapping; struct folio *folio = page_folio(vmf->page); bool dirtied; bool page_mkwrite = vma->vm_ops && vma->vm_ops->page_mkwrite; dirtied = folio_mark_dirty(folio); VM_BUG_ON_FOLIO(folio_test_anon(folio), folio); /* * Take a local copy of the address_space - folio.mapping may be zeroed * by truncate after folio_unlock(). The address_space itself remains * pinned by vma->vm_file's reference. We rely on folio_unlock()'s * release semantics to prevent the compiler from undoing this copying. */ mapping = folio_raw_mapping(folio); folio_unlock(folio); if (!page_mkwrite) file_update_time(vma->vm_file); /* * Throttle page dirtying rate down to writeback speed. * * mapping may be NULL here because some device drivers do not * set page.mapping but still dirty their pages * * Drop the mmap_lock before waiting on IO, if we can. The file * is pinning the mapping, as per above. 
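	 *
	 * A non-NULL return from maybe_unlock_mmap_for_io() below means it
	 * dropped the mmap_lock and pinned the file; in that case we return
	 * VM_FAULT_COMPLETED so the caller knows the lock is already gone.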
*/ if ((dirtied || page_mkwrite) && mapping) { struct file *fpin; fpin = maybe_unlock_mmap_for_io(vmf, NULL); balance_dirty_pages_ratelimited(mapping); if (fpin) { fput(fpin); return VM_FAULT_COMPLETED; } } return 0; } /* * Handle write page faults for pages that can be reused in the current vma * * This can happen either due to the mapping being with the VM_SHARED flag, * or due to us being the last reference standing to the page. In either * case, all we need to do here is to mark the page as writable and update * any related book-keeping. */ static inline void wp_page_reuse(struct vm_fault *vmf, struct folio *folio) __releases(vmf->ptl) { struct vm_area_struct *vma = vmf->vma; pte_t entry; VM_BUG_ON(!(vmf->flags & FAULT_FLAG_WRITE)); VM_WARN_ON(is_zero_pfn(pte_pfn(vmf->orig_pte))); if (folio) { VM_BUG_ON(folio_test_anon(folio) && !PageAnonExclusive(vmf->page)); /* * Clear the folio's cpupid information as the existing * information potentially belongs to a now completely * unrelated process. */ folio_xchg_last_cpupid(folio, (1 << LAST_CPUPID_SHIFT) - 1); } flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte)); entry = pte_mkyoung(vmf->orig_pte); entry = maybe_mkwrite(pte_mkdirty(entry), vma); if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1)) update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1); pte_unmap_unlock(vmf->pte, vmf->ptl); count_vm_event(PGREUSE); } /* * We could add a bitflag somewhere, but for now, we know that all * vm_ops that have a ->map_pages have been audited and don't need * the mmap_lock to be held. */ static inline vm_fault_t vmf_can_call_fault(const struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; if (vma->vm_ops->map_pages || !(vmf->flags & FAULT_FLAG_VMA_LOCK)) return 0; vma_end_read(vma); return VM_FAULT_RETRY; } /** * __vmf_anon_prepare - Prepare to handle an anonymous fault. * @vmf: The vm_fault descriptor passed from the fault handler. * * When preparing to insert an anonymous page into a VMA from a * fault handler, call this function rather than anon_vma_prepare(). * If this vma does not already have an associated anon_vma and we are * only protected by the per-VMA lock, the caller must retry with the * mmap_lock held. __anon_vma_prepare() will look at adjacent VMAs to * determine if this VMA can share its anon_vma, and that's not safe to * do with only the per-VMA lock held for this VMA. * * Return: 0 if fault handling can proceed. Any other value should be * returned to the caller. */ vm_fault_t __vmf_anon_prepare(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; vm_fault_t ret = 0; if (likely(vma->anon_vma)) return 0; if (vmf->flags & FAULT_FLAG_VMA_LOCK) { if (!mmap_read_trylock(vma->vm_mm)) return VM_FAULT_RETRY; } if (__anon_vma_prepare(vma)) ret = VM_FAULT_OOM; if (vmf->flags & FAULT_FLAG_VMA_LOCK) mmap_read_unlock(vma->vm_mm); return ret; } /* * Handle the case of a page which we actually need to copy to a new page, * either due to COW or unsharing. * * Called with mmap_lock locked and the old page referenced, but * without the ptl held. * * High level logic flow: * * - Allocate a page, copy the content of the old page to the new one. * - Handle book keeping and accounting - cgroups, mmu-notifiers, etc. * - Take the PTL. If the pte changed, bail out and release the allocated page * - If the pte is still the way we remember it, update the page table and all * relevant references. This includes dropping the reference the page-table * held to the old page, as well as updating the rmap. 
* - In any case, unlock the PTL and drop the reference we took to the old page. */ static vm_fault_t wp_page_copy(struct vm_fault *vmf) { const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE; struct vm_area_struct *vma = vmf->vma; struct mm_struct *mm = vma->vm_mm; struct folio *old_folio = NULL; struct folio *new_folio = NULL; pte_t entry; int page_copied = 0; struct mmu_notifier_range range; vm_fault_t ret; bool pfn_is_zero; delayacct_wpcopy_start(); if (vmf->page) old_folio = page_folio(vmf->page); ret = vmf_anon_prepare(vmf); if (unlikely(ret)) goto out; pfn_is_zero = is_zero_pfn(pte_pfn(vmf->orig_pte)); new_folio = folio_prealloc(mm, vma, vmf->address, pfn_is_zero); if (!new_folio) goto oom; if (!pfn_is_zero) { int err; err = __wp_page_copy_user(&new_folio->page, vmf->page, vmf); if (err) { /* * COW failed, if the fault was solved by other, * it's fine. If not, userspace would re-fault on * the same address and we will handle the fault * from the second attempt. * The -EHWPOISON case will not be retried. */ folio_put(new_folio); if (old_folio) folio_put(old_folio); delayacct_wpcopy_end(); return err == -EHWPOISON ? VM_FAULT_HWPOISON : 0; } kmsan_copy_page_meta(&new_folio->page, vmf->page); } __folio_mark_uptodate(new_folio); mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, vmf->address & PAGE_MASK, (vmf->address & PAGE_MASK) + PAGE_SIZE); mmu_notifier_invalidate_range_start(&range); /* * Re-check the pte - we dropped the lock */ vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl); if (likely(vmf->pte && pte_same(ptep_get(vmf->pte), vmf->orig_pte))) { if (old_folio) { if (!folio_test_anon(old_folio)) { dec_mm_counter(mm, mm_counter_file(old_folio)); inc_mm_counter(mm, MM_ANONPAGES); } } else { ksm_might_unmap_zero_page(mm, vmf->orig_pte); inc_mm_counter(mm, MM_ANONPAGES); } flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte)); entry = mk_pte(&new_folio->page, vma->vm_page_prot); entry = pte_sw_mkyoung(entry); if (unlikely(unshare)) { if (pte_soft_dirty(vmf->orig_pte)) entry = pte_mksoft_dirty(entry); if (pte_uffd_wp(vmf->orig_pte)) entry = pte_mkuffd_wp(entry); } else { entry = maybe_mkwrite(pte_mkdirty(entry), vma); } /* * Clear the pte entry and flush it first, before updating the * pte with the new entry, to keep TLBs on different CPUs in * sync. This code used to set the new PTE then flush TLBs, but * that left a window where the new PTE could be loaded into * some TLBs while the old PTE remains in others. */ ptep_clear_flush(vma, vmf->address, vmf->pte); folio_add_new_anon_rmap(new_folio, vma, vmf->address, RMAP_EXCLUSIVE); folio_add_lru_vma(new_folio, vma); BUG_ON(unshare && pte_write(entry)); set_pte_at(mm, vmf->address, vmf->pte, entry); update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1); if (old_folio) { /* * Only after switching the pte to the new page may * we remove the mapcount here. Otherwise another * process may come and find the rmap count decremented * before the pte is switched to the new page, and * "reuse" the old page writing into it while our pte * here still points into it and can be read by other * threads. * * The critical issue is to order this * folio_remove_rmap_pte() with the ptp_clear_flush * above. Those stores are ordered by (if nothing else,) * the barrier present in the atomic_add_negative * in folio_remove_rmap_pte(); * * Then the TLB flush in ptep_clear_flush ensures that * no process can access the old page before the * decremented mapcount is visible. 
And the old page * cannot be reused until after the decremented * mapcount is visible. So transitively, TLBs to * old page will be flushed before it can be reused. */ folio_remove_rmap_pte(old_folio, vmf->page, vma); } /* Free the old page.. */ new_folio = old_folio; page_copied = 1; pte_unmap_unlock(vmf->pte, vmf->ptl); } else if (vmf->pte) { update_mmu_tlb(vma, vmf->address, vmf->pte); pte_unmap_unlock(vmf->pte, vmf->ptl); } mmu_notifier_invalidate_range_end(&range); if (new_folio) folio_put(new_folio); if (old_folio) { if (page_copied) free_swap_cache(old_folio); folio_put(old_folio); } delayacct_wpcopy_end(); return 0; oom: ret = VM_FAULT_OOM; out: if (old_folio) folio_put(old_folio); delayacct_wpcopy_end(); return ret; } /** * finish_mkwrite_fault - finish page fault for a shared mapping, making PTE * writeable once the page is prepared * * @vmf: structure describing the fault * @folio: the folio of vmf->page * * This function handles all that is needed to finish a write page fault in a * shared mapping due to PTE being read-only once the mapped page is prepared. * It handles locking of PTE and modifying it. * * The function expects the page to be locked or other protection against * concurrent faults / writeback (such as DAX radix tree locks). * * Return: %0 on success, %VM_FAULT_NOPAGE when PTE got changed before * we acquired PTE lock. */ static vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf, struct folio *folio) { WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED)); vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl); if (!vmf->pte) return VM_FAULT_NOPAGE; /* * We might have raced with another page fault while we released the * pte_offset_map_lock. */ if (!pte_same(ptep_get(vmf->pte), vmf->orig_pte)) { update_mmu_tlb(vmf->vma, vmf->address, vmf->pte); pte_unmap_unlock(vmf->pte, vmf->ptl); return VM_FAULT_NOPAGE; } wp_page_reuse(vmf, folio); return 0; } /* * Handle write page faults for VM_MIXEDMAP or VM_PFNMAP for a VM_SHARED * mapping */ static vm_fault_t wp_pfn_shared(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) { vm_fault_t ret; pte_unmap_unlock(vmf->pte, vmf->ptl); ret = vmf_can_call_fault(vmf); if (ret) return ret; vmf->flags |= FAULT_FLAG_MKWRITE; ret = vma->vm_ops->pfn_mkwrite(vmf); if (ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)) return ret; return finish_mkwrite_fault(vmf, NULL); } wp_page_reuse(vmf, NULL); return 0; } static vm_fault_t wp_page_shared(struct vm_fault *vmf, struct folio *folio) __releases(vmf->ptl) { struct vm_area_struct *vma = vmf->vma; vm_fault_t ret = 0; folio_get(folio); if (vma->vm_ops && vma->vm_ops->page_mkwrite) { vm_fault_t tmp; pte_unmap_unlock(vmf->pte, vmf->ptl); tmp = vmf_can_call_fault(vmf); if (tmp) { folio_put(folio); return tmp; } tmp = do_page_mkwrite(vmf, folio); if (unlikely(!tmp || (tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) { folio_put(folio); return tmp; } tmp = finish_mkwrite_fault(vmf, folio); if (unlikely(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) { folio_unlock(folio); folio_put(folio); return tmp; } } else { wp_page_reuse(vmf, folio); folio_lock(folio); } ret |= fault_dirty_shared_page(vmf); folio_put(folio); return ret; } static bool wp_can_reuse_anon_folio(struct folio *folio, struct vm_area_struct *vma) { /* * We could currently only reuse a subpage of a large folio if no * other subpages of the large folios are still mapped. 
However, * let's just consistently not reuse subpages even if we could * reuse in that scenario, and give back a large folio a bit * sooner. */ if (folio_test_large(folio)) return false; /* * We have to verify under folio lock: these early checks are * just an optimization to avoid locking the folio and freeing * the swapcache if there is little hope that we can reuse. * * KSM doesn't necessarily raise the folio refcount. */ if (folio_test_ksm(folio) || folio_ref_count(folio) > 3) return false; if (!folio_test_lru(folio)) /* * We cannot easily detect+handle references from * remote LRU caches or references to LRU folios. */ lru_add_drain(); if (folio_ref_count(folio) > 1 + folio_test_swapcache(folio)) return false; if (!folio_trylock(folio)) return false; if (folio_test_swapcache(folio)) folio_free_swap(folio); if (folio_test_ksm(folio) || folio_ref_count(folio) != 1) { folio_unlock(folio); return false; } /* * Ok, we've got the only folio reference from our mapping * and the folio is locked, it's dark out, and we're wearing * sunglasses. Hit it. */ folio_move_anon_rmap(folio, vma); folio_unlock(folio); return true; } /* * This routine handles present pages, when * * users try to write to a shared page (FAULT_FLAG_WRITE) * * GUP wants to take a R/O pin on a possibly shared anonymous page * (FAULT_FLAG_UNSHARE) * * It is done by copying the page to a new address and decrementing the * shared-page counter for the old page. * * Note that this routine assumes that the protection checks have been * done by the caller (the low-level page fault routine in most cases). * Thus, with FAULT_FLAG_WRITE, we can safely just mark it writable once we've * done any necessary COW. * * In case of FAULT_FLAG_WRITE, we also mark the page dirty at this point even * though the page will change only once the write actually happens. This * avoids a few races, and potentially makes it more efficient. * * We enter with non-exclusive mmap_lock (to exclude vma changes, * but allow concurrent faults), with pte both mapped and locked. * We return with mmap_lock still held, but pte unmapped and unlocked. */ static vm_fault_t do_wp_page(struct vm_fault *vmf) __releases(vmf->ptl) { const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE; struct vm_area_struct *vma = vmf->vma; struct folio *folio = NULL; pte_t pte; if (likely(!unshare)) { if (userfaultfd_pte_wp(vma, ptep_get(vmf->pte))) { if (!userfaultfd_wp_async(vma)) { pte_unmap_unlock(vmf->pte, vmf->ptl); return handle_userfault(vmf, VM_UFFD_WP); } /* * Nothing needed (cache flush, TLB invalidations, * etc.) because we're only removing the uffd-wp bit, * which is completely invisible to the user. */ pte = pte_clear_uffd_wp(ptep_get(vmf->pte)); set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte); /* * Update this to be prepared for following up CoW * handling */ vmf->orig_pte = pte; } /* * Userfaultfd write-protect can defer flushes. Ensure the TLB * is flushed in this case before copying. */ if (unlikely(userfaultfd_wp(vmf->vma) && mm_tlb_flush_pending(vmf->vma->vm_mm))) flush_tlb_page(vmf->vma, vmf->address); } vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte); if (vmf->page) folio = page_folio(vmf->page); /* * Shared mapping: we are guaranteed to have VM_WRITE and * FAULT_FLAG_WRITE set at this point. */ if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) { /* * VM_MIXEDMAP !pfn_valid() case, or VM_SOFTDIRTY clear on a * VM_PFNMAP VMA. * * We should not cow pages in a shared writeable mapping. * Just mark the pages writable and/or call ops->pfn_mkwrite. 
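	 *
	 * Dispatch accordingly: pageless (pfn/special) mappings go through
	 * wp_pfn_shared() and, where provided, ->pfn_mkwrite(); mappings
	 * backed by a struct page go through wp_page_shared() and, where
	 * provided, ->page_mkwrite().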
*/ if (!vmf->page) return wp_pfn_shared(vmf); return wp_page_shared(vmf, folio); } /* * Private mapping: create an exclusive anonymous page copy if reuse * is impossible. We might miss VM_WRITE for FOLL_FORCE handling. * * If we encounter a page that is marked exclusive, we must reuse * the page without further checks. */ if (folio && folio_test_anon(folio) && (PageAnonExclusive(vmf->page) || wp_can_reuse_anon_folio(folio, vma))) { if (!PageAnonExclusive(vmf->page)) SetPageAnonExclusive(vmf->page); if (unlikely(unshare)) { pte_unmap_unlock(vmf->pte, vmf->ptl); return 0; } wp_page_reuse(vmf, folio); return 0; } /* * Ok, we need to copy. Oh, well.. */ if (folio) folio_get(folio); pte_unmap_unlock(vmf->pte, vmf->ptl); #ifdef CONFIG_KSM if (folio && folio_test_ksm(folio)) count_vm_event(COW_KSM); #endif return wp_page_copy(vmf); } static void unmap_mapping_range_vma(struct vm_area_struct *vma, unsigned long start_addr, unsigned long end_addr, struct zap_details *details) { zap_page_range_single(vma, start_addr, end_addr - start_addr, details); } static inline void unmap_mapping_range_tree(struct rb_root_cached *root, pgoff_t first_index, pgoff_t last_index, struct zap_details *details) { struct vm_area_struct *vma; pgoff_t vba, vea, zba, zea; vma_interval_tree_foreach(vma, root, first_index, last_index) { vba = vma->vm_pgoff; vea = vba + vma_pages(vma) - 1; zba = max(first_index, vba); zea = min(last_index, vea); unmap_mapping_range_vma(vma, ((zba - vba) << PAGE_SHIFT) + vma->vm_start, ((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start, details); } } /** * unmap_mapping_folio() - Unmap single folio from processes. * @folio: The locked folio to be unmapped. * * Unmap this folio from any userspace process which still has it mmaped. * Typically, for efficiency, the range of nearby pages has already been * unmapped by unmap_mapping_pages() or unmap_mapping_range(). But once * truncation or invalidation holds the lock on a folio, it may find that * the page has been remapped again: and then uses unmap_mapping_folio() * to unmap it finally. */ void unmap_mapping_folio(struct folio *folio) { struct address_space *mapping = folio->mapping; struct zap_details details = { }; pgoff_t first_index; pgoff_t last_index; VM_BUG_ON(!folio_test_locked(folio)); first_index = folio->index; last_index = folio_next_index(folio) - 1; details.even_cows = false; details.single_folio = folio; details.zap_flags = ZAP_FLAG_DROP_MARKER; i_mmap_lock_read(mapping); if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))) unmap_mapping_range_tree(&mapping->i_mmap, first_index, last_index, &details); i_mmap_unlock_read(mapping); } /** * unmap_mapping_pages() - Unmap pages from processes. * @mapping: The address space containing pages to be unmapped. * @start: Index of first page to be unmapped. * @nr: Number of pages to be unmapped. 0 to unmap to end of file. * @even_cows: Whether to unmap even private COWed pages. * * Unmap the pages in this address space from any userspace process which * has them mmaped. Generally, you want to remove COWed pages as well when * a file is being truncated, but not when invalidating pages from the page * cache. 
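 *
 * For instance (sketch), unmapping everything from byte offset "holebegin"
 * to EOF ahead of a truncation, including private COW copies, would be:
 *
 *	unmap_mapping_pages(mapping, holebegin >> PAGE_SHIFT, 0, true);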
*/ void unmap_mapping_pages(struct address_space *mapping, pgoff_t start, pgoff_t nr, bool even_cows) { struct zap_details details = { }; pgoff_t first_index = start; pgoff_t last_index = start + nr - 1; details.even_cows = even_cows; if (last_index < first_index) last_index = ULONG_MAX; i_mmap_lock_read(mapping); if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))) unmap_mapping_range_tree(&mapping->i_mmap, first_index, last_index, &details); i_mmap_unlock_read(mapping); } EXPORT_SYMBOL_GPL(unmap_mapping_pages); /** * unmap_mapping_range - unmap the portion of all mmaps in the specified * address_space corresponding to the specified byte range in the underlying * file. * * @mapping: the address space containing mmaps to be unmapped. * @holebegin: byte in first page to unmap, relative to the start of * the underlying file. This will be rounded down to a PAGE_SIZE * boundary. Note that this is different from truncate_pagecache(), which * must keep the partial page. In contrast, we must get rid of * partial pages. * @holelen: size of prospective hole in bytes. This will be rounded * up to a PAGE_SIZE boundary. A holelen of zero truncates to the * end of the file. * @even_cows: 1 when truncating a file, unmap even private COWed pages; * but 0 when invalidating pagecache, don't throw away private data. */ void unmap_mapping_range(struct address_space *mapping, loff_t const holebegin, loff_t const holelen, int even_cows) { pgoff_t hba = (pgoff_t)(holebegin) >> PAGE_SHIFT; pgoff_t hlen = ((pgoff_t)(holelen) + PAGE_SIZE - 1) >> PAGE_SHIFT; /* Check for overflow. */ if (sizeof(holelen) > sizeof(hlen)) { long long holeend = (holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT; if (holeend & ~(long long)ULONG_MAX) hlen = ULONG_MAX - hba + 1; } unmap_mapping_pages(mapping, hba, hlen, even_cows); } EXPORT_SYMBOL(unmap_mapping_range); /* * Restore a potential device exclusive pte to a working pte entry */ static vm_fault_t remove_device_exclusive_entry(struct vm_fault *vmf) { struct folio *folio = page_folio(vmf->page); struct vm_area_struct *vma = vmf->vma; struct mmu_notifier_range range; vm_fault_t ret; /* * We need a reference to lock the folio because we don't hold * the PTL so a racing thread can remove the device-exclusive * entry and unmap it. If the folio is free the entry must * have been removed already. If it happens to have already * been re-allocated after being freed all we do is lock and * unlock it. */ if (!folio_try_get(folio)) return 0; ret = folio_lock_or_retry(folio, vmf); if (ret) { folio_put(folio); return ret; } mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0, vma->vm_mm, vmf->address & PAGE_MASK, (vmf->address & PAGE_MASK) + PAGE_SIZE, NULL); mmu_notifier_invalidate_range_start(&range); vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl); if (likely(vmf->pte && pte_same(ptep_get(vmf->pte), vmf->orig_pte))) restore_exclusive_pte(vma, vmf->page, vmf->address, vmf->pte); if (vmf->pte) pte_unmap_unlock(vmf->pte, vmf->ptl); folio_unlock(folio); folio_put(folio); mmu_notifier_invalidate_range_end(&range); return 0; } static inline bool should_try_to_free_swap(struct folio *folio, struct vm_area_struct *vma, unsigned int fault_flags) { if (!folio_test_swapcache(folio)) return false; if (mem_cgroup_swap_full(folio) || (vma->vm_flags & VM_LOCKED) || folio_test_mlocked(folio)) return true; /* * If we want to map a page that's in the swapcache writable, we * have to detect via the refcount if we're really the exclusive * user. 
Try freeing the swapcache to get rid of the swapcache * reference only in case it's likely that we'll be the exlusive user. */ return (fault_flags & FAULT_FLAG_WRITE) && !folio_test_ksm(folio) && folio_ref_count(folio) == (1 + folio_nr_pages(folio)); } static vm_fault_t pte_marker_clear(struct vm_fault *vmf) { vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl); if (!vmf->pte) return 0; /* * Be careful so that we will only recover a special uffd-wp pte into a * none pte. Otherwise it means the pte could have changed, so retry. * * This should also cover the case where e.g. the pte changed * quickly from a PTE_MARKER_UFFD_WP into PTE_MARKER_POISONED. * So is_pte_marker() check is not enough to safely drop the pte. */ if (pte_same(vmf->orig_pte, ptep_get(vmf->pte))) pte_clear(vmf->vma->vm_mm, vmf->address, vmf->pte); pte_unmap_unlock(vmf->pte, vmf->ptl); return 0; } static vm_fault_t do_pte_missing(struct vm_fault *vmf) { if (vma_is_anonymous(vmf->vma)) return do_anonymous_page(vmf); else return do_fault(vmf); } /* * This is actually a page-missing access, but with uffd-wp special pte * installed. It means this pte was wr-protected before being unmapped. */ static vm_fault_t pte_marker_handle_uffd_wp(struct vm_fault *vmf) { /* * Just in case there're leftover special ptes even after the region * got unregistered - we can simply clear them. */ if (unlikely(!userfaultfd_wp(vmf->vma))) return pte_marker_clear(vmf); return do_pte_missing(vmf); } static vm_fault_t handle_pte_marker(struct vm_fault *vmf) { swp_entry_t entry = pte_to_swp_entry(vmf->orig_pte); unsigned long marker = pte_marker_get(entry); /* * PTE markers should never be empty. If anything weird happened, * the best thing to do is to kill the process along with its mm. */ if (WARN_ON_ONCE(!marker)) return VM_FAULT_SIGBUS; /* Higher priority than uffd-wp when data corrupted */ if (marker & PTE_MARKER_POISONED) return VM_FAULT_HWPOISON; /* Hitting a guard page is always a fatal condition. */ if (marker & PTE_MARKER_GUARD) return VM_FAULT_SIGSEGV; if (pte_marker_entry_uffd_wp(entry)) return pte_marker_handle_uffd_wp(vmf); /* This is an unknown pte marker */ return VM_FAULT_SIGBUS; } static struct folio *__alloc_swap_folio(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; struct folio *folio; swp_entry_t entry; folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vmf->address); if (!folio) return NULL; entry = pte_to_swp_entry(vmf->orig_pte); if (mem_cgroup_swapin_charge_folio(folio, vma->vm_mm, GFP_KERNEL, entry)) { folio_put(folio); return NULL; } return folio; } #ifdef CONFIG_TRANSPARENT_HUGEPAGE static inline int non_swapcache_batch(swp_entry_t entry, int max_nr) { struct swap_info_struct *si = swp_swap_info(entry); pgoff_t offset = swp_offset(entry); int i; /* * While allocating a large folio and doing swap_read_folio, which is * the case the being faulted pte doesn't have swapcache. We need to * ensure all PTEs have no cache as well, otherwise, we might go to * swap devices while the content is in swapcache. */ for (i = 0; i < max_nr; i++) { if ((si->swap_map[offset + i] & SWAP_HAS_CACHE)) return i; } return i; } /* * Check if the PTEs within a range are contiguous swap entries * and have consistent swapcache, zeromap. 
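 *
 * Concretely: all nr_pages PTEs in the naturally aligned range around the
 * fault must still be swap entries with consecutive offsets, none of those
 * entries may already have a swapcache page, and their zeromap state must
 * be uniform; otherwise the caller falls back to a smaller order.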
*/ static bool can_swapin_thp(struct vm_fault *vmf, pte_t *ptep, int nr_pages) { unsigned long addr; swp_entry_t entry; int idx; pte_t pte; addr = ALIGN_DOWN(vmf->address, nr_pages * PAGE_SIZE); idx = (vmf->address - addr) / PAGE_SIZE; pte = ptep_get(ptep); if (!pte_same(pte, pte_move_swp_offset(vmf->orig_pte, -idx))) return false; entry = pte_to_swp_entry(pte); if (swap_pte_batch(ptep, nr_pages, pte) != nr_pages) return false; /* * swap_read_folio() can't handle the case a large folio is hybridly * from different backends. And they are likely corner cases. Similar * things might be added once zswap support large folios. */ if (unlikely(swap_zeromap_batch(entry, nr_pages, NULL) != nr_pages)) return false; if (unlikely(non_swapcache_batch(entry, nr_pages) != nr_pages)) return false; return true; } static inline unsigned long thp_swap_suitable_orders(pgoff_t swp_offset, unsigned long addr, unsigned long orders) { int order, nr; order = highest_order(orders); /* * To swap in a THP with nr pages, we require that its first swap_offset * is aligned with that number, as it was when the THP was swapped out. * This helps filter out most invalid entries. */ while (orders) { nr = 1 << order; if ((addr >> PAGE_SHIFT) % nr == swp_offset % nr) break; order = next_order(&orders, order); } return orders; } static struct folio *alloc_swap_folio(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; unsigned long orders; struct folio *folio; unsigned long addr; swp_entry_t entry; spinlock_t *ptl; pte_t *pte; gfp_t gfp; int order; /* * If uffd is active for the vma we need per-page fault fidelity to * maintain the uffd semantics. */ if (unlikely(userfaultfd_armed(vma))) goto fallback; /* * A large swapped out folio could be partially or fully in zswap. We * lack handling for such cases, so fallback to swapping in order-0 * folio. */ if (!zswap_never_enabled()) goto fallback; entry = pte_to_swp_entry(vmf->orig_pte); /* * Get a list of all the (large) orders below PMD_ORDER that are enabled * and suitable for swapping THP. */ orders = thp_vma_allowable_orders(vma, vma->vm_flags, TVA_IN_PF | TVA_ENFORCE_SYSFS, BIT(PMD_ORDER) - 1); orders = thp_vma_suitable_orders(vma, vmf->address, orders); orders = thp_swap_suitable_orders(swp_offset(entry), vmf->address, orders); if (!orders) goto fallback; pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address & PMD_MASK, &ptl); if (unlikely(!pte)) goto fallback; /* * For do_swap_page, find the highest order where the aligned range is * completely swap entries with contiguous swap offsets. */ order = highest_order(orders); while (orders) { addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order); if (can_swapin_thp(vmf, pte + pte_index(addr), 1 << order)) break; order = next_order(&orders, order); } pte_unmap_unlock(pte, ptl); /* Try allocating the highest of the remaining orders. 
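	 *
	 * If allocation or memcg charging fails for one order, simply retry
	 * with the next smaller enabled order; once all large orders are
	 * exhausted, fall back to an order-0 folio via __alloc_swap_folio().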
*/ gfp = vma_thp_gfp_mask(vma); while (orders) { addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order); folio = vma_alloc_folio(gfp, order, vma, addr); if (folio) { if (!mem_cgroup_swapin_charge_folio(folio, vma->vm_mm, gfp, entry)) return folio; folio_put(folio); } order = next_order(&orders, order); } fallback: return __alloc_swap_folio(vmf); } #else /* !CONFIG_TRANSPARENT_HUGEPAGE */ static struct folio *alloc_swap_folio(struct vm_fault *vmf) { return __alloc_swap_folio(vmf); } #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ static DECLARE_WAIT_QUEUE_HEAD(swapcache_wq); /* * We enter with non-exclusive mmap_lock (to exclude vma changes, * but allow concurrent faults), and pte mapped but not yet locked. * We return with pte unmapped and unlocked. * * We return with the mmap_lock locked or unlocked in the same cases * as does filemap_fault(). */ vm_fault_t do_swap_page(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; struct folio *swapcache, *folio = NULL; DECLARE_WAITQUEUE(wait, current); struct page *page; struct swap_info_struct *si = NULL; rmap_t rmap_flags = RMAP_NONE; bool need_clear_cache = false; bool exclusive = false; swp_entry_t entry; pte_t pte; vm_fault_t ret = 0; void *shadow = NULL; int nr_pages; unsigned long page_idx; unsigned long address; pte_t *ptep; if (!pte_unmap_same(vmf)) goto out; entry = pte_to_swp_entry(vmf->orig_pte); if (unlikely(non_swap_entry(entry))) { if (is_migration_entry(entry)) { migration_entry_wait(vma->vm_mm, vmf->pmd, vmf->address); } else if (is_device_exclusive_entry(entry)) { vmf->page = pfn_swap_entry_to_page(entry); ret = remove_device_exclusive_entry(vmf); } else if (is_device_private_entry(entry)) { if (vmf->flags & FAULT_FLAG_VMA_LOCK) { /* * migrate_to_ram is not yet ready to operate * under VMA lock. */ vma_end_read(vma); ret = VM_FAULT_RETRY; goto out; } vmf->page = pfn_swap_entry_to_page(entry); vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl); if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte))) goto unlock; /* * Get a page reference while we know the page can't be * freed. */ get_page(vmf->page); pte_unmap_unlock(vmf->pte, vmf->ptl); ret = vmf->page->pgmap->ops->migrate_to_ram(vmf); put_page(vmf->page); } else if (is_hwpoison_entry(entry)) { ret = VM_FAULT_HWPOISON; } else if (is_pte_marker_entry(entry)) { ret = handle_pte_marker(vmf); } else { print_bad_pte(vma, vmf->address, vmf->orig_pte, NULL); ret = VM_FAULT_SIGBUS; } goto out; } /* Prevent swapoff from happening to us. */ si = get_swap_device(entry); if (unlikely(!si)) goto out; folio = swap_cache_get_folio(entry, vma, vmf->address); if (folio) page = folio_file_page(folio, swp_offset(entry)); swapcache = folio; if (!folio) { if (data_race(si->flags & SWP_SYNCHRONOUS_IO) && __swap_count(entry) == 1) { /* skip swapcache */ folio = alloc_swap_folio(vmf); if (folio) { __folio_set_locked(folio); __folio_set_swapbacked(folio); nr_pages = folio_nr_pages(folio); if (folio_test_large(folio)) entry.val = ALIGN_DOWN(entry.val, nr_pages); /* * Prevent parallel swapin from proceeding with * the cache flag. Otherwise, another thread * may finish swapin first, free the entry, and * swapout reusing the same entry. It's * undetectable as pte_same() returns true due * to entry reuse. */ if (swapcache_prepare(entry, nr_pages)) { /* * Relax a bit to prevent rapid * repeated page faults. 
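* Sleeping for one jiffy on swapcache_wq is a simple back-off while another * thread still holds the SWAP_HAS_CACHE pin on this entry; the fault returns * afterwards and is simply retried.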
*/ add_wait_queue(&swapcache_wq, &wait); schedule_timeout_uninterruptible(1); remove_wait_queue(&swapcache_wq, &wait); goto out_page; } need_clear_cache = true; mem_cgroup_swapin_uncharge_swap(entry, nr_pages); shadow = get_shadow_from_swap_cache(entry); if (shadow) workingset_refault(folio, shadow); folio_add_lru(folio); /* To provide entry to swap_read_folio() */ folio->swap = entry; swap_read_folio(folio, NULL); folio->private = NULL; } } else { folio = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE, vmf); swapcache = folio; } if (!folio) { /* * Back out if somebody else faulted in this pte * while we released the pte lock. */ vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl); if (likely(vmf->pte && pte_same(ptep_get(vmf->pte), vmf->orig_pte))) ret = VM_FAULT_OOM; goto unlock; } /* Had to read the page from swap area: Major fault */ ret = VM_FAULT_MAJOR; count_vm_event(PGMAJFAULT); count_memcg_event_mm(vma->vm_mm, PGMAJFAULT); page = folio_file_page(folio, swp_offset(entry)); } else if (PageHWPoison(page)) { /* * hwpoisoned dirty swapcache pages are kept for killing * owner processes (which may be unknown at hwpoison time) */ ret = VM_FAULT_HWPOISON; goto out_release; } ret |= folio_lock_or_retry(folio, vmf); if (ret & VM_FAULT_RETRY) goto out_release; if (swapcache) { /* * Make sure folio_free_swap() or swapoff did not release the * swapcache from under us. The page pin, and pte_same test * below, are not enough to exclude that. Even if it is still * swapcache, we need to check that the page's swap has not * changed. */ if (unlikely(!folio_test_swapcache(folio) || page_swap_entry(page).val != entry.val)) goto out_page; /* * KSM sometimes has to copy on read faults, for example, if * page->index of !PageKSM() pages would be nonlinear inside the * anon VMA -- PageKSM() is lost on actual swapout. */ folio = ksm_might_need_to_copy(folio, vma, vmf->address); if (unlikely(!folio)) { ret = VM_FAULT_OOM; folio = swapcache; goto out_page; } else if (unlikely(folio == ERR_PTR(-EHWPOISON))) { ret = VM_FAULT_HWPOISON; folio = swapcache; goto out_page; } if (folio != swapcache) page = folio_page(folio, 0); /* * If we want to map a page that's in the swapcache writable, we * have to detect via the refcount if we're really the exclusive * owner. Try removing the extra reference from the local LRU * caches if required. */ if ((vmf->flags & FAULT_FLAG_WRITE) && folio == swapcache && !folio_test_ksm(folio) && !folio_test_lru(folio)) lru_add_drain(); } folio_throttle_swaprate(folio, GFP_KERNEL); /* * Back out if somebody else already faulted in this pte. 
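* (Detected by re-taking the PT lock and re-checking pte_same() against * vmf->orig_pte right below.)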
*/ vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl); if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte))) goto out_nomap; if (unlikely(!folio_test_uptodate(folio))) { ret = VM_FAULT_SIGBUS; goto out_nomap; } /* allocated large folios for SWP_SYNCHRONOUS_IO */ if (folio_test_large(folio) && !folio_test_swapcache(folio)) { unsigned long nr = folio_nr_pages(folio); unsigned long folio_start = ALIGN_DOWN(vmf->address, nr * PAGE_SIZE); unsigned long idx = (vmf->address - folio_start) / PAGE_SIZE; pte_t *folio_ptep = vmf->pte - idx; pte_t folio_pte = ptep_get(folio_ptep); if (!pte_same(folio_pte, pte_move_swp_offset(vmf->orig_pte, -idx)) || swap_pte_batch(folio_ptep, nr, folio_pte) != nr) goto out_nomap; page_idx = idx; address = folio_start; ptep = folio_ptep; goto check_folio; } nr_pages = 1; page_idx = 0; address = vmf->address; ptep = vmf->pte; if (folio_test_large(folio) && folio_test_swapcache(folio)) { int nr = folio_nr_pages(folio); unsigned long idx = folio_page_idx(folio, page); unsigned long folio_start = address - idx * PAGE_SIZE; unsigned long folio_end = folio_start + nr * PAGE_SIZE; pte_t *folio_ptep; pte_t folio_pte; if (unlikely(folio_start < max(address & PMD_MASK, vma->vm_start))) goto check_folio; if (unlikely(folio_end > pmd_addr_end(address, vma->vm_end))) goto check_folio; folio_ptep = vmf->pte - idx; folio_pte = ptep_get(folio_ptep); if (!pte_same(folio_pte, pte_move_swp_offset(vmf->orig_pte, -idx)) || swap_pte_batch(folio_ptep, nr, folio_pte) != nr) goto check_folio; page_idx = idx; address = folio_start; ptep = folio_ptep; nr_pages = nr; entry = folio->swap; page = &folio->page; } check_folio: /* * PG_anon_exclusive reuses PG_mappedtodisk for anon pages. A swap pte * must never point at an anonymous page in the swapcache that is * PG_anon_exclusive. Sanity check that this holds and especially, that * no filesystem set PG_mappedtodisk on a page in the swapcache. Sanity * check after taking the PT lock and making sure that nobody * concurrently faulted in this page and set PG_anon_exclusive. */ BUG_ON(!folio_test_anon(folio) && folio_test_mappedtodisk(folio)); BUG_ON(folio_test_anon(folio) && PageAnonExclusive(page)); /* * Check under PT lock (to protect against concurrent fork() sharing * the swap entry concurrently) for certainly exclusive pages. */ if (!folio_test_ksm(folio)) { exclusive = pte_swp_exclusive(vmf->orig_pte); if (folio != swapcache) { /* * We have a fresh page that is not exposed to the * swapcache -> certainly exclusive. */ exclusive = true; } else if (exclusive && folio_test_writeback(folio) && data_race(si->flags & SWP_STABLE_WRITES)) { /* * This is tricky: not all swap backends support * concurrent page modifications while under writeback. * * So if we stumble over such a page in the swapcache * we must not set the page exclusive, otherwise we can * map it writable without further checks and modify it * while still under writeback. * * For these problematic swap backends, simply drop the * exclusive marker: this is perfectly fine as we start * writeback only if we fully unmapped the page and * there are no unexpected references on the page after * unmapping succeeded. After fully unmapped, no * further GUP references (FOLL_GET and FOLL_PIN) can * appear, so dropping the exclusive marker and mapping * it only R/O is fine. */ exclusive = false; } } /* * Some architectures may have to restore extra metadata to the page * when reading from swap. 
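* (For example, arm64 uses arch_swap_restore() to restore MTE tags that were * saved when the folio was swapped out.)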
This metadata may be indexed by swap entry * so this must be called before swap_free(). */ arch_swap_restore(folio_swap(entry, folio), folio); /* * Remove the swap entry and conditionally try to free up the swapcache. * We're already holding a reference on the page but haven't mapped it * yet. */ swap_free_nr(entry, nr_pages); if (should_try_to_free_swap(folio, vma, vmf->flags)) folio_free_swap(folio); add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr_pages); add_mm_counter(vma->vm_mm, MM_SWAPENTS, -nr_pages); pte = mk_pte(page, vma->vm_page_prot); if (pte_swp_soft_dirty(vmf->orig_pte)) pte = pte_mksoft_dirty(pte); if (pte_swp_uffd_wp(vmf->orig_pte)) pte = pte_mkuffd_wp(pte); /* * Same logic as in do_wp_page(); however, optimize for pages that are * certainly not shared either because we just allocated them without * exposing them to the swapcache or because the swap entry indicates * exclusivity. */ if (!folio_test_ksm(folio) && (exclusive || folio_ref_count(folio) == 1)) { if ((vma->vm_flags & VM_WRITE) && !userfaultfd_pte_wp(vma, pte) && !pte_needs_soft_dirty_wp(vma, pte)) { pte = pte_mkwrite(pte, vma); if (vmf->flags & FAULT_FLAG_WRITE) { pte = pte_mkdirty(pte); vmf->flags &= ~FAULT_FLAG_WRITE; } } rmap_flags |= RMAP_EXCLUSIVE; } folio_ref_add(folio, nr_pages - 1); flush_icache_pages(vma, page, nr_pages); vmf->orig_pte = pte_advance_pfn(pte, page_idx); /* ksm created a completely new copy */ if (unlikely(folio != swapcache && swapcache)) { folio_add_new_anon_rmap(folio, vma, address, RMAP_EXCLUSIVE); folio_add_lru_vma(folio, vma); } else if (!folio_test_anon(folio)) { /* * We currently only expect small !anon folios which are either * fully exclusive or fully shared, or new allocated large * folios which are fully exclusive. If we ever get large * folios within swapcache here, we have to be careful. */ VM_WARN_ON_ONCE(folio_test_large(folio) && folio_test_swapcache(folio)); VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio); folio_add_new_anon_rmap(folio, vma, address, rmap_flags); } else { folio_add_anon_rmap_ptes(folio, page, nr_pages, vma, address, rmap_flags); } VM_BUG_ON(!folio_test_anon(folio) || (pte_write(pte) && !PageAnonExclusive(page))); set_ptes(vma->vm_mm, address, ptep, pte, nr_pages); arch_do_swap_page_nr(vma->vm_mm, vma, address, pte, pte, nr_pages); folio_unlock(folio); if (folio != swapcache && swapcache) { /* * Hold the lock to avoid the swap entry to be reused * until we take the PT lock for the pte_same() check * (to avoid false positives from pte_same). For * further safety release the lock after the swap_free * so that the swap count won't change under a * parallel locked swapcache. 
*/ folio_unlock(swapcache); folio_put(swapcache); } if (vmf->flags & FAULT_FLAG_WRITE) { ret |= do_wp_page(vmf); if (ret & VM_FAULT_ERROR) ret &= VM_FAULT_ERROR; goto out; } /* No need to invalidate - it was non-present before */ update_mmu_cache_range(vmf, vma, address, ptep, nr_pages); unlock: if (vmf->pte) pte_unmap_unlock(vmf->pte, vmf->ptl); out: /* Clear the swap cache pin for direct swapin after PTL unlock */ if (need_clear_cache) { swapcache_clear(si, entry, nr_pages); if (waitqueue_active(&swapcache_wq)) wake_up(&swapcache_wq); } if (si) put_swap_device(si); return ret; out_nomap: if (vmf->pte) pte_unmap_unlock(vmf->pte, vmf->ptl); out_page: folio_unlock(folio); out_release: folio_put(folio); if (folio != swapcache && swapcache) { folio_unlock(swapcache); folio_put(swapcache); } if (need_clear_cache) { swapcache_clear(si, entry, nr_pages); if (waitqueue_active(&swapcache_wq)) wake_up(&swapcache_wq); } if (si) put_swap_device(si); return ret; } static bool pte_range_none(pte_t *pte, int nr_pages) { int i; for (i = 0; i < nr_pages; i++) { if (!pte_none(ptep_get_lockless(pte + i))) return false; } return true; } static struct folio *alloc_anon_folio(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; #ifdef CONFIG_TRANSPARENT_HUGEPAGE unsigned long orders; struct folio *folio; unsigned long addr; pte_t *pte; gfp_t gfp; int order; /* * If uffd is active for the vma we need per-page fault fidelity to * maintain the uffd semantics. */ if (unlikely(userfaultfd_armed(vma))) goto fallback; /* * Get a list of all the (large) orders below PMD_ORDER that are enabled * for this vma. Then filter out the orders that can't be allocated over * the faulting address and still be fully contained in the vma. */ orders = thp_vma_allowable_orders(vma, vma->vm_flags, TVA_IN_PF | TVA_ENFORCE_SYSFS, BIT(PMD_ORDER) - 1); orders = thp_vma_suitable_orders(vma, vmf->address, orders); if (!orders) goto fallback; pte = pte_offset_map(vmf->pmd, vmf->address & PMD_MASK); if (!pte) return ERR_PTR(-EAGAIN); /* * Find the highest order where the aligned range is completely * pte_none(). Note that all remaining orders will be completely * pte_none(). */ order = highest_order(orders); while (orders) { addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order); if (pte_range_none(pte + pte_index(addr), 1 << order)) break; order = next_order(&orders, order); } pte_unmap(pte); if (!orders) goto fallback; /* Try allocating the highest of the remaining orders. */ gfp = vma_thp_gfp_mask(vma); while (orders) { addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order); folio = vma_alloc_folio(gfp, order, vma, addr); if (folio) { if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) { count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE); folio_put(folio); goto next; } folio_throttle_swaprate(folio, gfp); /* * When a folio is not zeroed during allocation * (__GFP_ZERO not used) or user folios require special * handling, folio_zero_user() is used to make sure * that the page corresponding to the faulting address * will be hot in the cache after zeroing. */ if (user_alloc_needs_zeroing()) folio_zero_user(folio, vmf->address); return folio; } next: count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK); order = next_order(&orders, order); } fallback: #endif return folio_prealloc(vma->vm_mm, vma, vmf->address, true); } /* * We enter with non-exclusive mmap_lock (to exclude vma changes, * but allow concurrent faults), and pte mapped but not yet locked. * We return with mmap_lock still held, but pte unmapped and unlocked. 
*/ static vm_fault_t do_anonymous_page(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; unsigned long addr = vmf->address; struct folio *folio; vm_fault_t ret = 0; int nr_pages = 1; pte_t entry; /* File mapping without ->vm_ops ? */ if (vma->vm_flags & VM_SHARED) return VM_FAULT_SIGBUS; /* * Use pte_alloc() instead of pte_alloc_map(), so that OOM can * be distinguished from a transient failure of pte_offset_map(). */ if (pte_alloc(vma->vm_mm, vmf->pmd)) return VM_FAULT_OOM; /* Use the zero-page for reads */ if (!(vmf->flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(vma->vm_mm)) { entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address), vma->vm_page_prot)); vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl); if (!vmf->pte) goto unlock; if (vmf_pte_changed(vmf)) { update_mmu_tlb(vma, vmf->address, vmf->pte); goto unlock; } ret = check_stable_address_space(vma->vm_mm); if (ret) goto unlock; /* Deliver the page fault to userland, check inside PT lock */ if (userfaultfd_missing(vma)) { pte_unmap_unlock(vmf->pte, vmf->ptl); return handle_userfault(vmf, VM_UFFD_MISSING); } goto setpte; } /* Allocate our own private page. */ ret = vmf_anon_prepare(vmf); if (ret) return ret; /* Returns NULL on OOM or ERR_PTR(-EAGAIN) if we must retry the fault */ folio = alloc_anon_folio(vmf); if (IS_ERR(folio)) return 0; if (!folio) goto oom; nr_pages = folio_nr_pages(folio); addr = ALIGN_DOWN(vmf->address, nr_pages * PAGE_SIZE); /* * The memory barrier inside __folio_mark_uptodate makes sure that * preceding stores to the page contents become visible before * the set_pte_at() write. */ __folio_mark_uptodate(folio); entry = mk_pte(&folio->page, vma->vm_page_prot); entry = pte_sw_mkyoung(entry); if (vma->vm_flags & VM_WRITE) entry = pte_mkwrite(pte_mkdirty(entry), vma); vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl); if (!vmf->pte) goto release; if (nr_pages == 1 && vmf_pte_changed(vmf)) { update_mmu_tlb(vma, addr, vmf->pte); goto release; } else if (nr_pages > 1 && !pte_range_none(vmf->pte, nr_pages)) { update_mmu_tlb_range(vma, addr, vmf->pte, nr_pages); goto release; } ret = check_stable_address_space(vma->vm_mm); if (ret) goto release; /* Deliver the page fault to userland, check inside PT lock */ if (userfaultfd_missing(vma)) { pte_unmap_unlock(vmf->pte, vmf->ptl); folio_put(folio); return handle_userfault(vmf, VM_UFFD_MISSING); } folio_ref_add(folio, nr_pages - 1); add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr_pages); count_mthp_stat(folio_order(folio), MTHP_STAT_ANON_FAULT_ALLOC); folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE); folio_add_lru_vma(folio, vma); setpte: if (vmf_orig_pte_uffd_wp(vmf)) entry = pte_mkuffd_wp(entry); set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr_pages); /* No need to invalidate - it was non-present before */ update_mmu_cache_range(vmf, vma, addr, vmf->pte, nr_pages); unlock: if (vmf->pte) pte_unmap_unlock(vmf->pte, vmf->ptl); return ret; release: folio_put(folio); goto unlock; oom: return VM_FAULT_OOM; } /* * The mmap_lock must have been held on entry, and may have been * released depending on flags and vma->vm_ops->fault() return value. * See filemap_fault() and __lock_page_retry(). 
*/ static vm_fault_t __do_fault(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; struct folio *folio; vm_fault_t ret; /* * Preallocate pte before we take page_lock because this might lead to * deadlocks for memcg reclaim which waits for pages under writeback: * lock_page(A) * SetPageWriteback(A) * unlock_page(A) * lock_page(B) * lock_page(B) * pte_alloc_one * shrink_folio_list * wait_on_page_writeback(A) * SetPageWriteback(B) * unlock_page(B) * # flush A, B to clear the writeback */ if (pmd_none(*vmf->pmd) && !vmf->prealloc_pte) { vmf->prealloc_pte = pte_alloc_one(vma->vm_mm); if (!vmf->prealloc_pte) return VM_FAULT_OOM; } ret = vma->vm_ops->fault(vmf); if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY | VM_FAULT_DONE_COW))) return ret; folio = page_folio(vmf->page); if (unlikely(PageHWPoison(vmf->page))) { vm_fault_t poisonret = VM_FAULT_HWPOISON; if (ret & VM_FAULT_LOCKED) { if (page_mapped(vmf->page)) unmap_mapping_folio(folio); /* Retry if a clean folio was removed from the cache. */ if (mapping_evict_folio(folio->mapping, folio)) poisonret = VM_FAULT_NOPAGE; folio_unlock(folio); } folio_put(folio); vmf->page = NULL; return poisonret; } if (unlikely(!(ret & VM_FAULT_LOCKED))) folio_lock(folio); else VM_BUG_ON_PAGE(!folio_test_locked(folio), vmf->page); return ret; } #ifdef CONFIG_TRANSPARENT_HUGEPAGE static void deposit_prealloc_pte(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte); /* * We are going to consume the prealloc table, * count that as nr_ptes. */ mm_inc_nr_ptes(vma->vm_mm); vmf->prealloc_pte = NULL; } vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page) { struct folio *folio = page_folio(page); struct vm_area_struct *vma = vmf->vma; bool write = vmf->flags & FAULT_FLAG_WRITE; unsigned long haddr = vmf->address & HPAGE_PMD_MASK; pmd_t entry; vm_fault_t ret = VM_FAULT_FALLBACK; /* * It is too late to allocate a small folio, we already have a large * folio in the pagecache: especially s390 KVM cannot tolerate any * PMD mappings, but PTE-mapped THP are fine. So let's simply refuse any * PMD mappings if THPs are disabled. */ if (thp_disabled_by_hw() || vma_thp_disabled(vma, vma->vm_flags)) return ret; if (!thp_vma_suitable_order(vma, haddr, PMD_ORDER)) return ret; if (folio_order(folio) != HPAGE_PMD_ORDER) return ret; page = &folio->page; /* * Just back off if any subpage of a THP is corrupted; otherwise * the corrupted page may be mapped by a PMD silently and escape the * check. This kind of THP can only be PTE-mapped. Access to * the corrupted subpage should trigger SIGBUS as expected. */ if (unlikely(folio_test_has_hwpoisoned(folio))) return ret; /* * Archs like ppc64 need additional space to store information * related to pte entry. Use the preallocated table for that.
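* (The preallocated table is then consumed under the pmd lock by * deposit_prealloc_pte() above once the huge PMD mapping is installed.)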
*/ if (arch_needs_pgtable_deposit() && !vmf->prealloc_pte) { vmf->prealloc_pte = pte_alloc_one(vma->vm_mm); if (!vmf->prealloc_pte) return VM_FAULT_OOM; } vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); if (unlikely(!pmd_none(*vmf->pmd))) goto out; flush_icache_pages(vma, page, HPAGE_PMD_NR); entry = mk_huge_pmd(page, vma->vm_page_prot); if (write) entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); add_mm_counter(vma->vm_mm, mm_counter_file(folio), HPAGE_PMD_NR); folio_add_file_rmap_pmd(folio, page, vma); /* * deposit and withdraw with pmd lock held */ if (arch_needs_pgtable_deposit()) deposit_prealloc_pte(vmf); set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry); update_mmu_cache_pmd(vma, haddr, vmf->pmd); /* fault is handled */ ret = 0; count_vm_event(THP_FILE_MAPPED); out: spin_unlock(vmf->ptl); return ret; } #else vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page) { return VM_FAULT_FALLBACK; } #endif /** * set_pte_range - Set a range of PTEs to point to pages in a folio. * @vmf: Fault description. * @folio: The folio that contains @page. * @page: The first page to create a PTE for. * @nr: The number of PTEs to create. * @addr: The first address to create a PTE for. */ void set_pte_range(struct vm_fault *vmf, struct folio *folio, struct page *page, unsigned int nr, unsigned long addr) { struct vm_area_struct *vma = vmf->vma; bool write = vmf->flags & FAULT_FLAG_WRITE; bool prefault = !in_range(vmf->address, addr, nr * PAGE_SIZE); pte_t entry; flush_icache_pages(vma, page, nr); entry = mk_pte(page, vma->vm_page_prot); if (prefault && arch_wants_old_prefaulted_pte()) entry = pte_mkold(entry); else entry = pte_sw_mkyoung(entry); if (write) entry = maybe_mkwrite(pte_mkdirty(entry), vma); if (unlikely(vmf_orig_pte_uffd_wp(vmf))) entry = pte_mkuffd_wp(entry); /* copy-on-write page */ if (write && !(vma->vm_flags & VM_SHARED)) { VM_BUG_ON_FOLIO(nr != 1, folio); folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE); folio_add_lru_vma(folio, vma); } else { folio_add_file_rmap_ptes(folio, page, nr, vma); } set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr); /* no need to invalidate: a not-present page won't be cached */ update_mmu_cache_range(vmf, vma, addr, vmf->pte, nr); } static bool vmf_pte_changed(struct vm_fault *vmf) { if (vmf->flags & FAULT_FLAG_ORIG_PTE_VALID) return !pte_same(ptep_get(vmf->pte), vmf->orig_pte); return !pte_none(ptep_get(vmf->pte)); } /** * finish_fault - finish page fault once we have prepared the page to fault * * @vmf: structure describing the fault * * This function handles all that is needed to finish a page fault once the * page to fault in is prepared. It handles locking of PTEs, inserts PTE for * given page, adds reverse page mapping, handles memcg charges and LRU * addition. * * The function expects the page to be locked and on success it consumes a * reference of a page being mapped (for the PTE which maps it). * * Return: %0 on success, %VM_FAULT_ code in case of error. */ vm_fault_t finish_fault(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; struct page *page; struct folio *folio; vm_fault_t ret; bool is_cow = (vmf->flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED); int type, nr_pages; unsigned long addr = vmf->address; /* Did we COW the page?
*/ if (is_cow) page = vmf->cow_page; else page = vmf->page; /* * check even for read faults because we might have lost our CoWed * page */ if (!(vma->vm_flags & VM_SHARED)) { ret = check_stable_address_space(vma->vm_mm); if (ret) return ret; } if (pmd_none(*vmf->pmd)) { if (PageTransCompound(page)) { ret = do_set_pmd(vmf, page); if (ret != VM_FAULT_FALLBACK) return ret; } if (vmf->prealloc_pte) pmd_install(vma->vm_mm, vmf->pmd, &vmf->prealloc_pte); else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd))) return VM_FAULT_OOM; } folio = page_folio(page); nr_pages = folio_nr_pages(folio); /* * Use per-page faults to maintain the uffd semantics; the same * approach also applies to non-anonymous-shmem faults to avoid * inflating the RSS of the process. */ if (!vma_is_anon_shmem(vma) || unlikely(userfaultfd_armed(vma))) { nr_pages = 1; } else if (nr_pages > 1) { pgoff_t idx = folio_page_idx(folio, page); /* The page offset of vmf->address within the VMA. */ pgoff_t vma_off = vmf->pgoff - vmf->vma->vm_pgoff; /* The index of the entry in the pagetable for fault page. */ pgoff_t pte_off = pte_index(vmf->address); /* * Fall back to per-page faults in case the folio in the page * cache extends beyond the VMA limits or the PMD pagetable * limits. */ if (unlikely(vma_off < idx || vma_off + (nr_pages - idx) > vma_pages(vma) || pte_off < idx || pte_off + (nr_pages - idx) > PTRS_PER_PTE)) { nr_pages = 1; } else { /* Now we can set mappings for the whole large folio. */ addr = vmf->address - idx * PAGE_SIZE; page = &folio->page; } } vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl); if (!vmf->pte) return VM_FAULT_NOPAGE; /* Re-check under ptl */ if (nr_pages == 1 && unlikely(vmf_pte_changed(vmf))) { update_mmu_tlb(vma, addr, vmf->pte); ret = VM_FAULT_NOPAGE; goto unlock; } else if (nr_pages > 1 && !pte_range_none(vmf->pte, nr_pages)) { update_mmu_tlb_range(vma, addr, vmf->pte, nr_pages); ret = VM_FAULT_NOPAGE; goto unlock; } folio_ref_add(folio, nr_pages - 1); set_pte_range(vmf, folio, page, nr_pages, addr); type = is_cow ? MM_ANONPAGES : mm_counter_file(folio); add_mm_counter(vma->vm_mm, type, nr_pages); ret = 0; unlock: pte_unmap_unlock(vmf->pte, vmf->ptl); return ret; } static unsigned long fault_around_pages __read_mostly = 65536 >> PAGE_SHIFT; #ifdef CONFIG_DEBUG_FS static int fault_around_bytes_get(void *data, u64 *val) { *val = fault_around_pages << PAGE_SHIFT; return 0; } /* * fault_around_bytes must be rounded down to the nearest page order as it's * what do_fault_around() expects to see. */ static int fault_around_bytes_set(void *data, u64 val) { if (val / PAGE_SIZE > PTRS_PER_PTE) return -EINVAL; /* * The minimum value is 1 page, however this results in no fault-around * at all. See should_fault_around(). */ val = max(val, PAGE_SIZE); fault_around_pages = rounddown_pow_of_two(val) >> PAGE_SHIFT; return 0; } DEFINE_DEBUGFS_ATTRIBUTE(fault_around_bytes_fops, fault_around_bytes_get, fault_around_bytes_set, "%llu\n"); static int __init fault_around_debugfs(void) { debugfs_create_file_unsafe("fault_around_bytes", 0644, NULL, NULL, &fault_around_bytes_fops); return 0; } late_initcall(fault_around_debugfs); #endif /* * do_fault_around() tries to map a few pages around the fault address. The * hope is that the pages will be needed soon and this will lower the number * of faults to handle. * * It uses vm_ops->map_pages() to map the pages, which skips the page if it's * not ready to be mapped: not up-to-date, locked, etc.
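* * As a worked example (illustrative numbers only): with the default * fault_around_bytes of 65536 and 4K pages, fault_around_pages is 16, so a * fault at pte index 35 of a page table maps pte indexes 32-47, clamped to * the VMA and to the page table as described below.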
* * This function doesn't cross VMA or page table boundaries, in order to call * map_pages() and acquire a PTE lock only once. * * fault_around_pages defines how many pages we'll try to map. * do_fault_around() expects it to be set to a power of two less than or equal * to PTRS_PER_PTE. * * The virtual address of the area that we map is naturally aligned to * fault_around_pages * PAGE_SIZE rounded down to the machine page size * (and therefore to page order). This way it's easier to guarantee * that we don't cross page table boundaries. */ static vm_fault_t do_fault_around(struct vm_fault *vmf) { pgoff_t nr_pages = READ_ONCE(fault_around_pages); pgoff_t pte_off = pte_index(vmf->address); /* The page offset of vmf->address within the VMA. */ pgoff_t vma_off = vmf->pgoff - vmf->vma->vm_pgoff; pgoff_t from_pte, to_pte; vm_fault_t ret; /* The PTE offset of the start address, clamped to the VMA. */ from_pte = max(ALIGN_DOWN(pte_off, nr_pages), pte_off - min(pte_off, vma_off)); /* The PTE offset of the end address, clamped to the VMA and PTE. */ to_pte = min3(from_pte + nr_pages, (pgoff_t)PTRS_PER_PTE, pte_off + vma_pages(vmf->vma) - vma_off) - 1; if (pmd_none(*vmf->pmd)) { vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm); if (!vmf->prealloc_pte) return VM_FAULT_OOM; } rcu_read_lock(); ret = vmf->vma->vm_ops->map_pages(vmf, vmf->pgoff + from_pte - pte_off, vmf->pgoff + to_pte - pte_off); rcu_read_unlock(); return ret; } /* Return true if we should do read fault-around, false otherwise */ static inline bool should_fault_around(struct vm_fault *vmf) { /* No ->map_pages? No way to fault around... */ if (!vmf->vma->vm_ops->map_pages) return false; if (uffd_disable_fault_around(vmf->vma)) return false; /* A single page implies no faulting 'around' at all. */ return fault_around_pages > 1; } static vm_fault_t do_read_fault(struct vm_fault *vmf) { vm_fault_t ret = 0; struct folio *folio; /* * Let's call ->map_pages() first and use ->fault() as fallback * if page by the offset is not ready to be mapped (cold cache or * something). 
*/ if (should_fault_around(vmf)) { ret = do_fault_around(vmf); if (ret) return ret; } ret = vmf_can_call_fault(vmf); if (ret) return ret; ret = __do_fault(vmf); if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) return ret; ret |= finish_fault(vmf); folio = page_folio(vmf->page); folio_unlock(folio); if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) folio_put(folio); return ret; } static vm_fault_t do_cow_fault(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; struct folio *folio; vm_fault_t ret; ret = vmf_can_call_fault(vmf); if (!ret) ret = vmf_anon_prepare(vmf); if (ret) return ret; folio = folio_prealloc(vma->vm_mm, vma, vmf->address, false); if (!folio) return VM_FAULT_OOM; vmf->cow_page = &folio->page; ret = __do_fault(vmf); if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) goto uncharge_out; if (ret & VM_FAULT_DONE_COW) return ret; if (copy_mc_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma)) { ret = VM_FAULT_HWPOISON; goto unlock; } __folio_mark_uptodate(folio); ret |= finish_fault(vmf); unlock: unlock_page(vmf->page); put_page(vmf->page); if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) goto uncharge_out; return ret; uncharge_out: folio_put(folio); return ret; } static vm_fault_t do_shared_fault(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; vm_fault_t ret, tmp; struct folio *folio; ret = vmf_can_call_fault(vmf); if (ret) return ret; ret = __do_fault(vmf); if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) return ret; folio = page_folio(vmf->page); /* * Check if the backing address space wants to know that the page is * about to become writable */ if (vma->vm_ops->page_mkwrite) { folio_unlock(folio); tmp = do_page_mkwrite(vmf, folio); if (unlikely(!tmp || (tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) { folio_put(folio); return tmp; } } ret |= finish_fault(vmf); if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) { folio_unlock(folio); folio_put(folio); return ret; } ret |= fault_dirty_shared_page(vmf); return ret; } /* * We enter with non-exclusive mmap_lock (to exclude vma changes, * but allow concurrent faults). * The mmap_lock may have been released depending on flags and our * return value. See filemap_fault() and __folio_lock_or_retry(). * If mmap_lock is released, vma may become invalid (for example * by other thread calling munmap()). */ static vm_fault_t do_fault(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; struct mm_struct *vm_mm = vma->vm_mm; vm_fault_t ret; /* * The VMA was not fully populated on mmap() or missing VM_DONTEXPAND */ if (!vma->vm_ops->fault) { vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl); if (unlikely(!vmf->pte)) ret = VM_FAULT_SIGBUS; else { /* * Make sure this is not a temporary clearing of pte * by holding ptl and checking again. A R/M/W update * of pte involves: take ptl, clearing the pte so that * we don't have concurrent modification by hardware * followed by an update. 
*/ if (unlikely(pte_none(ptep_get(vmf->pte)))) ret = VM_FAULT_SIGBUS; else ret = VM_FAULT_NOPAGE; pte_unmap_unlock(vmf->pte, vmf->ptl); } } else if (!(vmf->flags & FAULT_FLAG_WRITE)) ret = do_read_fault(vmf); else if (!(vma->vm_flags & VM_SHARED)) ret = do_cow_fault(vmf); else ret = do_shared_fault(vmf); /* preallocated pagetable is unused: free it */ if (vmf->prealloc_pte) { pte_free(vm_mm, vmf->prealloc_pte); vmf->prealloc_pte = NULL; } return ret; } int numa_migrate_check(struct folio *folio, struct vm_fault *vmf, unsigned long addr, int *flags, bool writable, int *last_cpupid) { struct vm_area_struct *vma = vmf->vma; /* * Avoid grouping on RO pages in general. RO pages shouldn't hurt as * much anyway since they can be in shared cache state. This misses * the case where a mapping is writable but the process never writes * to it but pte_write gets cleared during protection updates and * pte_dirty has unpredictable behaviour between PTE scan updates, * background writeback, dirty balancing and application behaviour. */ if (!writable) *flags |= TNF_NO_GROUP; /* * Flag if the folio is shared between multiple address spaces. This * is later used when determining whether to group tasks together. */ if (folio_likely_mapped_shared(folio) && (vma->vm_flags & VM_SHARED)) *flags |= TNF_SHARED; /* * For memory tiering mode, the cpupid of a slow memory page is used * to record the page access time, so use the default value. */ if (folio_use_access_time(folio)) *last_cpupid = (-1 & LAST_CPUPID_MASK); else *last_cpupid = folio_last_cpupid(folio); /* Record the current PID accessing the VMA */ vma_set_access_pid_bit(vma); count_vm_numa_event(NUMA_HINT_FAULTS); #ifdef CONFIG_NUMA_BALANCING count_memcg_folio_events(folio, NUMA_HINT_FAULTS, 1); #endif if (folio_nid(folio) == numa_node_id()) { count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL); *flags |= TNF_FAULT_LOCAL; } return mpol_misplaced(folio, vmf, addr); } static void numa_rebuild_single_mapping(struct vm_fault *vmf, struct vm_area_struct *vma, unsigned long fault_addr, pte_t *fault_pte, bool writable) { pte_t pte, old_pte; old_pte = ptep_modify_prot_start(vma, fault_addr, fault_pte); pte = pte_modify(old_pte, vma->vm_page_prot); pte = pte_mkyoung(pte); if (writable) pte = pte_mkwrite(pte, vma); ptep_modify_prot_commit(vma, fault_addr, fault_pte, old_pte, pte); update_mmu_cache_range(vmf, vma, fault_addr, fault_pte, 1); } static void numa_rebuild_large_mapping(struct vm_fault *vmf, struct vm_area_struct *vma, struct folio *folio, pte_t fault_pte, bool ignore_writable, bool pte_write_upgrade) { int nr = pte_pfn(fault_pte) - folio_pfn(folio); unsigned long start, end, addr = vmf->address; unsigned long addr_start = addr - (nr << PAGE_SHIFT); unsigned long pt_start = ALIGN_DOWN(addr, PMD_SIZE); pte_t *start_ptep; /* Stay within the VMA and within the page table.
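* For example, the max3()/min3() below clamp the rebuilt range to the folio, * to the PMD page table that vmf->address falls in, and to * [vma->vm_start, vma->vm_end), whichever is most restrictive.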
*/ start = max3(addr_start, pt_start, vma->vm_start); end = min3(addr_start + folio_size(folio), pt_start + PMD_SIZE, vma->vm_end); start_ptep = vmf->pte - ((addr - start) >> PAGE_SHIFT); /* Restore all PTEs' mapping of the large folio */ for (addr = start; addr != end; start_ptep++, addr += PAGE_SIZE) { pte_t ptent = ptep_get(start_ptep); bool writable = false; if (!pte_present(ptent) || !pte_protnone(ptent)) continue; if (pfn_folio(pte_pfn(ptent)) != folio) continue; if (!ignore_writable) { ptent = pte_modify(ptent, vma->vm_page_prot); writable = pte_write(ptent); if (!writable && pte_write_upgrade && can_change_pte_writable(vma, addr, ptent)) writable = true; } numa_rebuild_single_mapping(vmf, vma, addr, start_ptep, writable); } } static vm_fault_t do_numa_page(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; struct folio *folio = NULL; int nid = NUMA_NO_NODE; bool writable = false, ignore_writable = false; bool pte_write_upgrade = vma_wants_manual_pte_write_upgrade(vma); int last_cpupid; int target_nid; pte_t pte, old_pte; int flags = 0, nr_pages; /* * The pte cannot be used safely until we verify, while holding the page * table lock, that its contents have not changed during fault handling. */ spin_lock(vmf->ptl); /* Read the live PTE from the page tables: */ old_pte = ptep_get(vmf->pte); if (unlikely(!pte_same(old_pte, vmf->orig_pte))) { pte_unmap_unlock(vmf->pte, vmf->ptl); return 0; } pte = pte_modify(old_pte, vma->vm_page_prot); /* * Detect now whether the PTE could be writable; this information * is only valid while holding the PT lock. */ writable = pte_write(pte); if (!writable && pte_write_upgrade && can_change_pte_writable(vma, vmf->address, pte)) writable = true; folio = vm_normal_folio(vma, vmf->address, pte); if (!folio || folio_is_zone_device(folio)) goto out_map; nid = folio_nid(folio); nr_pages = folio_nr_pages(folio); target_nid = numa_migrate_check(folio, vmf, vmf->address, &flags, writable, &last_cpupid); if (target_nid == NUMA_NO_NODE) goto out_map; if (migrate_misplaced_folio_prepare(folio, vma, target_nid)) { flags |= TNF_MIGRATE_FAIL; goto out_map; } /* The folio is isolated and isolation code holds a folio reference. */ pte_unmap_unlock(vmf->pte, vmf->ptl); writable = false; ignore_writable = true; /* Migrate to the requested node */ if (!migrate_misplaced_folio(folio, vma, target_nid)) { nid = target_nid; flags |= TNF_MIGRATED; task_numa_fault(last_cpupid, nid, nr_pages, flags); return 0; } flags |= TNF_MIGRATE_FAIL; vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl); if (unlikely(!vmf->pte)) return 0; if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) { pte_unmap_unlock(vmf->pte, vmf->ptl); return 0; } out_map: /* * Make it present again, depending on how arch implements * non-accessible ptes, some can allow access by kernel mode. 
*/ if (folio && folio_test_large(folio)) numa_rebuild_large_mapping(vmf, vma, folio, pte, ignore_writable, pte_write_upgrade); else numa_rebuild_single_mapping(vmf, vma, vmf->address, vmf->pte, writable); pte_unmap_unlock(vmf->pte, vmf->ptl); if (nid != NUMA_NO_NODE) task_numa_fault(last_cpupid, nid, nr_pages, flags); return 0; } static inline vm_fault_t create_huge_pmd(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; if (vma_is_anonymous(vma)) return do_huge_pmd_anonymous_page(vmf); if (vma->vm_ops->huge_fault) return vma->vm_ops->huge_fault(vmf, PMD_ORDER); return VM_FAULT_FALLBACK; } /* `inline' is required to avoid gcc 4.1.2 build error */ static inline vm_fault_t wp_huge_pmd(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE; vm_fault_t ret; if (vma_is_anonymous(vma)) { if (likely(!unshare) && userfaultfd_huge_pmd_wp(vma, vmf->orig_pmd)) { if (userfaultfd_wp_async(vmf->vma)) goto split; return handle_userfault(vmf, VM_UFFD_WP); } return do_huge_pmd_wp_page(vmf); } if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) { if (vma->vm_ops->huge_fault) { ret = vma->vm_ops->huge_fault(vmf, PMD_ORDER); if (!(ret & VM_FAULT_FALLBACK)) return ret; } } split: /* COW or write-notify handled on pte level: split pmd. */ __split_huge_pmd(vma, vmf->pmd, vmf->address, false, NULL); return VM_FAULT_FALLBACK; } static vm_fault_t create_huge_pud(struct vm_fault *vmf) { #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \ defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) struct vm_area_struct *vma = vmf->vma; /* No support for anonymous transparent PUD pages yet */ if (vma_is_anonymous(vma)) return VM_FAULT_FALLBACK; if (vma->vm_ops->huge_fault) return vma->vm_ops->huge_fault(vmf, PUD_ORDER); #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ return VM_FAULT_FALLBACK; } static vm_fault_t wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud) { #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \ defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) struct vm_area_struct *vma = vmf->vma; vm_fault_t ret; /* No support for anonymous transparent PUD pages yet */ if (vma_is_anonymous(vma)) goto split; if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) { if (vma->vm_ops->huge_fault) { ret = vma->vm_ops->huge_fault(vmf, PUD_ORDER); if (!(ret & VM_FAULT_FALLBACK)) return ret; } } split: /* COW or write-notify not handled on PUD level: split pud.*/ __split_huge_pud(vma, vmf->pud, vmf->address); #endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ return VM_FAULT_FALLBACK; } /* * These routines also need to handle stuff like marking pages dirty * and/or accessed for architectures that don't do it in hardware (most * RISC architectures). The early dirtying is also good on the i386. * * There is also a hook called "update_mmu_cache()" that architectures * with external mmu caches can use to update those (ie the Sparc or * PowerPC hashed page tables that act as extended TLBs). * * We enter with non-exclusive mmap_lock (to exclude vma changes, but allow * concurrent faults). * * The mmap_lock may have been released depending on flags and our return value. * See filemap_fault() and __folio_lock_or_retry(). */ static vm_fault_t handle_pte_fault(struct vm_fault *vmf) { pte_t entry; if (unlikely(pmd_none(*vmf->pmd))) { /* * Leave __pte_alloc() until later: because vm_ops->fault may * want to allocate huge page, and if we expose page table * for an instant, it will be difficult to retract from * concurrent faults and from rmap lookups. 
*/ vmf->pte = NULL; vmf->flags &= ~FAULT_FLAG_ORIG_PTE_VALID; } else { pmd_t dummy_pmdval; /* * A regular pmd is established and it can't morph into a huge * pmd by anon khugepaged, since that takes mmap_lock in write * mode; but shmem or file collapse to THP could still morph * it into a huge pmd: just retry later if so. * * Use the maywrite version to indicate that vmf->pte may be * modified, but since we will use pte_same() to detect the * change of the !pte_none() entry, there is no need to recheck * the pmdval. Here we choose to pass a dummy variable instead * of NULL, which helps new users think about why this place is * special. */ vmf->pte = pte_offset_map_rw_nolock(vmf->vma->vm_mm, vmf->pmd, vmf->address, &dummy_pmdval, &vmf->ptl); if (unlikely(!vmf->pte)) return 0; vmf->orig_pte = ptep_get_lockless(vmf->pte); vmf->flags |= FAULT_FLAG_ORIG_PTE_VALID; if (pte_none(vmf->orig_pte)) { pte_unmap(vmf->pte); vmf->pte = NULL; } } if (!vmf->pte) return do_pte_missing(vmf); if (!pte_present(vmf->orig_pte)) return do_swap_page(vmf); if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma)) return do_numa_page(vmf); spin_lock(vmf->ptl); entry = vmf->orig_pte; if (unlikely(!pte_same(ptep_get(vmf->pte), entry))) { update_mmu_tlb(vmf->vma, vmf->address, vmf->pte); goto unlock; } if (vmf->flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) { if (!pte_write(entry)) return do_wp_page(vmf); else if (likely(vmf->flags & FAULT_FLAG_WRITE)) entry = pte_mkdirty(entry); } entry = pte_mkyoung(entry); if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry, vmf->flags & FAULT_FLAG_WRITE)) { update_mmu_cache_range(vmf, vmf->vma, vmf->address, vmf->pte, 1); } else { /* Skip spurious TLB flush for retried page fault */ if (vmf->flags & FAULT_FLAG_TRIED) goto unlock; /* * This is needed only for protection faults but the arch code * is not yet telling us if this is a protection fault or not. * This still avoids useless tlb flushes for .text page faults * with threads. */ if (vmf->flags & FAULT_FLAG_WRITE) flush_tlb_fix_spurious_fault(vmf->vma, vmf->address, vmf->pte); } unlock: pte_unmap_unlock(vmf->pte, vmf->ptl); return 0; } /* * On entry, we hold either the VMA lock or the mmap_lock * (FAULT_FLAG_VMA_LOCK tells you which). If VM_FAULT_RETRY is set in * the result, the mmap_lock is not held on exit. See filemap_fault() * and __folio_lock_or_retry(). */ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma, unsigned long address, unsigned int flags) { struct vm_fault vmf = { .vma = vma, .address = address & PAGE_MASK, .real_address = address, .flags = flags, .pgoff = linear_page_index(vma, address), .gfp_mask = __get_fault_gfp_mask(vma), }; struct mm_struct *mm = vma->vm_mm; unsigned long vm_flags = vma->vm_flags; pgd_t *pgd; p4d_t *p4d; vm_fault_t ret; pgd = pgd_offset(mm, address); p4d = p4d_alloc(mm, pgd, address); if (!p4d) return VM_FAULT_OOM; vmf.pud = pud_alloc(mm, p4d, address); if (!vmf.pud) return VM_FAULT_OOM; retry_pud: if (pud_none(*vmf.pud) && thp_vma_allowable_order(vma, vm_flags, TVA_IN_PF | TVA_ENFORCE_SYSFS, PUD_ORDER)) { ret = create_huge_pud(&vmf); if (!(ret & VM_FAULT_FALLBACK)) return ret; } else { pud_t orig_pud = *vmf.pud; barrier(); if (pud_trans_huge(orig_pud) || pud_devmap(orig_pud)) { /* * TODO once we support anonymous PUDs: NUMA case and * FAULT_FLAG_UNSHARE handling.
*/ if ((flags & FAULT_FLAG_WRITE) && !pud_write(orig_pud)) { ret = wp_huge_pud(&vmf, orig_pud); if (!(ret & VM_FAULT_FALLBACK)) return ret; } else { huge_pud_set_accessed(&vmf, orig_pud); return 0; } } } vmf.pmd = pmd_alloc(mm, vmf.pud, address); if (!vmf.pmd) return VM_FAULT_OOM; /* Huge pud page fault raced with pmd_alloc? */ if (pud_trans_unstable(vmf.pud)) goto retry_pud; if (pmd_none(*vmf.pmd) && thp_vma_allowable_order(vma, vm_flags, TVA_IN_PF | TVA_ENFORCE_SYSFS, PMD_ORDER)) { ret = create_huge_pmd(&vmf); if (!(ret & VM_FAULT_FALLBACK)) return ret; } else { vmf.orig_pmd = pmdp_get_lockless(vmf.pmd); if (unlikely(is_swap_pmd(vmf.orig_pmd))) { VM_BUG_ON(thp_migration_supported() && !is_pmd_migration_entry(vmf.orig_pmd)); if (is_pmd_migration_entry(vmf.orig_pmd)) pmd_migration_entry_wait(mm, vmf.pmd); return 0; } if (pmd_trans_huge(vmf.orig_pmd) || pmd_devmap(vmf.orig_pmd)) { if (pmd_protnone(vmf.orig_pmd) && vma_is_accessible(vma)) return do_huge_pmd_numa_page(&vmf); if ((flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) && !pmd_write(vmf.orig_pmd)) { ret = wp_huge_pmd(&vmf); if (!(ret & VM_FAULT_FALLBACK)) return ret; } else { huge_pmd_set_accessed(&vmf); return 0; } } } return handle_pte_fault(&vmf); } /** * mm_account_fault - Do page fault accounting * @mm: mm from which memcg should be extracted. It can be NULL. * @regs: the pt_regs struct pointer. When set to NULL, will skip accounting * of perf event counters, but we'll still do the per-task accounting to * the task who triggered this page fault. * @address: the faulted address. * @flags: the fault flags. * @ret: the fault retcode. * * This will take care of most of the page fault accounting. Meanwhile, it * will also include the PERF_COUNT_SW_PAGE_FAULTS_[MAJ|MIN] perf counter * updates. However, note that the handling of PERF_COUNT_SW_PAGE_FAULTS should * still be in per-arch page fault handlers at the entry of page fault. */ static inline void mm_account_fault(struct mm_struct *mm, struct pt_regs *regs, unsigned long address, unsigned int flags, vm_fault_t ret) { bool major; /* Incomplete faults will be accounted upon completion. */ if (ret & VM_FAULT_RETRY) return; /* * To preserve the behavior of older kernels, PGFAULT counters record * both successful and failed faults, as opposed to perf counters, * which ignore failed cases. */ count_vm_event(PGFAULT); count_memcg_event_mm(mm, PGFAULT); /* * Do not account for unsuccessful faults (e.g. when the address wasn't * valid). That includes arch_vma_access_permitted() failing before * reaching here. So this is not a "this many hardware page faults" * counter. We should use the hw profiling for that. */ if (ret & VM_FAULT_ERROR) return; /* * We define the fault as a major fault when the final successful fault * is VM_FAULT_MAJOR, or if it retried (which implies that we couldn't * handle it immediately previously). */ major = (ret & VM_FAULT_MAJOR) || (flags & FAULT_FLAG_TRIED); if (major) current->maj_flt++; else current->min_flt++; /* * If the fault is done for GUP, regs will be NULL. We only do the * accounting for the per thread fault counters who triggered the * fault, and we skip the perf event updates. 
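* (For example, faults resolved on behalf of get_user_pages() pass a NULL * regs pointer here, so only the per-task maj_flt/min_flt counters above are * bumped and the perf software events are skipped.)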
*/ if (!regs) return; if (major) perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address); else perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address); } #ifdef CONFIG_LRU_GEN static void lru_gen_enter_fault(struct vm_area_struct *vma) { /* the LRU algorithm only applies to accesses with recency */ current->in_lru_fault = vma_has_recency(vma); } static void lru_gen_exit_fault(void) { current->in_lru_fault = false; } #else static void lru_gen_enter_fault(struct vm_area_struct *vma) { } static void lru_gen_exit_fault(void) { } #endif /* CONFIG_LRU_GEN */ static vm_fault_t sanitize_fault_flags(struct vm_area_struct *vma, unsigned int *flags) { if (unlikely(*flags & FAULT_FLAG_UNSHARE)) { if (WARN_ON_ONCE(*flags & FAULT_FLAG_WRITE)) return VM_FAULT_SIGSEGV; /* * FAULT_FLAG_UNSHARE only applies to COW mappings. Let's * just treat it like an ordinary read-fault otherwise. */ if (!is_cow_mapping(vma->vm_flags)) *flags &= ~FAULT_FLAG_UNSHARE; } else if (*flags & FAULT_FLAG_WRITE) { /* Write faults on read-only mappings are impossible ... */ if (WARN_ON_ONCE(!(vma->vm_flags & VM_MAYWRITE))) return VM_FAULT_SIGSEGV; /* ... and FOLL_FORCE only applies to COW mappings. */ if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE) && !is_cow_mapping(vma->vm_flags))) return VM_FAULT_SIGSEGV; } #ifdef CONFIG_PER_VMA_LOCK /* * Per-VMA locks can't be used with FAULT_FLAG_RETRY_NOWAIT because of * the assumption that lock is dropped on VM_FAULT_RETRY. */ if (WARN_ON_ONCE((*flags & (FAULT_FLAG_VMA_LOCK | FAULT_FLAG_RETRY_NOWAIT)) == (FAULT_FLAG_VMA_LOCK | FAULT_FLAG_RETRY_NOWAIT))) return VM_FAULT_SIGSEGV; #endif return 0; } /* * By the time we get here, we already hold the mm semaphore * * The mmap_lock may have been released depending on flags and our * return value. See filemap_fault() and __folio_lock_or_retry(). */ vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address, unsigned int flags, struct pt_regs *regs) { /* If the fault handler drops the mmap_lock, vma may be freed */ struct mm_struct *mm = vma->vm_mm; vm_fault_t ret; bool is_droppable; __set_current_state(TASK_RUNNING); ret = sanitize_fault_flags(vma, &flags); if (ret) goto out; if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE, flags & FAULT_FLAG_INSTRUCTION, flags & FAULT_FLAG_REMOTE)) { ret = VM_FAULT_SIGSEGV; goto out; } is_droppable = !!(vma->vm_flags & VM_DROPPABLE); /* * Enable the memcg OOM handling for faults triggered in user * space. Kernel faults are handled more gracefully. */ if (flags & FAULT_FLAG_USER) mem_cgroup_enter_user_fault(); lru_gen_enter_fault(vma); if (unlikely(is_vm_hugetlb_page(vma))) ret = hugetlb_fault(vma->vm_mm, vma, address, flags); else ret = __handle_mm_fault(vma, address, flags); /* * Warning: It is no longer safe to dereference vma-> after this point, * because mmap_lock might have been dropped by __handle_mm_fault(), so * vma might be destroyed from underneath us. */ lru_gen_exit_fault(); /* If the mapping is droppable, then errors due to OOM aren't fatal. */ if (is_droppable) ret &= ~VM_FAULT_OOM; if (flags & FAULT_FLAG_USER) { mem_cgroup_exit_user_fault(); /* * The task may have entered a memcg OOM situation but * if the allocation error was handled gracefully (no * VM_FAULT_OOM), there is no need to kill anything. * Just clean up the OOM state peacefully. 
*/ if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM)) mem_cgroup_oom_synchronize(false); } out: mm_account_fault(mm, regs, address, flags, ret); return ret; } EXPORT_SYMBOL_GPL(handle_mm_fault); #ifdef CONFIG_LOCK_MM_AND_FIND_VMA #include <linux/extable.h> static inline bool get_mmap_lock_carefully(struct mm_struct *mm, struct pt_regs *regs) { if (likely(mmap_read_trylock(mm))) return true; if (regs && !user_mode(regs)) { unsigned long ip = exception_ip(regs); if (!search_exception_tables(ip)) return false; } return !mmap_read_lock_killable(mm); } static inline bool mmap_upgrade_trylock(struct mm_struct *mm) { /* * We don't have this operation yet. * * It should be easy enough to do: it's basically an * atomic_long_try_cmpxchg_acquire() * from RWSEM_READER_BIAS -> RWSEM_WRITER_LOCKED, but * it also needs the proper lockdep magic etc. */ return false; } static inline bool upgrade_mmap_lock_carefully(struct mm_struct *mm, struct pt_regs *regs) { mmap_read_unlock(mm); if (regs && !user_mode(regs)) { unsigned long ip = exception_ip(regs); if (!search_exception_tables(ip)) return false; } return !mmap_write_lock_killable(mm); } /* * Helper for page fault handling. * * This is kind of equivalent to "mmap_read_lock()" followed * by "find_extend_vma()", except it's a lot more careful about * the locking (and will drop the lock on failure). * * For example, if we have a kernel bug that causes a page * fault, we don't want to just use mmap_read_lock() to get * the mm lock, because that would deadlock if the bug were * to happen while we're holding the mm lock for writing. * * So this checks the exception tables on kernel faults in * order to only do this all for instructions that are actually * expected to fault. * * We can also actually take the mm lock for writing if we * need to extend the vma, which helps the VM layer a lot. */ struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm, unsigned long addr, struct pt_regs *regs) { struct vm_area_struct *vma; if (!get_mmap_lock_carefully(mm, regs)) return NULL; vma = find_vma(mm, addr); if (likely(vma && (vma->vm_start <= addr))) return vma; /* * Well, dang. We might still be successful, but only * if we can extend a vma to do so. */ if (!vma || !(vma->vm_flags & VM_GROWSDOWN)) { mmap_read_unlock(mm); return NULL; } /* * We can try to upgrade the mmap lock atomically, * in which case we can continue to use the vma * we already looked up. * * Otherwise we'll have to drop the mmap lock and * re-take it, and also look up the vma again, * re-checking it. */ if (!mmap_upgrade_trylock(mm)) { if (!upgrade_mmap_lock_carefully(mm, regs)) return NULL; vma = find_vma(mm, addr); if (!vma) goto fail; if (vma->vm_start <= addr) goto success; if (!(vma->vm_flags & VM_GROWSDOWN)) goto fail; } if (expand_stack_locked(vma, addr)) goto fail; success: mmap_write_downgrade(mm); return vma; fail: mmap_write_unlock(mm); return NULL; } #endif #ifdef CONFIG_PER_VMA_LOCK /* * Look up and lock a VMA under RCU protection. The returned VMA is guaranteed * to be stable and not isolated. If the VMA is not found or is being modified, * the function returns NULL.
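* * A VMA that is found but concurrently detached is looked up again via the * retry label below, since its maple tree slot may by then hold a different * VMA.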
*/ struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm, unsigned long address) { MA_STATE(mas, &mm->mm_mt, address, address); struct vm_area_struct *vma; rcu_read_lock(); retry: vma = mas_walk(&mas); if (!vma) goto inval; if (!vma_start_read(vma)) goto inval; /* Check if the VMA got isolated after we found it */ if (vma->detached) { vma_end_read(vma); count_vm_vma_lock_event(VMA_LOCK_MISS); /* The area was replaced with another one */ goto retry; } /* * At this point, we have a stable reference to a VMA: The VMA is * locked and we know it hasn't already been isolated. * From here on, we can access the VMA without worrying about which * fields are accessible for RCU readers. */ /* Check since vm_start/vm_end might change before we lock the VMA */ if (unlikely(address < vma->vm_start || address >= vma->vm_end)) goto inval_end_read; rcu_read_unlock(); return vma; inval_end_read: vma_end_read(vma); inval: rcu_read_unlock(); count_vm_vma_lock_event(VMA_LOCK_ABORT); return NULL; } #endif /* CONFIG_PER_VMA_LOCK */ #ifndef __PAGETABLE_P4D_FOLDED /* * Allocate p4d page table. * We've already handled the fast-path in-line. */ int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address) { p4d_t *new = p4d_alloc_one(mm, address); if (!new) return -ENOMEM; spin_lock(&mm->page_table_lock); if (pgd_present(*pgd)) { /* Another has populated it */ p4d_free(mm, new); } else { smp_wmb(); /* See comment in pmd_install() */ pgd_populate(mm, pgd, new); } spin_unlock(&mm->page_table_lock); return 0; } #endif /* __PAGETABLE_P4D_FOLDED */ #ifndef __PAGETABLE_PUD_FOLDED /* * Allocate page upper directory. * We've already handled the fast-path in-line. */ int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address) { pud_t *new = pud_alloc_one(mm, address); if (!new) return -ENOMEM; spin_lock(&mm->page_table_lock); if (!p4d_present(*p4d)) { mm_inc_nr_puds(mm); smp_wmb(); /* See comment in pmd_install() */ p4d_populate(mm, p4d, new); } else /* Another has populated it */ pud_free(mm, new); spin_unlock(&mm->page_table_lock); return 0; } #endif /* __PAGETABLE_PUD_FOLDED */ #ifndef __PAGETABLE_PMD_FOLDED /* * Allocate page middle directory. * We've already handled the fast-path in-line. */ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) { spinlock_t *ptl; pmd_t *new = pmd_alloc_one(mm, address); if (!new) return -ENOMEM; ptl = pud_lock(mm, pud); if (!pud_present(*pud)) { mm_inc_nr_pmds(mm); smp_wmb(); /* See comment in pmd_install() */ pud_populate(mm, pud, new); } else { /* Another has populated it */ pmd_free(mm, new); } spin_unlock(ptl); return 0; } #endif /* __PAGETABLE_PMD_FOLDED */ static inline void pfnmap_args_setup(struct follow_pfnmap_args *args, spinlock_t *lock, pte_t *ptep, pgprot_t pgprot, unsigned long pfn_base, unsigned long addr_mask, bool writable, bool special) { args->lock = lock; args->ptep = ptep; args->pfn = pfn_base + ((args->address & ~addr_mask) >> PAGE_SHIFT); args->pgprot = pgprot; args->writable = writable; args->special = special; } static inline void pfnmap_lockdep_assert(struct vm_area_struct *vma) { #ifdef CONFIG_LOCKDEP struct file *file = vma->vm_file; struct address_space *mapping = file ? 
		file->f_mapping : NULL;

	if (mapping)
		lockdep_assert(lockdep_is_held(&mapping->i_mmap_rwsem) ||
			       lockdep_is_held(&vma->vm_mm->mmap_lock));
	else
		lockdep_assert(lockdep_is_held(&vma->vm_mm->mmap_lock));
#endif
}

/**
 * follow_pfnmap_start() - Look up a pfn mapping at a user virtual address
 * @args: Pointer to struct @follow_pfnmap_args
 *
 * The caller needs to set up args->vma and args->address to point to the
 * virtual address as the target of such lookup. On a successful return,
 * the results will be put into other output fields.
 *
 * After the caller finished using the fields, the caller must invoke
 * another follow_pfnmap_end() to properly release the locks and resources
 * of such a lookup request.
 *
 * During the start() and end() calls, the results in @args will be valid
 * as proper locks will be held. After the end() is called, all the fields
 * in @follow_pfnmap_args will be invalid to be further accessed. Further
 * use of such information after end() may require proper synchronizations
 * by the caller with page table updates, otherwise it can create a
 * security bug.
 *
 * If the PTE maps a refcounted page, callers are responsible for protecting
 * against invalidation with MMU notifiers; otherwise access to the PFN at
 * a later point in time can trigger use-after-free.
 *
 * Only IO mappings and raw PFN mappings are allowed. The mmap semaphore
 * should be taken for read, and the mmap semaphore cannot be released
 * before the end() is invoked.
 *
 * This function must not be used to modify PTE content.
 *
 * Return: zero on success, negative otherwise.
 */
int follow_pfnmap_start(struct follow_pfnmap_args *args)
{
	struct vm_area_struct *vma = args->vma;
	unsigned long address = args->address;
	struct mm_struct *mm = vma->vm_mm;
	spinlock_t *lock;
	pgd_t *pgdp;
	p4d_t *p4dp, p4d;
	pud_t *pudp, pud;
	pmd_t *pmdp, pmd;
	pte_t *ptep, pte;

	pfnmap_lockdep_assert(vma);

	if (unlikely(address < vma->vm_start || address >= vma->vm_end))
		goto out;

	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
		goto out;
retry:
	pgdp = pgd_offset(mm, address);
	if (pgd_none(*pgdp) || unlikely(pgd_bad(*pgdp)))
		goto out;

	p4dp = p4d_offset(pgdp, address);
	p4d = READ_ONCE(*p4dp);
	if (p4d_none(p4d) || unlikely(p4d_bad(p4d)))
		goto out;

	pudp = pud_offset(p4dp, address);
	pud = READ_ONCE(*pudp);
	if (pud_none(pud))
		goto out;
	if (pud_leaf(pud)) {
		lock = pud_lock(mm, pudp);
		if (!unlikely(pud_leaf(pud))) {
			spin_unlock(lock);
			goto retry;
		}
		pfnmap_args_setup(args, lock, NULL, pud_pgprot(pud),
				  pud_pfn(pud), PUD_MASK, pud_write(pud),
				  pud_special(pud));
		return 0;
	}

	pmdp = pmd_offset(pudp, address);
	pmd = pmdp_get_lockless(pmdp);
	if (pmd_leaf(pmd)) {
		lock = pmd_lock(mm, pmdp);
		if (!unlikely(pmd_leaf(pmd))) {
			spin_unlock(lock);
			goto retry;
		}
		pfnmap_args_setup(args, lock, NULL, pmd_pgprot(pmd),
				  pmd_pfn(pmd), PMD_MASK, pmd_write(pmd),
				  pmd_special(pmd));
		return 0;
	}

	ptep = pte_offset_map_lock(mm, pmdp, address, &lock);
	if (!ptep)
		goto out;
	pte = ptep_get(ptep);
	if (!pte_present(pte))
		goto unlock;
	pfnmap_args_setup(args, lock, ptep, pte_pgprot(pte),
			  pte_pfn(pte), PAGE_MASK, pte_write(pte),
			  pte_special(pte));
	return 0;
unlock:
	pte_unmap_unlock(ptep, lock);
out:
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(follow_pfnmap_start);

/**
 * follow_pfnmap_end(): End a follow_pfnmap_start() process
 * @args: Pointer to struct @follow_pfnmap_args
 *
 * Must be used in pair with follow_pfnmap_start(). See the start() function
 * above for more information.
*/ void follow_pfnmap_end(struct follow_pfnmap_args *args) { if (args->lock) spin_unlock(args->lock); if (args->ptep) pte_unmap(args->ptep); } EXPORT_SYMBOL_GPL(follow_pfnmap_end); #ifdef CONFIG_HAVE_IOREMAP_PROT /** * generic_access_phys - generic implementation for iomem mmap access * @vma: the vma to access * @addr: userspace address, not relative offset within @vma * @buf: buffer to read/write * @len: length of transfer * @write: set to FOLL_WRITE when writing, otherwise reading * * This is a generic implementation for &vm_operations_struct.access for an * iomem mapping. This callback is used by access_process_vm() when the @vma is * not page based. */ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, void *buf, int len, int write) { resource_size_t phys_addr; unsigned long prot = 0; void __iomem *maddr; int offset = offset_in_page(addr); int ret = -EINVAL; bool writable; struct follow_pfnmap_args args = { .vma = vma, .address = addr }; retry: if (follow_pfnmap_start(&args)) return -EINVAL; prot = pgprot_val(args.pgprot); phys_addr = (resource_size_t)args.pfn << PAGE_SHIFT; writable = args.writable; follow_pfnmap_end(&args); if ((write & FOLL_WRITE) && !writable) return -EINVAL; maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot); if (!maddr) return -ENOMEM; if (follow_pfnmap_start(&args)) goto out_unmap; if ((prot != pgprot_val(args.pgprot)) || (phys_addr != (args.pfn << PAGE_SHIFT)) || (writable != args.writable)) { follow_pfnmap_end(&args); iounmap(maddr); goto retry; } if (write) memcpy_toio(maddr + offset, buf, len); else memcpy_fromio(buf, maddr + offset, len); ret = len; follow_pfnmap_end(&args); out_unmap: iounmap(maddr); return ret; } EXPORT_SYMBOL_GPL(generic_access_phys); #endif /* * Access another process' address space as given in mm. */ static int __access_remote_vm(struct mm_struct *mm, unsigned long addr, void *buf, int len, unsigned int gup_flags) { void *old_buf = buf; int write = gup_flags & FOLL_WRITE; if (mmap_read_lock_killable(mm)) return 0; /* Untag the address before looking up the VMA */ addr = untagged_addr_remote(mm, addr); /* Avoid triggering the temporary warning in __get_user_pages */ if (!vma_lookup(mm, addr) && !expand_stack(mm, addr)) return 0; /* ignore errors, just check how much was successfully transferred */ while (len) { int bytes, offset; void *maddr; struct vm_area_struct *vma = NULL; struct page *page = get_user_page_vma_remote(mm, addr, gup_flags, &vma); if (IS_ERR(page)) { /* We might need to expand the stack to access it */ vma = vma_lookup(mm, addr); if (!vma) { vma = expand_stack(mm, addr); /* mmap_lock was dropped on failure */ if (!vma) return buf - old_buf; /* Try again if stack expansion worked */ continue; } /* * Check if this is a VM_IO | VM_PFNMAP VMA, which * we can access using slightly different code. 
			 */
			bytes = 0;
#ifdef CONFIG_HAVE_IOREMAP_PROT
			if (vma->vm_ops && vma->vm_ops->access)
				bytes = vma->vm_ops->access(vma, addr, buf,
							    len, write);
#endif
			if (bytes <= 0)
				break;
		} else {
			bytes = len;
			offset = addr & (PAGE_SIZE - 1);
			if (bytes > PAGE_SIZE - offset)
				bytes = PAGE_SIZE - offset;

			maddr = kmap_local_page(page);
			if (write) {
				copy_to_user_page(vma, page, addr,
						  maddr + offset, buf, bytes);
				set_page_dirty_lock(page);
			} else {
				copy_from_user_page(vma, page, addr,
						    buf, maddr + offset, bytes);
			}
			unmap_and_put_page(page, maddr);
		}
		len -= bytes;
		buf += bytes;
		addr += bytes;
	}
	mmap_read_unlock(mm);

	return buf - old_buf;
}

/**
 * access_remote_vm - access another process' address space
 * @mm:		the mm_struct of the target address space
 * @addr:	start address to access
 * @buf:	source or destination buffer
 * @len:	number of bytes to transfer
 * @gup_flags:	flags modifying lookup behaviour
 *
 * The caller must hold a reference on @mm.
 *
 * Return: number of bytes copied from source to destination.
 */
int access_remote_vm(struct mm_struct *mm, unsigned long addr,
		     void *buf, int len, unsigned int gup_flags)
{
	return __access_remote_vm(mm, addr, buf, len, gup_flags);
}

/*
 * Access another process' address space.
 * Source/target buffer must be in kernel space.
 * Do not walk the page table directly; use get_user_pages().
 */
int access_process_vm(struct task_struct *tsk, unsigned long addr,
		      void *buf, int len, unsigned int gup_flags)
{
	struct mm_struct *mm;
	int ret;

	mm = get_task_mm(tsk);
	if (!mm)
		return 0;

	ret = __access_remote_vm(mm, addr, buf, len, gup_flags);

	mmput(mm);

	return ret;
}
EXPORT_SYMBOL_GPL(access_process_vm);

/*
 * Print the name of a VMA.
 */
void print_vma_addr(char *prefix, unsigned long ip)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	/*
	 * we might be running from an atomic context so we cannot sleep
	 */
	if (!mmap_read_trylock(mm))
		return;

	vma = vma_lookup(mm, ip);
	if (vma && vma->vm_file) {
		struct file *f = vma->vm_file;

		ip -= vma->vm_start;
		ip += vma->vm_pgoff << PAGE_SHIFT;
		printk("%s%pD[%lx,%lx+%lx]", prefix, f, ip,
		       vma->vm_start,
		       vma->vm_end - vma->vm_start);
	}
	mmap_read_unlock(mm);
}

#if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)
void __might_fault(const char *file, int line)
{
	if (pagefault_disabled())
		return;
	__might_sleep(file, line);
#if defined(CONFIG_DEBUG_ATOMIC_SLEEP)
	if (current->mm)
		might_lock_read(&current->mm->mmap_lock);
#endif
}
EXPORT_SYMBOL(__might_fault);
#endif

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
/*
 * Process all subpages of the specified huge page with the specified
 * operation. The target subpage will be processed last to keep its
 * cache lines hot.
 */
static inline int process_huge_page(
	unsigned long addr_hint, unsigned int nr_pages,
	int (*process_subpage)(unsigned long addr, int idx, void *arg),
	void *arg)
{
	int i, n, base, l, ret;
	unsigned long addr = addr_hint &
		~(((unsigned long)nr_pages << PAGE_SHIFT) - 1);

	/* Process target subpage last to keep its cache lines hot */
	might_sleep();
	n = (addr_hint - addr) / PAGE_SIZE;
	if (2 * n <= nr_pages) {
		/* If target subpage in first half of huge page */
		base = 0;
		l = n;
		/* Process subpages at the end of huge page */
		for (i = nr_pages - 1; i >= 2 * n; i--) {
			cond_resched();
			ret = process_subpage(addr + i * PAGE_SIZE, i, arg);
			if (ret)
				return ret;
		}
	} else {
		/* If target subpage in second half of huge page */
		base = nr_pages - 2 * (nr_pages - n);
		l = nr_pages - n;
		/* Process subpages at the beginning of huge page */
		for (i = 0; i < base; i++) {
			cond_resched();
			ret = process_subpage(addr + i * PAGE_SIZE, i, arg);
			if (ret)
				return ret;
		}
	}
	/*
	 * Process remaining subpages in left-right-left-right pattern
	 * towards the target subpage
	 */
	for (i = 0; i < l; i++) {
		int left_idx = base + i;
		int right_idx = base + 2 * l - 1 - i;

		cond_resched();
		ret = process_subpage(addr + left_idx * PAGE_SIZE,
				      left_idx, arg);
		if (ret)
			return ret;
		cond_resched();
		ret = process_subpage(addr + right_idx * PAGE_SIZE,
				      right_idx, arg);
		if (ret)
			return ret;
	}
	return 0;
}

static void clear_gigantic_page(struct folio *folio, unsigned long addr_hint,
				unsigned int nr_pages)
{
	unsigned long addr = ALIGN_DOWN(addr_hint, folio_size(folio));
	int i;

	might_sleep();
	for (i = 0; i < nr_pages; i++) {
		cond_resched();
		clear_user_highpage(folio_page(folio, i), addr + i * PAGE_SIZE);
	}
}

static int clear_subpage(unsigned long addr, int idx, void *arg)
{
	struct folio *folio = arg;

	clear_user_highpage(folio_page(folio, idx), addr);
	return 0;
}

/**
 * folio_zero_user - Zero a folio which will be mapped to userspace.
 * @folio: The folio to zero.
 * @addr_hint: The address that will be accessed, or the base address if
 *             unclear.
*/ void folio_zero_user(struct folio *folio, unsigned long addr_hint) { unsigned int nr_pages = folio_nr_pages(folio); if (unlikely(nr_pages > MAX_ORDER_NR_PAGES)) clear_gigantic_page(folio, addr_hint, nr_pages); else process_huge_page(addr_hint, nr_pages, clear_subpage, folio); } static int copy_user_gigantic_page(struct folio *dst, struct folio *src, unsigned long addr_hint, struct vm_area_struct *vma, unsigned int nr_pages) { unsigned long addr = ALIGN_DOWN(addr_hint, folio_size(dst)); struct page *dst_page; struct page *src_page; int i; for (i = 0; i < nr_pages; i++) { dst_page = folio_page(dst, i); src_page = folio_page(src, i); cond_resched(); if (copy_mc_user_highpage(dst_page, src_page, addr + i*PAGE_SIZE, vma)) return -EHWPOISON; } return 0; } struct copy_subpage_arg { struct folio *dst; struct folio *src; struct vm_area_struct *vma; }; static int copy_subpage(unsigned long addr, int idx, void *arg) { struct copy_subpage_arg *copy_arg = arg; struct page *dst = folio_page(copy_arg->dst, idx); struct page *src = folio_page(copy_arg->src, idx); if (copy_mc_user_highpage(dst, src, addr, copy_arg->vma)) return -EHWPOISON; return 0; } int copy_user_large_folio(struct folio *dst, struct folio *src, unsigned long addr_hint, struct vm_area_struct *vma) { unsigned int nr_pages = folio_nr_pages(dst); struct copy_subpage_arg arg = { .dst = dst, .src = src, .vma = vma, }; if (unlikely(nr_pages > MAX_ORDER_NR_PAGES)) return copy_user_gigantic_page(dst, src, addr_hint, vma, nr_pages); return process_huge_page(addr_hint, nr_pages, copy_subpage, &arg); } long copy_folio_from_user(struct folio *dst_folio, const void __user *usr_src, bool allow_pagefault) { void *kaddr; unsigned long i, rc = 0; unsigned int nr_pages = folio_nr_pages(dst_folio); unsigned long ret_val = nr_pages * PAGE_SIZE; struct page *subpage; for (i = 0; i < nr_pages; i++) { subpage = folio_page(dst_folio, i); kaddr = kmap_local_page(subpage); if (!allow_pagefault) pagefault_disable(); rc = copy_from_user(kaddr, usr_src + i * PAGE_SIZE, PAGE_SIZE); if (!allow_pagefault) pagefault_enable(); kunmap_local(kaddr); ret_val -= (PAGE_SIZE - rc); if (rc) break; flush_dcache_page(subpage); cond_resched(); } return ret_val; } #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */ #if defined(CONFIG_SPLIT_PTE_PTLOCKS) && ALLOC_SPLIT_PTLOCKS static struct kmem_cache *page_ptl_cachep; void __init ptlock_cache_init(void) { page_ptl_cachep = kmem_cache_create("page->ptl", sizeof(spinlock_t), 0, SLAB_PANIC, NULL); } bool ptlock_alloc(struct ptdesc *ptdesc) { spinlock_t *ptl; ptl = kmem_cache_alloc(page_ptl_cachep, GFP_KERNEL); if (!ptl) return false; ptdesc->ptl = ptl; return true; } void ptlock_free(struct ptdesc *ptdesc) { kmem_cache_free(page_ptl_cachep, ptdesc->ptl); } #endif void vma_pgtable_walk_begin(struct vm_area_struct *vma) { if (is_vm_hugetlb_page(vma)) hugetlb_vma_lock_read(vma); } void vma_pgtable_walk_end(struct vm_area_struct *vma) { if (is_vm_hugetlb_page(vma)) hugetlb_vma_unlock_read(vma); } |
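/*
 * Editor's illustration (userspace sketch, not kernel code): the subpage
 * visit order computed by process_huge_page() above. Subpages far from the
 * target are processed first, then the walk alternates left/right and
 * converges on the target subpage so its cache lines are touched last.
 * The page size and target index below are made-up example values.
 */
#include <stdio.h>

static void huge_page_visit_order(int nr_pages, int target)
{
	int i, base, l;

	if (2 * target <= nr_pages) {
		/* target in first half: process the tail of the page first */
		base = 0;
		l = target;
		for (i = nr_pages - 1; i >= 2 * target; i--)
			printf("%d ", i);
	} else {
		/* target in second half: process the head of the page first */
		base = 2 * target - nr_pages;
		l = nr_pages - target;
		for (i = 0; i < base; i++)
			printf("%d ", i);
	}
	/* left-right-left-right, converging on the target */
	for (i = 0; i < l; i++) {
		printf("%d ", base + i);		/* left */
		printf("%d ", base + 2 * l - 1 - i);	/* right */
	}
	printf("\n");
}

int main(void)
{
	/* a 16-subpage huge page faulted at subpage 5:
	 * prints 15..10, then 0 9 1 8 2 7 3 6 4 5 */
	huge_page_visit_order(16, 5);
	return 0;
}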
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  HID driver for some sunplus "special" devices
 *
 *  Copyright (c) 1999 Andreas Gal
 *  Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
 *  Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
 *  Copyright (c) 2006-2007 Jiri Kosina
 *  Copyright (c) 2008 Jiri Slaby
 */

#include <linux/device.h>
#include <linux/hid.h>
#include <linux/module.h>

#include "hid-ids.h"

static const __u8 *sp_report_fixup(struct hid_device *hdev, __u8 *rdesc,
				   unsigned int *rsize)
{
	if (*rsize >= 112 && rdesc[104] == 0x26 && rdesc[105] == 0x80 &&
	    rdesc[106] == 0x03) {
		hid_info(hdev,
			 "fixing up Sunplus Wireless Desktop report descriptor\n");
		rdesc[105] = rdesc[110] = 0x03;
		rdesc[106] = rdesc[111] = 0x21;
	}
	return rdesc;
}

#define sp_map_key_clear(c)	hid_map_usage_clear(hi, usage, bit, max, \
					EV_KEY, (c))

static int sp_input_mapping(struct hid_device *hdev, struct hid_input *hi,
			    struct hid_field *field, struct hid_usage *usage,
			    unsigned long **bit, int *max)
{
	if ((usage->hid & HID_USAGE_PAGE) != HID_UP_CONSUMER)
		return 0;

	switch (usage->hid & HID_USAGE) {
	case 0x2003:
		sp_map_key_clear(KEY_ZOOMIN);
		break;
	case 0x2103:
		sp_map_key_clear(KEY_ZOOMOUT);
		break;
	default:
		return 0;
	}
	return 1;
}

static const struct hid_device_id sp_devices[] = {
	{ HID_USB_DEVICE(USB_VENDOR_ID_SUNPLUS, USB_DEVICE_ID_SUNPLUS_WDESKTOP) },
	{ }
};
MODULE_DEVICE_TABLE(hid, sp_devices);

static struct hid_driver sp_driver = {
	.name = "sunplus",
	.id_table = sp_devices,
	.report_fixup = sp_report_fixup,
	.input_mapping = sp_input_mapping,
};
module_hid_driver(sp_driver);

MODULE_DESCRIPTION("HID driver for some sunplus \"special\" devices");
MODULE_LICENSE("GPL");
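/*
 * Editor's illustration (userspace sketch, not driver code): how the bytes
 * tested by sp_report_fixup() above decode as HID short items. The prefix
 * byte packs bTag (bits 7-4), bType (bits 3-2) and bSize (bits 1-0), so
 * 0x26 is a Global item, tag 2 (Logical Maximum), with two data bytes --
 * which is why the fixup patches the two bytes that follow it.
 */
#include <stdio.h>

static void hid_short_item_decode(unsigned char prefix)
{
	unsigned int size = prefix & 0x3;	 /* 3 encodes 4 data bytes */
	unsigned int type = (prefix >> 2) & 0x3; /* 0=Main 1=Global 2=Local */
	unsigned int tag = (prefix >> 4) & 0xf;

	if (size == 3)
		size = 4;
	printf("prefix 0x%02x: tag %u, type %u, %u data byte(s)\n",
	       prefix, tag, type, size);
}

int main(void)
{
	hid_short_item_decode(0x26);	/* Logical Maximum, 2 data bytes */
	return 0;
}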
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for Tascam US-X2Y USB soundcards
 *
 * FPGA Loader + ALSA Startup
 *
 * Copyright (c) 2003 by Karsten Wiese <annabellesgarden@yahoo.de>
 */

#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <sound/core.h>
#include <sound/memalloc.h>
#include <sound/pcm.h>
#include <sound/hwdep.h>
#include "usx2y.h"
#include "usbusx2y.h"
#include "usX2Yhwdep.h"

static vm_fault_t snd_us428ctls_vm_fault(struct vm_fault *vmf)
{
	unsigned long offset;
	struct page *page;
	void *vaddr;

	offset = vmf->pgoff << PAGE_SHIFT;
	vaddr = (char *)((struct usx2ydev *)vmf->vma->vm_private_data)->us428ctls_sharedmem + offset;
	page = virt_to_page(vaddr);
	get_page(page);
	vmf->page = page;

	return 0;
}

static const struct vm_operations_struct us428ctls_vm_ops = {
	.fault = snd_us428ctls_vm_fault,
};

static int snd_us428ctls_mmap(struct snd_hwdep *hw, struct file *filp,
			      struct vm_area_struct *area)
{
	unsigned long size = (unsigned long)(area->vm_end - area->vm_start);
	struct usx2ydev *us428 = hw->private_data;

	// FIXME: this hwdep interface is used twice, for the FPGA download and
	// for mmapping the shared memory that controls the lights etc. Maybe it
	// would be better to use two hwdep devices? For now, as long as the
	// device isn't fully initialised yet, we return -EBUSY here.
if (!(us428->chip_status & USX2Y_STAT_CHIP_INIT)) return -EBUSY; /* if userspace tries to mmap beyond end of our buffer, fail */ if (size > US428_SHAREDMEM_PAGES) { dev_dbg(hw->card->dev, "%s: mmap size %lu > %lu\n", __func__, size, (unsigned long)US428_SHAREDMEM_PAGES); return -EINVAL; } area->vm_ops = &us428ctls_vm_ops; vm_flags_set(area, VM_DONTEXPAND | VM_DONTDUMP); area->vm_private_data = hw->private_data; return 0; } static __poll_t snd_us428ctls_poll(struct snd_hwdep *hw, struct file *file, poll_table *wait) { __poll_t mask = 0; struct usx2ydev *us428 = hw->private_data; struct us428ctls_sharedmem *shm = us428->us428ctls_sharedmem; if (us428->chip_status & USX2Y_STAT_CHIP_HUP) return EPOLLHUP; poll_wait(file, &us428->us428ctls_wait_queue_head, wait); if (shm && shm->ctl_snapshot_last != shm->ctl_snapshot_red) mask |= EPOLLIN; return mask; } static int snd_usx2y_hwdep_dsp_status(struct snd_hwdep *hw, struct snd_hwdep_dsp_status *info) { static const char * const type_ids[USX2Y_TYPE_NUMS] = { [USX2Y_TYPE_122] = "us122", [USX2Y_TYPE_224] = "us224", [USX2Y_TYPE_428] = "us428", }; struct usx2ydev *us428 = hw->private_data; int id = -1; switch (le16_to_cpu(us428->dev->descriptor.idProduct)) { case USB_ID_US122: id = USX2Y_TYPE_122; break; case USB_ID_US224: id = USX2Y_TYPE_224; break; case USB_ID_US428: id = USX2Y_TYPE_428; break; } if (id < 0) return -ENODEV; strcpy(info->id, type_ids[id]); info->num_dsps = 2; // 0: Prepad Data, 1: FPGA Code if (us428->chip_status & USX2Y_STAT_CHIP_INIT) info->chip_ready = 1; info->version = USX2Y_DRIVER_VERSION; return 0; } static int usx2y_create_usbmidi(struct snd_card *card) { static const struct snd_usb_midi_endpoint_info quirk_data_1 = { .out_ep = 0x06, .in_ep = 0x06, .out_cables = 0x001, .in_cables = 0x001 }; static const struct snd_usb_audio_quirk quirk_1 = { .vendor_name = "TASCAM", .product_name = NAME_ALLCAPS, .ifnum = 0, .type = QUIRK_MIDI_FIXED_ENDPOINT, .data = &quirk_data_1 }; static const struct snd_usb_midi_endpoint_info quirk_data_2 = { .out_ep = 0x06, .in_ep = 0x06, .out_cables = 0x003, .in_cables = 0x003 }; static const struct snd_usb_audio_quirk quirk_2 = { .vendor_name = "TASCAM", .product_name = "US428", .ifnum = 0, .type = QUIRK_MIDI_FIXED_ENDPOINT, .data = &quirk_data_2 }; struct usb_device *dev = usx2y(card)->dev; struct usb_interface *iface = usb_ifnum_to_if(dev, 0); const struct snd_usb_audio_quirk *quirk = le16_to_cpu(dev->descriptor.idProduct) == USB_ID_US428 ? 
&quirk_2 : &quirk_1; return snd_usbmidi_create(card, iface, &usx2y(card)->midi_list, quirk); } static int usx2y_create_alsa_devices(struct snd_card *card) { int err; err = usx2y_create_usbmidi(card); if (err < 0) { dev_err(card->dev, "%s: usx2y_create_usbmidi error %i\n", __func__, err); return err; } err = usx2y_audio_create(card); if (err < 0) return err; err = usx2y_hwdep_pcm_new(card); if (err < 0) return err; err = snd_card_register(card); if (err < 0) return err; return 0; } static int snd_usx2y_hwdep_dsp_load(struct snd_hwdep *hw, struct snd_hwdep_dsp_image *dsp) { struct usx2ydev *priv = hw->private_data; struct usb_device *dev = priv->dev; int lret, err; char *buf; buf = memdup_user(dsp->image, dsp->length); if (IS_ERR(buf)) return PTR_ERR(buf); err = usb_set_interface(dev, 0, 1); if (err) dev_err(&dev->dev, "usb_set_interface error\n"); else err = usb_bulk_msg(dev, usb_sndbulkpipe(dev, 2), buf, dsp->length, &lret, 6000); kfree(buf); if (err) return err; if (dsp->index == 1) { msleep(250); // give the device some time err = usx2y_async_seq04_init(priv); if (err) { dev_err(&dev->dev, "usx2y_async_seq04_init error\n"); return err; } err = usx2y_in04_init(priv); if (err) { dev_err(&dev->dev, "usx2y_in04_init error\n"); return err; } err = usx2y_create_alsa_devices(hw->card); if (err) { dev_err(&dev->dev, "usx2y_create_alsa_devices error %i\n", err); return err; } priv->chip_status |= USX2Y_STAT_CHIP_INIT; } return err; } int usx2y_hwdep_new(struct snd_card *card, struct usb_device *device) { int err; struct snd_hwdep *hw; struct usx2ydev *us428 = usx2y(card); err = snd_hwdep_new(card, SND_USX2Y_LOADER_ID, 0, &hw); if (err < 0) return err; hw->iface = SNDRV_HWDEP_IFACE_USX2Y; hw->private_data = us428; hw->ops.dsp_status = snd_usx2y_hwdep_dsp_status; hw->ops.dsp_load = snd_usx2y_hwdep_dsp_load; hw->ops.mmap = snd_us428ctls_mmap; hw->ops.poll = snd_us428ctls_poll; hw->exclusive = 1; sprintf(hw->name, "/dev/bus/usb/%03d/%03d", device->bus->busnum, device->devnum); us428->us428ctls_sharedmem = alloc_pages_exact(US428_SHAREDMEM_PAGES, GFP_KERNEL); if (!us428->us428ctls_sharedmem) return -ENOMEM; memset(us428->us428ctls_sharedmem, -1, US428_SHAREDMEM_PAGES); us428->us428ctls_sharedmem->ctl_snapshot_last = -2; return 0; } |
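/*
 * Editor's illustration (userspace sketch, untested): a minimal client for
 * the hwdep interface above, mmapping the control shared memory and polling
 * for snapshot updates. The device path and the 4 KiB mapping size are
 * assumptions for the example; real code should derive both from the ALSA
 * card layout and US428_SHAREDMEM_PAGES.
 */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/snd/hwC0D0", O_RDWR);	/* hypothetical node */
	struct pollfd pfd;
	void *shm;

	if (fd < 0)
		return 1;

	/* must not exceed the size exported by the driver, or mmap() fails */
	shm = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (shm == MAP_FAILED)
		return 1;

	pfd.fd = fd;
	pfd.events = POLLIN;
	while (poll(&pfd, 1, -1) > 0) {
		if (pfd.revents & POLLHUP)
			break;			/* device went away */
		if (pfd.revents & POLLIN)
			puts("control snapshot updated");	/* read shm */
	}

	munmap(shm, 4096);
	close(fd);
	return 0;
}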
// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ext4/truncate.h
 *
 * Common inline functions needed for truncate support
 */

/*
 * Truncate blocks that were not used by write. We have to truncate the
 * pagecache as well so that corresponding buffers get properly unmapped.
 */
static inline void ext4_truncate_failed_write(struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;

	/*
	 * We don't need to call ext4_break_layouts() because the blocks we
	 * are truncating were never visible to userspace.
	 */
	filemap_invalidate_lock(mapping);
	truncate_inode_pages(mapping, inode->i_size);
	ext4_truncate(inode);
	filemap_invalidate_unlock(mapping);
}

/*
 * Work out how many blocks we need to proceed with the next chunk of a
 * truncate transaction.
 */
static inline unsigned long ext4_blocks_for_truncate(struct inode *inode)
{
	ext4_lblk_t needed;

	needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);

	/* Give ourselves just enough room to cope with inodes in which
	 * i_blocks is corrupt: we've seen disk corruptions in the past
	 * which resulted in random data in an inode which looked enough
	 * like a regular file for ext4 to try to delete it. Things
	 * will go a bit crazy if that happens, but at least we should
	 * try not to panic the whole kernel. */
	if (needed < 2)
		needed = 2;

	/* But we need to bound the transaction so we don't overflow the
	 * journal. */
	if (needed > EXT4_MAX_TRANS_DATA)
		needed = EXT4_MAX_TRANS_DATA;

	return EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + needed;
}
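/*
 * Editor's worked example (userspace sketch) of the credit estimate in
 * ext4_blocks_for_truncate() above: i_blocks counts 512-byte sectors, so
 * shifting right by (s_blocksize_bits - 9) converts it to filesystem
 * blocks, which is then clamped to [2, EXT4_MAX_TRANS_DATA] before the
 * fixed EXT4_DATA_TRANS_BLOCKS() overhead is added. The constant below is
 * an assumed value for illustration only.
 */
#include <stdio.h>

#define EXT4_MAX_TRANS_DATA	64	/* assumed for this example */

static unsigned long blocks_for_truncate(unsigned long i_blocks,
					 unsigned int blocksize_bits)
{
	unsigned long needed = i_blocks >> (blocksize_bits - 9);

	if (needed < 2)
		needed = 2;
	if (needed > EXT4_MAX_TRANS_DATA)
		needed = EXT4_MAX_TRANS_DATA;
	return needed;
}

int main(void)
{
	/* a 1 MiB file on a 4 KiB-block fs: 2048 sectors -> 256 blocks,
	 * clamped to 64 */
	printf("%lu\n", blocks_for_truncate(2048, 12));
	return 0;
}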
#ifndef _CRYPTO_GCM_H
#define _CRYPTO_GCM_H

#include <linux/errno.h>

#include <crypto/aes.h>
#include <crypto/gf128mul.h>

#define GCM_AES_IV_SIZE 12
#define GCM_RFC4106_IV_SIZE 8
#define GCM_RFC4543_IV_SIZE 8

/*
 * validate authentication tag for GCM
 */
static inline int crypto_gcm_check_authsize(unsigned int authsize)
{
	switch (authsize) {
	case 4:
	case 8:
	case 12:
	case 13:
	case 14:
	case 15:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * validate authentication tag for RFC4106
 */
static inline int crypto_rfc4106_check_authsize(unsigned int authsize)
{
	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * validate assoclen for RFC4106/RFC4543
 */
static inline int crypto_ipsec_check_assoclen(unsigned int assoclen)
{
	switch (assoclen) {
	case 16:
	case 20:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

struct aesgcm_ctx {
	be128			ghash_key;
	struct crypto_aes_ctx	aes_ctx;
	unsigned int		authsize;
};

int aesgcm_expandkey(struct aesgcm_ctx *ctx, const u8 *key,
		     unsigned int keysize, unsigned int authsize);

void aesgcm_encrypt(const struct aesgcm_ctx *ctx, u8 *dst, const u8 *src,
		    int crypt_len, const u8 *assoc, int assoc_len,
		    const u8 iv[GCM_AES_IV_SIZE], u8 *authtag);

bool __must_check aesgcm_decrypt(const struct aesgcm_ctx *ctx, u8 *dst,
				 const u8 *src, int crypt_len, const u8 *assoc,
				 int assoc_len, const u8 iv[GCM_AES_IV_SIZE],
				 const u8 *authtag);

#endif
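/*
 * Editor's usage sketch for the aesgcm_*() library interface declared
 * above. The prototypes come from this header; the key, IV, buffers and
 * the function name are made-up example values, and error handling is
 * intentionally minimal.
 */
#include <crypto/gcm.h>

static int aesgcm_demo(void)
{
	static const u8 key[16] = { 0x01, 0x02, 0x03, 0x04 };	/* toy key */
	u8 iv[GCM_AES_IV_SIZE] = {};
	u8 assoc[4] = "hdr";
	u8 pt[16] = "fifteen bytes..";
	u8 ct[16], tag[16];
	struct aesgcm_ctx ctx;
	int err;

	/* AES-128 key, 16-byte authentication tag */
	err = aesgcm_expandkey(&ctx, key, sizeof(key), sizeof(tag));
	if (err)
		return err;

	aesgcm_encrypt(&ctx, ct, pt, sizeof(pt), assoc, sizeof(assoc),
		       iv, tag);

	/* returns false if the tag does not authenticate */
	if (!aesgcm_decrypt(&ctx, pt, ct, sizeof(ct), assoc, sizeof(assoc),
			    iv, tag))
		return -EBADMSG;
	return 0;
}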
// SPDX-License-Identifier: GPL-2.0
/*
 * of.c		The helpers for hcd device tree support
 *
 * Copyright (C) 2016 Freescale Semiconductor, Inc.
 * Author: Peter Chen <peter.chen@freescale.com>
 * Copyright (C) 2017 Johan Hovold <johan@kernel.org>
 */

#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/usb/of.h>

/**
 * usb_of_get_device_node() - get a USB device node
 * @hub: hub to which device is connected
 * @port1: one-based index of port
 *
 * Look up the node of a USB device given its parent hub device and one-based
 * port number.
 *
 * Return: A pointer to the node with incremented refcount if found, or
 * %NULL otherwise.
 */
struct device_node *usb_of_get_device_node(struct usb_device *hub, int port1)
{
	struct device_node *node;
	u32 reg;

	for_each_child_of_node(hub->dev.of_node, node) {
		if (of_property_read_u32(node, "reg", &reg))
			continue;

		if (reg == port1)
			return node;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(usb_of_get_device_node);

/**
 * usb_of_has_combined_node() - determine whether a device has a combined node
 * @udev: USB device
 *
 * Determine whether a USB device has a so called combined node which is
 * shared with its sole interface. This is the case if and only if the device
 * has a node and its descriptors report the following:
 *
 *	1) bDeviceClass is 0 or 9, and
 *	2) bNumConfigurations is 1, and
 *	3) bNumInterfaces is 1.
 *
 * Return: True iff the device has a device node and its descriptors match the
 * criteria for a combined node.
 */
bool usb_of_has_combined_node(struct usb_device *udev)
{
	struct usb_device_descriptor *ddesc = &udev->descriptor;
	struct usb_config_descriptor *cdesc;

	if (!udev->dev.of_node)
		return false;

	switch (ddesc->bDeviceClass) {
	case USB_CLASS_PER_INTERFACE:
	case USB_CLASS_HUB:
		if (ddesc->bNumConfigurations == 1) {
			cdesc = &udev->config->desc;
			if (cdesc->bNumInterfaces == 1)
				return true;
		}
	}

	return false;
}
EXPORT_SYMBOL_GPL(usb_of_has_combined_node);

static bool usb_of_has_devices_or_graph(const struct usb_device *hub)
{
	const struct device_node *np = hub->dev.of_node;
	struct device_node *child;

	if (of_graph_is_present(np))
		return true;

	for_each_child_of_node(np, child) {
		if (of_property_present(child, "reg")) {
			of_node_put(child);
			return true;
		}
	}

	return false;
}

/**
 * usb_of_get_connect_type() - get a USB hub's port connect_type
 * @hub: hub to which the port belongs
 * @port1: one-based index of port
 *
 * Get the connect_type of @port1 based on the device node for @hub. If the
 * port is described in the OF graph, the connect_type is "hotplug". If the
 * @hub has a child node with a 'reg' property equal to @port1 the
 * connect_type is "hard-wired". If there isn't an OF graph or child node at
 * all then the connect_type is "unknown". Otherwise, the port is considered
 * "unused" because it isn't described at all.
 *
 * Return: A connect_type for @port1 based on the device node for @hub.
*/ enum usb_port_connect_type usb_of_get_connect_type(struct usb_device *hub, int port1) { struct device_node *np, *child, *ep, *remote_np; enum usb_port_connect_type connect_type; /* Only set connect_type if binding has ports/hardwired devices. */ if (!usb_of_has_devices_or_graph(hub)) return USB_PORT_CONNECT_TYPE_UNKNOWN; /* Assume port is unused if there's a graph or a child node. */ connect_type = USB_PORT_NOT_USED; np = hub->dev.of_node; /* * Hotplug ports are connected to an available remote node, e.g. * usb-a-connector compatible node, in the OF graph. */ if (of_graph_is_present(np)) { ep = of_graph_get_endpoint_by_regs(np, port1, -1); if (ep) { remote_np = of_graph_get_remote_port_parent(ep); of_node_put(ep); if (of_device_is_available(remote_np)) connect_type = USB_PORT_CONNECT_TYPE_HOT_PLUG; of_node_put(remote_np); } } /* * Hard-wired ports are child nodes with a reg property corresponding * to the port number, i.e. a usb device. */ child = usb_of_get_device_node(hub, port1); if (of_device_is_available(child)) connect_type = USB_PORT_CONNECT_TYPE_HARD_WIRED; of_node_put(child); return connect_type; } EXPORT_SYMBOL_GPL(usb_of_get_connect_type); /** * usb_of_get_interface_node() - get a USB interface node * @udev: USB device of interface * @config: configuration value * @ifnum: interface number * * Look up the node of a USB interface given its USB device, configuration * value and interface number. * * Return: A pointer to the node with incremented refcount if found, or * %NULL otherwise. */ struct device_node * usb_of_get_interface_node(struct usb_device *udev, u8 config, u8 ifnum) { struct device_node *node; u32 reg[2]; for_each_child_of_node(udev->dev.of_node, node) { if (of_property_read_u32_array(node, "reg", reg, 2)) continue; if (reg[0] == ifnum && reg[1] == config) return node; } return NULL; } EXPORT_SYMBOL_GPL(usb_of_get_interface_node); |
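/*
 * Editor's usage sketch (hypothetical caller, not part of this file): look
 * up the DT node for the device on port 1 of @hub and read a property from
 * it, using usb_of_get_device_node() above. "example-prop" is an invented
 * property name. A matching device tree fragment would look roughly like:
 *
 *	hub@1 {
 *		reg = <1>;
 *		ethernet@1 {
 *			reg = <1>;
 *			example-prop = <42>;
 *		};
 *	};
 */
static int example_read_port_prop(struct usb_device *hub)
{
	struct device_node *node;
	u32 val;
	int ret;

	node = usb_of_get_device_node(hub, 1);
	if (!node)
		return -ENODEV;

	ret = of_property_read_u32(node, "example-prop", &val);
	of_node_put(node);	/* the lookup returned a counted reference */
	return ret;
}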
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *      uvc_entity.c  --  USB Video Class driver
 *
 *      Copyright (C) 2005-2011
 *          Laurent Pinchart (laurent.pinchart@ideasonboard.com)
 */

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/videodev2.h>

#include <media/v4l2-common.h>

#include "uvcvideo.h"

static int uvc_mc_create_links(struct uvc_video_chain *chain,
			       struct uvc_entity *entity)
{
	const u32 flags = MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE;
	struct media_entity *sink;
	unsigned int i;
	int ret;

	sink = (UVC_ENTITY_TYPE(entity) == UVC_TT_STREAMING)
	     ? (entity->vdev ? &entity->vdev->entity : NULL)
	     : &entity->subdev.entity;
	if (sink == NULL)
		return 0;

	for (i = 0; i < entity->num_pads; ++i) {
		struct media_entity *source;
		struct uvc_entity *remote;
		u8 remote_pad;

		if (!(entity->pads[i].flags & MEDIA_PAD_FL_SINK))
			continue;

		remote = uvc_entity_by_id(chain->dev, entity->baSourceID[i]);
		if (remote == NULL || remote->num_pads == 0)
			return -EINVAL;

		source = (UVC_ENTITY_TYPE(remote) == UVC_TT_STREAMING)
		       ? (remote->vdev ? &remote->vdev->entity : NULL)
		       : &remote->subdev.entity;
		if (source == NULL)
			continue;

		remote_pad = remote->num_pads - 1;
		ret = media_create_pad_link(source, remote_pad,
					    sink, i, flags);
		if (ret < 0)
			return ret;
	}

	return 0;
}

static const struct v4l2_subdev_ops uvc_subdev_ops = {
};

void uvc_mc_cleanup_entity(struct uvc_entity *entity)
{
	if (UVC_ENTITY_TYPE(entity) != UVC_TT_STREAMING)
		media_entity_cleanup(&entity->subdev.entity);
	else if (entity->vdev != NULL)
		media_entity_cleanup(&entity->vdev->entity);
}

static int uvc_mc_init_entity(struct uvc_video_chain *chain,
			      struct uvc_entity *entity)
{
	int ret;

	if (UVC_ENTITY_TYPE(entity) != UVC_TT_STREAMING) {
		u32 function;

		v4l2_subdev_init(&entity->subdev, &uvc_subdev_ops);
		strscpy(entity->subdev.name, entity->name,
			sizeof(entity->subdev.name));

		switch (UVC_ENTITY_TYPE(entity)) {
		case UVC_VC_SELECTOR_UNIT:
			function = MEDIA_ENT_F_VID_MUX;
			break;
		case UVC_VC_PROCESSING_UNIT:
		case UVC_VC_EXTENSION_UNIT:
			/* For lack of a better option.
*/ function = MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER; break; case UVC_COMPOSITE_CONNECTOR: case UVC_COMPONENT_CONNECTOR: function = MEDIA_ENT_F_CONN_COMPOSITE; break; case UVC_SVIDEO_CONNECTOR: function = MEDIA_ENT_F_CONN_SVIDEO; break; case UVC_ITT_CAMERA: function = MEDIA_ENT_F_CAM_SENSOR; break; case UVC_TT_VENDOR_SPECIFIC: case UVC_ITT_VENDOR_SPECIFIC: case UVC_ITT_MEDIA_TRANSPORT_INPUT: case UVC_OTT_VENDOR_SPECIFIC: case UVC_OTT_DISPLAY: case UVC_OTT_MEDIA_TRANSPORT_OUTPUT: case UVC_EXTERNAL_VENDOR_SPECIFIC: case UVC_EXT_GPIO_UNIT: default: function = MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN; break; } entity->subdev.entity.function = function; ret = media_entity_pads_init(&entity->subdev.entity, entity->num_pads, entity->pads); if (ret < 0) return ret; ret = v4l2_device_register_subdev(&chain->dev->vdev, &entity->subdev); } else if (entity->vdev != NULL) { ret = media_entity_pads_init(&entity->vdev->entity, entity->num_pads, entity->pads); if (entity->flags & UVC_ENTITY_FLAG_DEFAULT) entity->vdev->entity.flags |= MEDIA_ENT_FL_DEFAULT; } else ret = 0; return ret; } int uvc_mc_register_entities(struct uvc_video_chain *chain) { struct uvc_entity *entity; int ret; list_for_each_entry(entity, &chain->entities, chain) { ret = uvc_mc_init_entity(chain, entity); if (ret < 0) { dev_info(&chain->dev->udev->dev, "Failed to initialize entity for entity %u\n", entity->id); return ret; } } list_for_each_entry(entity, &chain->entities, chain) { ret = uvc_mc_create_links(chain, entity); if (ret < 0) { dev_info(&chain->dev->udev->dev, "Failed to create links for entity %u\n", entity->id); return ret; } } return 0; } |
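/*
 * Editor's illustration (userspace sketch, not driver code): the entities
 * and links registered above can be inspected through the standard media
 * controller API. This enumerates entities on /dev/media0 (an assumed
 * path) with MEDIA_IOC_ENUM_ENTITIES.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/media.h>

int main(void)
{
	struct media_entity_desc desc;
	int fd = open("/dev/media0", O_RDONLY);

	if (fd < 0)
		return 1;

	memset(&desc, 0, sizeof(desc));
	desc.id = MEDIA_ENT_ID_FLAG_NEXT;	/* start at the first entity */
	while (ioctl(fd, MEDIA_IOC_ENUM_ENTITIES, &desc) == 0) {
		printf("entity %u: %s (function 0x%x)\n",
		       desc.id, desc.name, desc.type);
		desc.id |= MEDIA_ENT_ID_FLAG_NEXT;	/* then the next one */
	}

	close(fd);
	return 0;
}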
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * drivers/media/radio/si470x/radio-si470x-usb.c
 *
 * USB driver for radios with Silicon Labs Si470x FM Radio
Receivers * * Copyright (c) 2009 Tobias Lorenz <tobias.lorenz@gmx.net> */ /* * ToDo: * - add firmware download/update support */ /* driver definitions */ #define DRIVER_AUTHOR "Tobias Lorenz <tobias.lorenz@gmx.net>" #define DRIVER_CARD "Silicon Labs Si470x FM Radio" #define DRIVER_DESC "USB radio driver for Si470x FM Radio Receivers" #define DRIVER_VERSION "1.0.10" /* kernel includes */ #include <linux/usb.h> #include <linux/hid.h> #include <linux/slab.h> #include "radio-si470x.h" /* USB Device ID List */ static const struct usb_device_id si470x_usb_driver_id_table[] = { /* Silicon Labs USB FM Radio Reference Design */ { USB_DEVICE_AND_INTERFACE_INFO(0x10c4, 0x818a, USB_CLASS_HID, 0, 0) }, /* ADS/Tech FM Radio Receiver (formerly Instant FM Music) */ { USB_DEVICE_AND_INTERFACE_INFO(0x06e1, 0xa155, USB_CLASS_HID, 0, 0) }, /* KWorld USB FM Radio SnapMusic Mobile 700 (FM700) */ { USB_DEVICE_AND_INTERFACE_INFO(0x1b80, 0xd700, USB_CLASS_HID, 0, 0) }, /* Sanei Electric, Inc. FM USB Radio (sold as DealExtreme.com PCear) */ { USB_DEVICE_AND_INTERFACE_INFO(0x10c5, 0x819a, USB_CLASS_HID, 0, 0) }, /* Axentia ALERT FM USB Receiver */ { USB_DEVICE_AND_INTERFACE_INFO(0x12cf, 0x7111, USB_CLASS_HID, 0, 0) }, /* Terminating entry */ { } }; MODULE_DEVICE_TABLE(usb, si470x_usb_driver_id_table); /************************************************************************** * Module Parameters **************************************************************************/ /* Radio Nr */ static int radio_nr = -1; module_param(radio_nr, int, 0444); MODULE_PARM_DESC(radio_nr, "Radio Nr"); /* USB timeout */ static unsigned int usb_timeout = 500; module_param(usb_timeout, uint, 0644); MODULE_PARM_DESC(usb_timeout, "USB timeout (ms): *500*"); /* RDS buffer blocks */ static unsigned int rds_buf = 100; module_param(rds_buf, uint, 0444); MODULE_PARM_DESC(rds_buf, "RDS buffer entries: *100*"); /* RDS maximum block errors */ static unsigned short max_rds_errors = 1; /* 0 means 0 errors requiring correction */ /* 1 means 1-2 errors requiring correction (used by original USBRadio.exe) */ /* 2 means 3-5 errors requiring correction */ /* 3 means 6+ errors or errors in checkword, correction not possible */ module_param(max_rds_errors, ushort, 0644); MODULE_PARM_DESC(max_rds_errors, "RDS maximum block errors: *1*"); /************************************************************************** * USB HID Reports **************************************************************************/ /* Reports 1-16 give direct read/write access to the 16 Si470x registers */ /* with the (REPORT_ID - 1) corresponding to the register address across USB */ /* endpoint 0 using GET_REPORT and SET_REPORT */ #define REGISTER_REPORT_SIZE (RADIO_REGISTER_SIZE + 1) #define REGISTER_REPORT(reg) ((reg) + 1) /* Report 17 gives direct read/write access to the entire Si470x register */ /* map across endpoint 0 using GET_REPORT and SET_REPORT */ #define ENTIRE_REPORT_SIZE (RADIO_REGISTER_NUM * RADIO_REGISTER_SIZE + 1) #define ENTIRE_REPORT 17 /* Report 18 is used to send the lowest 6 Si470x registers up the HID */ /* interrupt endpoint 1 to Windows every 20 milliseconds for status */ #define RDS_REPORT_SIZE (RDS_REGISTER_NUM * RADIO_REGISTER_SIZE + 1) #define RDS_REPORT 18 /* Report 19: LED state */ #define LED_REPORT_SIZE 3 #define LED_REPORT 19 /* Report 19: stream */ #define STREAM_REPORT_SIZE 3 #define STREAM_REPORT 19 /* Report 20: scratch */ #define SCRATCH_PAGE_SIZE 63 #define SCRATCH_REPORT_SIZE (SCRATCH_PAGE_SIZE + 1) #define SCRATCH_REPORT 20 /* Reports 
19-22: flash upgrade of the C8051F321 */ #define WRITE_REPORT_SIZE 4 #define WRITE_REPORT 19 #define FLASH_REPORT_SIZE 64 #define FLASH_REPORT 20 #define CRC_REPORT_SIZE 3 #define CRC_REPORT 21 #define RESPONSE_REPORT_SIZE 2 #define RESPONSE_REPORT 22 /* Report 23: currently unused, but can accept 60 byte reports on the HID */ /* interrupt out endpoint 2 every 1 millisecond */ #define UNUSED_REPORT 23 #define MAX_REPORT_SIZE 64 /************************************************************************** * Software/Hardware Versions from Scratch Page **************************************************************************/ #define RADIO_HW_VERSION 1 /************************************************************************** * LED State Definitions **************************************************************************/ #define LED_COMMAND 0x35 #define NO_CHANGE_LED 0x00 #define ALL_COLOR_LED 0x01 /* streaming state */ #define BLINK_GREEN_LED 0x02 /* connect state */ #define BLINK_RED_LED 0x04 #define BLINK_ORANGE_LED 0x10 /* disconnect state */ #define SOLID_GREEN_LED 0x20 /* tuning/seeking state */ #define SOLID_RED_LED 0x40 /* bootload state */ #define SOLID_ORANGE_LED 0x80 /************************************************************************** * Stream State Definitions **************************************************************************/ #define STREAM_COMMAND 0x36 #define STREAM_VIDPID 0x00 #define STREAM_AUDIO 0xff /************************************************************************** * Bootloader / Flash Commands **************************************************************************/ /* unique id sent to bootloader and required to put into a bootload state */ #define UNIQUE_BL_ID 0x34 /* mask for the flash data */ #define FLASH_DATA_MASK 0x55 /* bootloader commands */ #define GET_SW_VERSION_COMMAND 0x00 #define SET_PAGE_COMMAND 0x01 #define ERASE_PAGE_COMMAND 0x02 #define WRITE_PAGE_COMMAND 0x03 #define CRC_ON_PAGE_COMMAND 0x04 #define READ_FLASH_BYTE_COMMAND 0x05 #define RESET_DEVICE_COMMAND 0x06 #define GET_HW_VERSION_COMMAND 0x07 #define BLANK 0xff /* bootloader command responses */ #define COMMAND_OK 0x01 #define COMMAND_FAILED 0x02 #define COMMAND_PENDING 0x03 /************************************************************************** * General Driver Functions - REGISTER_REPORTs **************************************************************************/ /* * si470x_get_report - receive a HID report */ static int si470x_get_report(struct si470x_device *radio, void *buf, int size) { unsigned char *report = buf; int retval; retval = usb_control_msg(radio->usbdev, usb_rcvctrlpipe(radio->usbdev, 0), HID_REQ_GET_REPORT, USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN, report[0], 2, buf, size, usb_timeout); if (retval < 0) dev_warn(&radio->intf->dev, "si470x_get_report: usb_control_msg returned %d\n", retval); return retval; } /* * si470x_set_report - send a HID report */ static int si470x_set_report(struct si470x_device *radio, void *buf, int size) { unsigned char *report = buf; int retval; retval = usb_control_msg(radio->usbdev, usb_sndctrlpipe(radio->usbdev, 0), HID_REQ_SET_REPORT, USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT, report[0], 2, buf, size, usb_timeout); if (retval < 0) dev_warn(&radio->intf->dev, "si470x_set_report: usb_control_msg returned %d\n", retval); return retval; } /* * si470x_get_register - read register */ static int si470x_get_register(struct si470x_device *radio, int regnr) { int retval; radio->usb_buf[0] = 
REGISTER_REPORT(regnr); retval = si470x_get_report(radio, radio->usb_buf, REGISTER_REPORT_SIZE); if (retval >= 0) radio->registers[regnr] = get_unaligned_be16(&radio->usb_buf[1]); return (retval < 0) ? -EINVAL : 0; } /* * si470x_set_register - write register */ static int si470x_set_register(struct si470x_device *radio, int regnr) { int retval; radio->usb_buf[0] = REGISTER_REPORT(regnr); put_unaligned_be16(radio->registers[regnr], &radio->usb_buf[1]); retval = si470x_set_report(radio, radio->usb_buf, REGISTER_REPORT_SIZE); return (retval < 0) ? -EINVAL : 0; } /************************************************************************** * General Driver Functions - ENTIRE_REPORT **************************************************************************/ /* * si470x_get_all_registers - read entire registers */ static int si470x_get_all_registers(struct si470x_device *radio) { int retval; unsigned char regnr; radio->usb_buf[0] = ENTIRE_REPORT; retval = si470x_get_report(radio, radio->usb_buf, ENTIRE_REPORT_SIZE); if (retval >= 0) for (regnr = 0; regnr < RADIO_REGISTER_NUM; regnr++) radio->registers[regnr] = get_unaligned_be16( &radio->usb_buf[regnr * RADIO_REGISTER_SIZE + 1]); return (retval < 0) ? -EINVAL : 0; } /************************************************************************** * General Driver Functions - LED_REPORT **************************************************************************/ /* * si470x_set_led_state - sets the led state */ static int si470x_set_led_state(struct si470x_device *radio, unsigned char led_state) { int retval; radio->usb_buf[0] = LED_REPORT; radio->usb_buf[1] = LED_COMMAND; radio->usb_buf[2] = led_state; retval = si470x_set_report(radio, radio->usb_buf, LED_REPORT_SIZE); return (retval < 0) ? -EINVAL : 0; } /************************************************************************** * General Driver Functions - SCRATCH_REPORT **************************************************************************/ /* * si470x_get_scratch_versions - gets the scratch page and version infos */ static int si470x_get_scratch_page_versions(struct si470x_device *radio) { int retval; radio->usb_buf[0] = SCRATCH_REPORT; retval = si470x_get_report(radio, radio->usb_buf, SCRATCH_REPORT_SIZE); if (retval < 0) dev_warn(&radio->intf->dev, "si470x_get_scratch: si470x_get_report returned %d\n", retval); else { radio->software_version = radio->usb_buf[1]; radio->hardware_version = radio->usb_buf[2]; } return (retval < 0) ? -EINVAL : 0; } /************************************************************************** * RDS Driver Functions **************************************************************************/ /* * si470x_int_in_callback - rds callback and processing function * * TODO: do we need to use mutex locks in some sections? */ static void si470x_int_in_callback(struct urb *urb) { struct si470x_device *radio = urb->context; int retval; unsigned char regnr; unsigned char blocknum; unsigned short bler; /* rds block errors */ unsigned short rds; unsigned char tmpbuf[3]; if (urb->status) { if (urb->status == -ENOENT || urb->status == -ECONNRESET || urb->status == -ESHUTDOWN) { return; } else { dev_warn(&radio->intf->dev, "non-zero urb status (%d)\n", urb->status); goto resubmit; /* Maybe we can recover. 
*/ } } /* Sometimes the device returns len 0 packets */ if (urb->actual_length != RDS_REPORT_SIZE) goto resubmit; radio->registers[STATUSRSSI] = get_unaligned_be16(&radio->int_in_buffer[1]); if (radio->registers[STATUSRSSI] & STATUSRSSI_STC) complete(&radio->completion); if ((radio->registers[SYSCONFIG1] & SYSCONFIG1_RDS)) { /* Update RDS registers with URB data */ for (regnr = 1; regnr < RDS_REGISTER_NUM; regnr++) radio->registers[STATUSRSSI + regnr] = get_unaligned_be16(&radio->int_in_buffer[ regnr * RADIO_REGISTER_SIZE + 1]); /* get rds blocks */ if ((radio->registers[STATUSRSSI] & STATUSRSSI_RDSR) == 0) { /* No RDS group ready, better luck next time */ goto resubmit; } if ((radio->registers[STATUSRSSI] & STATUSRSSI_RDSS) == 0) { /* RDS decoder not synchronized */ goto resubmit; } for (blocknum = 0; blocknum < 4; blocknum++) { switch (blocknum) { default: bler = (radio->registers[STATUSRSSI] & STATUSRSSI_BLERA) >> 9; rds = radio->registers[RDSA]; break; case 1: bler = (radio->registers[READCHAN] & READCHAN_BLERB) >> 14; rds = radio->registers[RDSB]; break; case 2: bler = (radio->registers[READCHAN] & READCHAN_BLERC) >> 12; rds = radio->registers[RDSC]; break; case 3: bler = (radio->registers[READCHAN] & READCHAN_BLERD) >> 10; rds = radio->registers[RDSD]; break; } /* Fill the V4L2 RDS buffer */ put_unaligned_le16(rds, &tmpbuf); tmpbuf[2] = blocknum; /* offset name */ tmpbuf[2] |= blocknum << 3; /* received offset */ if (bler > max_rds_errors) tmpbuf[2] |= 0x80; /* uncorrectable errors */ else if (bler > 0) tmpbuf[2] |= 0x40; /* corrected error(s) */ /* copy RDS block to internal buffer */ memcpy(&radio->buffer[radio->wr_index], &tmpbuf, 3); radio->wr_index += 3; /* wrap write pointer */ if (radio->wr_index >= radio->buf_size) radio->wr_index = 0; /* check for overflow */ if (radio->wr_index == radio->rd_index) { /* increment and wrap read pointer */ radio->rd_index += 3; if (radio->rd_index >= radio->buf_size) radio->rd_index = 0; } } if (radio->wr_index != radio->rd_index) wake_up_interruptible(&radio->read_queue); } resubmit: /* Resubmit if we're still running. 
*/ if (radio->int_in_running && radio->usbdev) { retval = usb_submit_urb(radio->int_in_urb, GFP_ATOMIC); if (retval) { dev_warn(&radio->intf->dev, "resubmitting urb failed (%d)", retval); radio->int_in_running = 0; } } radio->status_rssi_auto_update = radio->int_in_running; } static int si470x_fops_open(struct file *file) { return v4l2_fh_open(file); } static int si470x_fops_release(struct file *file) { return v4l2_fh_release(file); } static void si470x_usb_release(struct v4l2_device *v4l2_dev) { struct si470x_device *radio = container_of(v4l2_dev, struct si470x_device, v4l2_dev); usb_free_urb(radio->int_in_urb); v4l2_ctrl_handler_free(&radio->hdl); v4l2_device_unregister(&radio->v4l2_dev); kfree(radio->int_in_buffer); kfree(radio->buffer); kfree(radio->usb_buf); kfree(radio); } /************************************************************************** * Video4Linux Interface **************************************************************************/ /* * si470x_vidioc_querycap - query device capabilities */ static int si470x_vidioc_querycap(struct file *file, void *priv, struct v4l2_capability *capability) { struct si470x_device *radio = video_drvdata(file); strscpy(capability->driver, DRIVER_NAME, sizeof(capability->driver)); strscpy(capability->card, DRIVER_CARD, sizeof(capability->card)); usb_make_path(radio->usbdev, capability->bus_info, sizeof(capability->bus_info)); return 0; } static int si470x_start_usb(struct si470x_device *radio) { int retval; /* initialize interrupt urb */ usb_fill_int_urb(radio->int_in_urb, radio->usbdev, usb_rcvintpipe(radio->usbdev, radio->int_in_endpoint->bEndpointAddress), radio->int_in_buffer, le16_to_cpu(radio->int_in_endpoint->wMaxPacketSize), si470x_int_in_callback, radio, radio->int_in_endpoint->bInterval); radio->int_in_running = 1; mb(); retval = usb_submit_urb(radio->int_in_urb, GFP_KERNEL); if (retval) { dev_info(&radio->intf->dev, "submitting int urb failed (%d)\n", retval); radio->int_in_running = 0; } radio->status_rssi_auto_update = radio->int_in_running; /* start radio */ retval = si470x_start(radio); if (retval < 0) return retval; v4l2_ctrl_handler_setup(&radio->hdl); return retval; } /************************************************************************** * USB Interface **************************************************************************/ /* * si470x_usb_driver_probe - probe for the device */ static int si470x_usb_driver_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct si470x_device *radio; struct usb_host_interface *iface_desc; struct usb_endpoint_descriptor *endpoint; int i, int_end_size, retval; unsigned char version_warning = 0; /* private data allocation and initialization */ radio = k |