// SPDX-License-Identifier: GPL-2.0-only
/*
 *	VLAN netlink control interface
 *
 *	Copyright (c) 2007 Patrick McHardy <kaber@trash.net>
 */

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/netlink.h>
#include <net/rtnetlink.h>
#include "vlan.h"

static const struct nla_policy vlan_policy[IFLA_VLAN_MAX + 1] = {
	[IFLA_VLAN_ID]		= { .type = NLA_U16 },
	[IFLA_VLAN_FLAGS]	= { .len = sizeof(struct ifla_vlan_flags) },
	[IFLA_VLAN_EGRESS_QOS]	= { .type = NLA_NESTED },
	[IFLA_VLAN_INGRESS_QOS] = { .type = NLA_NESTED },
	[IFLA_VLAN_PROTOCOL]	= { .type = NLA_U16 },
};

static const struct nla_policy vlan_map_policy[IFLA_VLAN_QOS_MAX + 1] = {
	[IFLA_VLAN_QOS_MAPPING] = { .len = sizeof(struct ifla_vlan_qos_mapping) },
};

static inline int vlan_validate_qos_map(struct nlattr *attr)
{
	if (!attr)
		return 0;
	return nla_validate_nested_deprecated(attr, IFLA_VLAN_QOS_MAX,
					      vlan_map_policy, NULL);
}

static int vlan_validate(struct nlattr *tb[], struct nlattr *data[],
			 struct netlink_ext_ack *extack)
{
	struct ifla_vlan_flags *flags;
	u16 id;
	int err;

	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
			NL_SET_ERR_MSG_MOD(extack, "Invalid link address");
			return -EINVAL;
		}
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
			NL_SET_ERR_MSG_MOD(extack, "Invalid link address");
			return -EADDRNOTAVAIL;
		}
	}

	if (!data) {
		NL_SET_ERR_MSG_MOD(extack, "VLAN properties not specified");
		return -EINVAL;
	}

	if (data[IFLA_VLAN_PROTOCOL]) {
		switch (nla_get_be16(data[IFLA_VLAN_PROTOCOL])) {
		case htons(ETH_P_8021Q):
		case htons(ETH_P_8021AD):
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack, "Invalid VLAN protocol");
			return -EPROTONOSUPPORT;
		}
	}

	if (data[IFLA_VLAN_ID]) {
		id = nla_get_u16(data[IFLA_VLAN_ID]);
		if (id >= VLAN_VID_MASK) {
			NL_SET_ERR_MSG_MOD(extack, "Invalid VLAN id");
			return -ERANGE;
		}
	}
	if (data[IFLA_VLAN_FLAGS]) {
		flags = nla_data(data[IFLA_VLAN_FLAGS]);
		if ((flags->flags & flags->mask) &
		    ~(VLAN_FLAG_REORDER_HDR | VLAN_FLAG_GVRP |
		      VLAN_FLAG_LOOSE_BINDING | VLAN_FLAG_MVRP |
		      VLAN_FLAG_BRIDGE_BINDING)) {
			NL_SET_ERR_MSG_MOD(extack, "Invalid VLAN flags");
			return -EINVAL;
		}
	}

	err = vlan_validate_qos_map(data[IFLA_VLAN_INGRESS_QOS]);
	if (err < 0) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid ingress QOS map");
		return err;
	}
	err = vlan_validate_qos_map(data[IFLA_VLAN_EGRESS_QOS]);
	if (err < 0) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid egress QOS map");
		return err;
	}
	return 0;
}

static int vlan_changelink(struct net_device *dev, struct nlattr *tb[],
			   struct nlattr *data[],
			   struct netlink_ext_ack *extack)
{
	struct ifla_vlan_flags *flags;
	struct ifla_vlan_qos_mapping *m;
	struct nlattr *attr;
	int rem, err;

	if (data[IFLA_VLAN_FLAGS]) {
		flags = nla_data(data[IFLA_VLAN_FLAGS]);
		err = vlan_dev_change_flags(dev, flags->flags, flags->mask);
		if (err)
			return err;
	}
	if (data[IFLA_VLAN_INGRESS_QOS]) {
		nla_for_each_nested_type(attr, IFLA_VLAN_QOS_MAPPING,
					 data[IFLA_VLAN_INGRESS_QOS], rem) {
			m = nla_data(attr);
			vlan_dev_set_ingress_priority(dev, m->to, m->from);
		}
	}
	if (data[IFLA_VLAN_EGRESS_QOS]) {
		nla_for_each_nested_type(attr, IFLA_VLAN_QOS_MAPPING,
					 data[IFLA_VLAN_EGRESS_QOS], rem) {
			m = nla_data(attr);
			err = vlan_dev_set_egress_priority(dev, m->from, m->to);
			if (err)
				return err;
		}
	}
	return 0;
}

static int vlan_newlink(struct net_device *dev,
			struct rtnl_newlink_params *params,
			struct netlink_ext_ack *extack)
{
	struct net *link_net = rtnl_newlink_link_net(params);
	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
	struct nlattr **data = params->data;
	struct nlattr **tb = params->tb;
	struct net_device *real_dev;
	unsigned int max_mtu;
	__be16 proto;
	int err;

	if (!data[IFLA_VLAN_ID]) {
		NL_SET_ERR_MSG_MOD(extack, "VLAN id not specified");
		return -EINVAL;
	}

	if (!tb[IFLA_LINK]) {
		NL_SET_ERR_MSG_MOD(extack, "link not specified");
		return -EINVAL;
	}

	real_dev = __dev_get_by_index(link_net, nla_get_u32(tb[IFLA_LINK]));
	if (!real_dev) {
		NL_SET_ERR_MSG_MOD(extack, "link does not exist");
		return -ENODEV;
	}

	proto = nla_get_be16_default(data[IFLA_VLAN_PROTOCOL],
				     htons(ETH_P_8021Q));

	vlan->vlan_proto = proto;
	vlan->vlan_id	 = nla_get_u16(data[IFLA_VLAN_ID]);
	vlan->real_dev	 = real_dev;
	dev->priv_flags |= (real_dev->priv_flags & IFF_XMIT_DST_RELEASE);
	vlan->flags	 = VLAN_FLAG_REORDER_HDR;

	err = vlan_check_real_dev(real_dev, vlan->vlan_proto, vlan->vlan_id,
				  extack);
	if (err < 0)
		return err;

	max_mtu = netif_reduces_vlan_mtu(real_dev) ? real_dev->mtu - VLAN_HLEN :
						     real_dev->mtu;
	if (!tb[IFLA_MTU])
		dev->mtu = max_mtu;
	else if (dev->mtu > max_mtu)
		return -EINVAL;

	/* Note: If this initial vlan_changelink() fails, we need
	 * to call vlan_dev_free_egress_priority() to free memory.
	 */
	err = vlan_changelink(dev, tb, data, extack);
	if (!err)
		err = register_vlan_dev(dev, extack);
	if (err)
		vlan_dev_free_egress_priority(dev);
	return err;
}

static inline size_t vlan_qos_map_size(unsigned int n)
{
	if (n == 0)
		return 0;
	/* IFLA_VLAN_{EGRESS,INGRESS}_QOS + n * IFLA_VLAN_QOS_MAPPING */
	return nla_total_size(sizeof(struct nlattr)) +
	       nla_total_size(sizeof(struct ifla_vlan_qos_mapping)) * n;
}

static size_t vlan_get_size(const struct net_device *dev)
{
	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);

	return nla_total_size(2) +	/* IFLA_VLAN_PROTOCOL */
	       nla_total_size(2) +	/* IFLA_VLAN_ID */
	       nla_total_size(sizeof(struct ifla_vlan_flags)) + /* IFLA_VLAN_FLAGS */
	       vlan_qos_map_size(vlan->nr_ingress_mappings) +
	       vlan_qos_map_size(vlan->nr_egress_mappings);
}

static int vlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
	struct vlan_priority_tci_mapping *pm;
	struct ifla_vlan_flags f;
	struct ifla_vlan_qos_mapping m;
	struct nlattr *nest;
	unsigned int i;

	if (nla_put_be16(skb, IFLA_VLAN_PROTOCOL, vlan->vlan_proto) ||
	    nla_put_u16(skb, IFLA_VLAN_ID, vlan->vlan_id))
		goto nla_put_failure;
	if (vlan->flags) {
		f.flags = vlan->flags;
		f.mask	= ~0;
		if (nla_put(skb, IFLA_VLAN_FLAGS, sizeof(f), &f))
			goto nla_put_failure;
	}
	if (vlan->nr_ingress_mappings) {
		nest = nla_nest_start_noflag(skb, IFLA_VLAN_INGRESS_QOS);
		if (nest == NULL)
			goto nla_put_failure;

		for (i = 0; i < ARRAY_SIZE(vlan->ingress_priority_map); i++) {
			if (!vlan->ingress_priority_map[i])
				continue;

			m.from	= i;
			m.to	= vlan->ingress_priority_map[i];
			if (nla_put(skb, IFLA_VLAN_QOS_MAPPING,
				    sizeof(m), &m))
				goto nla_put_failure;
		}
		nla_nest_end(skb, nest);
	}

	if (vlan->nr_egress_mappings) {
		nest = nla_nest_start_noflag(skb, IFLA_VLAN_EGRESS_QOS);
		if (nest == NULL)
			goto nla_put_failure;

		for (i = 0; i < ARRAY_SIZE(vlan->egress_priority_map); i++) {
			for (pm = vlan->egress_priority_map[i]; pm;
			     pm = pm->next) {
				if (!pm->vlan_qos)
					continue;

				m.from	= pm->priority;
				m.to	= (pm->vlan_qos >> 13) & 0x7;
				if (nla_put(skb, IFLA_VLAN_QOS_MAPPING,
					    sizeof(m), &m))
					goto nla_put_failure;
			}
		}
		nla_nest_end(skb, nest);
	}
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static struct net *vlan_get_link_net(const struct net_device *dev)
{
	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;

	return dev_net(real_dev);
}

struct rtnl_link_ops vlan_link_ops __read_mostly = {
	.kind		= "vlan",
	.maxtype	= IFLA_VLAN_MAX,
	.policy		= vlan_policy,
	.priv_size	= sizeof(struct vlan_dev_priv),
	.setup		= vlan_setup,
	.validate	= vlan_validate,
	.newlink	= vlan_newlink,
	.changelink	= vlan_changelink,
	.dellink	= unregister_vlan_dev,
	.get_size	= vlan_get_size,
	.fill_info	= vlan_fill_info,
	.get_link_net	= vlan_get_link_net,
};

int __init vlan_netlink_init(void)
{
	return rtnl_link_register(&vlan_link_ops);
}

void __exit vlan_netlink_fini(void)
{
	rtnl_link_unregister(&vlan_link_ops);
}

MODULE_ALIAS_RTNL_LINK("vlan");
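For orientation, the rtnl_link_ops above is what iproute2's "ip link ... type vlan" talks to: the IFLA_VLAN_* attributes validated in vlan_validate() and applied in vlan_newlink()/vlan_changelink() map directly onto the command-line keywords. A minimal illustrative sketch follows; the parent device name "eth0" is an assumption, not something taken from this file.

/*
 * Illustrative userspace usage (iproute2), assuming a parent device eth0:
 *
 *	ip link add link eth0 name eth0.100 type vlan id 100 \
 *		protocol 802.1q reorder_hdr on \
 *		egress-qos-map 0:1 ingress-qos-map 1:0
 *	ip -d link show eth0.100	# dumped via vlan_fill_info()
 *	ip link set eth0.100 type vlan gvrp on	# handled by vlan_changelink()
 */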
/*
 * IPv6 specific functions of netfilter core
 *
 * Rusty Russell (C) 2000 -- This code is GPL.
 * Patrick McHardy (C) 2006-2012
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/ipv6.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>
#include <linux/export.h>
#include <net/addrconf.h>
#include <net/dst.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/xfrm.h>
#include <net/netfilter/nf_queue.h>
#include <net/netfilter/nf_conntrack_bridge.h>
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
#include "../bridge/br_private.h"

int ip6_route_me_harder(struct net *net, struct sock *sk_partial, struct sk_buff *skb)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct sock *sk = sk_to_full_sk(sk_partial);
	struct net_device *dev = skb_dst(skb)->dev;
	struct flow_keys flkeys;
	unsigned int hh_len;
	struct dst_entry *dst;
	int strict = (ipv6_addr_type(&iph->daddr) &
		      (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL));
	struct flowi6 fl6 = {
		.flowi6_l3mdev = l3mdev_master_ifindex(dev),
		.flowi6_mark = skb->mark,
		.flowi6_uid = sock_net_uid(net, sk),
		.daddr = iph->daddr,
		.saddr = iph->saddr,
		.flowlabel = ip6_flowinfo(iph),
	};
	int err;

	if (sk && sk->sk_bound_dev_if)
		fl6.flowi6_oif = sk->sk_bound_dev_if;
	else if (strict)
		fl6.flowi6_oif = dev->ifindex;

	fib6_rules_early_flow_dissect(net, skb, &fl6, &flkeys);
	dst = ip6_route_output(net, sk, &fl6);
	err = dst->error;
	if (err) {
		IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
		net_dbg_ratelimited("ip6_route_me_harder: No more route\n");
		dst_release(dst);
		return err;
	}

	/* Drop old route. */
	skb_dst_drop(skb);

	skb_dst_set(skb, dst);

#ifdef CONFIG_XFRM
	if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) &&
	    xfrm_decode_session(net, skb, flowi6_to_flowi(&fl6), AF_INET6) == 0) {
		skb_dst_set(skb, NULL);
		dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), sk, 0);
		if (IS_ERR(dst))
			return PTR_ERR(dst);
		skb_dst_set(skb, dst);
	}
#endif

	/* Change in oif may mean change in hh_len. */
	hh_len = skb_dst(skb)->dev->hard_header_len;
	if (skb_headroom(skb) < hh_len &&
	    pskb_expand_head(skb, HH_DATA_ALIGN(hh_len - skb_headroom(skb)),
			     0, GFP_ATOMIC))
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(ip6_route_me_harder);

static int nf_ip6_reroute(struct sk_buff *skb,
			  const struct nf_queue_entry *entry)
{
	struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry);

	if (entry->state.hook == NF_INET_LOCAL_OUT) {
		const struct ipv6hdr *iph = ipv6_hdr(skb);

		if (!ipv6_addr_equal(&iph->daddr, &rt_info->daddr) ||
		    !ipv6_addr_equal(&iph->saddr, &rt_info->saddr) ||
		    skb->mark != rt_info->mark)
			return ip6_route_me_harder(entry->state.net, entry->state.sk, skb);
	}
	return 0;
}

int __nf_ip6_route(struct net *net, struct dst_entry **dst,
		   struct flowi *fl, bool strict)
{
	static const struct ipv6_pinfo fake_pinfo;
	static const struct inet_sock fake_sk = {
		/* makes ip6_route_output set RT6_LOOKUP_F_IFACE: */
		.sk.sk_bound_dev_if = 1,
		.pinet6 = (struct ipv6_pinfo *) &fake_pinfo,
	};
	const void *sk = strict ? &fake_sk : NULL;
	struct dst_entry *result;
	int err;

	result = ip6_route_output(net, sk, &fl->u.ip6);
	err = result->error;
	if (err)
		dst_release(result);
	else
		*dst = result;
	return err;
}
EXPORT_SYMBOL_GPL(__nf_ip6_route);

int br_ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
		    struct nf_bridge_frag_data *data,
		    int (*output)(struct net *, struct sock *sk,
				  const struct nf_bridge_frag_data *data,
				  struct sk_buff *))
{
	int frag_max_size = BR_INPUT_SKB_CB(skb)->frag_max_size;
	u8 tstamp_type = skb->tstamp_type;
	ktime_t tstamp = skb->tstamp;
	struct ip6_frag_state state;
	u8 *prevhdr, nexthdr = 0;
	unsigned int mtu, hlen;
	int hroom, err = 0;
	__be32 frag_id;

	err = ip6_find_1stfragopt(skb, &prevhdr);
	if (err < 0)
		goto blackhole;
	hlen = err;
	nexthdr = *prevhdr;

	mtu = skb->dev->mtu;
	if (frag_max_size > mtu ||
	    frag_max_size < IPV6_MIN_MTU)
		goto blackhole;

	mtu = frag_max_size;
	if (mtu < hlen + sizeof(struct frag_hdr) + 8)
		goto blackhole;
	mtu -= hlen + sizeof(struct frag_hdr);

	frag_id = ipv6_select_ident(net, &ipv6_hdr(skb)->daddr,
				    &ipv6_hdr(skb)->saddr);

	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    (err = skb_checksum_help(skb)))
		goto blackhole;

	hroom = LL_RESERVED_SPACE(skb->dev);
	if (skb_has_frag_list(skb)) {
		unsigned int first_len = skb_pagelen(skb);
		struct ip6_fraglist_iter iter;
		struct sk_buff *frag2;

		if (first_len - hlen > mtu)
			goto blackhole;

		if (skb_cloned(skb) ||
		    skb_headroom(skb) < (hroom + sizeof(struct frag_hdr)))
			goto slow_path;

		skb_walk_frags(skb, frag2) {
			if (frag2->len > mtu)
				goto blackhole;

			/* Partially cloned skb? */
			if (skb_shared(frag2) ||
			    skb_headroom(frag2) < (hlen + hroom + sizeof(struct frag_hdr)))
				goto slow_path;
		}

		err = ip6_fraglist_init(skb, hlen, prevhdr, nexthdr, frag_id,
					&iter);
		if (err < 0)
			goto blackhole;

		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one went down.
			 */
			if (iter.frag)
				ip6_fraglist_prepare(skb, &iter);

			skb_set_delivery_time(skb, tstamp, tstamp_type);
			err = output(net, sk, data, skb);
			if (err || !iter.frag)
				break;

			skb = ip6_fraglist_next(&iter);
		}

		kfree(iter.tmp_hdr);
		if (!err)
			return 0;

		kfree_skb_list(iter.frag);
		return err;
	}
slow_path:
	/* This is a linearized skbuff, the original geometry is lost for us.
	 * This may also be a clone skbuff, we could preserve the geometry for
	 * the copies but probably not worth the effort.
	 */
	ip6_frag_init(skb, hlen, mtu, skb->dev->needed_tailroom,
		      LL_RESERVED_SPACE(skb->dev), prevhdr, nexthdr, frag_id,
		      &state);

	while (state.left > 0) {
		struct sk_buff *skb2;

		skb2 = ip6_frag_next(skb, &state);
		if (IS_ERR(skb2)) {
			err = PTR_ERR(skb2);
			goto blackhole;
		}

		skb_set_delivery_time(skb2, tstamp, tstamp_type);
		err = output(net, sk, data, skb2);
		if (err)
			goto blackhole;
	}
	consume_skb(skb);
	return err;

blackhole:
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL_GPL(br_ip6_fragment);

static const struct nf_ipv6_ops ipv6ops = {
#if IS_MODULE(CONFIG_IPV6)
	.chk_addr		= ipv6_chk_addr,
	.route_me_harder	= ip6_route_me_harder,
	.dev_get_saddr		= ipv6_dev_get_saddr,
	.route			= __nf_ip6_route,
#if IS_ENABLED(CONFIG_SYN_COOKIES)
	.cookie_init_sequence	= __cookie_v6_init_sequence,
	.cookie_v6_check	= __cookie_v6_check,
#endif
#endif
	.route_input		= ip6_route_input,
	.fragment		= ip6_fragment,
	.reroute		= nf_ip6_reroute,
#if IS_MODULE(CONFIG_IPV6)
	.br_fragment		= br_ip6_fragment,
#endif
};

int __init ipv6_netfilter_init(void)
{
	RCU_INIT_POINTER(nf_ipv6_ops, &ipv6ops);
	return 0;
}

/* This can be called from inet6_init() on errors, so it cannot
 * be marked __exit. -DaveM
 */
void ipv6_netfilter_fini(void)
{
	RCU_INIT_POINTER(nf_ipv6_ops, NULL);
}
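As a reading aid: ip6_route_me_harder() exists for the case where a hook has rewritten addressing fields on a locally generated packet and the cached route is therefore stale, which is exactly what nf_ip6_reroute() checks for reinjected queue entries. A minimal, hypothetical sketch of such a caller is below; example_mangle_hook and the address tweak are illustrative assumptions, not code from this file, and only the ip6_route_me_harder() call mirrors the real API.

/*
 * Hedged sketch: a hypothetical NF_INET_LOCAL_OUT hook that changes the
 * IPv6 destination and must then re-route the skb.
 */
static unsigned int example_mangle_hook(void *priv, struct sk_buff *skb,
					const struct nf_hook_state *state)
{
	struct ipv6hdr *iph = ipv6_hdr(skb);

	iph->daddr.s6_addr[15] ^= 1;	/* illustrative rewrite only */

	/* The old dst no longer matches the header; recompute it. */
	if (ip6_route_me_harder(state->net, state->sk, skb))
		return NF_DROP;
	return NF_ACCEPT;
}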
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Ram backed block device driver.
 *
 * Copyright (C) 2007 Nick Piggin
 * Copyright (C) 2007 Novell Inc.
 *
 * Parts derived from drivers/block/rd.c, and drivers/block/loop.c, copyright
 * of their respective owners.
 */

#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <linux/xarray.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/debugfs.h>

#include <linux/uaccess.h>

/*
 * Each block ramdisk device has a xarray brd_pages of pages that stores
 * the pages containing the block device's contents.
 */
struct brd_device {
	int			brd_number;
	struct gendisk		*brd_disk;
	struct list_head	brd_list;

	/*
	 * Backing store of pages. This is the contents of the block device.
	 */
	struct xarray		brd_pages;
	u64			brd_nr_pages;
};

/*
 * Look up and return a brd's page for a given sector.
 */
static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector)
{
	return xa_load(&brd->brd_pages, sector >> PAGE_SECTORS_SHIFT);
}

/*
 * Insert a new page for a given sector, if one does not already exist.
 */
static struct page *brd_insert_page(struct brd_device *brd, sector_t sector,
		blk_opf_t opf)
	__releases(rcu)
	__acquires(rcu)
{
	gfp_t gfp = (opf & REQ_NOWAIT) ? GFP_NOWAIT : GFP_NOIO;
	struct page *page, *ret;

	rcu_read_unlock();
	page = alloc_page(gfp | __GFP_ZERO | __GFP_HIGHMEM);
	rcu_read_lock();
	if (!page)
		return ERR_PTR(-ENOMEM);

	xa_lock(&brd->brd_pages);
	ret = __xa_cmpxchg(&brd->brd_pages, sector >> PAGE_SECTORS_SHIFT, NULL,
			page, gfp);
	if (ret) {
		xa_unlock(&brd->brd_pages);
		__free_page(page);
		if (xa_is_err(ret))
			return ERR_PTR(xa_err(ret));
		return ret;
	}
	brd->brd_nr_pages++;
	xa_unlock(&brd->brd_pages);
	return page;
}

/*
 * Free all backing store pages and xarray. This must only be called when
 * there are no other users of the device.
 */
static void brd_free_pages(struct brd_device *brd)
{
	struct page *page;
	pgoff_t idx;

	xa_for_each(&brd->brd_pages, idx, page) {
		__free_page(page);
		cond_resched();
	}

	xa_destroy(&brd->brd_pages);
}

/*
 * Process a single segment. The segment is capped to not cross page boundaries
 * in both the bio and the brd backing memory.
 */
static bool brd_rw_bvec(struct brd_device *brd, struct bio *bio)
{
	struct bio_vec bv = bio_iter_iovec(bio, bio->bi_iter);
	sector_t sector = bio->bi_iter.bi_sector;
	u32 offset = (sector & (PAGE_SECTORS - 1)) << SECTOR_SHIFT;
	blk_opf_t opf = bio->bi_opf;
	struct page *page;
	void *kaddr;

	bv.bv_len = min_t(u32, bv.bv_len, PAGE_SIZE - offset);

	rcu_read_lock();
	page = brd_lookup_page(brd, sector);
	if (!page && op_is_write(opf)) {
		page = brd_insert_page(brd, sector, opf);
		if (IS_ERR(page))
			goto out_error;
	}

	kaddr = bvec_kmap_local(&bv);
	if (op_is_write(opf)) {
		memcpy_to_page(page, offset, kaddr, bv.bv_len);
	} else {
		if (page)
			memcpy_from_page(kaddr, page, offset, bv.bv_len);
		else
			memset(kaddr, 0, bv.bv_len);
	}
	kunmap_local(kaddr);
	rcu_read_unlock();

	bio_advance_iter_single(bio, &bio->bi_iter, bv.bv_len);
	return true;

out_error:
	rcu_read_unlock();
	if (PTR_ERR(page) == -ENOMEM && (opf & REQ_NOWAIT))
		bio_wouldblock_error(bio);
	else
		bio_io_error(bio);
	return false;
}

static void brd_free_one_page(struct rcu_head *head)
{
	struct page *page = container_of(head, struct page, rcu_head);

	__free_page(page);
}

static void brd_do_discard(struct brd_device *brd, sector_t sector, u32 size)
{
	sector_t aligned_sector = round_up(sector, PAGE_SECTORS);
	sector_t aligned_end = round_down(
			sector + (size >> SECTOR_SHIFT), PAGE_SECTORS);
	struct page *page;

	if (aligned_end <= aligned_sector)
		return;

	xa_lock(&brd->brd_pages);
	while (aligned_sector < aligned_end && aligned_sector < rd_size * 2) {
		page = __xa_erase(&brd->brd_pages, aligned_sector >> PAGE_SECTORS_SHIFT);
		if (page) {
			call_rcu(&page->rcu_head, brd_free_one_page);
			brd->brd_nr_pages--;
		}
		aligned_sector += PAGE_SECTORS;
	}
	xa_unlock(&brd->brd_pages);
}

static void brd_submit_bio(struct bio *bio)
{
	struct brd_device *brd = bio->bi_bdev->bd_disk->private_data;

	if (unlikely(op_is_discard(bio->bi_opf))) {
		brd_do_discard(brd, bio->bi_iter.bi_sector,
			       bio->bi_iter.bi_size);
		bio_endio(bio);
		return;
	}

	do {
		if (!brd_rw_bvec(brd, bio))
			return;
	} while (bio->bi_iter.bi_size);

	bio_endio(bio);
}

static const struct block_device_operations brd_fops = {
	.owner =		THIS_MODULE,
	.submit_bio =		brd_submit_bio,
};

/*
 * And now the modules code and kernel interface.
 */
static int rd_nr = CONFIG_BLK_DEV_RAM_COUNT;
module_param(rd_nr, int, 0444);
MODULE_PARM_DESC(rd_nr, "Maximum number of brd devices");

unsigned long rd_size = CONFIG_BLK_DEV_RAM_SIZE;
module_param(rd_size, ulong, 0444);
MODULE_PARM_DESC(rd_size, "Size of each RAM disk in kbytes.");

static int max_part = 1;
module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "Num Minors to reserve between devices");

MODULE_DESCRIPTION("Ram backed block device driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(RAMDISK_MAJOR);
MODULE_ALIAS("rd");

#ifndef MODULE
/* Legacy boot options - nonmodular */
static int __init ramdisk_size(char *str)
{
	rd_size = simple_strtol(str, NULL, 0);
	return 1;
}
__setup("ramdisk_size=", ramdisk_size);
#endif

/*
 * The device scheme is derived from loop.c. Keep them in synch where possible
 * (should share code eventually).
 */
static LIST_HEAD(brd_devices);
static DEFINE_MUTEX(brd_devices_mutex);
static struct dentry *brd_debugfs_dir;

static struct brd_device *brd_find_or_alloc_device(int i)
{
	struct brd_device *brd;

	mutex_lock(&brd_devices_mutex);
	list_for_each_entry(brd, &brd_devices, brd_list) {
		if (brd->brd_number == i) {
			mutex_unlock(&brd_devices_mutex);
			return ERR_PTR(-EEXIST);
		}
	}

	brd = kzalloc(sizeof(*brd), GFP_KERNEL);
	if (!brd) {
		mutex_unlock(&brd_devices_mutex);
		return ERR_PTR(-ENOMEM);
	}
	brd->brd_number	= i;
	list_add_tail(&brd->brd_list, &brd_devices);
	mutex_unlock(&brd_devices_mutex);
	return brd;
}

static void brd_free_device(struct brd_device *brd)
{
	mutex_lock(&brd_devices_mutex);
	list_del(&brd->brd_list);
	mutex_unlock(&brd_devices_mutex);
	kfree(brd);
}

static int brd_alloc(int i)
{
	struct brd_device *brd;
	struct gendisk *disk;
	char buf[DISK_NAME_LEN];
	int err = -ENOMEM;
	struct queue_limits lim = {
		/*
		 * This is so fdisk will align partitions on 4k, because of
		 * direct_access API needing 4k alignment, returning a PFN
		 * (This is only a problem on very small devices <= 4M,
		 *  otherwise fdisk will align on 1M. Regardless this call
		 *  is harmless)
		 */
		.physical_block_size	= PAGE_SIZE,
		.max_hw_discard_sectors	= UINT_MAX,
		.max_discard_segments	= 1,
		.discard_granularity	= PAGE_SIZE,
		.features		= BLK_FEAT_SYNCHRONOUS |
					  BLK_FEAT_NOWAIT,
	};

	brd = brd_find_or_alloc_device(i);
	if (IS_ERR(brd))
		return PTR_ERR(brd);
	xa_init(&brd->brd_pages);

	snprintf(buf, DISK_NAME_LEN, "ram%d", i);
	if (!IS_ERR_OR_NULL(brd_debugfs_dir))
		debugfs_create_u64(buf, 0444, brd_debugfs_dir,
				&brd->brd_nr_pages);

	disk = brd->brd_disk = blk_alloc_disk(&lim, NUMA_NO_NODE);
	if (IS_ERR(disk)) {
		err = PTR_ERR(disk);
		goto out_free_dev;
	}
	disk->major		= RAMDISK_MAJOR;
	disk->first_minor	= i * max_part;
	disk->minors		= max_part;
	disk->fops		= &brd_fops;
	disk->private_data	= brd;
	strscpy(disk->disk_name, buf, DISK_NAME_LEN);
	set_capacity(disk, rd_size * 2);

	err = add_disk(disk);
	if (err)
		goto out_cleanup_disk;

	return 0;

out_cleanup_disk:
	put_disk(disk);
out_free_dev:
	brd_free_device(brd);
	return err;
}

static void brd_probe(dev_t dev)
{
	brd_alloc(MINOR(dev) / max_part);
}

static void brd_cleanup(void)
{
	struct brd_device *brd, *next;

	debugfs_remove_recursive(brd_debugfs_dir);

	list_for_each_entry_safe(brd, next, &brd_devices, brd_list) {
		del_gendisk(brd->brd_disk);
		put_disk(brd->brd_disk);
		brd_free_pages(brd);
		brd_free_device(brd);
	}
}

static inline void brd_check_and_reset_par(void)
{
	if (unlikely(!max_part))
		max_part = 1;

	/*
	 * make sure 'max_part' can be divided exactly by (1U << MINORBITS),
	 * otherwise, it is possible to get the same dev_t when adding partitions.
	 */
	if ((1U << MINORBITS) % max_part != 0)
		max_part = 1UL << fls(max_part);

	if (max_part > DISK_MAX_PARTS) {
		pr_info("brd: max_part can't be larger than %d, reset max_part = %d.\n",
			DISK_MAX_PARTS, DISK_MAX_PARTS);
		max_part = DISK_MAX_PARTS;
	}
}

static int __init brd_init(void)
{
	int err, i;

	/*
	 * brd module now has a feature to instantiate underlying device
	 * structure on-demand, provided that there is an access dev node.
	 *
	 * (1) if rd_nr is specified, create that many upfront. else
	 *     it defaults to CONFIG_BLK_DEV_RAM_COUNT
	 * (2) User can further extend brd devices by creating dev nodes themselves
	 *     and have kernel automatically instantiate actual device
	 *     on-demand. Example:
	 *		mknod /path/devnod_name b 1 X	# 1 is the rd major
	 *		fdisk -l /path/devnod_name
	 *	If (X / max_part) was not already created it will be created
	 *	dynamically.
	 */

	brd_check_and_reset_par();

	brd_debugfs_dir = debugfs_create_dir("ramdisk_pages", NULL);

	if (__register_blkdev(RAMDISK_MAJOR, "ramdisk", brd_probe)) {
		err = -EIO;
		goto out_free;
	}

	for (i = 0; i < rd_nr; i++)
		brd_alloc(i);

	pr_info("brd: module loaded\n");
	return 0;

out_free:
	brd_cleanup();

	pr_info("brd: module NOT loaded !!!\n");
	return err;
}

static void __exit brd_exit(void)
{
	unregister_blkdev(RAMDISK_MAJOR, "ramdisk");
	brd_cleanup();

	pr_info("brd: module unloaded\n");
}

module_init(brd_init);
module_exit(brd_exit);
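Complementing the on-demand example in the brd_init() comment above, here is an illustrative end-to-end use of the module parameters defined in this file (rd_nr, rd_size, max_part). The mount point and filesystem choice are assumptions for the sketch, not part of the driver.

/*
 * Illustrative usage sketch (values are arbitrary):
 *
 *	modprobe brd rd_nr=2 rd_size=65536 max_part=4
 *	# creates /dev/ram0 and /dev/ram1, each 64 MiB (rd_size is in KiB,
 *	# hence set_capacity(disk, rd_size * 2) in 512-byte sectors)
 *	mkfs.ext4 /dev/ram0
 *	mount /dev/ram0 /mnt
 *	cat /sys/kernel/debug/ramdisk_pages/ram0   # pages currently backed
 */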
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 */

#ifndef __GLOCK_DOT_H__
#define __GLOCK_DOT_H__

#include <linux/sched.h>
#include <linux/parser.h>
#include "incore.h"
#include "util.h"

/* Options for hostdata parser */

enum {
	Opt_jid,
	Opt_id,
	Opt_first,
	Opt_nodir,
	Opt_err,
};

/*
 * lm_lockname types
 */

#define LM_TYPE_RESERVED	0x00
#define LM_TYPE_NONDISK		0x01
#define LM_TYPE_INODE		0x02
#define LM_TYPE_RGRP		0x03
#define LM_TYPE_META		0x04
#define LM_TYPE_IOPEN		0x05
#define LM_TYPE_FLOCK		0x06
#define LM_TYPE_PLOCK		0x07
#define LM_TYPE_QUOTA		0x08
#define LM_TYPE_JOURNAL		0x09

/*
 * lm_lock() states
 *
 * SHARED is compatible with SHARED, not with DEFERRED or EX.
 * DEFERRED is compatible with DEFERRED, not with SHARED or EX.
 */

#define LM_ST_UNLOCKED		0
#define LM_ST_EXCLUSIVE		1
#define LM_ST_DEFERRED		2
#define LM_ST_SHARED		3

/*
 * lm_lock() flags
 *
 * LM_FLAG_TRY
 * Don't wait to acquire the lock if it can't be granted immediately.
 *
 * LM_FLAG_TRY_1CB
 * Send one blocking callback if TRY is set and the lock is not granted.
 *
 * LM_FLAG_NOEXP
 * GFS sets this flag on lock requests it makes while doing journal recovery.
 * These special requests should not be blocked due to the recovery like
 * ordinary locks would be.
 *
 * LM_FLAG_ANY
 * A SHARED request may also be granted in DEFERRED, or a DEFERRED request may
 * also be granted in SHARED.  The preferred state is whichever is compatible
 * with other granted locks, or the specified state if no other locks exist.
 *
 * LM_FLAG_NODE_SCOPE
 * This holder agrees to share the lock within this node. In other words,
 * the glock is held in EX mode according to DLM, but local holders on the
 * same node can share it.
 */

#define LM_FLAG_TRY		0x0001
#define LM_FLAG_TRY_1CB		0x0002
#define LM_FLAG_NOEXP		0x0004
#define LM_FLAG_ANY		0x0008
#define LM_FLAG_NODE_SCOPE	0x0020
#define GL_ASYNC		0x0040
#define GL_EXACT		0x0080
#define GL_SKIP			0x0100
#define GL_NOPID		0x0200
#define GL_NOCACHE		0x0400
#define GL_NOBLOCK		0x0800

/*
 * lm_async_cb return flags
 *
 * LM_OUT_ST_MASK
 * Masks the lower two bits of lock state in the returned value.
 *
 * LM_OUT_CANCELED
 * The lock request was canceled.
 */

#define LM_OUT_ST_MASK		0x00000003
#define LM_OUT_CANCELED		0x00000008
#define LM_OUT_ERROR		0x00000004

/*
 * lm_recovery_done() messages
 */

#define LM_RD_GAVEUP		308
#define LM_RD_SUCCESS		309

#define GLR_TRYFAILED		13

#define GL_GLOCK_MAX_HOLD	(long)(HZ / 5)
#define GL_GLOCK_DFT_HOLD	(long)(HZ / 5)
#define GL_GLOCK_MIN_HOLD	(long)(10)
#define GL_GLOCK_HOLD_INCR	(long)(HZ / 20)
#define GL_GLOCK_HOLD_DECR	(long)(HZ / 40)

struct lm_lockops {
	const char *lm_proto_name;
	int (*lm_mount) (struct gfs2_sbd *sdp, const char *table);
	void (*lm_first_done) (struct gfs2_sbd *sdp);
	void (*lm_recovery_result) (struct gfs2_sbd *sdp, unsigned int jid,
				    unsigned int result);
	void (*lm_unmount) (struct gfs2_sbd *sdp);
	void (*lm_withdraw) (struct gfs2_sbd *sdp);
	void (*lm_put_lock) (struct gfs2_glock *gl);
	int (*lm_lock) (struct gfs2_glock *gl, unsigned int req_state,
			unsigned int flags);
	void (*lm_cancel) (struct gfs2_glock *gl);
	const match_table_t *lm_tokens;
};

struct gfs2_glock_aspace {
	struct gfs2_glock glock;
	struct address_space mapping;
};

static inline struct gfs2_holder *gfs2_glock_is_locked_by_me(struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;
	struct pid *pid;

	/* Look in glock's list of holders for one with current task as owner */
	spin_lock(&gl->gl_lockref.lock);
	pid = task_pid(current);
	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
			break;
		if (gh->gh_owner_pid == pid)
			goto out;
	}
	gh = NULL;
out:
	spin_unlock(&gl->gl_lockref.lock);

	return gh;
}

static inline struct address_space *gfs2_glock2aspace(struct gfs2_glock *gl)
{
	if (gl->gl_ops->go_flags & GLOF_ASPACE) {
		struct gfs2_glock_aspace *gla =
			container_of(gl, struct gfs2_glock_aspace, glock);
		return &gla->mapping;
	}
	return NULL;
}

int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
		   const struct gfs2_glock_operations *glops,
		   int create, struct gfs2_glock **glp);
struct gfs2_glock *gfs2_glock_hold(struct gfs2_glock *gl);
void gfs2_glock_put(struct gfs2_glock *gl);
void gfs2_glock_put_async(struct gfs2_glock *gl);

void __gfs2_holder_init(struct gfs2_glock *gl, unsigned int state,
			u16 flags, struct gfs2_holder *gh,
			unsigned long ip);

static inline void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state,
				    u16 flags, struct gfs2_holder *gh)
{
	__gfs2_holder_init(gl, state, flags, gh, _RET_IP_);
}

void gfs2_holder_reinit(unsigned int state, u16 flags,
			struct gfs2_holder *gh);
void gfs2_holder_uninit(struct gfs2_holder *gh);
int gfs2_glock_nq(struct gfs2_holder *gh);
int gfs2_glock_poll(struct gfs2_holder *gh);
int gfs2_instantiate(struct gfs2_holder *gh);
int gfs2_glock_holder_ready(struct gfs2_holder *gh);
int gfs2_glock_wait(struct gfs2_holder *gh);
int gfs2_glock_async_wait(unsigned int num_gh, struct gfs2_holder *ghs);
void gfs2_glock_dq(struct gfs2_holder *gh);
void gfs2_glock_dq_wait(struct gfs2_holder *gh);
void gfs2_glock_dq_uninit(struct gfs2_holder *gh);
int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
		      const struct gfs2_glock_operations *glops,
		      unsigned int state, u16 flags,
		      struct gfs2_holder *gh);
int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs);
void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs);
void gfs2_dump_glock(struct seq_file *seq, struct gfs2_glock *gl,
		     bool fsid);

#define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) {			\
			gfs2_dump_glock(NULL, gl, true);		\
			BUG(); } } while(0)

#define gfs2_glock_assert_warn(gl, x) do { if (unlikely(!(x))) {	\
			gfs2_dump_glock(NULL, gl, true);		\
			gfs2_assert_warn((gl)->gl_name.ln_sbd, (x)); } } \
	while (0)

#define gfs2_glock_assert_withdraw(gl, x) do { if (unlikely(!(x))) {	\
			gfs2_dump_glock(NULL, gl, true);		\
			gfs2_assert_withdraw((gl)->gl_name.ln_sbd, (x)); } } \
	while (0)

__printf(2, 3)
void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...);

/**
 * gfs2_glock_nq_init - initialize a holder and enqueue it on a glock
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Returns: 0, GLR_*, or errno
 */

static inline int gfs2_glock_nq_init(struct gfs2_glock *gl,
				     unsigned int state, u16 flags,
				     struct gfs2_holder *gh)
{
	int error;

	__gfs2_holder_init(gl, state, flags, gh, _RET_IP_);

	error = gfs2_glock_nq(gh);
	if (error)
		gfs2_holder_uninit(gh);

	return error;
}

void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state);
void gfs2_glock_complete(struct gfs2_glock *gl, int ret);
bool gfs2_queue_try_to_evict(struct gfs2_glock *gl);
bool gfs2_queue_verify_delete(struct gfs2_glock *gl, bool later);
void gfs2_cancel_delete_work(struct gfs2_glock *gl);
void gfs2_flush_delete_work(struct gfs2_sbd *sdp);
void gfs2_gl_hash_clear(struct gfs2_sbd *sdp);
void gfs2_gl_dq_holders(struct gfs2_sbd *sdp);
void gfs2_glock_thaw(struct gfs2_sbd *sdp);
void gfs2_glock_free(struct gfs2_glock *gl);
void gfs2_glock_free_later(struct gfs2_glock *gl);

int __init gfs2_glock_init(void);
void gfs2_glock_exit(void);

void gfs2_create_debugfs_file(struct gfs2_sbd *sdp);
void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp);
void gfs2_register_debugfs(void);
void gfs2_unregister_debugfs(void);

void glock_set_object(struct gfs2_glock *gl, void *object);
void glock_clear_object(struct gfs2_glock *gl, void *object);

extern const struct lm_lockops gfs2_dlm_ops;

static inline void gfs2_holder_mark_uninitialized(struct gfs2_holder *gh)
{
	gh->gh_gl = NULL;
}

static inline bool gfs2_holder_initialized(struct gfs2_holder *gh)
{
	return gh->gh_gl;
}

static inline bool gfs2_holder_queued(struct gfs2_holder *gh)
{
	return !list_empty(&gh->gh_list);
}

void gfs2_inode_remember_delete(struct gfs2_glock *gl, u64 generation);
bool gfs2_inode_already_deleted(struct gfs2_glock *gl, u64 generation);

static inline bool glock_needs_demote(struct gfs2_glock *gl)
{
	return (test_bit(GLF_DEMOTE, &gl->gl_flags) ||
		test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags));
}

#endif /* __GLOCK_DOT_H__ */
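To make the holder API above concrete, a hedged sketch of the usual acquire/release pattern built from the helpers declared in this header (gfs2_glock_nq_init() and gfs2_glock_dq_uninit()) follows. The function name, the use of LM_ST_SHARED with no modifier flags, and the gfs2_inode argument are illustrative assumptions, not code from the GFS2 tree.

/*
 * Hedged sketch: take a glock in SHARED state around a read-side
 * operation, then drop the holder and uninitialize it in one call.
 */
static int example_read_under_glock(struct gfs2_inode *ip)
{
	struct gfs2_holder gh;
	int error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	if (error)
		return error;

	/* ... inspect state protected by the glock here ... */

	gfs2_glock_dq_uninit(&gh);
	return 0;
}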
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 */

#include "queueing.h"
#include "socket.h"
#include "timers.h"
#include "device.h"
#include "ratelimiter.h"
#include "peer.h"
#include "messages.h"

#include <linux/module.h>
#include <linux/rtnetlink.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/if_arp.h>
#include <linux/icmp.h>
#include <linux/suspend.h>
#include <net/dst_metadata.h>
#include <net/gso.h>
#include <net/icmp.h>
#include <net/rtnetlink.h>
#include <net/ip_tunnels.h>
#include <net/addrconf.h>

static LIST_HEAD(device_list);

static int wg_open(struct net_device *dev)
{
	struct in_device *dev_v4 = __in_dev_get_rtnl(dev);
	struct inet6_dev *dev_v6 = __in6_dev_get(dev);
	struct wg_device *wg = netdev_priv(dev);
	struct wg_peer *peer;
	int ret;

	if (dev_v4) {
		/* At some point we might put this check near the ip_rt_send_
		 * redirect call of ip_forward in net/ipv4/ip_forward.c, similar
		 * to the current secpath check.
		 */
		IN_DEV_CONF_SET(dev_v4, SEND_REDIRECTS, false);
		IPV4_DEVCONF_ALL(dev_net(dev), SEND_REDIRECTS) = false;
	}
	if (dev_v6)
		dev_v6->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_NONE;

	mutex_lock(&wg->device_update_lock);
	ret = wg_socket_init(wg, wg->incoming_port);
	if (ret < 0)
		goto out;
	list_for_each_entry(peer, &wg->peer_list, peer_list) {
		wg_packet_send_staged_packets(peer);
		if (peer->persistent_keepalive_interval)
			wg_packet_send_keepalive(peer);
	}
out:
	mutex_unlock(&wg->device_update_lock);
	return ret;
}

static int wg_pm_notification(struct notifier_block *nb, unsigned long action, void *data)
{
	struct wg_device *wg;
	struct wg_peer *peer;

	/* If the machine is constantly suspending and resuming, as part of
	 * its normal operation rather than as a somewhat rare event, then we
	 * don't actually want to clear keys.
	 */
	if (IS_ENABLED(CONFIG_PM_AUTOSLEEP) ||
	    IS_ENABLED(CONFIG_PM_USERSPACE_AUTOSLEEP))
		return 0;

	if (action != PM_HIBERNATION_PREPARE && action != PM_SUSPEND_PREPARE)
		return 0;

	rtnl_lock();
	list_for_each_entry(wg, &device_list, device_list) {
		mutex_lock(&wg->device_update_lock);
		list_for_each_entry(peer, &wg->peer_list, peer_list) {
			timer_delete(&peer->timer_zero_key_material);
			wg_noise_handshake_clear(&peer->handshake);
			wg_noise_keypairs_clear(&peer->keypairs);
		}
		mutex_unlock(&wg->device_update_lock);
	}
	rtnl_unlock();
	rcu_barrier();
	return 0;
}

static struct notifier_block pm_notifier = { .notifier_call = wg_pm_notification };

static int wg_vm_notification(struct notifier_block *nb, unsigned long action, void *data)
{
	struct wg_device *wg;
	struct wg_peer *peer;

	rtnl_lock();
	list_for_each_entry(wg, &device_list, device_list) {
		mutex_lock(&wg->device_update_lock);
		list_for_each_entry(peer, &wg->peer_list, peer_list)
			wg_noise_expire_current_peer_keypairs(peer);
		mutex_unlock(&wg->device_update_lock);
	}
	rtnl_unlock();
	return 0;
}

static struct notifier_block vm_notifier = { .notifier_call = wg_vm_notification };

static int wg_stop(struct net_device *dev)
{
	struct wg_device *wg = netdev_priv(dev);
	struct wg_peer *peer;
	struct sk_buff *skb;

	mutex_lock(&wg->device_update_lock);
	list_for_each_entry(peer, &wg->peer_list, peer_list) {
		wg_packet_purge_staged_packets(peer);
		wg_timers_stop(peer);
		wg_noise_handshake_clear(&peer->handshake);
		wg_noise_keypairs_clear(&peer->keypairs);
		wg_noise_reset_last_sent_handshake(&peer->last_sent_handshake);
	}
	mutex_unlock(&wg->device_update_lock);
	while ((skb = ptr_ring_consume(&wg->handshake_queue.ring)) != NULL)
		kfree_skb(skb);
	atomic_set(&wg->handshake_queue_len, 0);
	wg_socket_reinit(wg, NULL, NULL);
	return 0;
}

static netdev_tx_t wg_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct wg_device *wg = netdev_priv(dev);
	struct sk_buff_head packets;
	struct wg_peer *peer;
	struct sk_buff *next;
	sa_family_t family;
	u32 mtu;
	int ret;

	if (unlikely(!wg_check_packet_protocol(skb))) {
		ret = -EPROTONOSUPPORT;
		net_dbg_ratelimited("%s: Invalid IP packet\n", dev->name);
		goto err;
	}

	peer = wg_allowedips_lookup_dst(&wg->peer_allowedips, skb);
	if (unlikely(!peer)) {
		ret = -ENOKEY;
		if (skb->protocol == htons(ETH_P_IP))
			net_dbg_ratelimited("%s: No peer has allowed IPs matching %pI4\n",
					    dev->name, &ip_hdr(skb)->daddr);
		else if (skb->protocol == htons(ETH_P_IPV6))
			net_dbg_ratelimited("%s: No peer has allowed IPs matching %pI6\n",
					    dev->name, &ipv6_hdr(skb)->daddr);
		goto err_icmp;
	}

	family = READ_ONCE(peer->endpoint.addr.sa_family);
	if (unlikely(family != AF_INET && family != AF_INET6)) {
		ret = -EDESTADDRREQ;
		net_dbg_ratelimited("%s: No valid endpoint has been configured or discovered for peer %llu\n",
				    dev->name, peer->internal_id);
		goto err_peer;
	}

	mtu = skb_valid_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;

	__skb_queue_head_init(&packets);
	if (!skb_is_gso(skb)) {
		skb_mark_not_on_list(skb);
	} else {
		struct sk_buff *segs = skb_gso_segment(skb, 0);

		if (IS_ERR(segs)) {
			ret = PTR_ERR(segs);
			goto err_peer;
		}
		dev_kfree_skb(skb);
		skb = segs;
	}

	skb_list_walk_safe(skb, skb, next) {
		skb_mark_not_on_list(skb);

		skb = skb_share_check(skb, GFP_ATOMIC);
		if (unlikely(!skb))
			continue;

		/* We only need to keep the original dst around for icmp,
		 * so at this point we're in a position to drop it.
		 */
		skb_dst_drop(skb);

		PACKET_CB(skb)->mtu = mtu;

		__skb_queue_tail(&packets, skb);
	}

	spin_lock_bh(&peer->staged_packet_queue.lock);
	/* If the queue is getting too big, we start removing the oldest packets
	 * until it's small again. We do this before adding the new packet, so
	 * we don't remove GSO segments that are in excess.
	 */
	while (skb_queue_len(&peer->staged_packet_queue) > MAX_STAGED_PACKETS) {
		dev_kfree_skb(__skb_dequeue(&peer->staged_packet_queue));
		DEV_STATS_INC(dev, tx_dropped);
	}
	skb_queue_splice_tail(&packets, &peer->staged_packet_queue);
	spin_unlock_bh(&peer->staged_packet_queue.lock);

	wg_packet_send_staged_packets(peer);

	wg_peer_put(peer);
	return NETDEV_TX_OK;

err_peer:
	wg_peer_put(peer);
err_icmp:
	if (skb->protocol == htons(ETH_P_IP))
		icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
	else if (skb->protocol == htons(ETH_P_IPV6))
		icmpv6_ndo_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
err:
	DEV_STATS_INC(dev, tx_errors);
	kfree_skb(skb);
	return ret;
}

static const struct net_device_ops netdev_ops = {
	.ndo_open		= wg_open,
	.ndo_stop		= wg_stop,
	.ndo_start_xmit		= wg_xmit,
};

static void wg_destruct(struct net_device *dev)
{
	struct wg_device *wg = netdev_priv(dev);

	rtnl_lock();
	list_del(&wg->device_list);
	rtnl_unlock();
	mutex_lock(&wg->device_update_lock);
	rcu_assign_pointer(wg->creating_net, NULL);
	wg->incoming_port = 0;
	wg_socket_reinit(wg, NULL, NULL);
	/* The final references are cleared in the below calls to destroy_workqueue. */
	wg_peer_remove_all(wg);
	destroy_workqueue(wg->handshake_receive_wq);
	destroy_workqueue(wg->handshake_send_wq);
	destroy_workqueue(wg->packet_crypt_wq);
	wg_packet_queue_free(&wg->handshake_queue, true);
	wg_packet_queue_free(&wg->decrypt_queue, false);
	wg_packet_queue_free(&wg->encrypt_queue, false);
	rcu_barrier(); /* Wait for all the peers to be actually freed. */
	wg_ratelimiter_uninit();
	memzero_explicit(&wg->static_identity, sizeof(wg->static_identity));
	kvfree(wg->index_hashtable);
	kvfree(wg->peer_hashtable);
	mutex_unlock(&wg->device_update_lock);

	pr_debug("%s: Interface destroyed\n", dev->name);
	free_netdev(dev);
}

static const struct device_type device_type = { .name = KBUILD_MODNAME };

static void wg_setup(struct net_device *dev)
{
	struct wg_device *wg = netdev_priv(dev);
	enum { WG_NETDEV_FEATURES = NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
				    NETIF_F_SG | NETIF_F_GSO |
				    NETIF_F_GSO_SOFTWARE | NETIF_F_HIGHDMA };
	const int overhead = MESSAGE_MINIMUM_LENGTH + sizeof(struct udphdr) +
			     max(sizeof(struct ipv6hdr), sizeof(struct iphdr));

	dev->netdev_ops = &netdev_ops;
	dev->header_ops = &ip_tunnel_header_ops;
	dev->hard_header_len = 0;
	dev->addr_len = 0;
	dev->needed_headroom = DATA_PACKET_HEAD_ROOM;
	dev->needed_tailroom = noise_encrypted_len(MESSAGE_PADDING_MULTIPLE);
	dev->type = ARPHRD_NONE;
	dev->flags = IFF_POINTOPOINT | IFF_NOARP;
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->lltx = true;
	dev->features |= WG_NETDEV_FEATURES;
	dev->hw_features |= WG_NETDEV_FEATURES;
	dev->hw_enc_features |= WG_NETDEV_FEATURES;
	dev->mtu = ETH_DATA_LEN - overhead;
	dev->max_mtu = round_down(INT_MAX, MESSAGE_PADDING_MULTIPLE) - overhead;
	dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;

	SET_NETDEV_DEVTYPE(dev, &device_type);

	/* We need to keep the dst around in case of icmp replies. */
	netif_keep_dst(dev);

	netif_set_tso_max_size(dev, GSO_MAX_SIZE);

	wg->dev = dev;
}

static int wg_newlink(struct net_device *dev,
		      struct rtnl_newlink_params *params,
		      struct netlink_ext_ack *extack)
{
	struct net *link_net = rtnl_newlink_link_net(params);
	struct wg_device *wg = netdev_priv(dev);
	int ret = -ENOMEM;

	rcu_assign_pointer(wg->creating_net, link_net);
	init_rwsem(&wg->static_identity.lock);
	mutex_init(&wg->socket_update_lock);
	mutex_init(&wg->device_update_lock);
	wg_allowedips_init(&wg->peer_allowedips);
	wg_cookie_checker_init(&wg->cookie_checker, wg);
	INIT_LIST_HEAD(&wg->peer_list);
	wg->device_update_gen = 1;

	wg->peer_hashtable = wg_pubkey_hashtable_alloc();
	if (!wg->peer_hashtable)
		return ret;

	wg->index_hashtable = wg_index_hashtable_alloc();
	if (!wg->index_hashtable)
		goto err_free_peer_hashtable;

	wg->handshake_receive_wq = alloc_workqueue("wg-kex-%s",
			WQ_CPU_INTENSIVE | WQ_FREEZABLE, 0, dev->name);
	if (!wg->handshake_receive_wq)
		goto err_free_index_hashtable;

	wg->handshake_send_wq = alloc_workqueue("wg-kex-%s",
			WQ_UNBOUND | WQ_FREEZABLE, 0, dev->name);
	if (!wg->handshake_send_wq)
		goto err_destroy_handshake_receive;

	wg->packet_crypt_wq = alloc_workqueue("wg-crypt-%s",
			WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 0, dev->name);
	if (!wg->packet_crypt_wq)
		goto err_destroy_handshake_send;

	ret = wg_packet_queue_init(&wg->encrypt_queue, wg_packet_encrypt_worker,
				   MAX_QUEUED_PACKETS);
	if (ret < 0)
		goto err_destroy_packet_crypt;

	ret = wg_packet_queue_init(&wg->decrypt_queue, wg_packet_decrypt_worker,
				   MAX_QUEUED_PACKETS);
	if (ret < 0)
		goto err_free_encrypt_queue;

	ret = wg_packet_queue_init(&wg->handshake_queue, wg_packet_handshake_receive_worker,
				   MAX_QUEUED_INCOMING_HANDSHAKES);
	if (ret < 0)
		goto err_free_decrypt_queue;

	ret = wg_ratelimiter_init();
	if (ret < 0)
		goto err_free_handshake_queue;

	dev_set_threaded(dev, true);
	ret = register_netdevice(dev);
	if (ret < 0)
		goto err_uninit_ratelimiter;

	list_add(&wg->device_list, &device_list);

	/* We wait until the end to assign priv_destructor, so that
	 * register_netdevice doesn't call it for us if it fails.
	 */
	dev->priv_destructor = wg_destruct;

	pr_debug("%s: Interface created\n", dev->name);
	return ret;

err_uninit_ratelimiter:
	wg_ratelimiter_uninit();
err_free_handshake_queue:
	wg_packet_queue_free(&wg->handshake_queue, false);
err_free_decrypt_queue:
	wg_packet_queue_free(&wg->decrypt_queue, false);
err_free_encrypt_queue:
	wg_packet_queue_free(&wg->encrypt_queue, false);
err_destroy_packet_crypt:
	destroy_workqueue(wg->packet_crypt_wq);
err_destroy_handshake_send:
	destroy_workqueue(wg->handshake_send_wq);
err_destroy_handshake_receive:
	destroy_workqueue(wg->handshake_receive_wq);
err_free_index_hashtable:
	kvfree(wg->index_hashtable);
err_free_peer_hashtable:
	kvfree(wg->peer_hashtable);
	return ret;
}

static struct rtnl_link_ops link_ops __read_mostly = {
	.kind			= KBUILD_MODNAME,
	.priv_size		= sizeof(struct wg_device),
	.setup			= wg_setup,
	.newlink		= wg_newlink,
};

static void wg_netns_pre_exit(struct net *net)
{
	struct wg_device *wg;
	struct wg_peer *peer;

	rtnl_lock();
	list_for_each_entry(wg, &device_list, device_list) {
		if (rcu_access_pointer(wg->creating_net) == net) {
			pr_debug("%s: Creating namespace exiting\n", wg->dev->name);
			netif_carrier_off(wg->dev);
			mutex_lock(&wg->device_update_lock);
			rcu_assign_pointer(wg->creating_net, NULL);
			wg_socket_reinit(wg, NULL, NULL);
			list_for_each_entry(peer, &wg->peer_list, peer_list)
				wg_socket_clear_peer_endpoint_src(peer);
			mutex_unlock(&wg->device_update_lock);
		}
	}
	rtnl_unlock();
}

static struct pernet_operations pernet_ops = {
	.pre_exit = wg_netns_pre_exit
};

int __init wg_device_init(void)
{
	int ret;

	ret = register_pm_notifier(&pm_notifier);
	if (ret)
		return ret;

	ret = register_random_vmfork_notifier(&vm_notifier);
	if (ret)
		goto error_pm;

	ret = register_pernet_device(&pernet_ops);
	if (ret)
		goto error_vm;

	ret = rtnl_link_register(&link_ops);
	if (ret)
		goto error_pernet;

	return 0;

error_pernet:
	unregister_pernet_device(&pernet_ops);
error_vm:
	unregister_random_vmfork_notifier(&vm_notifier);
error_pm:
	unregister_pm_notifier(&pm_notifier);
	return ret;
}

void wg_device_uninit(void)
{
	rtnl_link_unregister(&link_ops);
	unregister_pernet_device(&pernet_ops);
	unregister_random_vmfork_notifier(&vm_notifier);
	unregister_pm_notifier(&pm_notifier);
	rcu_barrier();
}
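For context on the rtnl_link_ops registered here (kind = KBUILD_MODNAME, i.e. "wireguard"): interfaces are created over the same netlink path that vlan uses above, and then configured with the wg(8) tool. A brief illustrative sketch; the interface name, key path, addresses and port are assumptions for the example only.

/*
 * Illustrative userspace usage sketch:
 *
 *	ip link add dev wg0 type wireguard	# ends up in wg_newlink()
 *	wg set wg0 listen-port 51820 private-key /etc/wireguard/private.key
 *	ip addr add 10.0.0.1/24 dev wg0
 *	ip link set wg0 up			# wg_open()
 *	ip link del wg0				# wg_destruct() via priv_destructor
 */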
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  event_inode.c - part of tracefs, a pseudo file system for activating tracing
 *
 *  Copyright (C) 2020-23 VMware Inc, author: Steven Rostedt <rostedt@goodmis.org>
 *  Copyright (C) 2020-23 VMware Inc, author: Ajay Kaher <akaher@vmware.com>
 *  Copyright (C) 2023 Google, author: Steven Rostedt <rostedt@goodmis.org>
 *
 *  eventfs is used to dynamically create inodes and dentries based on the
 *  meta data provided by the tracing system.
 *
 *  eventfs stores the meta-data of files/dirs and holds off on creating
 *  inodes/dentries of the files. When accessed, the eventfs will create the
 *  inodes/dentries in a just-in-time (JIT) manner. The eventfs will clean up
 *  and delete the inodes/dentries when they are no longer referenced.
 */
#include <linux/fsnotify.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/tracefs.h>
#include <linux/kref.h>
#include <linux/delay.h>

#include "internal.h"

/*
 * eventfs_mutex protects the eventfs_inode (ei) dentry. Any access
 * to the ei->dentry must be done under this mutex and after checking
 * if ei->is_freed is not set. When ei->is_freed is set, the dentry
 * is on its way to being freed after the last dput() is made on it.
 */
static DEFINE_MUTEX(eventfs_mutex);

/* Choose something "unique" ;-) */
#define EVENTFS_FILE_INODE_INO		0x12c4e37

struct eventfs_root_inode {
	struct eventfs_inode		ei;
	struct dentry			*events_dir;
};

static struct eventfs_root_inode *get_root_inode(struct eventfs_inode *ei)
{
	WARN_ON_ONCE(!ei->is_events);
	return container_of(ei, struct eventfs_root_inode, ei);
}

/* Just try to make something consistent and unique */
static int eventfs_dir_ino(struct eventfs_inode *ei)
{
	if (!ei->ino) {
		ei->ino = get_next_ino();
		/* Must not have the file inode number */
		if (ei->ino == EVENTFS_FILE_INODE_INO)
			ei->ino = get_next_ino();
	}

	return ei->ino;
}

/*
 * The eventfs_inode (ei) itself is protected by SRCU. It is released from
 * its parent's list and will have is_freed set (under eventfs_mutex).
 * After the SRCU grace period is over and the last dput() is called
 * the ei is freed.
 */
DEFINE_STATIC_SRCU(eventfs_srcu);

/* Mode is unsigned short, use the upper bits for flags */
enum {
	EVENTFS_SAVE_MODE	= BIT(16),
	EVENTFS_SAVE_UID	= BIT(17),
	EVENTFS_SAVE_GID	= BIT(18),
};

#define EVENTFS_MODE_MASK	(EVENTFS_SAVE_MODE - 1)

static void free_ei_rcu(struct rcu_head *rcu)
{
	struct eventfs_inode *ei = container_of(rcu, struct eventfs_inode, rcu);
	struct eventfs_root_inode *rei;

	kfree(ei->entry_attrs);
	kfree_const(ei->name);
	if (ei->is_events) {
		rei = get_root_inode(ei);
		kfree(rei);
	} else {
		kfree(ei);
	}
}

/*
 * eventfs_inode reference count management.
 *
 * NOTE! We count only references from dentries, in the
 * form 'dentry->d_fsdata'. There are also references from
 * directory inodes ('ti->private'), but the dentry reference
 * count is always a superset of the inode reference count.
 */
static void release_ei(struct kref *ref)
{
	struct eventfs_inode *ei = container_of(ref, struct eventfs_inode, kref);
	const struct eventfs_entry *entry;

	WARN_ON_ONCE(!ei->is_freed);

	for (int i = 0; i < ei->nr_entries; i++) {
		entry = &ei->entries[i];
		if (entry->release)
			entry->release(entry->name, ei->data);
	}

	call_srcu(&eventfs_srcu, &ei->rcu, free_ei_rcu);
}

static inline void put_ei(struct eventfs_inode *ei)
{
	if (ei)
		kref_put(&ei->kref, release_ei);
}

static inline void free_ei(struct eventfs_inode *ei)
{
	if (ei) {
		ei->is_freed = 1;
		put_ei(ei);
	}
}

/*
 * Called when creation of an ei fails, do not call release() functions.
 */
static inline void cleanup_ei(struct eventfs_inode *ei)
{
	if (ei) {
		/* Set nr_entries to 0 to prevent release() function being called */
		ei->nr_entries = 0;
		free_ei(ei);
	}
}

static inline struct eventfs_inode *get_ei(struct eventfs_inode *ei)
{
	if (ei)
		kref_get(&ei->kref);
	return ei;
}

static struct dentry *eventfs_root_lookup(struct inode *dir,
					  struct dentry *dentry,
					  unsigned int flags);
static int eventfs_iterate(struct file *file, struct dir_context *ctx);

static void update_attr(struct eventfs_attr *attr, struct iattr *iattr)
{
	unsigned int ia_valid = iattr->ia_valid;

	if (ia_valid & ATTR_MODE) {
		attr->mode = (attr->mode & ~EVENTFS_MODE_MASK) |
			(iattr->ia_mode & EVENTFS_MODE_MASK) |
			EVENTFS_SAVE_MODE;
	}
	if (ia_valid & ATTR_UID) {
		attr->mode |= EVENTFS_SAVE_UID;
		attr->uid = iattr->ia_uid;
	}
	if (ia_valid & ATTR_GID) {
		attr->mode |= EVENTFS_SAVE_GID;
		attr->gid = iattr->ia_gid;
	}
}

static int eventfs_set_attr(struct mnt_idmap *idmap, struct dentry *dentry,
			    struct iattr *iattr)
{
	const struct eventfs_entry *entry;
	struct eventfs_inode *ei;
	const char *name;
	int ret;

	mutex_lock(&eventfs_mutex);
	ei = dentry->d_fsdata;
	if (ei->is_freed) {
		/* Do not allow changes if the event is about to be removed. */
		mutex_unlock(&eventfs_mutex);
		return -ENODEV;
	}

	/* Preallocate the children mode array if necessary */
	if (!(dentry->d_inode->i_mode & S_IFDIR)) {
		if (!ei->entry_attrs) {
			ei->entry_attrs = kcalloc(ei->nr_entries, sizeof(*ei->entry_attrs),
						  GFP_NOFS);
			if (!ei->entry_attrs) {
				ret = -ENOMEM;
				goto out;
			}
		}
	}

	ret = simple_setattr(idmap, dentry, iattr);
	if (ret < 0)
		goto out;

	/*
	 * If this is a dir, then update the ei cache, only the file
	 * mode is saved in the ei->m_children, and the ownership is
	 * determined by the parent directory.
	 */
	if (dentry->d_inode->i_mode & S_IFDIR) {
		/* Just use the inode permissions for the events directory */
		if (!ei->is_events)
			update_attr(&ei->attr, iattr);

	} else {
		name = dentry->d_name.name;

		for (int i = 0; i < ei->nr_entries; i++) {
			entry = &ei->entries[i];
			if (strcmp(name, entry->name) == 0) {
				update_attr(&ei->entry_attrs[i], iattr);
				break;
			}
		}
	}
 out:
	mutex_unlock(&eventfs_mutex);
	return ret;
}

static const struct inode_operations eventfs_dir_inode_operations = {
	.lookup		= eventfs_root_lookup,
	.setattr	= eventfs_set_attr,
};

static const struct inode_operations eventfs_file_inode_operations = {
	.setattr	= eventfs_set_attr,
};

static const struct file_operations eventfs_file_operations = {
	.read		= generic_read_dir,
	.iterate_shared	= eventfs_iterate,
	.llseek		= generic_file_llseek,
};

static void eventfs_set_attrs(struct eventfs_inode *ei, bool update_uid, kuid_t uid,
			      bool update_gid, kgid_t gid, int level)
{
	struct eventfs_inode *ei_child;

	/* Update events/<system>/<event> */
	if (WARN_ON_ONCE(level > 3))
		return;

	if (update_uid) {
		ei->attr.mode &= ~EVENTFS_SAVE_UID;
		ei->attr.uid = uid;
	}

	if (update_gid) {
		ei->attr.mode &= ~EVENTFS_SAVE_GID;
		ei->attr.gid = gid;
	}

	list_for_each_entry(ei_child, &ei->children, list) {
		eventfs_set_attrs(ei_child, update_uid, uid, update_gid, gid, level + 1);
	}

	if (!ei->entry_attrs)
		return;

	for (int i = 0; i < ei->nr_entries; i++) {
		if (update_uid) {
			ei->entry_attrs[i].mode &= ~EVENTFS_SAVE_UID;
			ei->entry_attrs[i].uid = uid;
		}
		if (update_gid) {
			ei->entry_attrs[i].mode &= ~EVENTFS_SAVE_GID;
			ei->entry_attrs[i].gid = gid;
		}
	}
}

/*
 * On a remount of tracefs, if UID or GID options are set, then
 * the mount point inode permissions should be used.
 * Reset the saved permission flags appropriately.
 */
void eventfs_remount(struct tracefs_inode *ti, bool update_uid, bool update_gid)
{
	struct eventfs_inode *ei = ti->private;

	/* Only the events directory does the updates */
	if (!ei || !ei->is_events || ei->is_freed)
		return;

	eventfs_set_attrs(ei, update_uid, ti->vfs_inode.i_uid,
			  update_gid, ti->vfs_inode.i_gid, 0);
}

static void update_inode_attr(struct inode *inode, umode_t mode,
			      struct eventfs_attr *attr, struct eventfs_root_inode *rei)
{
	if (attr && attr->mode & EVENTFS_SAVE_MODE)
		inode->i_mode = attr->mode & EVENTFS_MODE_MASK;
	else
		inode->i_mode = mode;

	if (attr && attr->mode & EVENTFS_SAVE_UID)
		inode->i_uid = attr->uid;
	else
		inode->i_uid = rei->ei.attr.uid;

	if (attr && attr->mode & EVENTFS_SAVE_GID)
		inode->i_gid = attr->gid;
	else
		inode->i_gid = rei->ei.attr.gid;
}

static struct inode *eventfs_get_inode(struct dentry *dentry, struct eventfs_attr *attr,
				       umode_t mode, struct eventfs_inode *ei)
{
	struct eventfs_root_inode *rei;
	struct eventfs_inode *pei;
	struct tracefs_inode *ti;
	struct inode *inode;

	inode = tracefs_get_inode(dentry->d_sb);
	if (!inode)
		return NULL;

	ti = get_tracefs(inode);
	ti->private = ei;
	ti->flags |= TRACEFS_EVENT_INODE;

	/* Find the top dentry that holds the "events" directory */
	do {
		dentry = dentry->d_parent;
		/* Directories always have d_fsdata */
		pei = dentry->d_fsdata;
	} while (!pei->is_events);

	rei = get_root_inode(pei);

	update_inode_attr(inode, mode, attr, rei);

	return inode;
}

/**
 * lookup_file - look up a file in the tracefs filesystem
 * @parent_ei: Pointer to the eventfs_inode that represents parent of the file
 * @dentry: the dentry to look up
 * @mode: the permission that the file should have.
 * @attr: saved attributes changed by user
 * @data: something that the caller will want to get to later on.
* @fop: struct file_operations that should be used for this file. * * This function creates a dentry that represents a file in the eventsfs_inode * directory. The inode.i_private pointer will point to @data in the open() * call. */ static struct dentry *lookup_file(struct eventfs_inode *parent_ei, struct dentry *dentry, umode_t mode, struct eventfs_attr *attr, void *data, const struct file_operations *fop) { struct inode *inode; if (!(mode & S_IFMT)) mode |= S_IFREG; if (WARN_ON_ONCE(!S_ISREG(mode))) return ERR_PTR(-EIO); /* Only directories have ti->private set to an ei, not files */ inode = eventfs_get_inode(dentry, attr, mode, NULL); if (unlikely(!inode)) return ERR_PTR(-ENOMEM); inode->i_op = &eventfs_file_inode_operations; inode->i_fop = fop; inode->i_private = data; /* All files will have the same inode number */ inode->i_ino = EVENTFS_FILE_INODE_INO; // Files have their parent's ei as their fsdata dentry->d_fsdata = get_ei(parent_ei); d_add(dentry, inode); return NULL; }; /** * lookup_dir_entry - look up a dir in the tracefs filesystem * @dentry: the directory to look up * @pei: Pointer to the parent eventfs_inode if available * @ei: the eventfs_inode that represents the directory to create * * This function will look up a dentry for a directory represented by * a eventfs_inode. */ static struct dentry *lookup_dir_entry(struct dentry *dentry, struct eventfs_inode *pei, struct eventfs_inode *ei) { struct inode *inode; umode_t mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO; inode = eventfs_get_inode(dentry, &ei->attr, mode, ei); if (unlikely(!inode)) return ERR_PTR(-ENOMEM); inode->i_op = &eventfs_dir_inode_operations; inode->i_fop = &eventfs_file_operations; /* All directories will have the same inode number */ inode->i_ino = eventfs_dir_ino(ei); dentry->d_fsdata = get_ei(ei); d_add(dentry, inode); return NULL; } static inline struct eventfs_inode *init_ei(struct eventfs_inode *ei, const char *name) { ei->name = kstrdup_const(name, GFP_KERNEL); if (!ei->name) return NULL; kref_init(&ei->kref); return ei; } static inline struct eventfs_inode *alloc_ei(const char *name) { struct eventfs_inode *ei = kzalloc(sizeof(*ei), GFP_KERNEL); struct eventfs_inode *result; if (!ei) return NULL; result = init_ei(ei, name); if (!result) kfree(ei); return result; } static inline struct eventfs_inode *alloc_root_ei(const char *name) { struct eventfs_root_inode *rei = kzalloc(sizeof(*rei), GFP_KERNEL); struct eventfs_inode *ei; if (!rei) return NULL; rei->ei.is_events = 1; ei = init_ei(&rei->ei, name); if (!ei) kfree(rei); return ei; } /** * eventfs_d_release - dentry is going away * @dentry: dentry which has the reference to remove. * * Remove the association between a dentry from an eventfs_inode. */ void eventfs_d_release(struct dentry *dentry) { put_ei(dentry->d_fsdata); } /** * lookup_file_dentry - create a dentry for a file of an eventfs_inode * @dentry: The parent dentry under which the new file's dentry will be created * @ei: the eventfs_inode that the file will be created under * @idx: the index into the entry_attrs[] of the @ei * @mode: The mode of the file. * @data: The data to use to set the inode of the file with on open() * @fops: The fops of the file to be created. * * This function creates a dentry for a file associated with an * eventfs_inode @ei. It uses the entry attributes specified by @idx, * if available. The file will have the specified @mode and its inode will be * set up with @data upon open. The file operations will be set to @fops. 
* * Return: Returns a pointer to the newly created file's dentry or an error * pointer. */ static struct dentry * lookup_file_dentry(struct dentry *dentry, struct eventfs_inode *ei, int idx, umode_t mode, void *data, const struct file_operations *fops) { struct eventfs_attr *attr = NULL; if (ei->entry_attrs) attr = &ei->entry_attrs[idx]; return lookup_file(ei, dentry, mode, attr, data, fops); } /** * eventfs_root_lookup - lookup routine to create file/dir * @dir: in which a lookup is being done * @dentry: file/dir dentry * @flags: Just passed to simple_lookup() * * Used to create dynamic file/dir with-in @dir, search with-in @ei * list, if @dentry found go ahead and create the file/dir */ static struct dentry *eventfs_root_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) { struct eventfs_inode *ei_child; struct tracefs_inode *ti; struct eventfs_inode *ei; const char *name = dentry->d_name.name; struct dentry *result = NULL; ti = get_tracefs(dir); if (WARN_ON_ONCE(!(ti->flags & TRACEFS_EVENT_INODE))) return ERR_PTR(-EIO); mutex_lock(&eventfs_mutex); ei = ti->private; if (!ei || ei->is_freed) goto out; list_for_each_entry(ei_child, &ei->children, list) { if (strcmp(ei_child->name, name) != 0) continue; /* A child is freed and removed from the list at the same time */ if (WARN_ON_ONCE(ei_child->is_freed)) goto out; result = lookup_dir_entry(dentry, ei, ei_child); goto out; } for (int i = 0; i < ei->nr_entries; i++) { void *data; umode_t mode; const struct file_operations *fops; const struct eventfs_entry *entry = &ei->entries[i]; if (strcmp(name, entry->name) != 0) continue; data = ei->data; if (entry->callback(name, &mode, &data, &fops) <= 0) goto out; result = lookup_file_dentry(dentry, ei, i, mode, data, fops); goto out; } out: mutex_unlock(&eventfs_mutex); return result; } /* * Walk the children of a eventfs_inode to fill in getdents(). */ static int eventfs_iterate(struct file *file, struct dir_context *ctx) { const struct file_operations *fops; struct inode *f_inode = file_inode(file); const struct eventfs_entry *entry; struct eventfs_inode *ei_child; struct tracefs_inode *ti; struct eventfs_inode *ei; const char *name; umode_t mode; int idx; int ret = -EINVAL; int ino; int i, r, c; if (!dir_emit_dots(file, ctx)) return 0; ti = get_tracefs(f_inode); if (!(ti->flags & TRACEFS_EVENT_INODE)) return -EINVAL; c = ctx->pos - 2; idx = srcu_read_lock(&eventfs_srcu); mutex_lock(&eventfs_mutex); ei = READ_ONCE(ti->private); if (ei && ei->is_freed) ei = NULL; mutex_unlock(&eventfs_mutex); if (!ei) goto out; /* * Need to create the dentries and inodes to have a consistent * inode number. 
*/ ret = 0; /* Start at 'c' to jump over already read entries */ for (i = c; i < ei->nr_entries; i++, ctx->pos++) { void *cdata = ei->data; entry = &ei->entries[i]; name = entry->name; mutex_lock(&eventfs_mutex); /* If ei->is_freed then just bail here, nothing more to do */ if (ei->is_freed) { mutex_unlock(&eventfs_mutex); goto out; } r = entry->callback(name, &mode, &cdata, &fops); mutex_unlock(&eventfs_mutex); if (r <= 0) continue; ino = EVENTFS_FILE_INODE_INO; if (!dir_emit(ctx, name, strlen(name), ino, DT_REG)) goto out; } /* Subtract the skipped entries above */ c -= min((unsigned int)c, (unsigned int)ei->nr_entries); list_for_each_entry_srcu(ei_child, &ei->children, list, srcu_read_lock_held(&eventfs_srcu)) { if (c > 0) { c--; continue; } ctx->pos++; if (ei_child->is_freed) continue; name = ei_child->name; ino = eventfs_dir_ino(ei_child); if (!dir_emit(ctx, name, strlen(name), ino, DT_DIR)) goto out_dec; } ret = 1; out: srcu_read_unlock(&eventfs_srcu, idx); return ret; out_dec: /* Incremented ctx->pos without adding something, reset it */ ctx->pos--; goto out; } /** * eventfs_create_dir - Create the eventfs_inode for this directory * @name: The name of the directory to create. * @parent: The eventfs_inode of the parent directory. * @entries: A list of entries that represent the files under this directory * @size: The number of @entries * @data: The default data to pass to the files (an entry may override it). * * This function creates the descriptor to represent a directory in the * eventfs. This descriptor is an eventfs_inode, and it is returned to be * used to create other children underneath. * * The @entries is an array of eventfs_entry structures which has: * const char *name * eventfs_callback callback; * * The name is the name of the file, and the callback is a pointer to a function * that will be called when the file is reference (either by lookup or by * reading a directory). The callback is of the prototype: * * int callback(const char *name, umode_t *mode, void **data, * const struct file_operations **fops); * * When a file needs to be created, this callback will be called with * name = the name of the file being created (so that the same callback * may be used for multiple files). * mode = a place to set the file's mode * data = A pointer to @data, and the callback may replace it, which will * cause the file created to pass the new data to the open() call. * fops = the fops to use for the created file. * * NB. @callback is called while holding internal locks of the eventfs * system. The callback must not call any code that might also call into * the tracefs or eventfs system or it will risk creating a deadlock. */ struct eventfs_inode *eventfs_create_dir(const char *name, struct eventfs_inode *parent, const struct eventfs_entry *entries, int size, void *data) { struct eventfs_inode *ei; if (!parent) return ERR_PTR(-EINVAL); ei = alloc_ei(name); if (!ei) return ERR_PTR(-ENOMEM); ei->entries = entries; ei->nr_entries = size; ei->data = data; INIT_LIST_HEAD(&ei->children); INIT_LIST_HEAD(&ei->list); mutex_lock(&eventfs_mutex); if (!parent->is_freed) list_add_tail(&ei->list, &parent->children); mutex_unlock(&eventfs_mutex); /* Was the parent freed? */ if (list_empty(&ei->list)) { cleanup_ei(ei); ei = ERR_PTR(-EBUSY); } return ei; } /** * eventfs_create_events_dir - create the top level events directory * @name: The name of the top level directory to create. * @parent: Parent dentry for this file in the tracefs directory. 
* @entries: A list of entries that represent the files under this directory * @size: The number of @entries * @data: The default data to pass to the files (an entry may override it). * * This function creates the top of the trace event directory. * * See eventfs_create_dir() for use of @entries. */ struct eventfs_inode *eventfs_create_events_dir(const char *name, struct dentry *parent, const struct eventfs_entry *entries, int size, void *data) { struct dentry *dentry = tracefs_start_creating(name, parent); struct eventfs_root_inode *rei; struct eventfs_inode *ei; struct tracefs_inode *ti; struct inode *inode; kuid_t uid; kgid_t gid; if (security_locked_down(LOCKDOWN_TRACEFS)) return NULL; if (IS_ERR(dentry)) return ERR_CAST(dentry); ei = alloc_root_ei(name); if (!ei) goto fail; inode = tracefs_get_inode(dentry->d_sb); if (unlikely(!inode)) goto fail; // Note: we have a ref to the dentry from tracefs_start_creating() rei = get_root_inode(ei); rei->events_dir = dentry; ei->entries = entries; ei->nr_entries = size; ei->data = data; /* Save the ownership of this directory */ uid = d_inode(dentry->d_parent)->i_uid; gid = d_inode(dentry->d_parent)->i_gid; /* * The ei->attr will be used as the default values for the * files beneath this directory. */ ei->attr.uid = uid; ei->attr.gid = gid; INIT_LIST_HEAD(&ei->children); INIT_LIST_HEAD(&ei->list); ti = get_tracefs(inode); ti->flags |= TRACEFS_EVENT_INODE; ti->private = ei; inode->i_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO; inode->i_uid = uid; inode->i_gid = gid; inode->i_op = &eventfs_dir_inode_operations; inode->i_fop = &eventfs_file_operations; dentry->d_fsdata = get_ei(ei); /* * Keep all eventfs directories with i_nlink == 1. * Due to the dynamic nature of the dentry creations and not * wanting to add a pointer to the parent eventfs_inode in the * eventfs_inode structure, keeping the i_nlink in sync with the * number of directories would cause too much complexity for * something not worth much. Keeping directory links at 1 * tells userspace not to trust the link number. */ d_instantiate(dentry, inode); /* The dentry of the "events" parent does keep track though */ inc_nlink(dentry->d_parent->d_inode); fsnotify_mkdir(dentry->d_parent->d_inode, dentry); tracefs_end_creating(dentry); return ei; fail: cleanup_ei(ei); tracefs_failed_creating(dentry); return ERR_PTR(-ENOMEM); } /** * eventfs_remove_rec - remove eventfs dir or file from list * @ei: eventfs_inode to be removed. * @level: prevent recursion from going more than 3 levels deep. * * This function recursively removes eventfs_inodes which * contains info of files and/or directories. */ static void eventfs_remove_rec(struct eventfs_inode *ei, int level) { struct eventfs_inode *ei_child; /* * Check recursion depth. It should never be greater than 3: * 0 - events/ * 1 - events/group/ * 2 - events/group/event/ * 3 - events/group/event/file */ if (WARN_ON_ONCE(level > 3)) return; /* search for nested folders or files */ list_for_each_entry(ei_child, &ei->children, list) eventfs_remove_rec(ei_child, level + 1); list_del_rcu(&ei->list); free_ei(ei); } /** * eventfs_remove_dir - remove eventfs dir or file from list * @ei: eventfs_inode to be removed. 
* * This function acquire the eventfs_mutex lock and call eventfs_remove_rec() */ void eventfs_remove_dir(struct eventfs_inode *ei) { if (!ei) return; mutex_lock(&eventfs_mutex); eventfs_remove_rec(ei, 0); mutex_unlock(&eventfs_mutex); } /** * eventfs_remove_events_dir - remove the top level eventfs directory * @ei: the event_inode returned by eventfs_create_events_dir(). * * This function removes the events main directory */ void eventfs_remove_events_dir(struct eventfs_inode *ei) { struct eventfs_root_inode *rei; struct dentry *dentry; rei = get_root_inode(ei); dentry = rei->events_dir; if (!dentry) return; rei->events_dir = NULL; eventfs_remove_dir(ei); /* * Matches the dget() done by tracefs_start_creating() * in eventfs_create_events_dir() when it the dentry was * created. In other words, it's a normal dentry that * sticks around while the other ei->dentry are created * and destroyed dynamically. */ d_invalidate(dentry); dput(dentry); } |
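/*
 * Illustrative sketch (not part of event_inode.c above): one way a tracing
 * subsystem could consume the eventfs API shown there.  Only
 * eventfs_create_events_dir(), eventfs_create_dir(), the eventfs_entry
 * name/callback pairing and the callback prototype are taken from the code
 * above; the "my_*" identifiers, the 0644 mode and the trivial read handler
 * are hypothetical, and error handling is reduced to the minimum.
 */
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/tracefs.h>

static ssize_t my_enable_read(struct file *file, char __user *ubuf,
			      size_t count, loff_t *ppos)
{
	/* file_inode(file)->i_private holds the @data chosen by the callback */
	return 0;
}

static const struct file_operations my_enable_fops = {
	.read	= my_enable_read,
};

/* Called under eventfs internal locks when "enable" is looked up or listed */
static int my_enable_callback(const char *name, umode_t *mode, void **data,
			      const struct file_operations **fops)
{
	*mode = 0644;
	*fops = &my_enable_fops;
	/* *data keeps the default passed to eventfs_create_dir() below */
	return 1;	/* > 0 means "create this file" */
}

static const struct eventfs_entry my_group_entries[] = {
	{ .name = "enable", .callback = my_enable_callback },
};

static int my_subsys_register(struct dentry *tracefs_parent, void *data)
{
	struct eventfs_inode *events, *group;

	/* NULL is returned when tracefs is locked down */
	events = eventfs_create_events_dir("events", tracefs_parent,
					   NULL, 0, data);
	if (IS_ERR_OR_NULL(events))
		return events ? PTR_ERR(events) : -ENODEV;

	group = eventfs_create_dir("my_group", events, my_group_entries,
				   ARRAY_SIZE(my_group_entries), data);
	return PTR_ERR_OR_ZERO(group);
}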
26 26 26 26 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 | // SPDX-License-Identifier: GPL-2.0-or-later /* handling of writes to regular files and writing back to the server * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) */ #include <linux/backing-dev.h> #include <linux/slab.h> #include <linux/fs.h> #include <linux/pagemap.h> #include <linux/writeback.h> #include <linux/pagevec.h> #include <linux/netfs.h> #include <trace/events/netfs.h> #include "internal.h" /* * completion of write to server */ static void afs_pages_written_back(struct afs_vnode *vnode, loff_t start, unsigned int len) { _enter("{%llx:%llu},{%x @%llx}", vnode->fid.vid, vnode->fid.vnode, len, start); afs_prune_wb_keys(vnode); _leave(""); } /* * Find a key to use for the writeback. We cached the keys used to author the * writes on the vnode. wreq->netfs_priv2 will contain the last writeback key * record used or NULL and we need to start from there if it's set. * wreq->netfs_priv will be set to the key itself or NULL. */ static void afs_get_writeback_key(struct netfs_io_request *wreq) { struct afs_wb_key *wbk, *old = wreq->netfs_priv2; struct afs_vnode *vnode = AFS_FS_I(wreq->inode); key_put(wreq->netfs_priv); wreq->netfs_priv = NULL; wreq->netfs_priv2 = NULL; spin_lock(&vnode->wb_lock); if (old) wbk = list_next_entry(old, vnode_link); else wbk = list_first_entry(&vnode->wb_keys, struct afs_wb_key, vnode_link); list_for_each_entry_from(wbk, &vnode->wb_keys, vnode_link) { _debug("wbk %u", key_serial(wbk->key)); if (key_validate(wbk->key) == 0) { refcount_inc(&wbk->usage); wreq->netfs_priv = key_get(wbk->key); wreq->netfs_priv2 = wbk; _debug("USE WB KEY %u", key_serial(wbk->key)); break; } } spin_unlock(&vnode->wb_lock); afs_put_wb_key(old); } static void afs_store_data_success(struct afs_operation *op) { struct afs_vnode *vnode = op->file[0].vnode; op->ctime = op->file[0].scb.status.mtime_client; afs_vnode_commit_status(op, &op->file[0]); if (!afs_op_error(op)) { afs_pages_written_back(vnode, op->store.pos, op->store.size); afs_stat_v(vnode, n_stores); atomic_long_add(op->store.size, &afs_v2net(vnode)->n_store_bytes); } } static const struct afs_operation_ops afs_store_data_operation = { .issue_afs_rpc = afs_fs_store_data, .issue_yfs_rpc = yfs_fs_store_data, .success = afs_store_data_success, }; /* * Prepare a subrequest to write to the server. This sets the max_len * parameter. 
*/ void afs_prepare_write(struct netfs_io_subrequest *subreq) { struct netfs_io_stream *stream = &subreq->rreq->io_streams[subreq->stream_nr]; //if (test_bit(NETFS_SREQ_RETRYING, &subreq->flags)) // subreq->max_len = 512 * 1024; //else stream->sreq_max_len = 256 * 1024 * 1024; } /* * Issue a subrequest to write to the server. */ static void afs_issue_write_worker(struct work_struct *work) { struct netfs_io_subrequest *subreq = container_of(work, struct netfs_io_subrequest, work); struct netfs_io_request *wreq = subreq->rreq; struct afs_operation *op; struct afs_vnode *vnode = AFS_FS_I(wreq->inode); unsigned long long pos = subreq->start + subreq->transferred; size_t len = subreq->len - subreq->transferred; int ret = -ENOKEY; _enter("R=%x[%x],%s{%llx:%llu.%u},%llx,%zx", wreq->debug_id, subreq->debug_index, vnode->volume->name, vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique, pos, len); #if 0 // Error injection if (subreq->debug_index == 3) return netfs_write_subrequest_terminated(subreq, -ENOANO); if (!subreq->retry_count) { set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags); return netfs_write_subrequest_terminated(subreq, -EAGAIN); } #endif op = afs_alloc_operation(wreq->netfs_priv, vnode->volume); if (IS_ERR(op)) return netfs_write_subrequest_terminated(subreq, -EAGAIN); afs_op_set_vnode(op, 0, vnode); op->file[0].dv_delta = 1; op->file[0].modification = true; op->store.pos = pos; op->store.size = len; op->flags |= AFS_OPERATION_UNINTR; op->ops = &afs_store_data_operation; afs_begin_vnode_operation(op); op->store.write_iter = &subreq->io_iter; op->store.i_size = umax(pos + len, vnode->netfs.remote_i_size); op->mtime = inode_get_mtime(&vnode->netfs.inode); afs_wait_for_operation(op); ret = afs_put_operation(op); switch (ret) { case 0: __set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags); break; case -EACCES: case -EPERM: case -ENOKEY: case -EKEYEXPIRED: case -EKEYREJECTED: case -EKEYREVOKED: /* If there are more keys we can try, use the retry algorithm * to rotate the keys. */ if (wreq->netfs_priv2) set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags); break; } netfs_write_subrequest_terminated(subreq, ret < 0 ? ret : subreq->len); } void afs_issue_write(struct netfs_io_subrequest *subreq) { subreq->work.func = afs_issue_write_worker; if (!queue_work(system_unbound_wq, &subreq->work)) WARN_ON_ONCE(1); } /* * Writeback calls this when it finds a folio that needs uploading. This isn't * called if writeback only has copy-to-cache to deal with. */ void afs_begin_writeback(struct netfs_io_request *wreq) { if (S_ISREG(wreq->inode->i_mode)) afs_get_writeback_key(wreq); } /* * Prepare to retry the writes in request. Use this to try rotating the * available writeback keys. 
*/ void afs_retry_request(struct netfs_io_request *wreq, struct netfs_io_stream *stream) { struct netfs_io_subrequest *subreq = list_first_entry(&stream->subrequests, struct netfs_io_subrequest, rreq_link); switch (wreq->origin) { case NETFS_READAHEAD: case NETFS_READPAGE: case NETFS_READ_GAPS: case NETFS_READ_SINGLE: case NETFS_READ_FOR_WRITE: case NETFS_UNBUFFERED_READ: case NETFS_DIO_READ: return; default: break; } switch (subreq->error) { case -EACCES: case -EPERM: case -ENOKEY: case -EKEYEXPIRED: case -EKEYREJECTED: case -EKEYREVOKED: afs_get_writeback_key(wreq); if (!wreq->netfs_priv) stream->failed = true; break; } } /* * write some of the pending data back to the server */ int afs_writepages(struct address_space *mapping, struct writeback_control *wbc) { struct afs_vnode *vnode = AFS_FS_I(mapping->host); int ret; /* We have to be careful as we can end up racing with setattr() * truncating the pagecache since the caller doesn't take a lock here * to prevent it. */ if (wbc->sync_mode == WB_SYNC_ALL) down_read(&vnode->validate_lock); else if (!down_read_trylock(&vnode->validate_lock)) return 0; ret = netfs_writepages(mapping, wbc); up_read(&vnode->validate_lock); return ret; } /* * flush any dirty pages for this process, and check for write errors. * - the return status from this call provides a reliable indication of * whether any write errors occurred for this process. */ int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync) { struct afs_vnode *vnode = AFS_FS_I(file_inode(file)); struct afs_file *af = file->private_data; int ret; _enter("{%llx:%llu},{n=%pD},%d", vnode->fid.vid, vnode->fid.vnode, file, datasync); ret = afs_validate(vnode, af->key); if (ret < 0) return ret; return file_write_and_wait_range(file, start, end); } /* * notification that a previously read-only page is about to become writable * - if it returns an error, the caller will deliver a bus error signal */ vm_fault_t afs_page_mkwrite(struct vm_fault *vmf) { struct file *file = vmf->vma->vm_file; if (afs_validate(AFS_FS_I(file_inode(file)), afs_file_key(file)) < 0) return VM_FAULT_SIGBUS; return netfs_page_mkwrite(vmf, NULL); } /* * Prune the keys cached for writeback. The caller must hold vnode->wb_lock. */ void afs_prune_wb_keys(struct afs_vnode *vnode) { LIST_HEAD(graveyard); struct afs_wb_key *wbk, *tmp; /* Discard unused keys */ spin_lock(&vnode->wb_lock); if (!mapping_tagged(&vnode->netfs.inode.i_data, PAGECACHE_TAG_WRITEBACK) && !mapping_tagged(&vnode->netfs.inode.i_data, PAGECACHE_TAG_DIRTY)) { list_for_each_entry_safe(wbk, tmp, &vnode->wb_keys, vnode_link) { if (refcount_read(&wbk->usage) == 1) list_move(&wbk->vnode_link, &graveyard); } } spin_unlock(&vnode->wb_lock); while (!list_empty(&graveyard)) { wbk = list_entry(graveyard.next, struct afs_wb_key, vnode_link); list_del(&wbk->vnode_link); afs_put_wb_key(wbk); } } |
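/*
 * Illustrative sketch (not part of fs/afs/write.c above): the "graveyard"
 * idiom used by afs_prune_wb_keys() -- unlink idle entries from a
 * spinlock-protected list onto a private list, then drop the references
 * outside the lock so the free path never runs with the spinlock held.
 * The my_cache/my_entry types are hypothetical stand-ins for
 * afs_vnode/afs_wb_key.
 */
#include <linux/list.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_entry {
	struct list_head	link;
	refcount_t		usage;
};

struct my_cache {
	spinlock_t		lock;
	struct list_head	entries;
};

static void my_cache_prune(struct my_cache *cache)
{
	LIST_HEAD(graveyard);
	struct my_entry *e, *tmp;

	/* Under the lock: only move entries nobody else holds a ref to */
	spin_lock(&cache->lock);
	list_for_each_entry_safe(e, tmp, &cache->entries, link) {
		if (refcount_read(&e->usage) == 1)
			list_move(&e->link, &graveyard);
	}
	spin_unlock(&cache->lock);

	/* Outside the lock: release what was collected */
	while (!list_empty(&graveyard)) {
		e = list_first_entry(&graveyard, struct my_entry, link);
		list_del(&e->link);
		if (refcount_dec_and_test(&e->usage))
			kfree(e);
	}
}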
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (C) International Business Machines Corp., 2000-2004 * Portions Copyright (C) Christoph Hellwig, 2001-2002 */ #ifndef _H_JFS_INCORE #define _H_JFS_INCORE #include <linux/mutex.h> #include <linux/rwsem.h> #include <linux/slab.h> #include <linux/bitops.h> #include <linux/uuid.h> #include "jfs_types.h" #include "jfs_xtree.h" #include "jfs_dtree.h" /* * JFS magic number */ #define JFS_SUPER_MAGIC 0x3153464a /* "JFS1" */ /* * JFS-private inode information */ struct jfs_inode_info { int fileset; /* fileset number (always 16)*/ uint mode2; /* jfs-specific mode */ kuid_t saved_uid; /* saved for uid mount option */ kgid_t saved_gid; /* saved for gid mount option */ pxd_t ixpxd; /* inode extent descriptor */ dxd_t acl; /* dxd describing acl */ dxd_t ea; /* dxd describing ea */ time64_t otime; /* time created */ uint next_index; /* next available directory entry index */ int acltype; /* Type of ACL */ short btorder; /* access order */ short btindex; /* btpage entry index*/ struct inode *ipimap; /* inode map */ unsigned long cflag; /* commit flags */ u64 agstart; /* agstart of the containing IAG */ u16 bxflag; /* xflag of pseudo buffer? */ unchar pad; signed char active_ag; /* ag currently allocating from */ lid_t blid; /* lid of pseudo buffer? */ lid_t atlhead; /* anonymous tlock list head */ lid_t atltail; /* anonymous tlock list tail */ spinlock_t ag_lock; /* protects active_ag */ struct list_head anon_inode_list; /* inodes having anonymous txns */ /* * rdwrlock serializes xtree between reads & writes and synchronizes * changes to special inodes. Its use would be redundant on * directories since the i_mutex taken in the VFS is sufficient. */ struct rw_semaphore rdwrlock; /* * commit_mutex serializes transaction processing on an inode.
* It must be taken after beginning a transaction (txBegin), since * dirty inodes may be committed while a new transaction on the * inode is blocked in txBegin or TxBeginAnon */ struct mutex commit_mutex; /* xattr_sem allows us to access the xattrs without taking i_mutex */ struct rw_semaphore xattr_sem; lid_t xtlid; /* lid of xtree lock on directory */ union { struct { xtroot_t _xtroot; /* 288: xtree root */ struct inomap *_imap; /* 4: inode map header */ } file; struct { struct dir_table_slot _table[12]; /* 96: dir index */ dtroot_t _dtroot; /* 288: dtree root */ } dir; struct { unchar _unused[16]; /* 16: */ dxd_t _dxd; /* 16: */ /* _inline may overflow into _inline_ea when needed */ /* _inline_ea may overlay the last part of * file._xtroot if maxentry = XTROOTINITSLOT */ union { struct { /* 128: inline symlink */ unchar _inline[128]; /* 128: inline extended attr */ unchar _inline_ea[128]; }; unchar _inline_all[256]; }; } link; } u; #ifdef CONFIG_QUOTA struct dquot __rcu *i_dquot[MAXQUOTAS]; #endif u32 dev; /* will die when we get wide dev_t */ struct inode vfs_inode; }; #define i_xtroot u.file._xtroot #define i_imap u.file._imap #define i_dirtable u.dir._table #define i_dtroot u.dir._dtroot #define i_inline u.link._inline #define i_inline_ea u.link._inline_ea #define i_inline_all u.link._inline_all #define IREAD_LOCK(ip, subclass) \ down_read_nested(&JFS_IP(ip)->rdwrlock, subclass) #define IREAD_UNLOCK(ip) up_read(&JFS_IP(ip)->rdwrlock) #define IWRITE_LOCK(ip, subclass) \ down_write_nested(&JFS_IP(ip)->rdwrlock, subclass) #define IWRITE_UNLOCK(ip) up_write(&JFS_IP(ip)->rdwrlock) /* * cflag */ enum cflags { COMMIT_Nolink, /* inode committed with zero link count */ COMMIT_Inlineea, /* commit inode inline EA */ COMMIT_Freewmap, /* free WMAP at iClose() */ COMMIT_Dirty, /* Inode is really dirty */ COMMIT_Dirtable, /* commit changes to di_dirtable */ COMMIT_Stale, /* data extent is no longer valid */ COMMIT_Synclist, /* metadata pages on group commit synclist */ }; /* * commit_mutex nesting subclasses: */ enum commit_mutex_class { COMMIT_MUTEX_PARENT, COMMIT_MUTEX_CHILD, COMMIT_MUTEX_SECOND_PARENT, /* Renaming */ COMMIT_MUTEX_VICTIM /* Inode being unlinked due to rename */ }; /* * rdwrlock subclasses: * The dmap inode may be locked while a normal inode or the imap inode are * locked. */ enum rdwrlock_class { RDWRLOCK_NORMAL, RDWRLOCK_IMAP, RDWRLOCK_DMAP }; #define set_cflag(flag, ip) set_bit(flag, &(JFS_IP(ip)->cflag)) #define clear_cflag(flag, ip) clear_bit(flag, &(JFS_IP(ip)->cflag)) #define test_cflag(flag, ip) test_bit(flag, &(JFS_IP(ip)->cflag)) #define test_and_clear_cflag(flag, ip) \ test_and_clear_bit(flag, &(JFS_IP(ip)->cflag)) /* * JFS-private superblock information. 
*/ struct jfs_sb_info { struct super_block *sb; /* Point back to vfs super block */ unsigned long mntflag; /* aggregate attributes */ struct inode *ipbmap; /* block map inode */ struct inode *ipaimap; /* aggregate inode map inode */ struct inode *ipaimap2; /* secondary aimap inode */ struct inode *ipimap; /* aggregate inode map inode */ struct jfs_log *log; /* log */ struct list_head log_list; /* volumes associated with a journal */ short bsize; /* logical block size */ short l2bsize; /* log2 logical block size */ short nbperpage; /* blocks per page */ short l2nbperpage; /* log2 blocks per page */ short l2niperblk; /* log2 inodes per page */ dev_t logdev; /* external log device */ uint aggregate; /* volume identifier in log record */ pxd_t logpxd; /* pxd describing log */ pxd_t fsckpxd; /* pxd describing fsck wkspc */ pxd_t ait2; /* pxd describing AIT copy */ uuid_t uuid; /* 128-bit uuid for volume */ uuid_t loguuid; /* 128-bit uuid for log */ /* * commit_state is used for synchronization of the jfs_commit * threads. It is protected by LAZY_LOCK(). */ int commit_state; /* commit state */ /* Formerly in ipimap */ uint gengen; /* inode generation generator*/ uint inostamp; /* shows inode belongs to fileset*/ /* Formerly in ipbmap */ struct bmap *bmap; /* incore bmap descriptor */ struct nls_table *nls_tab; /* current codepage */ struct inode *direct_inode; /* metadata inode */ uint state; /* mount/recovery state */ unsigned long flag; /* mount time flags */ uint p_state; /* state prior to going no integrity */ kuid_t uid; /* uid to override on-disk uid */ kgid_t gid; /* gid to override on-disk gid */ uint umask; /* umask to override on-disk umask */ uint minblks_trim; /* minimum blocks, for online trim */ }; /* jfs_sb_info commit_state */ #define IN_LAZYCOMMIT 1 static inline struct jfs_inode_info *JFS_IP(struct inode *inode) { return container_of(inode, struct jfs_inode_info, vfs_inode); } static inline int jfs_dirtable_inline(struct inode *inode) { return (JFS_IP(inode)->next_index <= (MAX_INLINE_DIRTABLE_ENTRY + 1)); } static inline struct jfs_sb_info *JFS_SBI(struct super_block *sb) { return sb->s_fs_info; } static inline int isReadOnly(struct inode *inode) { if (JFS_SBI(inode->i_sb)->log) return 0; return 1; } #endif /* _H_JFS_INCORE */ |
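/*
 * Illustrative sketch (not part of jfs_incore.h above): the embedding
 * pattern behind JFS_IP() and JFS_SBI().  The filesystem-private inode
 * structure contains the VFS struct inode by value, and container_of()
 * recovers the wrapper from the struct inode pointer that every inode and
 * file operation receives.  The "myfs" names are hypothetical.
 */
#include <linux/bitops.h>
#include <linux/fs.h>
#include <linux/kernel.h>

struct myfs_inode_info {
	unsigned long	flags;		/* fs-private state */
	struct inode	vfs_inode;	/* embedded, never a pointer */
};

static inline struct myfs_inode_info *MYFS_I(struct inode *inode)
{
	return container_of(inode, struct myfs_inode_info, vfs_inode);
}

/* e.g. from a ->setattr or ->release handler that only has the VFS inode */
static void myfs_set_private_flag(struct inode *inode, int bit)
{
	set_bit(bit, &MYFS_I(inode)->flags);
}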
// SPDX-License-Identifier: GPL-2.0 /* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * The IP fragmentation functionality. * * Authors: Fred N.
van Kempen <waltje@uWalt.NL.Mugnet.ORG> * Alan Cox <alan@lxorguk.ukuu.org.uk> * * Fixes: * Alan Cox : Split from ip.c , see ip_input.c for history. * David S. Miller : Begin massive cleanup... * Andi Kleen : Add sysctls. * xxxx : Overlapfrag bug. * Ultima : ip_expire() kernel panic. * Bill Hawes : Frag accounting and evictor fixes. * John McDonald : 0 length frag bug. * Alexey Kuznetsov: SMP races, threading, cleanup. * Patrick McHardy : LRU queue of frag heads for evictor. */ #define pr_fmt(fmt) "IPv4: " fmt #include <linux/compiler.h> #include <linux/module.h> #include <linux/types.h> #include <linux/mm.h> #include <linux/jiffies.h> #include <linux/skbuff.h> #include <linux/list.h> #include <linux/ip.h> #include <linux/icmp.h> #include <linux/netdevice.h> #include <linux/jhash.h> #include <linux/random.h> #include <linux/slab.h> #include <net/route.h> #include <net/dst.h> #include <net/sock.h> #include <net/ip.h> #include <net/icmp.h> #include <net/checksum.h> #include <net/inetpeer.h> #include <net/inet_frag.h> #include <linux/tcp.h> #include <linux/udp.h> #include <linux/inet.h> #include <linux/netfilter_ipv4.h> #include <net/inet_ecn.h> #include <net/l3mdev.h> /* NOTE. Logic of IP defragmentation is parallel to corresponding IPv6 * code now. If you change something here, _PLEASE_ update ipv6/reassembly.c * as well. Or notify me, at least. --ANK */ static const char ip_frag_cache_name[] = "ip4-frags"; /* Describe an entry in the "incomplete datagrams" queue. */ struct ipq { struct inet_frag_queue q; u8 ecn; /* RFC3168 support */ u16 max_df_size; /* largest frag with DF set seen */ int iif; unsigned int rid; struct inet_peer *peer; }; static u8 ip4_frag_ecn(u8 tos) { return 1 << (tos & INET_ECN_MASK); } static struct inet_frags ip4_frags; static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb, struct sk_buff *prev_tail, struct net_device *dev, int *refs); static void ip4_frag_init(struct inet_frag_queue *q, const void *a) { struct ipq *qp = container_of(q, struct ipq, q); const struct frag_v4_compare_key *key = a; struct net *net = q->fqdir->net; struct inet_peer *p = NULL; q->key.v4 = *key; qp->ecn = 0; if (q->fqdir->max_dist) { rcu_read_lock(); p = inet_getpeer_v4(net->ipv4.peers, key->saddr, key->vif); if (p && !refcount_inc_not_zero(&p->refcnt)) p = NULL; rcu_read_unlock(); } qp->peer = p; } static void ip4_frag_free(struct inet_frag_queue *q) { struct ipq *qp; qp = container_of(q, struct ipq, q); if (qp->peer) inet_putpeer(qp->peer); } static bool frag_expire_skip_icmp(u32 user) { return user == IP_DEFRAG_AF_PACKET || ip_defrag_user_in_between(user, IP_DEFRAG_CONNTRACK_IN, __IP_DEFRAG_CONNTRACK_IN_END) || ip_defrag_user_in_between(user, IP_DEFRAG_CONNTRACK_BRIDGE_IN, __IP_DEFRAG_CONNTRACK_BRIDGE_IN); } /* * Oops, a fragment queue timed out. Kill it and send an ICMP reply. */ static void ip_expire(struct timer_list *t) { enum skb_drop_reason reason = SKB_DROP_REASON_FRAG_REASM_TIMEOUT; struct inet_frag_queue *frag = timer_container_of(frag, t, timer); const struct iphdr *iph; struct sk_buff *head = NULL; struct net *net; struct ipq *qp; int refs = 1; qp = container_of(frag, struct ipq, q); net = qp->q.fqdir->net; rcu_read_lock(); /* Paired with WRITE_ONCE() in fqdir_pre_exit(). 
*/ if (READ_ONCE(qp->q.fqdir->dead)) goto out_rcu_unlock; spin_lock(&qp->q.lock); if (qp->q.flags & INET_FRAG_COMPLETE) goto out; qp->q.flags |= INET_FRAG_DROP; inet_frag_kill(&qp->q, &refs); __IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS); __IP_INC_STATS(net, IPSTATS_MIB_REASMTIMEOUT); if (!(qp->q.flags & INET_FRAG_FIRST_IN)) goto out; /* sk_buff::dev and sk_buff::rbnode are unionized. So we * pull the head out of the tree in order to be able to * deal with head->dev. */ head = inet_frag_pull_head(&qp->q); if (!head) goto out; head->dev = dev_get_by_index_rcu(net, qp->iif); if (!head->dev) goto out; /* skb has no dst, perform route lookup again */ iph = ip_hdr(head); reason = ip_route_input_noref(head, iph->daddr, iph->saddr, ip4h_dscp(iph), head->dev); if (reason) goto out; /* Only an end host needs to send an ICMP * "Fragment Reassembly Timeout" message, per RFC792. */ reason = SKB_DROP_REASON_FRAG_REASM_TIMEOUT; if (frag_expire_skip_icmp(qp->q.key.v4.user) && (skb_rtable(head)->rt_type != RTN_LOCAL)) goto out; spin_unlock(&qp->q.lock); icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0); goto out_rcu_unlock; out: spin_unlock(&qp->q.lock); out_rcu_unlock: rcu_read_unlock(); kfree_skb_reason(head, reason); inet_frag_putn(&qp->q, refs); } /* Find the correct entry in the "incomplete datagrams" queue for * this IP datagram, and create new one, if nothing is found. */ static struct ipq *ip_find(struct net *net, struct iphdr *iph, u32 user, int vif) { struct frag_v4_compare_key key = { .saddr = iph->saddr, .daddr = iph->daddr, .user = user, .vif = vif, .id = iph->id, .protocol = iph->protocol, }; struct inet_frag_queue *q; q = inet_frag_find(net->ipv4.fqdir, &key); if (!q) return NULL; return container_of(q, struct ipq, q); } /* Is the fragment too far ahead to be part of ipq? */ static int ip_frag_too_far(struct ipq *qp) { struct inet_peer *peer = qp->peer; unsigned int max = qp->q.fqdir->max_dist; unsigned int start, end; int rc; if (!peer || !max) return 0; start = qp->rid; end = atomic_inc_return(&peer->rid); qp->rid = end; rc = qp->q.fragments_tail && (end - start) > max; if (rc) __IP_INC_STATS(qp->q.fqdir->net, IPSTATS_MIB_REASMFAILS); return rc; } static int ip_frag_reinit(struct ipq *qp) { unsigned int sum_truesize = 0; if (!mod_timer(&qp->q.timer, jiffies + qp->q.fqdir->timeout)) { refcount_inc(&qp->q.refcnt); return -ETIMEDOUT; } sum_truesize = inet_frag_rbtree_purge(&qp->q.rb_fragments, SKB_DROP_REASON_FRAG_TOO_FAR); sub_frag_mem_limit(qp->q.fqdir, sum_truesize); qp->q.flags = 0; qp->q.len = 0; qp->q.meat = 0; qp->q.rb_fragments = RB_ROOT; qp->q.fragments_tail = NULL; qp->q.last_run_head = NULL; qp->iif = 0; qp->ecn = 0; return 0; } /* Add new segment to existing queue. */ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb, int *refs) { struct net *net = qp->q.fqdir->net; int ihl, end, flags, offset; struct sk_buff *prev_tail; struct net_device *dev; unsigned int fragsize; int err = -ENOENT; SKB_DR(reason); u8 ecn; /* If reassembly is already done, @skb must be a duplicate frag. */ if (qp->q.flags & INET_FRAG_COMPLETE) { SKB_DR_SET(reason, DUP_FRAG); goto err; } if (!(IPCB(skb)->flags & IPSKB_FRAG_COMPLETE) && unlikely(ip_frag_too_far(qp)) && unlikely(err = ip_frag_reinit(qp))) { inet_frag_kill(&qp->q, refs); goto err; } ecn = ip4_frag_ecn(ip_hdr(skb)->tos); offset = ntohs(ip_hdr(skb)->frag_off); flags = offset & ~IP_OFFSET; offset &= IP_OFFSET; offset <<= 3; /* offset is in 8-byte chunks */ ihl = ip_hdrlen(skb); /* Determine the position of this fragment. 
*/ end = offset + skb->len - skb_network_offset(skb) - ihl; err = -EINVAL; /* Is this the final fragment? */ if ((flags & IP_MF) == 0) { /* If we already have some bits beyond end * or have different end, the segment is corrupted. */ if (end < qp->q.len || ((qp->q.flags & INET_FRAG_LAST_IN) && end != qp->q.len)) goto discard_qp; qp->q.flags |= INET_FRAG_LAST_IN; qp->q.len = end; } else { if (end&7) { end &= ~7; if (skb->ip_summed != CHECKSUM_UNNECESSARY) skb->ip_summed = CHECKSUM_NONE; } if (end > qp->q.len) { /* Some bits beyond end -> corruption. */ if (qp->q.flags & INET_FRAG_LAST_IN) goto discard_qp; qp->q.len = end; } } if (end == offset) goto discard_qp; err = -ENOMEM; if (!pskb_pull(skb, skb_network_offset(skb) + ihl)) goto discard_qp; err = pskb_trim_rcsum(skb, end - offset); if (err) goto discard_qp; /* Note : skb->rbnode and skb->dev share the same location. */ dev = skb->dev; /* Makes sure compiler wont do silly aliasing games */ barrier(); prev_tail = qp->q.fragments_tail; err = inet_frag_queue_insert(&qp->q, skb, offset, end); if (err) goto insert_error; if (dev) qp->iif = dev->ifindex; qp->q.stamp = skb->tstamp; qp->q.tstamp_type = skb->tstamp_type; qp->q.meat += skb->len; qp->ecn |= ecn; add_frag_mem_limit(qp->q.fqdir, skb->truesize); if (offset == 0) qp->q.flags |= INET_FRAG_FIRST_IN; fragsize = skb->len + ihl; if (fragsize > qp->q.max_size) qp->q.max_size = fragsize; if (ip_hdr(skb)->frag_off & htons(IP_DF) && fragsize > qp->max_df_size) qp->max_df_size = fragsize; if (qp->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) && qp->q.meat == qp->q.len) { unsigned long orefdst = skb->_skb_refdst; skb->_skb_refdst = 0UL; err = ip_frag_reasm(qp, skb, prev_tail, dev, refs); skb->_skb_refdst = orefdst; if (err) inet_frag_kill(&qp->q, refs); return err; } skb_dst_drop(skb); skb_orphan(skb); return -EINPROGRESS; insert_error: if (err == IPFRAG_DUP) { SKB_DR_SET(reason, DUP_FRAG); err = -EINVAL; goto err; } err = -EINVAL; __IP_INC_STATS(net, IPSTATS_MIB_REASM_OVERLAPS); discard_qp: inet_frag_kill(&qp->q, refs); __IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS); err: kfree_skb_reason(skb, reason); return err; } static bool ip_frag_coalesce_ok(const struct ipq *qp) { return qp->q.key.v4.user == IP_DEFRAG_LOCAL_DELIVER; } /* Build a new IP datagram from all its fragments. */ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb, struct sk_buff *prev_tail, struct net_device *dev, int *refs) { struct net *net = qp->q.fqdir->net; struct iphdr *iph; void *reasm_data; int len, err; u8 ecn; inet_frag_kill(&qp->q, refs); ecn = ip_frag_ecn_table[qp->ecn]; if (unlikely(ecn == 0xff)) { err = -EINVAL; goto out_fail; } /* Make the one we just received the head. */ reasm_data = inet_frag_reasm_prepare(&qp->q, skb, prev_tail); if (!reasm_data) goto out_nomem; len = ip_hdrlen(skb) + qp->q.len; err = -E2BIG; if (len > 65535) goto out_oversize; inet_frag_reasm_finish(&qp->q, skb, reasm_data, ip_frag_coalesce_ok(qp)); skb->dev = dev; IPCB(skb)->frag_max_size = max(qp->max_df_size, qp->q.max_size); iph = ip_hdr(skb); iph->tot_len = htons(len); iph->tos |= ecn; /* When we set IP_DF on a refragmented skb we must also force a * call to ip_fragment to avoid forwarding a DF-skb of size s while * original sender only sent fragments of size f (where f < s). * * We only set DF/IPSKB_FRAG_PMTU if such DF fragment was the largest * frag seen to avoid sending tiny DF-fragments in case skb was built * from one very small df-fragment and one large non-df frag. 
*/ if (qp->max_df_size == qp->q.max_size) { IPCB(skb)->flags |= IPSKB_FRAG_PMTU; iph->frag_off = htons(IP_DF); } else { iph->frag_off = 0; } ip_send_check(iph); __IP_INC_STATS(net, IPSTATS_MIB_REASMOKS); qp->q.rb_fragments = RB_ROOT; qp->q.fragments_tail = NULL; qp->q.last_run_head = NULL; return 0; out_nomem: net_dbg_ratelimited("queue_glue: no memory for gluing queue %p\n", qp); err = -ENOMEM; goto out_fail; out_oversize: net_info_ratelimited("Oversized IP packet from %pI4\n", &qp->q.key.v4.saddr); out_fail: __IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS); return err; } /* Process an incoming IP datagram fragment. */ int ip_defrag(struct net *net, struct sk_buff *skb, u32 user) { struct net_device *dev = skb->dev ? : skb_dst(skb)->dev; int vif = l3mdev_master_ifindex_rcu(dev); struct ipq *qp; __IP_INC_STATS(net, IPSTATS_MIB_REASMREQDS); /* Lookup (or create) queue header */ rcu_read_lock(); qp = ip_find(net, ip_hdr(skb), user, vif); if (qp) { int ret, refs = 0; spin_lock(&qp->q.lock); ret = ip_frag_queue(qp, skb, &refs); spin_unlock(&qp->q.lock); rcu_read_unlock(); inet_frag_putn(&qp->q, refs); return ret; } rcu_read_unlock(); __IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS); kfree_skb(skb); return -ENOMEM; } EXPORT_SYMBOL(ip_defrag); struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user) { struct iphdr iph; int netoff; u32 len; if (skb->protocol != htons(ETH_P_IP)) return skb; netoff = skb_network_offset(skb); if (skb_copy_bits(skb, netoff, &iph, sizeof(iph)) < 0) return skb; if (iph.ihl < 5 || iph.version != 4) return skb; len = ntohs(iph.tot_len); if (skb->len < netoff + len || len < (iph.ihl * 4)) return skb; if (ip_is_fragment(&iph)) { skb = skb_share_check(skb, GFP_ATOMIC); if (skb) { if (!pskb_may_pull(skb, netoff + iph.ihl * 4)) { kfree_skb(skb); return NULL; } if (pskb_trim_rcsum(skb, netoff + len)) { kfree_skb(skb); return NULL; } memset(IPCB(skb), 0, sizeof(struct inet_skb_parm)); if (ip_defrag(net, skb, user)) return NULL; skb_clear_hash(skb); } } return skb; } EXPORT_SYMBOL(ip_check_defrag); #ifdef CONFIG_SYSCTL static int dist_min; static struct ctl_table ip4_frags_ns_ctl_table[] = { { .procname = "ipfrag_high_thresh", .maxlen = sizeof(unsigned long), .mode = 0644, .proc_handler = proc_doulongvec_minmax, }, { .procname = "ipfrag_low_thresh", .maxlen = sizeof(unsigned long), .mode = 0644, .proc_handler = proc_doulongvec_minmax, }, { .procname = "ipfrag_time", .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { .procname = "ipfrag_max_dist", .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &dist_min, }, }; /* secret interval has been deprecated */ static int ip4_frags_secret_interval_unused; static struct ctl_table ip4_frags_ctl_table[] = { { .procname = "ipfrag_secret_interval", .data = &ip4_frags_secret_interval_unused, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, }; static int __net_init ip4_frags_ns_ctl_register(struct net *net) { struct ctl_table *table; struct ctl_table_header *hdr; table = ip4_frags_ns_ctl_table; if (!net_eq(net, &init_net)) { table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL); if (!table) goto err_alloc; } table[0].data = &net->ipv4.fqdir->high_thresh; table[0].extra1 = &net->ipv4.fqdir->low_thresh; table[1].data = &net->ipv4.fqdir->low_thresh; table[1].extra2 = &net->ipv4.fqdir->high_thresh; table[2].data = &net->ipv4.fqdir->timeout; table[3].data = &net->ipv4.fqdir->max_dist; hdr = register_net_sysctl_sz(net, "net/ipv4", 
table, ARRAY_SIZE(ip4_frags_ns_ctl_table)); if (!hdr) goto err_reg; net->ipv4.frags_hdr = hdr; return 0; err_reg: if (!net_eq(net, &init_net)) kfree(table); err_alloc: return -ENOMEM; } static void __net_exit ip4_frags_ns_ctl_unregister(struct net *net) { const struct ctl_table *table; table = net->ipv4.frags_hdr->ctl_table_arg; unregister_net_sysctl_table(net->ipv4.frags_hdr); kfree(table); } static void __init ip4_frags_ctl_register(void) { register_net_sysctl(&init_net, "net/ipv4", ip4_frags_ctl_table); } #else static int ip4_frags_ns_ctl_register(struct net *net) { return 0; } static void ip4_frags_ns_ctl_unregister(struct net *net) { } static void __init ip4_frags_ctl_register(void) { } #endif static int __net_init ipv4_frags_init_net(struct net *net) { int res; res = fqdir_init(&net->ipv4.fqdir, &ip4_frags, net); if (res < 0) return res; /* Fragment cache limits. * * The fragment memory accounting code, (tries to) account for * the real memory usage, by measuring both the size of frag * queue struct (inet_frag_queue (ipv4:ipq/ipv6:frag_queue)) * and the SKB's truesize. * * A 64K fragment consumes 129736 bytes (44*2944)+200 * (1500 truesize == 2944, sizeof(struct ipq) == 200) * * We will commit 4MB at one time. Should we cross that limit * we will prune down to 3MB, making room for approx 8 big 64K * fragments 8x128k. */ net->ipv4.fqdir->high_thresh = 4 * 1024 * 1024; net->ipv4.fqdir->low_thresh = 3 * 1024 * 1024; /* * Important NOTE! Fragment queue must be destroyed before MSL expires. * RFC791 is wrong proposing to prolongate timer each fragment arrival * by TTL. */ net->ipv4.fqdir->timeout = IP_FRAG_TIME; net->ipv4.fqdir->max_dist = 64; res = ip4_frags_ns_ctl_register(net); if (res < 0) fqdir_exit(net->ipv4.fqdir); return res; } static void __net_exit ipv4_frags_pre_exit_net(struct net *net) { fqdir_pre_exit(net->ipv4.fqdir); } static void __net_exit ipv4_frags_exit_net(struct net *net) { ip4_frags_ns_ctl_unregister(net); fqdir_exit(net->ipv4.fqdir); } static struct pernet_operations ip4_frags_ops = { .init = ipv4_frags_init_net, .pre_exit = ipv4_frags_pre_exit_net, .exit = ipv4_frags_exit_net, }; static u32 ip4_key_hashfn(const void *data, u32 len, u32 seed) { return jhash2(data, sizeof(struct frag_v4_compare_key) / sizeof(u32), seed); } static u32 ip4_obj_hashfn(const void *data, u32 len, u32 seed) { const struct inet_frag_queue *fq = data; return jhash2((const u32 *)&fq->key.v4, sizeof(struct frag_v4_compare_key) / sizeof(u32), seed); } static int ip4_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr) { const struct frag_v4_compare_key *key = arg->key; const struct inet_frag_queue *fq = ptr; return !!memcmp(&fq->key, key, sizeof(*key)); } static const struct rhashtable_params ip4_rhash_params = { .head_offset = offsetof(struct inet_frag_queue, node), .key_offset = offsetof(struct inet_frag_queue, key), .key_len = sizeof(struct frag_v4_compare_key), .hashfn = ip4_key_hashfn, .obj_hashfn = ip4_obj_hashfn, .obj_cmpfn = ip4_obj_cmpfn, .automatic_shrinking = true, }; void __init ipfrag_init(void) { ip4_frags.constructor = ip4_frag_init; ip4_frags.destructor = ip4_frag_free; ip4_frags.qsize = sizeof(struct ipq); ip4_frags.frag_expire = ip_expire; ip4_frags.frags_cache_name = ip_frag_cache_name; ip4_frags.rhash_params = ip4_rhash_params; if (inet_frags_init(&ip4_frags)) panic("IP: failed to allocate ip4_frags cache\n"); ip4_frags_ctl_register(); register_pernet_subsys(&ip4_frags_ops); } |
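/*
 * Illustrative sketch (not part of net/ipv4/ip_fragment.c above): how a
 * packet-tap style receive path might use the exported ip_check_defrag().
 * IP_DEFRAG_AF_PACKET is the defrag user id already referenced in the file
 * above; my_rx() and my_deliver() are hypothetical caller code.
 */
#include <linux/skbuff.h>
#include <net/ip.h>

static void my_deliver(struct sk_buff *skb);	/* hypothetical consumer */

static void my_rx(struct net *net, struct sk_buff *skb)
{
	/*
	 * ip_check_defrag() returns the skb untouched if it is not an IPv4
	 * fragment, NULL if the fragment was queued (or dropped) while the
	 * rest of the datagram is awaited, and the fully reassembled
	 * datagram once the last fragment arrives.
	 */
	skb = ip_check_defrag(net, skb, IP_DEFRAG_AF_PACKET);
	if (!skb)
		return;		/* consumed by the reassembler */

	my_deliver(skb);
}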
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* V4L2 device support header. Copyright (C) 2008 Hans Verkuil <hverkuil@xs4all.nl> */ #ifndef _V4L2_DEVICE_H #define _V4L2_DEVICE_H #include <media/media-device.h> #include <media/v4l2-subdev.h> #include <media/v4l2-dev.h> struct v4l2_ctrl_handler; /** * struct v4l2_device - main struct for V4L2 device drivers * * @dev: pointer to struct device. * @mdev: pointer to struct media_device, may be NULL. * @subdevs: used to keep track of the registered subdevs * @lock: lock this struct; can be used by the driver as well * if this struct is embedded into a larger struct. * @name: unique device name, by default the driver name + bus ID * @notify: notify operation called by some sub-devices. * @ctrl_handler: The control handler. May be %NULL. * @prio: Device's priority state * @ref: Keep track of the references to this struct. * @release: Release function that is called when the ref count * goes to 0. * * Each instance of a V4L2 device should create the v4l2_device struct, * either stand-alone or embedded in a larger struct. * * It allows easy access to sub-devices (see v4l2-subdev.h) and provides * basic V4L2 device-level support. * * .. note:: * * #) @dev->driver_data points to this struct.
* #) @dev might be %NULL if there is no parent device */ struct v4l2_device { struct device *dev; struct media_device *mdev; struct list_head subdevs; spinlock_t lock; char name[36]; void (*notify)(struct v4l2_subdev *sd, unsigned int notification, void *arg); struct v4l2_ctrl_handler *ctrl_handler; struct v4l2_prio_state prio; struct kref ref; void (*release)(struct v4l2_device *v4l2_dev); }; /** * v4l2_device_get - gets a V4L2 device reference * * @v4l2_dev: pointer to struct &v4l2_device * * This is an ancillary routine meant to increment the usage for the * struct &v4l2_device pointed by @v4l2_dev. */ static inline void v4l2_device_get(struct v4l2_device *v4l2_dev) { kref_get(&v4l2_dev->ref); } /** * v4l2_device_put - puts a V4L2 device reference * * @v4l2_dev: pointer to struct &v4l2_device * * This is an ancillary routine meant to decrement the usage for the * struct &v4l2_device pointed by @v4l2_dev. */ int v4l2_device_put(struct v4l2_device *v4l2_dev); /** * v4l2_device_register - Initialize v4l2_dev and make @dev->driver_data * point to @v4l2_dev. * * @dev: pointer to struct &device * @v4l2_dev: pointer to struct &v4l2_device * * .. note:: * @dev may be %NULL in rare cases (ISA devices). * In such case the caller must fill in the @v4l2_dev->name field * before calling this function. */ int __must_check v4l2_device_register(struct device *dev, struct v4l2_device *v4l2_dev); /** * v4l2_device_set_name - Optional function to initialize the * name field of struct &v4l2_device * * @v4l2_dev: pointer to struct &v4l2_device * @basename: base name for the device name * @instance: pointer to a static atomic_t var with the instance usage for * the device driver. * * v4l2_device_set_name() initializes the name field of struct &v4l2_device * using the driver name and a driver-global atomic_t instance. * * This function will increment the instance counter and returns the * instance value used in the name. * * Example: * * static atomic_t drv_instance = ATOMIC_INIT(0); * * ... * * instance = v4l2_device_set_name(&\ v4l2_dev, "foo", &\ drv_instance); * * The first time this is called the name field will be set to foo0 and * this function returns 0. If the name ends with a digit (e.g. cx18), * then the name will be set to cx18-0 since cx180 would look really odd. */ int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename, atomic_t *instance); /** * v4l2_device_disconnect - Change V4L2 device state to disconnected. * * @v4l2_dev: pointer to struct v4l2_device * * Should be called when the USB parent disconnects. * Since the parent disappears, this ensures that @v4l2_dev doesn't have * an invalid parent pointer. * * .. note:: This function sets @v4l2_dev->dev to NULL. */ void v4l2_device_disconnect(struct v4l2_device *v4l2_dev); /** * v4l2_device_unregister - Unregister all sub-devices and any other * resources related to @v4l2_dev. * * @v4l2_dev: pointer to struct v4l2_device */ void v4l2_device_unregister(struct v4l2_device *v4l2_dev); /** * v4l2_device_register_subdev - Registers a subdev with a v4l2 device. * * @v4l2_dev: pointer to struct &v4l2_device * @sd: pointer to &struct v4l2_subdev * * While registered, the subdev module is marked as in-use. * * An error is returned if the module is no longer loaded on any attempts * to register it. 
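 *
 * A minimal usage sketch (hedged illustration; "state" is a hypothetical
 * bridge-driver structure embedding a &struct v4l2_device, it is not
 * defined in this header):
 *
 *	err = v4l2_device_register_subdev(&state->v4l2_dev, sd);
 *	if (err)
 *		return err;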
*/ #define v4l2_device_register_subdev(v4l2_dev, sd) \ __v4l2_device_register_subdev(v4l2_dev, sd, THIS_MODULE) int __must_check __v4l2_device_register_subdev(struct v4l2_device *v4l2_dev, struct v4l2_subdev *sd, struct module *module); /** * v4l2_device_unregister_subdev - Unregisters a subdev with a v4l2 device. * * @sd: pointer to &struct v4l2_subdev * * .. note :: * * Can also be called if the subdev wasn't registered. In such * case, it will do nothing. */ void v4l2_device_unregister_subdev(struct v4l2_subdev *sd); /** * __v4l2_device_register_subdev_nodes - Registers device nodes for * all subdevs of the v4l2 device that are marked with the * %V4L2_SUBDEV_FL_HAS_DEVNODE flag. * * @v4l2_dev: pointer to struct v4l2_device * @read_only: subdevices read-only flag. True to register the subdevices * device nodes in read-only mode, false to allow full access to the * subdevice userspace API. */ int __must_check __v4l2_device_register_subdev_nodes(struct v4l2_device *v4l2_dev, bool read_only); /** * v4l2_device_register_subdev_nodes - Registers subdevices device nodes with * unrestricted access to the subdevice userspace operations * * Internally calls __v4l2_device_register_subdev_nodes(). See its documentation * for more details. * * @v4l2_dev: pointer to struct v4l2_device */ static inline int __must_check v4l2_device_register_subdev_nodes(struct v4l2_device *v4l2_dev) { #if defined(CONFIG_VIDEO_V4L2_SUBDEV_API) return __v4l2_device_register_subdev_nodes(v4l2_dev, false); #else return 0; #endif } /** * v4l2_device_register_ro_subdev_nodes - Registers subdevices device nodes * in read-only mode * * Internally calls __v4l2_device_register_subdev_nodes(). See its documentation * for more details. * * @v4l2_dev: pointer to struct v4l2_device */ static inline int __must_check v4l2_device_register_ro_subdev_nodes(struct v4l2_device *v4l2_dev) { #if defined(CONFIG_VIDEO_V4L2_SUBDEV_API) return __v4l2_device_register_subdev_nodes(v4l2_dev, true); #else return 0; #endif } /** * v4l2_subdev_notify - Sends a notification to v4l2_device. * * @sd: pointer to &struct v4l2_subdev * @notification: type of notification. Please notice that the notification * type is driver-specific. * @arg: arguments for the notification. Those are specific to each * notification type. */ static inline void v4l2_subdev_notify(struct v4l2_subdev *sd, unsigned int notification, void *arg) { if (sd && sd->v4l2_dev && sd->v4l2_dev->notify) sd->v4l2_dev->notify(sd, notification, arg); } /** * v4l2_device_supports_requests - Test if requests are supported. * * @v4l2_dev: pointer to struct v4l2_device */ static inline bool v4l2_device_supports_requests(struct v4l2_device *v4l2_dev) { return v4l2_dev->mdev && v4l2_dev->mdev->ops && v4l2_dev->mdev->ops->req_queue; } /* Helper macros to iterate over all subdevs. */ /** * v4l2_device_for_each_subdev - Helper macro that interates over all * sub-devices of a given &v4l2_device. * * @sd: pointer that will be filled by the macro with all * &struct v4l2_subdev pointer used as an iterator by the loop. * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over. * * This macro iterates over all sub-devices owned by the @v4l2_dev device. * It acts as a for loop iterator and executes the next statement with * the @sd variable pointing to each sub-device in turn. */ #define v4l2_device_for_each_subdev(sd, v4l2_dev) \ list_for_each_entry(sd, &(v4l2_dev)->subdevs, list) /** * __v4l2_device_call_subdevs_p - Calls the specified operation for * all subdevs matching the condition. 
* * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over. * @sd: pointer that will be filled by the macro with all * &struct v4l2_subdev pointer used as an iterator by the loop. * @cond: condition to be match * @o: name of the element at &struct v4l2_subdev_ops that contains @f. * Each element there groups a set of operations functions. * @f: operation function that will be called if @cond matches. * The operation functions are defined in groups, according to * each element at &struct v4l2_subdev_ops. * @args: arguments for @f. * * Ignore any errors. * * Note: subdevs cannot be added or deleted while walking * the subdevs list. */ #define __v4l2_device_call_subdevs_p(v4l2_dev, sd, cond, o, f, args...) \ do { \ list_for_each_entry((sd), &(v4l2_dev)->subdevs, list) \ if ((cond) && (sd)->ops->o && (sd)->ops->o->f) \ (sd)->ops->o->f((sd) , ##args); \ } while (0) /** * __v4l2_device_call_subdevs - Calls the specified operation for * all subdevs matching the condition. * * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over. * @cond: condition to be match * @o: name of the element at &struct v4l2_subdev_ops that contains @f. * Each element there groups a set of operations functions. * @f: operation function that will be called if @cond matches. * The operation functions are defined in groups, according to * each element at &struct v4l2_subdev_ops. * @args: arguments for @f. * * Ignore any errors. * * Note: subdevs cannot be added or deleted while walking * the subdevs list. */ #define __v4l2_device_call_subdevs(v4l2_dev, cond, o, f, args...) \ do { \ struct v4l2_subdev *__sd; \ \ __v4l2_device_call_subdevs_p(v4l2_dev, __sd, cond, o, \ f , ##args); \ } while (0) /** * __v4l2_device_call_subdevs_until_err_p - Calls the specified operation for * all subdevs matching the condition. * * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over. * @sd: pointer that will be filled by the macro with all * &struct v4l2_subdev sub-devices associated with @v4l2_dev. * @cond: condition to be match * @o: name of the element at &struct v4l2_subdev_ops that contains @f. * Each element there groups a set of operations functions. * @f: operation function that will be called if @cond matches. * The operation functions are defined in groups, according to * each element at &struct v4l2_subdev_ops. * @args: arguments for @f. * * Return: * * If the operation returns an error other than 0 or ``-ENOIOCTLCMD`` * for any subdevice, then abort and return with that error code, zero * otherwise. * * Note: subdevs cannot be added or deleted while walking * the subdevs list. */ #define __v4l2_device_call_subdevs_until_err_p(v4l2_dev, sd, cond, o, f, args...) \ ({ \ long __err = 0; \ \ list_for_each_entry((sd), &(v4l2_dev)->subdevs, list) { \ if ((cond) && (sd)->ops->o && (sd)->ops->o->f) \ __err = (sd)->ops->o->f((sd) , ##args); \ if (__err && __err != -ENOIOCTLCMD) \ break; \ } \ (__err == -ENOIOCTLCMD) ? 0 : __err; \ }) /** * __v4l2_device_call_subdevs_until_err - Calls the specified operation for * all subdevs matching the condition. * * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over. * @cond: condition to be match * @o: name of the element at &struct v4l2_subdev_ops that contains @f. * Each element there groups a set of operations functions. * @f: operation function that will be called if @cond matches. * The operation functions are defined in groups, according to * each element at &struct v4l2_subdev_ops. * @args: arguments for @f. 
* * Return: * * If the operation returns an error other than 0 or ``-ENOIOCTLCMD`` * for any subdevice, then abort and return with that error code, * zero otherwise. * * Note: subdevs cannot be added or deleted while walking * the subdevs list. */ #define __v4l2_device_call_subdevs_until_err(v4l2_dev, cond, o, f, args...) \ ({ \ struct v4l2_subdev *__sd; \ __v4l2_device_call_subdevs_until_err_p(v4l2_dev, __sd, cond, o, \ f , ##args); \ }) /** * v4l2_device_call_all - Calls the specified operation for * all subdevs matching the &v4l2_subdev.grp_id, as assigned * by the bridge driver. * * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over. * @grpid: &struct v4l2_subdev->grp_id group ID to match. * Use 0 to match them all. * @o: name of the element at &struct v4l2_subdev_ops that contains @f. * Each element there groups a set of operations functions. * @f: operation function that will be called if @cond matches. * The operation functions are defined in groups, according to * each element at &struct v4l2_subdev_ops. * @args: arguments for @f. * * Ignore any errors. * * Note: subdevs cannot be added or deleted while walking * the subdevs list. */ #define v4l2_device_call_all(v4l2_dev, grpid, o, f, args...) \ do { \ struct v4l2_subdev *__sd; \ \ __v4l2_device_call_subdevs_p(v4l2_dev, __sd, \ (grpid) == 0 || __sd->grp_id == (grpid), o, f , \ ##args); \ } while (0) /** * v4l2_device_call_until_err - Calls the specified operation for * all subdevs matching the &v4l2_subdev.grp_id, as assigned * by the bridge driver, until an error occurs. * * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over. * @grpid: &struct v4l2_subdev->grp_id group ID to match. * Use 0 to match them all. * @o: name of the element at &struct v4l2_subdev_ops that contains @f. * Each element there groups a set of operations functions. * @f: operation function that will be called if @cond matches. * The operation functions are defined in groups, according to * each element at &struct v4l2_subdev_ops. * @args: arguments for @f. * * Return: * * If the operation returns an error other than 0 or ``-ENOIOCTLCMD`` * for any subdevice, then abort and return with that error code, * zero otherwise. * * Note: subdevs cannot be added or deleted while walking * the subdevs list. */ #define v4l2_device_call_until_err(v4l2_dev, grpid, o, f, args...) \ ({ \ struct v4l2_subdev *__sd; \ __v4l2_device_call_subdevs_until_err_p(v4l2_dev, __sd, \ (grpid) == 0 || __sd->grp_id == (grpid), o, f , \ ##args); \ }) /** * v4l2_device_mask_call_all - Calls the specified operation for * all subdevices where a group ID matches a specified bitmask. * * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over. * @grpmsk: bitmask to be checked against &struct v4l2_subdev->grp_id * group ID to be matched. Use 0 to match them all. * @o: name of the element at &struct v4l2_subdev_ops that contains @f. * Each element there groups a set of operations functions. * @f: operation function that will be called if @cond matches. * The operation functions are defined in groups, according to * each element at &struct v4l2_subdev_ops. * @args: arguments for @f. * * Ignore any errors. * * Note: subdevs cannot be added or deleted while walking * the subdevs list. */ #define v4l2_device_mask_call_all(v4l2_dev, grpmsk, o, f, args...) 
\ do { \ struct v4l2_subdev *__sd; \ \ __v4l2_device_call_subdevs_p(v4l2_dev, __sd, \ (grpmsk) == 0 || (__sd->grp_id & (grpmsk)), o, \ f , ##args); \ } while (0) /** * v4l2_device_mask_call_until_err - Calls the specified operation for * all subdevices where a group ID matches a specified bitmask. * * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over. * @grpmsk: bitmask to be checked against &struct v4l2_subdev->grp_id * group ID to be matched. Use 0 to match them all. * @o: name of the element at &struct v4l2_subdev_ops that contains @f. * Each element there groups a set of operations functions. * @f: operation function that will be called if @cond matches. * The operation functions are defined in groups, according to * each element at &struct v4l2_subdev_ops. * @args: arguments for @f. * * Return: * * If the operation returns an error other than 0 or ``-ENOIOCTLCMD`` * for any subdevice, then abort and return with that error code, * zero otherwise. * * Note: subdevs cannot be added or deleted while walking * the subdevs list. */ #define v4l2_device_mask_call_until_err(v4l2_dev, grpmsk, o, f, args...) \ ({ \ struct v4l2_subdev *__sd; \ __v4l2_device_call_subdevs_until_err_p(v4l2_dev, __sd, \ (grpmsk) == 0 || (__sd->grp_id & (grpmsk)), o, \ f , ##args); \ }) /** * v4l2_device_has_op - checks if any subdev with matching grpid has a * given ops. * * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over. * @grpid: &struct v4l2_subdev->grp_id group ID to match. * Use 0 to match them all. * @o: name of the element at &struct v4l2_subdev_ops that contains @f. * Each element there groups a set of operations functions. * @f: operation function that will be called if @cond matches. * The operation functions are defined in groups, according to * each element at &struct v4l2_subdev_ops. */ #define v4l2_device_has_op(v4l2_dev, grpid, o, f) \ ({ \ struct v4l2_subdev *__sd; \ bool __result = false; \ list_for_each_entry(__sd, &(v4l2_dev)->subdevs, list) { \ if ((grpid) && __sd->grp_id != (grpid)) \ continue; \ if (v4l2_subdev_has_op(__sd, o, f)) { \ __result = true; \ break; \ } \ } \ __result; \ }) /** * v4l2_device_mask_has_op - checks if any subdev with matching group * mask has a given ops. * * @v4l2_dev: &struct v4l2_device owning the sub-devices to iterate over. * @grpmsk: bitmask to be checked against &struct v4l2_subdev->grp_id * group ID to be matched. Use 0 to match them all. * @o: name of the element at &struct v4l2_subdev_ops that contains @f. * Each element there groups a set of operations functions. * @f: operation function that will be called if @cond matches. * The operation functions are defined in groups, according to * each element at &struct v4l2_subdev_ops. */ #define v4l2_device_mask_has_op(v4l2_dev, grpmsk, o, f) \ ({ \ struct v4l2_subdev *__sd; \ bool __result = false; \ list_for_each_entry(__sd, &(v4l2_dev)->subdevs, list) { \ if ((grpmsk) && !(__sd->grp_id & (grpmsk))) \ continue; \ if (v4l2_subdev_has_op(__sd, o, f)) { \ __result = true; \ break; \ } \ } \ __result; \ }) #endif |
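Taken together, the registration helpers and call macros in this header are typically used from a bridge driver along the lines of the sketch below. All my_* names are hypothetical illustrations rather than anything defined here, and error handling is reduced to the minimum.

#include <media/v4l2-device.h>

/* Hedged sketch of a bridge driver built around struct v4l2_device. */
struct my_bridge {
	struct device *dev;		/* parent device (platform, PCI, USB, ...) */
	struct v4l2_device v4l2_dev;	/* embedded, as the header recommends */
};

static int my_bridge_probe(struct my_bridge *st)
{
	int err;

	err = v4l2_device_register(st->dev, &st->v4l2_dev);
	if (err)
		return err;

	/* ... register sub-devices here with v4l2_device_register_subdev() ... */

	/* Start streaming on every sub-device that implements video.s_stream;
	 * errors are ignored by v4l2_device_call_all(). */
	v4l2_device_call_all(&st->v4l2_dev, 0, video, s_stream, 1);
	return 0;
}

static void my_bridge_remove(struct my_bridge *st)
{
	v4l2_device_call_all(&st->v4l2_dev, 0, video, s_stream, 0);
	v4l2_device_unregister(&st->v4l2_dev);
}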
// SPDX-License-Identifier: GPL-2.0-only /* * HIDPP protocol for Logitech receivers * * Copyright (c) 2011 Logitech (c) * Copyright (c) 2012-2013 Google (c) * Copyright (c) 2013-2014 Red Hat Inc. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/device.h> #include <linux/input.h> #include <linux/usb.h> #include <linux/hid.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/sched.h> #include <linux/sched/clock.h> #include <linux/kfifo.h> #include <linux/input/mt.h> #include <linux/workqueue.h> #include <linux/atomic.h> #include <linux/fixp-arith.h> #include <linux/unaligned.h> #include "usbhid/usbhid.h" #include "hid-ids.h" MODULE_DESCRIPTION("Support for Logitech devices relying on the HID++ specification"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Benjamin Tissoires <benjamin.tissoires@gmail.com>"); MODULE_AUTHOR("Nestor Lopez Casado <nlopezcasad@logitech.com>"); MODULE_AUTHOR("Bastien Nocera <hadess@hadess.net>"); static bool disable_tap_to_click; module_param(disable_tap_to_click, bool, 0644); MODULE_PARM_DESC(disable_tap_to_click, "Disable Tap-To-Click mode reporting for touchpads (only on the K400 currently)."); /* Define a non-zero software ID to identify our own requests */ #define LINUX_KERNEL_SW_ID 0x01 #define REPORT_ID_HIDPP_SHORT 0x10 #define REPORT_ID_HIDPP_LONG 0x11 #define REPORT_ID_HIDPP_VERY_LONG 0x12 #define HIDPP_REPORT_SHORT_LENGTH 7 #define HIDPP_REPORT_LONG_LENGTH 20 #define HIDPP_REPORT_VERY_LONG_MAX_LENGTH 64 #define HIDPP_REPORT_SHORT_SUPPORTED BIT(0) #define HIDPP_REPORT_LONG_SUPPORTED BIT(1) #define HIDPP_REPORT_VERY_LONG_SUPPORTED BIT(2) #define HIDPP_SUB_ID_CONSUMER_VENDOR_KEYS 0x03 #define HIDPP_SUB_ID_ROLLER 0x05 #define HIDPP_SUB_ID_MOUSE_EXTRA_BTNS 0x06 #define HIDPP_SUB_ID_USER_IFACE_EVENT 0x08 #define HIDPP_USER_IFACE_EVENT_ENCRYPTION_KEY_LOST BIT(5) #define HIDPP_QUIRK_CLASS_WTP BIT(0) #define HIDPP_QUIRK_CLASS_M560 BIT(1) #define HIDPP_QUIRK_CLASS_K400 BIT(2) #define HIDPP_QUIRK_CLASS_G920 BIT(3) #define HIDPP_QUIRK_CLASS_K750 BIT(4) /* bits 2..20 are reserved for classes */ /* #define HIDPP_QUIRK_CONNECT_EVENTS BIT(21) disabled */ #define HIDPP_QUIRK_WTP_PHYSICAL_BUTTONS BIT(22) #define HIDPP_QUIRK_DELAYED_INIT BIT(23) #define HIDPP_QUIRK_FORCE_OUTPUT_REPORTS BIT(24) #define HIDPP_QUIRK_HIDPP_WHEELS BIT(25) #define HIDPP_QUIRK_HIDPP_EXTRA_MOUSE_BTNS BIT(26) #define HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS BIT(27) #define HIDPP_QUIRK_HI_RES_SCROLL_1P0 BIT(28) #define HIDPP_QUIRK_WIRELESS_STATUS BIT(29) /* These are just aliases for now */ #define HIDPP_QUIRK_KBD_SCROLL_WHEEL HIDPP_QUIRK_HIDPP_WHEELS #define
HIDPP_QUIRK_KBD_ZOOM_WHEEL HIDPP_QUIRK_HIDPP_WHEELS /* Convenience constant to check for any high-res support. */ #define HIDPP_CAPABILITY_HI_RES_SCROLL (HIDPP_CAPABILITY_HIDPP10_FAST_SCROLL | \ HIDPP_CAPABILITY_HIDPP20_HI_RES_SCROLL | \ HIDPP_CAPABILITY_HIDPP20_HI_RES_WHEEL) #define HIDPP_CAPABILITY_HIDPP10_BATTERY BIT(0) #define HIDPP_CAPABILITY_HIDPP20_BATTERY BIT(1) #define HIDPP_CAPABILITY_BATTERY_MILEAGE BIT(2) #define HIDPP_CAPABILITY_BATTERY_LEVEL_STATUS BIT(3) #define HIDPP_CAPABILITY_BATTERY_VOLTAGE BIT(4) #define HIDPP_CAPABILITY_BATTERY_PERCENTAGE BIT(5) #define HIDPP_CAPABILITY_UNIFIED_BATTERY BIT(6) #define HIDPP_CAPABILITY_HIDPP20_HI_RES_WHEEL BIT(7) #define HIDPP_CAPABILITY_HIDPP20_HI_RES_SCROLL BIT(8) #define HIDPP_CAPABILITY_HIDPP10_FAST_SCROLL BIT(9) #define HIDPP_CAPABILITY_ADC_MEASUREMENT BIT(10) #define lg_map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, EV_KEY, (c)) /* * There are two hidpp protocols in use: the first version, hidpp10, is known * as the register access protocol or RAP; the second version, hidpp20, is known * as the feature access protocol or FAP. * * Most older devices (including the Unifying USB receiver) use the RAP protocol, * whereas most newer devices use the FAP protocol. Both protocols are * compatible with the underlying transport, which could be USB, Unifying, or * Bluetooth. The message lengths are defined by the HID vendor-specific report * descriptor for the HIDPP_SHORT report type (total message length 7 bytes) and * the HIDPP_LONG report type (total message length 20 bytes). * * The RAP protocol uses both report types, whereas the FAP only uses HIDPP_LONG * messages. The Unifying receiver itself responds to RAP messages (device index * is 0xFF for the receiver), and all messages (short or long) with a device * index between 1 and 6 are passed untouched to the corresponding paired * Unifying device. * * The paired device can be RAP or FAP; it will receive the message untouched * from the Unifying receiver. */ struct fap { u8 feature_index; u8 funcindex_clientid; u8 params[HIDPP_REPORT_VERY_LONG_MAX_LENGTH - 4U]; }; struct rap { u8 sub_id; u8 reg_address; u8 params[HIDPP_REPORT_VERY_LONG_MAX_LENGTH - 4U]; }; struct hidpp_report { u8 report_id; u8 device_index; union { struct fap fap; struct rap rap; u8 rawbytes[sizeof(struct fap)]; }; } __packed; struct hidpp_battery { u8 feature_index; u8 solar_feature_index; u8 voltage_feature_index; u8 adc_measurement_feature_index; struct power_supply_desc desc; struct power_supply *ps; char name[64]; int status; int capacity; int level; int voltage; int charge_type; bool online; u8 supported_levels_1004; }; /** * struct hidpp_scroll_counter - Utility class for processing high-resolution * scroll events. * @wheel_multiplier: the scalar multiplier to be applied to each wheel event * @remainder: counts the number of high-resolution units moved since the last * low-resolution event (REL_WHEEL or REL_HWHEEL) was sent. Should * only be used by class methods.
* @direction: direction of last movement (1 or -1) * @last_time: last event time, used to reset remainder after inactivity */ struct hidpp_scroll_counter { int wheel_multiplier; int remainder; int direction; unsigned long long last_time; }; struct hidpp_device { struct hid_device *hid_dev; struct input_dev *input; struct mutex send_mutex; void *send_receive_buf; char *name; /* will never be NULL and should not be freed */ wait_queue_head_t wait; int very_long_report_length; bool answer_available; u8 protocol_major; u8 protocol_minor; void *private_data; struct work_struct work; struct kfifo delayed_work_fifo; struct input_dev *delayed_input; unsigned long quirks; unsigned long capabilities; u8 supported_reports; struct hidpp_battery battery; struct hidpp_scroll_counter vertical_wheel_counter; u8 wireless_feature_index; bool connected_once; }; /* HID++ 1.0 error codes */ #define HIDPP_ERROR 0x8f #define HIDPP_ERROR_SUCCESS 0x00 #define HIDPP_ERROR_INVALID_SUBID 0x01 #define HIDPP_ERROR_INVALID_ADRESS 0x02 #define HIDPP_ERROR_INVALID_VALUE 0x03 #define HIDPP_ERROR_CONNECT_FAIL 0x04 #define HIDPP_ERROR_TOO_MANY_DEVICES 0x05 #define HIDPP_ERROR_ALREADY_EXISTS 0x06 #define HIDPP_ERROR_BUSY 0x07 #define HIDPP_ERROR_UNKNOWN_DEVICE 0x08 #define HIDPP_ERROR_RESOURCE_ERROR 0x09 #define HIDPP_ERROR_REQUEST_UNAVAILABLE 0x0a #define HIDPP_ERROR_INVALID_PARAM_VALUE 0x0b #define HIDPP_ERROR_WRONG_PIN_CODE 0x0c /* HID++ 2.0 error codes */ #define HIDPP20_ERROR_NO_ERROR 0x00 #define HIDPP20_ERROR_UNKNOWN 0x01 #define HIDPP20_ERROR_INVALID_ARGS 0x02 #define HIDPP20_ERROR_OUT_OF_RANGE 0x03 #define HIDPP20_ERROR_HW_ERROR 0x04 #define HIDPP20_ERROR_NOT_ALLOWED 0x05 #define HIDPP20_ERROR_INVALID_FEATURE_INDEX 0x06 #define HIDPP20_ERROR_INVALID_FUNCTION_ID 0x07 #define HIDPP20_ERROR_BUSY 0x08 #define HIDPP20_ERROR_UNSUPPORTED 0x09 #define HIDPP20_ERROR 0xff static int __hidpp_send_report(struct hid_device *hdev, struct hidpp_report *hidpp_report) { struct hidpp_device *hidpp = hid_get_drvdata(hdev); int fields_count, ret; switch (hidpp_report->report_id) { case REPORT_ID_HIDPP_SHORT: fields_count = HIDPP_REPORT_SHORT_LENGTH; break; case REPORT_ID_HIDPP_LONG: fields_count = HIDPP_REPORT_LONG_LENGTH; break; case REPORT_ID_HIDPP_VERY_LONG: fields_count = hidpp->very_long_report_length; break; default: return -ENODEV; } /* * set the device_index as the receiver, it will be overwritten by * hid_hw_request if needed */ hidpp_report->device_index = 0xff; if (hidpp->quirks & HIDPP_QUIRK_FORCE_OUTPUT_REPORTS) { ret = hid_hw_output_report(hdev, (u8 *)hidpp_report, fields_count); } else { ret = hid_hw_raw_request(hdev, hidpp_report->report_id, (u8 *)hidpp_report, fields_count, HID_OUTPUT_REPORT, HID_REQ_SET_REPORT); } return ret == fields_count ? 0 : -1; } /* * Effectively send the message to the device, waiting for its answer. 
* * Must be called with hidpp->send_mutex locked * * Same return protocol than hidpp_send_message_sync(): * - success on 0 * - negative error means transport error * - positive value means protocol error */ static int __do_hidpp_send_message_sync(struct hidpp_device *hidpp, struct hidpp_report *message, struct hidpp_report *response) { int ret; __must_hold(&hidpp->send_mutex); hidpp->send_receive_buf = response; hidpp->answer_available = false; /* * So that we can later validate the answer when it arrives * in hidpp_raw_event */ *response = *message; ret = __hidpp_send_report(hidpp->hid_dev, message); if (ret) { dbg_hid("__hidpp_send_report returned err: %d\n", ret); memset(response, 0, sizeof(struct hidpp_report)); return ret; } if (!wait_event_timeout(hidpp->wait, hidpp->answer_available, 5*HZ)) { dbg_hid("%s:timeout waiting for response\n", __func__); memset(response, 0, sizeof(struct hidpp_report)); return -ETIMEDOUT; } if (response->report_id == REPORT_ID_HIDPP_SHORT && response->rap.sub_id == HIDPP_ERROR) { ret = response->rap.params[1]; dbg_hid("%s:got hidpp error %02X\n", __func__, ret); return ret; } if ((response->report_id == REPORT_ID_HIDPP_LONG || response->report_id == REPORT_ID_HIDPP_VERY_LONG) && response->fap.feature_index == HIDPP20_ERROR) { ret = response->fap.params[1]; dbg_hid("%s:got hidpp 2.0 error %02X\n", __func__, ret); return ret; } return 0; } /* * hidpp_send_message_sync() returns 0 in case of success, and something else * in case of a failure. * * See __do_hidpp_send_message_sync() for a detailed explanation of the returned * value. */ static int hidpp_send_message_sync(struct hidpp_device *hidpp, struct hidpp_report *message, struct hidpp_report *response) { int ret; int max_retries = 3; mutex_lock(&hidpp->send_mutex); do { ret = __do_hidpp_send_message_sync(hidpp, message, response); if (ret != HIDPP20_ERROR_BUSY) break; dbg_hid("%s:got busy hidpp 2.0 error %02X, retrying\n", __func__, ret); } while (--max_retries); mutex_unlock(&hidpp->send_mutex); return ret; } /* * hidpp_send_fap_command_sync() returns 0 in case of success, and something else * in case of a failure. * * See __do_hidpp_send_message_sync() for a detailed explanation of the returned * value. */ static int hidpp_send_fap_command_sync(struct hidpp_device *hidpp, u8 feat_index, u8 funcindex_clientid, u8 *params, int param_count, struct hidpp_report *response) { struct hidpp_report *message; int ret; if (param_count > sizeof(message->fap.params)) { hid_dbg(hidpp->hid_dev, "Invalid number of parameters passed to command (%d != %llu)\n", param_count, (unsigned long long) sizeof(message->fap.params)); return -EINVAL; } message = kzalloc(sizeof(struct hidpp_report), GFP_KERNEL); if (!message) return -ENOMEM; if (param_count > (HIDPP_REPORT_LONG_LENGTH - 4)) message->report_id = REPORT_ID_HIDPP_VERY_LONG; else message->report_id = REPORT_ID_HIDPP_LONG; message->fap.feature_index = feat_index; message->fap.funcindex_clientid = funcindex_clientid | LINUX_KERNEL_SW_ID; memcpy(&message->fap.params, params, param_count); ret = hidpp_send_message_sync(hidpp, message, response); kfree(message); return ret; } /* * hidpp_send_rap_command_sync() returns 0 in case of success, and something else * in case of a failure. * * See __do_hidpp_send_message_sync() for a detailed explanation of the returned * value. 
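 *
 * As an illustration only (a hedged sketch that mirrors the register read
 * done later by hidpp10_query_battery_status()), reading a short HID++ 1.0
 * register looks roughly like:
 *
 *	ret = hidpp_send_rap_command_sync(hidpp, REPORT_ID_HIDPP_SHORT,
 *					  HIDPP_GET_REGISTER,
 *					  HIDPP_REG_BATTERY_STATUS,
 *					  NULL, 0, &response);
 *
 * where a zero return means the register contents are now in *response.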
*/ static int hidpp_send_rap_command_sync(struct hidpp_device *hidpp_dev, u8 report_id, u8 sub_id, u8 reg_address, u8 *params, int param_count, struct hidpp_report *response) { struct hidpp_report *message; int ret, max_count; /* Send as long report if short reports are not supported. */ if (report_id == REPORT_ID_HIDPP_SHORT && !(hidpp_dev->supported_reports & HIDPP_REPORT_SHORT_SUPPORTED)) report_id = REPORT_ID_HIDPP_LONG; switch (report_id) { case REPORT_ID_HIDPP_SHORT: max_count = HIDPP_REPORT_SHORT_LENGTH - 4; break; case REPORT_ID_HIDPP_LONG: max_count = HIDPP_REPORT_LONG_LENGTH - 4; break; case REPORT_ID_HIDPP_VERY_LONG: max_count = hidpp_dev->very_long_report_length - 4; break; default: return -EINVAL; } if (param_count > max_count) return -EINVAL; message = kzalloc(sizeof(struct hidpp_report), GFP_KERNEL); if (!message) return -ENOMEM; message->report_id = report_id; message->rap.sub_id = sub_id; message->rap.reg_address = reg_address; memcpy(&message->rap.params, params, param_count); ret = hidpp_send_message_sync(hidpp_dev, message, response); kfree(message); return ret; } static inline bool hidpp_match_answer(struct hidpp_report *question, struct hidpp_report *answer) { return (answer->fap.feature_index == question->fap.feature_index) && (answer->fap.funcindex_clientid == question->fap.funcindex_clientid); } static inline bool hidpp_match_error(struct hidpp_report *question, struct hidpp_report *answer) { return ((answer->rap.sub_id == HIDPP_ERROR) || (answer->fap.feature_index == HIDPP20_ERROR)) && (answer->fap.funcindex_clientid == question->fap.feature_index) && (answer->fap.params[0] == question->fap.funcindex_clientid); } static inline bool hidpp_report_is_connect_event(struct hidpp_device *hidpp, struct hidpp_report *report) { return (hidpp->wireless_feature_index && (report->fap.feature_index == hidpp->wireless_feature_index)) || ((report->report_id == REPORT_ID_HIDPP_SHORT) && (report->rap.sub_id == 0x41)); } /* * hidpp_prefix_name() prefixes the current given name with "Logitech ". */ static void hidpp_prefix_name(char **name, int name_length) { #define PREFIX_LENGTH 9 /* "Logitech " */ int new_length; char *new_name; if (name_length > PREFIX_LENGTH && strncmp(*name, "Logitech ", PREFIX_LENGTH) == 0) /* The prefix has is already in the name */ return; new_length = PREFIX_LENGTH + name_length; new_name = kzalloc(new_length, GFP_KERNEL); if (!new_name) return; snprintf(new_name, new_length, "Logitech %s", *name); kfree(*name); *name = new_name; } /* * Updates the USB wireless_status based on whether the headset * is turned on and reachable. */ static void hidpp_update_usb_wireless_status(struct hidpp_device *hidpp) { struct hid_device *hdev = hidpp->hid_dev; struct usb_interface *intf; if (!(hidpp->quirks & HIDPP_QUIRK_WIRELESS_STATUS)) return; if (!hid_is_usb(hdev)) return; intf = to_usb_interface(hdev->dev.parent); usb_set_wireless_status(intf, hidpp->battery.online ? USB_WIRELESS_STATUS_CONNECTED : USB_WIRELESS_STATUS_DISCONNECTED); } /** * hidpp_scroll_counter_handle_scroll() - Send high- and low-resolution scroll * events given a high-resolution wheel * movement. * @input_dev: Pointer to the input device * @counter: a hid_scroll_counter struct describing the wheel. * @hi_res_value: the movement of the wheel, in the mouse's high-resolution * units. * * Given a high-resolution movement, this function converts the movement into * fractions of 120 and emits high-resolution scroll events for the input * device. 
It also uses the multiplier from &struct hidpp_scroll_counter to * emit low-resolution scroll events when appropriate for * backwards-compatibility with userspace input libraries. */ static void hidpp_scroll_counter_handle_scroll(struct input_dev *input_dev, struct hidpp_scroll_counter *counter, int hi_res_value) { int low_res_value, remainder, direction; unsigned long long now, previous; hi_res_value = hi_res_value * 120/counter->wheel_multiplier; input_report_rel(input_dev, REL_WHEEL_HI_RES, hi_res_value); remainder = counter->remainder; direction = hi_res_value > 0 ? 1 : -1; now = sched_clock(); previous = counter->last_time; counter->last_time = now; /* * Reset the remainder after a period of inactivity or when the * direction changes. This prevents the REL_WHEEL emulation point * from sliding for devices that don't always provide the same * number of movements per detent. */ if (now - previous > 1000000000 || direction != counter->direction) remainder = 0; counter->direction = direction; remainder += hi_res_value; /* Some wheels will rest 7/8ths of a detent from the previous detent * after slow movement, so we want the threshold for low-res events to * be in the middle between two detents (e.g. after 4/8ths) as * opposed to on the detents themselves (8/8ths). */ if (abs(remainder) >= 60) { /* Add (or subtract) 1 because we want to trigger when the wheel * is half-way to the next detent (i.e. scroll 1 detent after a * 1/2 detent movement, 2 detents after a 1 1/2 detent movement, * etc.). */ low_res_value = remainder / 120; if (low_res_value == 0) low_res_value = (hi_res_value > 0 ? 1 : -1); input_report_rel(input_dev, REL_WHEEL, low_res_value); remainder -= low_res_value * 120; } counter->remainder = remainder; } /* -------------------------------------------------------------------------- */ /* HID++ 1.0 commands */ /* -------------------------------------------------------------------------- */ #define HIDPP_SET_REGISTER 0x80 #define HIDPP_GET_REGISTER 0x81 #define HIDPP_SET_LONG_REGISTER 0x82 #define HIDPP_GET_LONG_REGISTER 0x83 /** * hidpp10_set_register - Modify a HID++ 1.0 register. * @hidpp_dev: the device to set the register on. * @register_address: the address of the register to modify. * @byte: the byte of the register to modify. Should be less than 3. * @mask: mask of the bits to modify * @value: new values for the bits in mask * Return: 0 if successful, otherwise a nonzero error code (negative for a transport error, positive for a HID++ protocol error).
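* * The register is first read back with HIDPP_GET_REGISTER, the selected byte is updated under @mask, and all three bytes are then written with HIDPP_SET_REGISTER.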
*/ static int hidpp10_set_register(struct hidpp_device *hidpp_dev, u8 register_address, u8 byte, u8 mask, u8 value) { struct hidpp_report response; int ret; u8 params[3] = { 0 }; ret = hidpp_send_rap_command_sync(hidpp_dev, REPORT_ID_HIDPP_SHORT, HIDPP_GET_REGISTER, register_address, NULL, 0, &response); if (ret) return ret; memcpy(params, response.rap.params, 3); params[byte] &= ~mask; params[byte] |= value & mask; return hidpp_send_rap_command_sync(hidpp_dev, REPORT_ID_HIDPP_SHORT, HIDPP_SET_REGISTER, register_address, params, 3, &response); } #define HIDPP_REG_ENABLE_REPORTS 0x00 #define HIDPP_ENABLE_CONSUMER_REPORT BIT(0) #define HIDPP_ENABLE_WHEEL_REPORT BIT(2) #define HIDPP_ENABLE_MOUSE_EXTRA_BTN_REPORT BIT(3) #define HIDPP_ENABLE_BAT_REPORT BIT(4) #define HIDPP_ENABLE_HWHEEL_REPORT BIT(5) static int hidpp10_enable_battery_reporting(struct hidpp_device *hidpp_dev) { return hidpp10_set_register(hidpp_dev, HIDPP_REG_ENABLE_REPORTS, 0, HIDPP_ENABLE_BAT_REPORT, HIDPP_ENABLE_BAT_REPORT); } #define HIDPP_REG_FEATURES 0x01 #define HIDPP_ENABLE_SPECIAL_BUTTON_FUNC BIT(1) #define HIDPP_ENABLE_FAST_SCROLL BIT(6) /* On HID++ 1.0 devices, high-res scroll was called "scrolling acceleration". */ static int hidpp10_enable_scrolling_acceleration(struct hidpp_device *hidpp_dev) { return hidpp10_set_register(hidpp_dev, HIDPP_REG_FEATURES, 0, HIDPP_ENABLE_FAST_SCROLL, HIDPP_ENABLE_FAST_SCROLL); } #define HIDPP_REG_BATTERY_STATUS 0x07 static int hidpp10_battery_status_map_level(u8 param) { int level; switch (param) { case 1 ... 2: level = POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL; break; case 3 ... 4: level = POWER_SUPPLY_CAPACITY_LEVEL_LOW; break; case 5 ... 6: level = POWER_SUPPLY_CAPACITY_LEVEL_NORMAL; break; case 7: level = POWER_SUPPLY_CAPACITY_LEVEL_HIGH; break; default: level = POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN; } return level; } static int hidpp10_battery_status_map_status(u8 param) { int status; switch (param) { case 0x00: /* discharging (in use) */ status = POWER_SUPPLY_STATUS_DISCHARGING; break; case 0x21: /* (standard) charging */ case 0x24: /* fast charging */ case 0x25: /* slow charging */ status = POWER_SUPPLY_STATUS_CHARGING; break; case 0x26: /* topping charge */ case 0x22: /* charge complete */ status = POWER_SUPPLY_STATUS_FULL; break; case 0x20: /* unknown */ status = POWER_SUPPLY_STATUS_UNKNOWN; break; /* * 0x01...0x1F = reserved (not charging) * 0x23 = charging error * 0x27..0xff = reserved */ default: status = POWER_SUPPLY_STATUS_NOT_CHARGING; break; } return status; } static int hidpp10_query_battery_status(struct hidpp_device *hidpp) { struct hidpp_report response; int ret, status; ret = hidpp_send_rap_command_sync(hidpp, REPORT_ID_HIDPP_SHORT, HIDPP_GET_REGISTER, HIDPP_REG_BATTERY_STATUS, NULL, 0, &response); if (ret) return ret; hidpp->battery.level = hidpp10_battery_status_map_level(response.rap.params[0]); status = hidpp10_battery_status_map_status(response.rap.params[1]); hidpp->battery.status = status; /* the capacity is only available when discharging or full */ hidpp->battery.online = status == POWER_SUPPLY_STATUS_DISCHARGING || status == POWER_SUPPLY_STATUS_FULL; return 0; } #define HIDPP_REG_BATTERY_MILEAGE 0x0D static int hidpp10_battery_mileage_map_status(u8 param) { int status; switch (param >> 6) { case 0x00: /* discharging (in use) */ status = POWER_SUPPLY_STATUS_DISCHARGING; break; case 0x01: /* charging */ status = POWER_SUPPLY_STATUS_CHARGING; break; case 0x02: /* charge complete */ status = POWER_SUPPLY_STATUS_FULL; break; /* * 0x03 = charging error */ default: 
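/* 0x03 (charging error) is surfaced as not charging */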
status = POWER_SUPPLY_STATUS_NOT_CHARGING; break; } return status; } static int hidpp10_query_battery_mileage(struct hidpp_device *hidpp) { struct hidpp_report response; int ret, status; ret = hidpp_send_rap_command_sync(hidpp, REPORT_ID_HIDPP_SHORT, HIDPP_GET_REGISTER, HIDPP_REG_BATTERY_MILEAGE, NULL, 0, &response); if (ret) return ret; hidpp->battery.capacity = response.rap.params[0]; status = hidpp10_battery_mileage_map_status(response.rap.params[2]); hidpp->battery.status = status; /* the capacity is only available when discharging or full */ hidpp->battery.online = status == POWER_SUPPLY_STATUS_DISCHARGING || status == POWER_SUPPLY_STATUS_FULL; return 0; } static int hidpp10_battery_event(struct hidpp_device *hidpp, u8 *data, int size) { struct hidpp_report *report = (struct hidpp_report *)data; int status, capacity, level; bool changed; if (report->report_id != REPORT_ID_HIDPP_SHORT) return 0; switch (report->rap.sub_id) { case HIDPP_REG_BATTERY_STATUS: capacity = hidpp->battery.capacity; level = hidpp10_battery_status_map_level(report->rawbytes[1]); status = hidpp10_battery_status_map_status(report->rawbytes[2]); break; case HIDPP_REG_BATTERY_MILEAGE: capacity = report->rap.params[0]; level = hidpp->battery.level; status = hidpp10_battery_mileage_map_status(report->rawbytes[3]); break; default: return 0; } changed = capacity != hidpp->battery.capacity || level != hidpp->battery.level || status != hidpp->battery.status; /* the capacity is only available when discharging or full */ hidpp->battery.online = status == POWER_SUPPLY_STATUS_DISCHARGING || status == POWER_SUPPLY_STATUS_FULL; if (changed) { hidpp->battery.level = level; hidpp->battery.status = status; if (hidpp->battery.ps) power_supply_changed(hidpp->battery.ps); } return 0; } #define HIDPP_REG_PAIRING_INFORMATION 0xB5 #define HIDPP_EXTENDED_PAIRING 0x30 #define HIDPP_DEVICE_NAME 0x40 static char *hidpp_unifying_get_name(struct hidpp_device *hidpp_dev) { struct hidpp_report response; int ret; u8 params[1] = { HIDPP_DEVICE_NAME }; char *name; int len; ret = hidpp_send_rap_command_sync(hidpp_dev, REPORT_ID_HIDPP_SHORT, HIDPP_GET_LONG_REGISTER, HIDPP_REG_PAIRING_INFORMATION, params, 1, &response); if (ret) return NULL; len = response.rap.params[1]; if (2 + len > sizeof(response.rap.params)) return NULL; if (len < 4) /* logitech devices are usually at least Xddd */ return NULL; name = kzalloc(len + 1, GFP_KERNEL); if (!name) return NULL; memcpy(name, &response.rap.params[2], len); /* include the terminating '\0' */ hidpp_prefix_name(&name, len + 1); return name; } static int hidpp_unifying_get_serial(struct hidpp_device *hidpp, u32 *serial) { struct hidpp_report response; int ret; u8 params[1] = { HIDPP_EXTENDED_PAIRING }; ret = hidpp_send_rap_command_sync(hidpp, REPORT_ID_HIDPP_SHORT, HIDPP_GET_LONG_REGISTER, HIDPP_REG_PAIRING_INFORMATION, params, 1, &response); if (ret) return ret; /* * We don't care about LE or BE, we will output it as a string * with %4phD, so we need to keep the order. 
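* %4phD later prints those four bytes in memory order as dash-separated hex (e.g. "12-34-56-78"), so the serial is only ever used as an opaque identifier string.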
*/ *serial = *((u32 *)&response.rap.params[1]); return 0; } static int hidpp_unifying_init(struct hidpp_device *hidpp) { struct hid_device *hdev = hidpp->hid_dev; const char *name; u32 serial; int ret; ret = hidpp_unifying_get_serial(hidpp, &serial); if (ret) return ret; snprintf(hdev->uniq, sizeof(hdev->uniq), "%4phD", &serial); dbg_hid("HID++ Unifying: Got serial: %s\n", hdev->uniq); name = hidpp_unifying_get_name(hidpp); if (!name) return -EIO; snprintf(hdev->name, sizeof(hdev->name), "%s", name); dbg_hid("HID++ Unifying: Got name: %s\n", name); kfree(name); return 0; } /* -------------------------------------------------------------------------- */ /* 0x0000: Root */ /* -------------------------------------------------------------------------- */ #define HIDPP_PAGE_ROOT 0x0000 #define HIDPP_PAGE_ROOT_IDX 0x00 #define CMD_ROOT_GET_FEATURE 0x00 #define CMD_ROOT_GET_PROTOCOL_VERSION 0x10 static int hidpp_root_get_feature(struct hidpp_device *hidpp, u16 feature, u8 *feature_index) { struct hidpp_report response; int ret; u8 params[2] = { feature >> 8, feature & 0x00FF }; ret = hidpp_send_fap_command_sync(hidpp, HIDPP_PAGE_ROOT_IDX, CMD_ROOT_GET_FEATURE, params, 2, &response); if (ret) return ret; if (response.fap.params[0] == 0) return -ENOENT; *feature_index = response.fap.params[0]; return ret; } static int hidpp_root_get_protocol_version(struct hidpp_device *hidpp) { const u8 ping_byte = 0x5a; u8 ping_data[3] = { 0, 0, ping_byte }; struct hidpp_report response; int ret; ret = hidpp_send_rap_command_sync(hidpp, REPORT_ID_HIDPP_SHORT, HIDPP_PAGE_ROOT_IDX, CMD_ROOT_GET_PROTOCOL_VERSION | LINUX_KERNEL_SW_ID, ping_data, sizeof(ping_data), &response); if (ret == HIDPP_ERROR_INVALID_SUBID) { hidpp->protocol_major = 1; hidpp->protocol_minor = 0; goto print_version; } /* the device might not be connected */ if (ret == HIDPP_ERROR_RESOURCE_ERROR) return -EIO; if (ret > 0) { hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n", __func__, ret); return -EPROTO; } if (ret) return ret; if (response.rap.params[2] != ping_byte) { hid_err(hidpp->hid_dev, "%s: ping mismatch 0x%02x != 0x%02x\n", __func__, response.rap.params[2], ping_byte); return -EPROTO; } hidpp->protocol_major = response.rap.params[0]; hidpp->protocol_minor = response.rap.params[1]; print_version: if (!hidpp->connected_once) { hid_info(hidpp->hid_dev, "HID++ %u.%u device connected.\n", hidpp->protocol_major, hidpp->protocol_minor); hidpp->connected_once = true; } else hid_dbg(hidpp->hid_dev, "HID++ %u.%u device connected.\n", hidpp->protocol_major, hidpp->protocol_minor); return 0; } /* -------------------------------------------------------------------------- */ /* 0x0003: Device Information */ /* -------------------------------------------------------------------------- */ #define HIDPP_PAGE_DEVICE_INFORMATION 0x0003 #define CMD_GET_DEVICE_INFO 0x00 static int hidpp_get_serial(struct hidpp_device *hidpp, u32 *serial) { struct hidpp_report response; u8 feature_index; int ret; ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_DEVICE_INFORMATION, &feature_index); if (ret) return ret; ret = hidpp_send_fap_command_sync(hidpp, feature_index, CMD_GET_DEVICE_INFO, NULL, 0, &response); if (ret) return ret; /* See hidpp_unifying_get_serial() */ *serial = *((u32 *)&response.rap.params[1]); return 0; } static int hidpp_serial_init(struct hidpp_device *hidpp) { struct hid_device *hdev = hidpp->hid_dev; u32 serial; int ret; ret = hidpp_get_serial(hidpp, &serial); if (ret) return ret; snprintf(hdev->uniq, sizeof(hdev->uniq), "%4phD", 
&serial); dbg_hid("HID++ DeviceInformation: Got serial: %s\n", hdev->uniq); return 0; } /* -------------------------------------------------------------------------- */ /* 0x0005: GetDeviceNameType */ /* -------------------------------------------------------------------------- */ #define HIDPP_PAGE_GET_DEVICE_NAME_TYPE 0x0005 #define CMD_GET_DEVICE_NAME_TYPE_GET_COUNT 0x00 #define CMD_GET_DEVICE_NAME_TYPE_GET_DEVICE_NAME 0x10 #define CMD_GET_DEVICE_NAME_TYPE_GET_TYPE 0x20 static int hidpp_devicenametype_get_count(struct hidpp_device *hidpp, u8 feature_index, u8 *nameLength) { struct hidpp_report response; int ret; ret = hidpp_send_fap_command_sync(hidpp, feature_index, CMD_GET_DEVICE_NAME_TYPE_GET_COUNT, NULL, 0, &response); if (ret > 0) { hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n", __func__, ret); return -EPROTO; } if (ret) return ret; *nameLength = response.fap.params[0]; return ret; } static int hidpp_devicenametype_get_device_name(struct hidpp_device *hidpp, u8 feature_index, u8 char_index, char *device_name, int len_buf) { struct hidpp_report response; int ret, i; int count; ret = hidpp_send_fap_command_sync(hidpp, feature_index, CMD_GET_DEVICE_NAME_TYPE_GET_DEVICE_NAME, &char_index, 1, &response); if (ret > 0) { hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n", __func__, ret); return -EPROTO; } if (ret) return ret; switch (response.report_id) { case REPORT_ID_HIDPP_VERY_LONG: count = hidpp->very_long_report_length - 4; break; case REPORT_ID_HIDPP_LONG: count = HIDPP_REPORT_LONG_LENGTH - 4; break; case REPORT_ID_HIDPP_SHORT: count = HIDPP_REPORT_SHORT_LENGTH - 4; break; default: return -EPROTO; } if (len_buf < count) count = len_buf; for (i = 0; i < count; i++) device_name[i] = response.fap.params[i]; return count; } static char *hidpp_get_device_name(struct hidpp_device *hidpp) { u8 feature_index; u8 __name_length; char *name; unsigned index = 0; int ret; ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_GET_DEVICE_NAME_TYPE, &feature_index); if (ret) return NULL; ret = hidpp_devicenametype_get_count(hidpp, feature_index, &__name_length); if (ret) return NULL; name = kzalloc(__name_length + 1, GFP_KERNEL); if (!name) return NULL; while (index < __name_length) { ret = hidpp_devicenametype_get_device_name(hidpp, feature_index, index, name + index, __name_length - index); if (ret <= 0) { kfree(name); return NULL; } index += ret; } /* include the terminating '\0' */ hidpp_prefix_name(&name, __name_length + 1); return name; } /* -------------------------------------------------------------------------- */ /* 0x1000: Battery level status */ /* -------------------------------------------------------------------------- */ #define HIDPP_PAGE_BATTERY_LEVEL_STATUS 0x1000 #define CMD_BATTERY_LEVEL_STATUS_GET_BATTERY_LEVEL_STATUS 0x00 #define CMD_BATTERY_LEVEL_STATUS_GET_BATTERY_CAPABILITY 0x10 #define EVENT_BATTERY_LEVEL_STATUS_BROADCAST 0x00 #define FLAG_BATTERY_LEVEL_DISABLE_OSD BIT(0) #define FLAG_BATTERY_LEVEL_MILEAGE BIT(1) #define FLAG_BATTERY_LEVEL_RECHARGEABLE BIT(2) static int hidpp_map_battery_level(int capacity) { if (capacity < 11) return POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL; /* * The spec says this should be < 31 but some devices report 30 * with brand new batteries and Windows reports 30 as "Good". 
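* As a result, a report of exactly 30 falls through to the next range and maps to NORMAL rather than LOW.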
*/ else if (capacity < 30) return POWER_SUPPLY_CAPACITY_LEVEL_LOW; else if (capacity < 81) return POWER_SUPPLY_CAPACITY_LEVEL_NORMAL; return POWER_SUPPLY_CAPACITY_LEVEL_FULL; } static int hidpp20_batterylevel_map_status_capacity(u8 data[3], int *capacity, int *next_capacity, int *level) { int status; *capacity = data[0]; *next_capacity = data[1]; *level = POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN; /* When discharging, we can rely on the device reported capacity. * For all other states the device reports 0 (unknown). */ switch (data[2]) { case 0: /* discharging (in use) */ status = POWER_SUPPLY_STATUS_DISCHARGING; *level = hidpp_map_battery_level(*capacity); break; case 1: /* recharging */ status = POWER_SUPPLY_STATUS_CHARGING; break; case 2: /* charge in final stage */ status = POWER_SUPPLY_STATUS_CHARGING; break; case 3: /* charge complete */ status = POWER_SUPPLY_STATUS_FULL; *level = POWER_SUPPLY_CAPACITY_LEVEL_FULL; *capacity = 100; break; case 4: /* recharging below optimal speed */ status = POWER_SUPPLY_STATUS_CHARGING; break; /* 5 = invalid battery type 6 = thermal error 7 = other charging error */ default: status = POWER_SUPPLY_STATUS_NOT_CHARGING; break; } return status; } static int hidpp20_batterylevel_get_battery_capacity(struct hidpp_device *hidpp, u8 feature_index, int *status, int *capacity, int *next_capacity, int *level) { struct hidpp_report response; int ret; u8 *params = (u8 *)response.fap.params; ret = hidpp_send_fap_command_sync(hidpp, feature_index, CMD_BATTERY_LEVEL_STATUS_GET_BATTERY_LEVEL_STATUS, NULL, 0, &response); /* Ignore these intermittent errors */ if (ret == HIDPP_ERROR_RESOURCE_ERROR) return -EIO; if (ret > 0) { hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n", __func__, ret); return -EPROTO; } if (ret) return ret; *status = hidpp20_batterylevel_map_status_capacity(params, capacity, next_capacity, level); return 0; } static int hidpp20_batterylevel_get_battery_info(struct hidpp_device *hidpp, u8 feature_index) { struct hidpp_report response; int ret; u8 *params = (u8 *)response.fap.params; unsigned int level_count, flags; ret = hidpp_send_fap_command_sync(hidpp, feature_index, CMD_BATTERY_LEVEL_STATUS_GET_BATTERY_CAPABILITY, NULL, 0, &response); if (ret > 0) { hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n", __func__, ret); return -EPROTO; } if (ret) return ret; level_count = params[0]; flags = params[1]; if (level_count < 10 || !(flags & FLAG_BATTERY_LEVEL_MILEAGE)) hidpp->capabilities |= HIDPP_CAPABILITY_BATTERY_LEVEL_STATUS; else hidpp->capabilities |= HIDPP_CAPABILITY_BATTERY_MILEAGE; return 0; } static int hidpp20_query_battery_info_1000(struct hidpp_device *hidpp) { int ret; int status, capacity, next_capacity, level; if (hidpp->battery.feature_index == 0xff) { ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_BATTERY_LEVEL_STATUS, &hidpp->battery.feature_index); if (ret) return ret; } ret = hidpp20_batterylevel_get_battery_capacity(hidpp, hidpp->battery.feature_index, &status, &capacity, &next_capacity, &level); if (ret) return ret; ret = hidpp20_batterylevel_get_battery_info(hidpp, hidpp->battery.feature_index); if (ret) return ret; hidpp->battery.status = status; hidpp->battery.capacity = capacity; hidpp->battery.level = level; /* the capacity is only available when discharging or full */ hidpp->battery.online = status == POWER_SUPPLY_STATUS_DISCHARGING || status == POWER_SUPPLY_STATUS_FULL; return 0; } static int hidpp20_battery_event_1000(struct hidpp_device *hidpp, u8 *data, int size) { struct hidpp_report *report = (struct 
hidpp_report *)data; int status, capacity, next_capacity, level; bool changed; if (report->fap.feature_index != hidpp->battery.feature_index || report->fap.funcindex_clientid != EVENT_BATTERY_LEVEL_STATUS_BROADCAST) return 0; status = hidpp20_batterylevel_map_status_capacity(report->fap.params, &capacity, &next_capacity, &level); /* the capacity is only available when discharging or full */ hidpp->battery.online = status == POWER_SUPPLY_STATUS_DISCHARGING || status == POWER_SUPPLY_STATUS_FULL; changed = capacity != hidpp->battery.capacity || level != hidpp->battery.level || status != hidpp->battery.status; if (changed) { hidpp->battery.level = level; hidpp->battery.capacity = capacity; hidpp->battery.status = status; if (hidpp->battery.ps) power_supply_changed(hidpp->battery.ps); } return 0; } /* -------------------------------------------------------------------------- */ /* 0x1001: Battery voltage */ /* -------------------------------------------------------------------------- */ #define HIDPP_PAGE_BATTERY_VOLTAGE 0x1001 #define CMD_BATTERY_VOLTAGE_GET_BATTERY_VOLTAGE 0x00 #define EVENT_BATTERY_VOLTAGE_STATUS_BROADCAST 0x00 static int hidpp20_battery_map_status_voltage(u8 data[3], int *voltage, int *level, int *charge_type) { int status; long flags = (long) data[2]; *level = POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN; if (flags & 0x80) switch (flags & 0x07) { case 0: status = POWER_SUPPLY_STATUS_CHARGING; break; case 1: status = POWER_SUPPLY_STATUS_FULL; *level = POWER_SUPPLY_CAPACITY_LEVEL_FULL; break; case 2: status = POWER_SUPPLY_STATUS_NOT_CHARGING; break; default: status = POWER_SUPPLY_STATUS_UNKNOWN; break; } else status = POWER_SUPPLY_STATUS_DISCHARGING; *charge_type = POWER_SUPPLY_CHARGE_TYPE_STANDARD; if (test_bit(3, &flags)) { *charge_type = POWER_SUPPLY_CHARGE_TYPE_FAST; } if (test_bit(4, &flags)) { *charge_type = POWER_SUPPLY_CHARGE_TYPE_TRICKLE; } if (test_bit(5, &flags)) { *level = POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL; } *voltage = get_unaligned_be16(data); return status; } static int hidpp20_battery_get_battery_voltage(struct hidpp_device *hidpp, u8 feature_index, int *status, int *voltage, int *level, int *charge_type) { struct hidpp_report response; int ret; u8 *params = (u8 *)response.fap.params; ret = hidpp_send_fap_command_sync(hidpp, feature_index, CMD_BATTERY_VOLTAGE_GET_BATTERY_VOLTAGE, NULL, 0, &response); if (ret > 0) { hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n", __func__, ret); return -EPROTO; } if (ret) return ret; hidpp->capabilities |= HIDPP_CAPABILITY_BATTERY_VOLTAGE; *status = hidpp20_battery_map_status_voltage(params, voltage, level, charge_type); return 0; } static int hidpp20_map_battery_capacity(struct hid_device *hid_dev, int voltage) { /* NB: This voltage curve doesn't necessarily map perfectly to all * devices that implement the BATTERY_VOLTAGE feature. This is because * there are a few devices that use different battery technology. 
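* * The table below is ordered so that voltages[0] (4186 mV) reads back as 100% capacity and voltages[99] (3537 mV) as 1%; anything below the last entry reads as 0%.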
*/ static const int voltages[100] = { 4186, 4156, 4143, 4133, 4122, 4113, 4103, 4094, 4086, 4075, 4067, 4059, 4051, 4043, 4035, 4027, 4019, 4011, 4003, 3997, 3989, 3983, 3976, 3969, 3961, 3955, 3949, 3942, 3935, 3929, 3922, 3916, 3909, 3902, 3896, 3890, 3883, 3877, 3870, 3865, 3859, 3853, 3848, 3842, 3837, 3833, 3828, 3824, 3819, 3815, 3811, 3808, 3804, 3800, 3797, 3793, 3790, 3787, 3784, 3781, 3778, 3775, 3772, 3770, 3767, 3764, 3762, 3759, 3757, 3754, 3751, 3748, 3744, 3741, 3737, 3734, 3730, 3726, 3724, 3720, 3717, 3714, 3710, 3706, 3702, 3697, 3693, 3688, 3683, 3677, 3671, 3666, 3662, 3658, 3654, 3646, 3633, 3612, 3579, 3537 }; int i; if (unlikely(voltage < 3500 || voltage >= 5000)) hid_warn_once(hid_dev, "%s: possibly using the wrong voltage curve\n", __func__); for (i = 0; i < ARRAY_SIZE(voltages); i++) { if (voltage >= voltages[i]) return ARRAY_SIZE(voltages) - i; } return 0; } static int hidpp20_query_battery_voltage_info(struct hidpp_device *hidpp) { int ret; int status, voltage, level, charge_type; if (hidpp->battery.voltage_feature_index == 0xff) { ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_BATTERY_VOLTAGE, &hidpp->battery.voltage_feature_index); if (ret) return ret; } ret = hidpp20_battery_get_battery_voltage(hidpp, hidpp->battery.voltage_feature_index, &status, &voltage, &level, &charge_type); if (ret) return ret; hidpp->battery.status = status; hidpp->battery.voltage = voltage; hidpp->battery.capacity = hidpp20_map_battery_capacity(hidpp->hid_dev, voltage); hidpp->battery.level = level; hidpp->battery.charge_type = charge_type; hidpp->battery.online = status != POWER_SUPPLY_STATUS_NOT_CHARGING; return 0; } static int hidpp20_battery_voltage_event(struct hidpp_device *hidpp, u8 *data, int size) { struct hidpp_report *report = (struct hidpp_report *)data; int status, voltage, level, charge_type; if (report->fap.feature_index != hidpp->battery.voltage_feature_index || report->fap.funcindex_clientid != EVENT_BATTERY_VOLTAGE_STATUS_BROADCAST) return 0; status = hidpp20_battery_map_status_voltage(report->fap.params, &voltage, &level, &charge_type); hidpp->battery.online = status != POWER_SUPPLY_STATUS_NOT_CHARGING; if (voltage != hidpp->battery.voltage || status != hidpp->battery.status) { hidpp->battery.voltage = voltage; hidpp->battery.capacity = hidpp20_map_battery_capacity(hidpp->hid_dev, voltage); hidpp->battery.status = status; hidpp->battery.level = level; hidpp->battery.charge_type = charge_type; if (hidpp->battery.ps) power_supply_changed(hidpp->battery.ps); } return 0; } /* -------------------------------------------------------------------------- */ /* 0x1004: Unified battery */ /* -------------------------------------------------------------------------- */ #define HIDPP_PAGE_UNIFIED_BATTERY 0x1004 #define CMD_UNIFIED_BATTERY_GET_CAPABILITIES 0x00 #define CMD_UNIFIED_BATTERY_GET_STATUS 0x10 #define EVENT_UNIFIED_BATTERY_STATUS_EVENT 0x00 #define FLAG_UNIFIED_BATTERY_LEVEL_CRITICAL BIT(0) #define FLAG_UNIFIED_BATTERY_LEVEL_LOW BIT(1) #define FLAG_UNIFIED_BATTERY_LEVEL_GOOD BIT(2) #define FLAG_UNIFIED_BATTERY_LEVEL_FULL BIT(3) #define FLAG_UNIFIED_BATTERY_FLAGS_RECHARGEABLE BIT(0) #define FLAG_UNIFIED_BATTERY_FLAGS_STATE_OF_CHARGE BIT(1) static int hidpp20_unifiedbattery_get_capabilities(struct hidpp_device *hidpp, u8 feature_index) { struct hidpp_report response; int ret; u8 *params = (u8 *)response.fap.params; if (hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_LEVEL_STATUS || hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_PERCENTAGE) { /* we have already set the 
device capabilities, so let's skip */ return 0; } ret = hidpp_send_fap_command_sync(hidpp, feature_index, CMD_UNIFIED_BATTERY_GET_CAPABILITIES, NULL, 0, &response); /* Ignore these intermittent errors */ if (ret == HIDPP_ERROR_RESOURCE_ERROR) return -EIO; if (ret > 0) { hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n", __func__, ret); return -EPROTO; } if (ret) return ret; /* * If the device supports state of charge (battery percentage) we won't * export the battery level information. There are 4 possible battery * levels and they are all optional; this means that the device might * not support any of them, so we are just better off with the battery * percentage. */ if (params[1] & FLAG_UNIFIED_BATTERY_FLAGS_STATE_OF_CHARGE) { hidpp->capabilities |= HIDPP_CAPABILITY_BATTERY_PERCENTAGE; hidpp->battery.supported_levels_1004 = 0; } else { hidpp->capabilities |= HIDPP_CAPABILITY_BATTERY_LEVEL_STATUS; hidpp->battery.supported_levels_1004 = params[0]; } return 0; } static int hidpp20_unifiedbattery_map_status(struct hidpp_device *hidpp, u8 charging_status, u8 external_power_status) { int status; switch (charging_status) { case 0: /* discharging */ status = POWER_SUPPLY_STATUS_DISCHARGING; break; case 1: /* charging */ case 2: /* charging slow */ status = POWER_SUPPLY_STATUS_CHARGING; break; case 3: /* complete */ status = POWER_SUPPLY_STATUS_FULL; break; case 4: /* error */ status = POWER_SUPPLY_STATUS_NOT_CHARGING; hid_info(hidpp->hid_dev, "%s: charging error", hidpp->name); break; default: status = POWER_SUPPLY_STATUS_NOT_CHARGING; break; } return status; } static int hidpp20_unifiedbattery_map_level(struct hidpp_device *hidpp, u8 battery_level) { /* clear unsupported level bits */ battery_level &= hidpp->battery.supported_levels_1004; if (battery_level & FLAG_UNIFIED_BATTERY_LEVEL_FULL) return POWER_SUPPLY_CAPACITY_LEVEL_FULL; else if (battery_level & FLAG_UNIFIED_BATTERY_LEVEL_GOOD) return POWER_SUPPLY_CAPACITY_LEVEL_NORMAL; else if (battery_level & FLAG_UNIFIED_BATTERY_LEVEL_LOW) return POWER_SUPPLY_CAPACITY_LEVEL_LOW; else if (battery_level & FLAG_UNIFIED_BATTERY_LEVEL_CRITICAL) return POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL; return POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN; } static int hidpp20_unifiedbattery_get_status(struct hidpp_device *hidpp, u8 feature_index, u8 *state_of_charge, int *status, int *level) { struct hidpp_report response; int ret; u8 *params = (u8 *)response.fap.params; ret = hidpp_send_fap_command_sync(hidpp, feature_index, CMD_UNIFIED_BATTERY_GET_STATUS, NULL, 0, &response); /* Ignore these intermittent errors */ if (ret == HIDPP_ERROR_RESOURCE_ERROR) return -EIO; if (ret > 0) { hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n", __func__, ret); return -EPROTO; } if (ret) return ret; *state_of_charge = params[0]; *status = hidpp20_unifiedbattery_map_status(hidpp, params[2], params[3]); *level = hidpp20_unifiedbattery_map_level(hidpp, params[1]); return 0; } static int hidpp20_query_battery_info_1004(struct hidpp_device *hidpp) { int ret; u8 state_of_charge; int status, level; if (hidpp->battery.feature_index == 0xff) { ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_UNIFIED_BATTERY, &hidpp->battery.feature_index); if (ret) return ret; } ret = hidpp20_unifiedbattery_get_capabilities(hidpp, hidpp->battery.feature_index); if (ret) return ret; ret = hidpp20_unifiedbattery_get_status(hidpp, hidpp->battery.feature_index, &state_of_charge, &status, &level); if (ret) return ret; hidpp->capabilities |= HIDPP_CAPABILITY_UNIFIED_BATTERY; hidpp->battery.capacity =
state_of_charge; hidpp->battery.status = status; hidpp->battery.level = level; hidpp->battery.online = true; return 0; } static int hidpp20_battery_event_1004(struct hidpp_device *hidpp, u8 *data, int size) { struct hidpp_report *report = (struct hidpp_report *)data; u8 *params = (u8 *)report->fap.params; int state_of_charge, status, level; bool changed; if (report->fap.feature_index != hidpp->battery.feature_index || report->fap.funcindex_clientid != EVENT_UNIFIED_BATTERY_STATUS_EVENT) return 0; state_of_charge = params[0]; status = hidpp20_unifiedbattery_map_status(hidpp, params[2], params[3]); level = hidpp20_unifiedbattery_map_level(hidpp, params[1]); changed = status != hidpp->battery.status || (state_of_charge != hidpp->battery.capacity && hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_PERCENTAGE) || (level != hidpp->battery.level && hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_LEVEL_STATUS); if (changed) { hidpp->battery.capacity = state_of_charge; hidpp->battery.status = status; hidpp->battery.level = level; if (hidpp->battery.ps) power_supply_changed(hidpp->battery.ps); } return 0; } /* -------------------------------------------------------------------------- */ /* Battery feature helpers */ /* -------------------------------------------------------------------------- */ static enum power_supply_property hidpp_battery_props[] = { POWER_SUPPLY_PROP_ONLINE, POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_SCOPE, POWER_SUPPLY_PROP_MODEL_NAME, POWER_SUPPLY_PROP_MANUFACTURER, POWER_SUPPLY_PROP_SERIAL_NUMBER, 0, /* placeholder for POWER_SUPPLY_PROP_CAPACITY, */ 0, /* placeholder for POWER_SUPPLY_PROP_CAPACITY_LEVEL, */ 0, /* placeholder for POWER_SUPPLY_PROP_VOLTAGE_NOW, */ }; static int hidpp_battery_get_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) { struct hidpp_device *hidpp = power_supply_get_drvdata(psy); int ret = 0; switch(psp) { case POWER_SUPPLY_PROP_STATUS: val->intval = hidpp->battery.status; break; case POWER_SUPPLY_PROP_CAPACITY: val->intval = hidpp->battery.capacity; break; case POWER_SUPPLY_PROP_CAPACITY_LEVEL: val->intval = hidpp->battery.level; break; case POWER_SUPPLY_PROP_SCOPE: val->intval = POWER_SUPPLY_SCOPE_DEVICE; break; case POWER_SUPPLY_PROP_ONLINE: val->intval = hidpp->battery.online; break; case POWER_SUPPLY_PROP_MODEL_NAME: if (!strncmp(hidpp->name, "Logitech ", 9)) val->strval = hidpp->name + 9; else val->strval = hidpp->name; break; case POWER_SUPPLY_PROP_MANUFACTURER: val->strval = "Logitech"; break; case POWER_SUPPLY_PROP_SERIAL_NUMBER: val->strval = hidpp->hid_dev->uniq; break; case POWER_SUPPLY_PROP_VOLTAGE_NOW: /* hardware reports voltage in mV. 
sysfs expects uV */ val->intval = hidpp->battery.voltage * 1000; break; case POWER_SUPPLY_PROP_CHARGE_TYPE: val->intval = hidpp->battery.charge_type; break; default: ret = -EINVAL; break; } return ret; } /* -------------------------------------------------------------------------- */ /* 0x1d4b: Wireless device status */ /* -------------------------------------------------------------------------- */ #define HIDPP_PAGE_WIRELESS_DEVICE_STATUS 0x1d4b static int hidpp_get_wireless_feature_index(struct hidpp_device *hidpp, u8 *feature_index) { return hidpp_root_get_feature(hidpp, HIDPP_PAGE_WIRELESS_DEVICE_STATUS, feature_index); } /* -------------------------------------------------------------------------- */ /* 0x1f20: ADC measurement */ /* -------------------------------------------------------------------------- */ #define HIDPP_PAGE_ADC_MEASUREMENT 0x1f20 #define CMD_ADC_MEASUREMENT_GET_ADC_MEASUREMENT 0x00 #define EVENT_ADC_MEASUREMENT_STATUS_BROADCAST 0x00 static int hidpp20_map_adc_measurement_1f20_capacity(struct hid_device *hid_dev, int voltage) { /* NB: This voltage curve doesn't necessarily map perfectly to all * devices that implement the ADC_MEASUREMENT feature. This is because * there are a few devices that use different battery technology. * * Adapted from: * https://github.com/Sapd/HeadsetControl/blob/acd972be0468e039b93aae81221f20a54d2d60f7/src/devices/logitech_g633_g933_935.c#L44-L52 */ static const int voltages[100] = { 4030, 4024, 4018, 4011, 4003, 3994, 3985, 3975, 3963, 3951, 3937, 3922, 3907, 3893, 3880, 3868, 3857, 3846, 3837, 3828, 3820, 3812, 3805, 3798, 3791, 3785, 3779, 3773, 3768, 3762, 3757, 3752, 3747, 3742, 3738, 3733, 3729, 3724, 3720, 3716, 3712, 3708, 3704, 3700, 3696, 3692, 3688, 3685, 3681, 3677, 3674, 3670, 3667, 3663, 3660, 3657, 3653, 3650, 3646, 3643, 3640, 3637, 3633, 3630, 3627, 3624, 3620, 3617, 3614, 3611, 3608, 3604, 3601, 3598, 3595, 3592, 3589, 3585, 3582, 3579, 3576, 3573, 3569, 3566, 3563, 3560, 3556, 3553, 3550, 3546, 3543, 3539, 3536, 3532, 3529, 3525, 3499, 3466, 3433, 3399, }; int i; if (voltage == 0) return 0; if (unlikely(voltage < 3400 || voltage >= 5000)) hid_warn_once(hid_dev, "%s: possibly using the wrong voltage curve\n", __func__); for (i = 0; i < ARRAY_SIZE(voltages); i++) { if (voltage >= voltages[i]) return ARRAY_SIZE(voltages) - i; } return 0; } static int hidpp20_map_adc_measurement_1f20(u8 data[3], int *voltage) { int status; u8 flags; flags = data[2]; switch (flags) { case 0x01: status = POWER_SUPPLY_STATUS_DISCHARGING; break; case 0x03: status = POWER_SUPPLY_STATUS_CHARGING; break; case 0x07: status = POWER_SUPPLY_STATUS_FULL; break; case 0x0F: default: status = POWER_SUPPLY_STATUS_UNKNOWN; break; } *voltage = get_unaligned_be16(data); dbg_hid("Parsed 1f20 data as flag 0x%02x voltage %dmV\n", flags, *voltage); return status; } /* Return value is whether the device is online */ static bool hidpp20_get_adc_measurement_1f20(struct hidpp_device *hidpp, u8 feature_index, int *status, int *voltage) { struct hidpp_report response; int ret; u8 *params = (u8 *)response.fap.params; *status = POWER_SUPPLY_STATUS_UNKNOWN; *voltage = 0; ret = hidpp_send_fap_command_sync(hidpp, feature_index, CMD_ADC_MEASUREMENT_GET_ADC_MEASUREMENT, NULL, 0, &response); if (ret > 0) { hid_dbg(hidpp->hid_dev, "%s: received protocol error 0x%02x\n", __func__, ret); return false; } *status = hidpp20_map_adc_measurement_1f20(params, voltage); return true; } static int hidpp20_query_adc_measurement_info_1f20(struct hidpp_device *hidpp) { if 
(hidpp->battery.adc_measurement_feature_index == 0xff) { int ret; ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_ADC_MEASUREMENT, &hidpp->battery.adc_measurement_feature_index); if (ret) return ret; hidpp->capabilities |= HIDPP_CAPABILITY_ADC_MEASUREMENT; } hidpp->battery.online = hidpp20_get_adc_measurement_1f20(hidpp, hidpp->battery.adc_measurement_feature_index, &hidpp->battery.status, &hidpp->battery.voltage); hidpp->battery.capacity = hidpp20_map_adc_measurement_1f20_capacity(hidpp->hid_dev, hidpp->battery.voltage); hidpp_update_usb_wireless_status(hidpp); return 0; } static int hidpp20_adc_measurement_event_1f20(struct hidpp_device *hidpp, u8 *data, int size) { struct hidpp_report *report = (struct hidpp_report *)data; int status, voltage; if (report->fap.feature_index != hidpp->battery.adc_measurement_feature_index || report->fap.funcindex_clientid != EVENT_ADC_MEASUREMENT_STATUS_BROADCAST) return 0; status = hidpp20_map_adc_measurement_1f20(report->fap.params, &voltage); hidpp->battery.online = status != POWER_SUPPLY_STATUS_UNKNOWN; if (voltage != hidpp->battery.voltage || status != hidpp->battery.status) { hidpp->battery.status = status; hidpp->battery.voltage = voltage; hidpp->battery.capacity = hidpp20_map_adc_measurement_1f20_capacity(hidpp->hid_dev, voltage); if (hidpp->battery.ps) power_supply_changed(hidpp->battery.ps); hidpp_update_usb_wireless_status(hidpp); } return 0; } /* -------------------------------------------------------------------------- */ /* 0x2120: Hi-resolution scrolling */ /* -------------------------------------------------------------------------- */ #define HIDPP_PAGE_HI_RESOLUTION_SCROLLING 0x2120 #define CMD_HI_RESOLUTION_SCROLLING_SET_HIGHRES_SCROLLING_MODE 0x10 static int hidpp_hrs_set_highres_scrolling_mode(struct hidpp_device *hidpp, bool enabled, u8 *multiplier) { u8 feature_index; int ret; u8 params[1]; struct hidpp_report response; ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_HI_RESOLUTION_SCROLLING, &feature_index); if (ret) return ret; params[0] = enabled ? BIT(0) : 0; ret = hidpp_send_fap_command_sync(hidpp, feature_index, CMD_HI_RESOLUTION_SCROLLING_SET_HIGHRES_SCROLLING_MODE, params, sizeof(params), &response); if (ret) return ret; *multiplier = response.fap.params[1]; return 0; } /* -------------------------------------------------------------------------- */ /* 0x2121: HiRes Wheel */ /* -------------------------------------------------------------------------- */ #define HIDPP_PAGE_HIRES_WHEEL 0x2121 #define CMD_HIRES_WHEEL_GET_WHEEL_CAPABILITY 0x00 #define CMD_HIRES_WHEEL_SET_WHEEL_MODE 0x20 static int hidpp_hrw_get_wheel_capability(struct hidpp_device *hidpp, u8 *multiplier) { u8 feature_index; int ret; struct hidpp_report response; ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_HIRES_WHEEL, &feature_index); if (ret) goto return_default; ret = hidpp_send_fap_command_sync(hidpp, feature_index, CMD_HIRES_WHEEL_GET_WHEEL_CAPABILITY, NULL, 0, &response); if (ret) goto return_default; *multiplier = response.fap.params[0]; return 0; return_default: hid_warn(hidpp->hid_dev, "Couldn't get wheel multiplier (error %d)\n", ret); return ret; } static int hidpp_hrw_set_wheel_mode(struct hidpp_device *hidpp, bool invert, bool high_resolution, bool use_hidpp) { u8 feature_index; int ret; u8 params[1]; struct hidpp_report response; ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_HIRES_WHEEL, &feature_index); if (ret) return ret; params[0] = (invert ? BIT(2) : 0) | (high_resolution ? BIT(1) : 0) | (use_hidpp ? 
BIT(0) : 0); return hidpp_send_fap_command_sync(hidpp, feature_index, CMD_HIRES_WHEEL_SET_WHEEL_MODE, params, sizeof(params), &response); } /* -------------------------------------------------------------------------- */ /* 0x4301: Solar Keyboard */ /* -------------------------------------------------------------------------- */ #define HIDPP_PAGE_SOLAR_KEYBOARD 0x4301 #define CMD_SOLAR_SET_LIGHT_MEASURE 0x00 #define EVENT_SOLAR_BATTERY_BROADCAST 0x00 #define EVENT_SOLAR_BATTERY_LIGHT_MEASURE 0x10 #define EVENT_SOLAR_CHECK_LIGHT_BUTTON 0x20 static int hidpp_solar_request_battery_event(struct hidpp_device *hidpp) { struct hidpp_report response; u8 params[2] = { 1, 1 }; int ret; if (hidpp->battery.feature_index == 0xff) { ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_SOLAR_KEYBOARD, &hidpp->battery.solar_feature_index); if (ret) return ret; } ret = hidpp_send_fap_command_sync(hidpp, hidpp->battery.solar_feature_index, CMD_SOLAR_SET_LIGHT_MEASURE, params, 2, &response); if (ret > 0) { hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n", __func__, ret); return -EPROTO; } if (ret) return ret; hidpp->capabilities |= HIDPP_CAPABILITY_BATTERY_MILEAGE; return 0; } static int hidpp_solar_battery_event(struct hidpp_device *hidpp, u8 *data, int size) { struct hidpp_report *report = (struct hidpp_report *)data; int capacity, lux, status; u8 function; function = report->fap.funcindex_clientid; if (report->fap.feature_index != hidpp->battery.solar_feature_index || !(function == EVENT_SOLAR_BATTERY_BROADCAST || function == EVENT_SOLAR_BATTERY_LIGHT_MEASURE || function == EVENT_SOLAR_CHECK_LIGHT_BUTTON)) return 0; capacity = report->fap.params[0]; switch (function) { case EVENT_SOLAR_BATTERY_LIGHT_MEASURE: lux = (report->fap.params[1] << 8) | report->fap.params[2]; if (lux > 200) status = POWER_SUPPLY_STATUS_CHARGING; else status = POWER_SUPPLY_STATUS_DISCHARGING; break; case EVENT_SOLAR_CHECK_LIGHT_BUTTON: default: if (capacity < hidpp->battery.capacity) status = POWER_SUPPLY_STATUS_DISCHARGING; else status = POWER_SUPPLY_STATUS_CHARGING; } if (capacity == 100) status = POWER_SUPPLY_STATUS_FULL; hidpp->battery.online = true; if (capacity != hidpp->battery.capacity || status != hidpp->battery.status) { hidpp->battery.capacity = capacity; hidpp->battery.status = status; if (hidpp->battery.ps) power_supply_changed(hidpp->battery.ps); } return 0; } /* -------------------------------------------------------------------------- */ /* 0x6010: Touchpad FW items */ /* -------------------------------------------------------------------------- */ #define HIDPP_PAGE_TOUCHPAD_FW_ITEMS 0x6010 #define CMD_TOUCHPAD_FW_ITEMS_SET 0x10 struct hidpp_touchpad_fw_items { uint8_t presence; uint8_t desired_state; uint8_t state; uint8_t persistent; }; /* * send a set state command to the device by reading the current items->state * field. items is then filled with the current state. 
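* The single parameter sent is items->state; the four reply bytes are copied back into presence, desired_state, state and persistent.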
*/ static int hidpp_touchpad_fw_items_set(struct hidpp_device *hidpp, u8 feature_index, struct hidpp_touchpad_fw_items *items) { struct hidpp_report response; int ret; u8 *params = (u8 *)response.fap.params; ret = hidpp_send_fap_command_sync(hidpp, feature_index, CMD_TOUCHPAD_FW_ITEMS_SET, &items->state, 1, &response); if (ret > 0) { hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n", __func__, ret); return -EPROTO; } if (ret) return ret; items->presence = params[0]; items->desired_state = params[1]; items->state = params[2]; items->persistent = params[3]; return 0; } /* -------------------------------------------------------------------------- */ /* 0x6100: TouchPadRawXY */ /* -------------------------------------------------------------------------- */ #define HIDPP_PAGE_TOUCHPAD_RAW_XY 0x6100 #define CMD_TOUCHPAD_GET_RAW_INFO 0x00 #define CMD_TOUCHPAD_SET_RAW_REPORT_STATE 0x20 #define EVENT_TOUCHPAD_RAW_XY 0x00 #define TOUCHPAD_RAW_XY_ORIGIN_LOWER_LEFT 0x01 #define TOUCHPAD_RAW_XY_ORIGIN_UPPER_LEFT 0x03 struct hidpp_touchpad_raw_info { u16 x_size; u16 y_size; u8 z_range; u8 area_range; u8 timestamp_unit; u8 maxcontacts; u8 origin; u16 res; }; struct hidpp_touchpad_raw_xy_finger { u8 contact_type; u8 contact_status; u16 x; u16 y; u8 z; u8 area; u8 finger_id; }; struct hidpp_touchpad_raw_xy { u16 timestamp; struct hidpp_touchpad_raw_xy_finger fingers[2]; u8 spurious_flag; u8 end_of_frame; u8 finger_count; u8 button; }; static int hidpp_touchpad_get_raw_info(struct hidpp_device *hidpp, u8 feature_index, struct hidpp_touchpad_raw_info *raw_info) { struct hidpp_report response; int ret; u8 *params = (u8 *)response.fap.params; ret = hidpp_send_fap_command_sync(hidpp, feature_index, CMD_TOUCHPAD_GET_RAW_INFO, NULL, 0, &response); if (ret > 0) { hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n", __func__, ret); return -EPROTO; } if (ret) return ret; raw_info->x_size = get_unaligned_be16(&params[0]); raw_info->y_size = get_unaligned_be16(&params[2]); raw_info->z_range = params[4]; raw_info->area_range = params[5]; raw_info->maxcontacts = params[7]; raw_info->origin = params[8]; /* res is given in unit per inch */ raw_info->res = get_unaligned_be16(&params[13]) * 2 / 51; return ret; } static int hidpp_touchpad_set_raw_report_state(struct hidpp_device *hidpp_dev, u8 feature_index, bool send_raw_reports, bool sensor_enhanced_settings) { struct hidpp_report response; /* * Params: * bit 0 - enable raw * bit 1 - 16bit Z, no area * bit 2 - enhanced sensitivity * bit 3 - width, height (4 bits each) instead of area * bit 4 - send raw + gestures (degrades smoothness) * remaining bits - reserved */ u8 params = send_raw_reports | (sensor_enhanced_settings << 2); return hidpp_send_fap_command_sync(hidpp_dev, feature_index, CMD_TOUCHPAD_SET_RAW_REPORT_STATE, &params, 1, &response); } static void hidpp_touchpad_touch_event(u8 *data, struct hidpp_touchpad_raw_xy_finger *finger) { u8 x_m = data[0] << 2; u8 y_m = data[2] << 2; finger->x = x_m << 6 | data[1]; finger->y = y_m << 6 | data[3]; finger->contact_type = data[0] >> 6; finger->contact_status = data[2] >> 6; finger->z = data[4]; finger->area = data[5]; finger->finger_id = data[6] >> 4; } static void hidpp_touchpad_raw_xy_event(struct hidpp_device *hidpp_dev, u8 *data, struct hidpp_touchpad_raw_xy *raw_xy) { memset(raw_xy, 0, sizeof(struct hidpp_touchpad_raw_xy)); raw_xy->end_of_frame = data[8] & 0x01; raw_xy->spurious_flag = (data[8] >> 1) & 0x01; raw_xy->finger_count = data[15] & 0x0f; raw_xy->button = (data[8] >> 2) & 0x01; if
(raw_xy->finger_count) { hidpp_touchpad_touch_event(&data[2], &raw_xy->fingers[0]); hidpp_touchpad_touch_event(&data[9], &raw_xy->fingers[1]); } } /* -------------------------------------------------------------------------- */ /* 0x8123: Force feedback support */ /* -------------------------------------------------------------------------- */ #define HIDPP_FF_GET_INFO 0x01 #define HIDPP_FF_RESET_ALL 0x11 #define HIDPP_FF_DOWNLOAD_EFFECT 0x21 #define HIDPP_FF_SET_EFFECT_STATE 0x31 #define HIDPP_FF_DESTROY_EFFECT 0x41 #define HIDPP_FF_GET_APERTURE 0x51 #define HIDPP_FF_SET_APERTURE 0x61 #define HIDPP_FF_GET_GLOBAL_GAINS 0x71 #define HIDPP_FF_SET_GLOBAL_GAINS 0x81 #define HIDPP_FF_EFFECT_STATE_GET 0x00 #define HIDPP_FF_EFFECT_STATE_STOP 0x01 #define HIDPP_FF_EFFECT_STATE_PLAY 0x02 #define HIDPP_FF_EFFECT_STATE_PAUSE 0x03 #define HIDPP_FF_EFFECT_CONSTANT 0x00 #define HIDPP_FF_EFFECT_PERIODIC_SINE 0x01 #define HIDPP_FF_EFFECT_PERIODIC_SQUARE 0x02 #define HIDPP_FF_EFFECT_PERIODIC_TRIANGLE 0x03 #define HIDPP_FF_EFFECT_PERIODIC_SAWTOOTHUP 0x04 #define HIDPP_FF_EFFECT_PERIODIC_SAWTOOTHDOWN 0x05 #define HIDPP_FF_EFFECT_SPRING 0x06 #define HIDPP_FF_EFFECT_DAMPER 0x07 #define HIDPP_FF_EFFECT_FRICTION 0x08 #define HIDPP_FF_EFFECT_INERTIA 0x09 #define HIDPP_FF_EFFECT_RAMP 0x0A #define HIDPP_FF_EFFECT_AUTOSTART 0x80 #define HIDPP_FF_EFFECTID_NONE -1 #define HIDPP_FF_EFFECTID_AUTOCENTER -2 #define HIDPP_AUTOCENTER_PARAMS_LENGTH 18 #define HIDPP_FF_MAX_PARAMS 20 #define HIDPP_FF_RESERVED_SLOTS 1 struct hidpp_ff_private_data { struct hidpp_device *hidpp; u8 feature_index; u8 version; u16 gain; s16 range; u8 slot_autocenter; u8 num_effects; int *effect_ids; struct workqueue_struct *wq; atomic_t workqueue_size; }; struct hidpp_ff_work_data { struct work_struct work; struct hidpp_ff_private_data *data; int effect_id; u8 command; u8 params[HIDPP_FF_MAX_PARAMS]; u8 size; }; static const signed short hidpp_ff_effects[] = { FF_CONSTANT, FF_PERIODIC, FF_SINE, FF_SQUARE, FF_SAW_UP, FF_SAW_DOWN, FF_TRIANGLE, FF_SPRING, FF_DAMPER, FF_AUTOCENTER, FF_GAIN, -1 }; static const signed short hidpp_ff_effects_v2[] = { FF_RAMP, FF_FRICTION, FF_INERTIA, -1 }; static const u8 HIDPP_FF_CONDITION_CMDS[] = { HIDPP_FF_EFFECT_SPRING, HIDPP_FF_EFFECT_FRICTION, HIDPP_FF_EFFECT_DAMPER, HIDPP_FF_EFFECT_INERTIA }; static const char *HIDPP_FF_CONDITION_NAMES[] = { "spring", "friction", "damper", "inertia" }; static u8 hidpp_ff_find_effect(struct hidpp_ff_private_data *data, int effect_id) { int i; for (i = 0; i < data->num_effects; i++) if (data->effect_ids[i] == effect_id) return i+1; return 0; } static void hidpp_ff_work_handler(struct work_struct *w) { struct hidpp_ff_work_data *wd = container_of(w, struct hidpp_ff_work_data, work); struct hidpp_ff_private_data *data = wd->data; struct hidpp_report response; u8 slot; int ret; /* add slot number if needed */ switch (wd->effect_id) { case HIDPP_FF_EFFECTID_AUTOCENTER: wd->params[0] = data->slot_autocenter; break; case HIDPP_FF_EFFECTID_NONE: /* leave slot as zero */ break; default: /* find current slot for effect */ wd->params[0] = hidpp_ff_find_effect(data, wd->effect_id); break; } /* send command and wait for reply */ ret = hidpp_send_fap_command_sync(data->hidpp, data->feature_index, wd->command, wd->params, wd->size, &response); if (ret) { hid_err(data->hidpp->hid_dev, "Failed to send command to device!\n"); goto out; } /* parse return data */ switch (wd->command) { case HIDPP_FF_DOWNLOAD_EFFECT: slot = response.fap.params[0]; if (slot > 0 && slot <= data->num_effects) { if 
(wd->effect_id >= 0) /* regular effect uploaded */ data->effect_ids[slot-1] = wd->effect_id; else if (wd->effect_id >= HIDPP_FF_EFFECTID_AUTOCENTER) /* autocenter spring uploaded */ data->slot_autocenter = slot; } break; case HIDPP_FF_DESTROY_EFFECT: if (wd->effect_id >= 0) /* regular effect destroyed */ data->effect_ids[wd->params[0]-1] = -1; else if (wd->effect_id >= HIDPP_FF_EFFECTID_AUTOCENTER) /* autocenter spring destroyed */ data->slot_autocenter = 0; break; case HIDPP_FF_SET_GLOBAL_GAINS: data->gain = (wd->params[0] << 8) + wd->params[1]; break; case HIDPP_FF_SET_APERTURE: data->range = (wd->params[0] << 8) + wd->params[1]; break; default: /* no action needed */ break; } out: atomic_dec(&data->workqueue_size); kfree(wd); } static int hidpp_ff_queue_work(struct hidpp_ff_private_data *data, int effect_id, u8 command, u8 *params, u8 size) { struct hidpp_ff_work_data *wd = kzalloc(sizeof(*wd), GFP_KERNEL); int s; if (!wd) return -ENOMEM; INIT_WORK(&wd->work, hidpp_ff_work_handler); wd->data = data; wd->effect_id = effect_id; wd->command = command; wd->size = size; memcpy(wd->params, params, size); s = atomic_inc_return(&data->workqueue_size); queue_work(data->wq, &wd->work); /* warn about excessive queue size */ if (s >= 20 && s % 20 == 0) hid_warn(data->hidpp->hid_dev, "Force feedback command queue contains %d commands, causing substantial delays!", s); return 0; } static int hidpp_ff_upload_effect(struct input_dev *dev, struct ff_effect *effect, struct ff_effect *old) { struct hidpp_ff_private_data *data = dev->ff->private; u8 params[20]; u8 size; int force; /* set common parameters */ params[2] = effect->replay.length >> 8; params[3] = effect->replay.length & 255; params[4] = effect->replay.delay >> 8; params[5] = effect->replay.delay & 255; switch (effect->type) { case FF_CONSTANT: force = (effect->u.constant.level * fixp_sin16((effect->direction * 360) >> 16)) >> 15; params[1] = HIDPP_FF_EFFECT_CONSTANT; params[6] = force >> 8; params[7] = force & 255; params[8] = effect->u.constant.envelope.attack_level >> 7; params[9] = effect->u.constant.envelope.attack_length >> 8; params[10] = effect->u.constant.envelope.attack_length & 255; params[11] = effect->u.constant.envelope.fade_level >> 7; params[12] = effect->u.constant.envelope.fade_length >> 8; params[13] = effect->u.constant.envelope.fade_length & 255; size = 14; dbg_hid("Uploading constant force level=%d in dir %d = %d\n", effect->u.constant.level, effect->direction, force); dbg_hid(" envelope attack=(%d, %d ms) fade=(%d, %d ms)\n", effect->u.constant.envelope.attack_level, effect->u.constant.envelope.attack_length, effect->u.constant.envelope.fade_level, effect->u.constant.envelope.fade_length); break; case FF_PERIODIC: { switch (effect->u.periodic.waveform) { case FF_SINE: params[1] = HIDPP_FF_EFFECT_PERIODIC_SINE; break; case FF_SQUARE: params[1] = HIDPP_FF_EFFECT_PERIODIC_SQUARE; break; case FF_SAW_UP: params[1] = HIDPP_FF_EFFECT_PERIODIC_SAWTOOTHUP; break; case FF_SAW_DOWN: params[1] = HIDPP_FF_EFFECT_PERIODIC_SAWTOOTHDOWN; break; case FF_TRIANGLE: params[1] = HIDPP_FF_EFFECT_PERIODIC_TRIANGLE; break; default: hid_err(data->hidpp->hid_dev, "Unexpected periodic waveform type %i!\n", effect->u.periodic.waveform); return -EINVAL; } force = (effect->u.periodic.magnitude * fixp_sin16((effect->direction * 360) >> 16)) >> 15; params[6] = effect->u.periodic.magnitude >> 8; params[7] = effect->u.periodic.magnitude & 255; params[8] = effect->u.periodic.offset >> 8; params[9] = effect->u.periodic.offset & 255; params[10] = 
effect->u.periodic.period >> 8; params[11] = effect->u.periodic.period & 255; params[12] = effect->u.periodic.phase >> 8; params[13] = effect->u.periodic.phase & 255; params[14] = effect->u.periodic.envelope.attack_level >> 7; params[15] = effect->u.periodic.envelope.attack_length >> 8; params[16] = effect->u.periodic.envelope.attack_length & 255; params[17] = effect->u.periodic.envelope.fade_level >> 7; params[18] = effect->u.periodic.envelope.fade_length >> 8; params[19] = effect->u.periodic.envelope.fade_length & 255; size = 20; dbg_hid("Uploading periodic force mag=%d/dir=%d, offset=%d, period=%d ms, phase=%d\n", effect->u.periodic.magnitude, effect->direction, effect->u.periodic.offset, effect->u.periodic.period, effect->u.periodic.phase); dbg_hid(" envelope attack=(%d, %d ms) fade=(%d, %d ms)\n", effect->u.periodic.envelope.attack_level, effect->u.periodic.envelope.attack_length, effect->u.periodic.envelope.fade_level, effect->u.periodic.envelope.fade_length); break; } case FF_RAMP: params[1] = HIDPP_FF_EFFECT_RAMP; force = (effect->u.ramp.start_level * fixp_sin16((effect->direction * 360) >> 16)) >> 15; params[6] = force >> 8; params[7] = force & 255; force = (effect->u.ramp.end_level * fixp_sin16((effect->direction * 360) >> 16)) >> 15; params[8] = force >> 8; params[9] = force & 255; params[10] = effect->u.ramp.envelope.attack_level >> 7; params[11] = effect->u.ramp.envelope.attack_length >> 8; params[12] = effect->u.ramp.envelope.attack_length & 255; params[13] = effect->u.ramp.envelope.fade_level >> 7; params[14] = effect->u.ramp.envelope.fade_length >> 8; params[15] = effect->u.ramp.envelope.fade_length & 255; size = 16; dbg_hid("Uploading ramp force level=%d -> %d in dir %d = %d\n", effect->u.ramp.start_level, effect->u.ramp.end_level, effect->direction, force); dbg_hid(" envelope attack=(%d, %d ms) fade=(%d, %d ms)\n", effect->u.ramp.envelope.attack_level, effect->u.ramp.envelope.attack_length, effect->u.ramp.envelope.fade_level, effect->u.ramp.envelope.fade_length); break; case FF_FRICTION: case FF_INERTIA: case FF_SPRING: case FF_DAMPER: params[1] = HIDPP_FF_CONDITION_CMDS[effect->type - FF_SPRING]; params[6] = effect->u.condition[0].left_saturation >> 9; params[7] = (effect->u.condition[0].left_saturation >> 1) & 255; params[8] = effect->u.condition[0].left_coeff >> 8; params[9] = effect->u.condition[0].left_coeff & 255; params[10] = effect->u.condition[0].deadband >> 9; params[11] = (effect->u.condition[0].deadband >> 1) & 255; params[12] = effect->u.condition[0].center >> 8; params[13] = effect->u.condition[0].center & 255; params[14] = effect->u.condition[0].right_coeff >> 8; params[15] = effect->u.condition[0].right_coeff & 255; params[16] = effect->u.condition[0].right_saturation >> 9; params[17] = (effect->u.condition[0].right_saturation >> 1) & 255; size = 18; dbg_hid("Uploading %s force left coeff=%d, left sat=%d, right coeff=%d, right sat=%d\n", HIDPP_FF_CONDITION_NAMES[effect->type - FF_SPRING], effect->u.condition[0].left_coeff, effect->u.condition[0].left_saturation, effect->u.condition[0].right_coeff, effect->u.condition[0].right_saturation); dbg_hid(" deadband=%d, center=%d\n", effect->u.condition[0].deadband, effect->u.condition[0].center); break; default: hid_err(data->hidpp->hid_dev, "Unexpected force type %i!\n", effect->type); return -EINVAL; } return hidpp_ff_queue_work(data, effect->id, HIDPP_FF_DOWNLOAD_EFFECT, params, size); } static int hidpp_ff_playback(struct input_dev *dev, int effect_id, int value) { struct hidpp_ff_private_data *data = 
dev->ff->private; u8 params[2]; params[1] = value ? HIDPP_FF_EFFECT_STATE_PLAY : HIDPP_FF_EFFECT_STATE_STOP; dbg_hid("St%sing playback of effect %d.\n", value?"art":"opp", effect_id); return hidpp_ff_queue_work(data, effect_id, HIDPP_FF_SET_EFFECT_STATE, params, ARRAY_SIZE(params)); } static int hidpp_ff_erase_effect(struct input_dev *dev, int effect_id) { struct hidpp_ff_private_data *data = dev->ff->private; u8 slot = 0; dbg_hid("Erasing effect %d.\n", effect_id); return hidpp_ff_queue_work(data, effect_id, HIDPP_FF_DESTROY_EFFECT, &slot, 1); } static void hidpp_ff_set_autocenter(struct input_dev *dev, u16 magnitude) { struct hidpp_ff_private_data *data = dev->ff->private; u8 params[HIDPP_AUTOCENTER_PARAMS_LENGTH]; dbg_hid("Setting autocenter to %d.\n", magnitude); /* start a standard spring effect */ params[1] = HIDPP_FF_EFFECT_SPRING | HIDPP_FF_EFFECT_AUTOSTART; /* zero delay and duration */ params[2] = params[3] = params[4] = params[5] = 0; /* set coeff to 25% of saturation */ params[8] = params[14] = magnitude >> 11; params[9] = params[15] = (magnitude >> 3) & 255; params[6] = params[16] = magnitude >> 9; params[7] = params[17] = (magnitude >> 1) & 255; /* zero deadband and center */ params[10] = params[11] = params[12] = params[13] = 0; hidpp_ff_queue_work(data, HIDPP_FF_EFFECTID_AUTOCENTER, HIDPP_FF_DOWNLOAD_EFFECT, params, ARRAY_SIZE(params)); } static void hidpp_ff_set_gain(struct input_dev *dev, u16 gain) { struct hidpp_ff_private_data *data = dev->ff->private; u8 params[4]; dbg_hid("Setting gain to %d.\n", gain); params[0] = gain >> 8; params[1] = gain & 255; params[2] = 0; /* no boost */ params[3] = 0; hidpp_ff_queue_work(data, HIDPP_FF_EFFECTID_NONE, HIDPP_FF_SET_GLOBAL_GAINS, params, ARRAY_SIZE(params)); } static ssize_t hidpp_ff_range_show(struct device *dev, struct device_attribute *attr, char *buf) { struct hid_device *hid = to_hid_device(dev); struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list); struct input_dev *idev = hidinput->input; struct hidpp_ff_private_data *data = idev->ff->private; return scnprintf(buf, PAGE_SIZE, "%u\n", data->range); } static ssize_t hidpp_ff_range_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct hid_device *hid = to_hid_device(dev); struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list); struct input_dev *idev = hidinput->input; struct hidpp_ff_private_data *data = idev->ff->private; u8 params[2]; int range = simple_strtoul(buf, NULL, 10); range = clamp(range, 180, 900); params[0] = range >> 8; params[1] = range & 0x00FF; hidpp_ff_queue_work(data, -1, HIDPP_FF_SET_APERTURE, params, ARRAY_SIZE(params)); return count; } static DEVICE_ATTR(range, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH, hidpp_ff_range_show, hidpp_ff_range_store); static void hidpp_ff_destroy(struct ff_device *ff) { struct hidpp_ff_private_data *data = ff->private; struct hid_device *hid = data->hidpp->hid_dev; hid_info(hid, "Unloading HID++ force feedback.\n"); device_remove_file(&hid->dev, &dev_attr_range); destroy_workqueue(data->wq); kfree(data->effect_ids); } static int hidpp_ff_init(struct hidpp_device *hidpp, struct hidpp_ff_private_data *data) { struct hid_device *hid = hidpp->hid_dev; struct hid_input *hidinput; struct input_dev *dev; struct usb_device_descriptor *udesc; u16 bcdDevice; struct ff_device *ff; int error, j, num_slots = data->num_effects; u8 version; if (!hid_is_usb(hid)) { hid_err(hid, "device is not USB\n"); return -ENODEV; } if 
(list_empty(&hid->inputs)) { hid_err(hid, "no inputs found\n"); return -ENODEV; } hidinput = list_entry(hid->inputs.next, struct hid_input, list); dev = hidinput->input; if (!dev) { hid_err(hid, "Struct input_dev not set!\n"); return -EINVAL; } /* Get firmware release */ udesc = &(hid_to_usb_dev(hid)->descriptor); bcdDevice = le16_to_cpu(udesc->bcdDevice); version = bcdDevice & 255; /* Set supported force feedback capabilities */ for (j = 0; hidpp_ff_effects[j] >= 0; j++) set_bit(hidpp_ff_effects[j], dev->ffbit); if (version > 1) for (j = 0; hidpp_ff_effects_v2[j] >= 0; j++) set_bit(hidpp_ff_effects_v2[j], dev->ffbit); error = input_ff_create(dev, num_slots); if (error) { hid_err(dev, "Failed to create FF device!\n"); return error; } /* * Create a copy of passed data, so we can transfer memory * ownership to FF core */ data = kmemdup(data, sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; data->effect_ids = kcalloc(num_slots, sizeof(int), GFP_KERNEL); if (!data->effect_ids) { kfree(data); return -ENOMEM; } data->wq = create_singlethread_workqueue("hidpp-ff-sendqueue"); if (!data->wq) { kfree(data->effect_ids); kfree(data); return -ENOMEM; } data->hidpp = hidpp; data->version = version; for (j = 0; j < num_slots; j++) data->effect_ids[j] = -1; ff = dev->ff; ff->private = data; ff->upload = hidpp_ff_upload_effect; ff->erase = hidpp_ff_erase_effect; ff->playback = hidpp_ff_playback; ff->set_gain = hidpp_ff_set_gain; ff->set_autocenter = hidpp_ff_set_autocenter; ff->destroy = hidpp_ff_destroy; /* Create sysfs interface */ error = device_create_file(&(hidpp->hid_dev->dev), &dev_attr_range); if (error) hid_warn(hidpp->hid_dev, "Unable to create sysfs interface for \"range\", errno %d!\n", error); /* init the hardware command queue */ atomic_set(&data->workqueue_size, 0); hid_info(hid, "Force feedback support loaded (firmware release %d).\n", version); return 0; } /* ************************************************************************** */ /* */ /* Device Support */ /* */ /* ************************************************************************** */ /* -------------------------------------------------------------------------- */ /* Touchpad HID++ devices */ /* -------------------------------------------------------------------------- */ #define WTP_MANUAL_RESOLUTION 39 struct wtp_data { u16 x_size, y_size; u8 finger_count; u8 mt_feature_index; u8 button_feature_index; u8 maxcontacts; bool flip_y; unsigned int resolution; }; static int wtp_input_mapping(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) { return -1; } static void wtp_populate_input(struct hidpp_device *hidpp, struct input_dev *input_dev) { struct wtp_data *wd = hidpp->private_data; __set_bit(EV_ABS, input_dev->evbit); __set_bit(EV_KEY, input_dev->evbit); __clear_bit(EV_REL, input_dev->evbit); __clear_bit(EV_LED, input_dev->evbit); input_set_abs_params(input_dev, ABS_MT_POSITION_X, 0, wd->x_size, 0, 0); input_abs_set_res(input_dev, ABS_MT_POSITION_X, wd->resolution); input_set_abs_params(input_dev, ABS_MT_POSITION_Y, 0, wd->y_size, 0, 0); input_abs_set_res(input_dev, ABS_MT_POSITION_Y, wd->resolution); /* Max pressure is not given by the devices, pick one */ input_set_abs_params(input_dev, ABS_MT_PRESSURE, 0, 50, 0, 0); input_set_capability(input_dev, EV_KEY, BTN_LEFT); if (hidpp->quirks & HIDPP_QUIRK_WTP_PHYSICAL_BUTTONS) input_set_capability(input_dev, EV_KEY, BTN_RIGHT); else __set_bit(INPUT_PROP_BUTTONPAD, input_dev->propbit); 
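/* MT slot setup below: INPUT_MT_POINTER marks the device as a pointing surface (pointer emulation) and INPUT_MT_DROP_UNUSED lets input_mt_sync_frame() release contacts that are not refreshed in a frame. */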
input_mt_init_slots(input_dev, wd->maxcontacts, INPUT_MT_POINTER | INPUT_MT_DROP_UNUSED); } static void wtp_touch_event(struct hidpp_device *hidpp, struct hidpp_touchpad_raw_xy_finger *touch_report) { struct wtp_data *wd = hidpp->private_data; int slot; if (!touch_report->finger_id || touch_report->contact_type) /* no actual data */ return; slot = input_mt_get_slot_by_key(hidpp->input, touch_report->finger_id); input_mt_slot(hidpp->input, slot); input_mt_report_slot_state(hidpp->input, MT_TOOL_FINGER, touch_report->contact_status); if (touch_report->contact_status) { input_event(hidpp->input, EV_ABS, ABS_MT_POSITION_X, touch_report->x); input_event(hidpp->input, EV_ABS, ABS_MT_POSITION_Y, wd->flip_y ? wd->y_size - touch_report->y : touch_report->y); input_event(hidpp->input, EV_ABS, ABS_MT_PRESSURE, touch_report->area); } } static void wtp_send_raw_xy_event(struct hidpp_device *hidpp, struct hidpp_touchpad_raw_xy *raw) { int i; for (i = 0; i < 2; i++) wtp_touch_event(hidpp, &(raw->fingers[i])); if (raw->end_of_frame && !(hidpp->quirks & HIDPP_QUIRK_WTP_PHYSICAL_BUTTONS)) input_event(hidpp->input, EV_KEY, BTN_LEFT, raw->button); if (raw->end_of_frame || raw->finger_count <= 2) { input_mt_sync_frame(hidpp->input); input_sync(hidpp->input); } } static int wtp_mouse_raw_xy_event(struct hidpp_device *hidpp, u8 *data) { struct wtp_data *wd = hidpp->private_data; u8 c1_area = ((data[7] & 0xf) * (data[7] & 0xf) + (data[7] >> 4) * (data[7] >> 4)) / 2; u8 c2_area = ((data[13] & 0xf) * (data[13] & 0xf) + (data[13] >> 4) * (data[13] >> 4)) / 2; struct hidpp_touchpad_raw_xy raw = { .timestamp = data[1], .fingers = { { .contact_type = 0, .contact_status = !!data[7], .x = get_unaligned_le16(&data[3]), .y = get_unaligned_le16(&data[5]), .z = c1_area, .area = c1_area, .finger_id = data[2], }, { .contact_type = 0, .contact_status = !!data[13], .x = get_unaligned_le16(&data[9]), .y = get_unaligned_le16(&data[11]), .z = c2_area, .area = c2_area, .finger_id = data[8], } }, .finger_count = wd->maxcontacts, .spurious_flag = 0, .end_of_frame = (data[0] >> 7) == 0, .button = data[0] & 0x01, }; wtp_send_raw_xy_event(hidpp, &raw); return 1; } static int wtp_raw_event(struct hid_device *hdev, u8 *data, int size) { struct hidpp_device *hidpp = hid_get_drvdata(hdev); struct wtp_data *wd = hidpp->private_data; struct hidpp_report *report = (struct hidpp_report *)data; struct hidpp_touchpad_raw_xy raw; if (!wd || !hidpp->input) return 1; switch (data[0]) { case 0x02: if (size < 2) { hid_err(hdev, "Received HID report of bad size (%d)", size); return 1; } if (hidpp->quirks & HIDPP_QUIRK_WTP_PHYSICAL_BUTTONS) { input_event(hidpp->input, EV_KEY, BTN_LEFT, !!(data[1] & 0x01)); input_event(hidpp->input, EV_KEY, BTN_RIGHT, !!(data[1] & 0x02)); input_sync(hidpp->input); return 0; } else { if (size < 21) return 1; return wtp_mouse_raw_xy_event(hidpp, &data[7]); } case REPORT_ID_HIDPP_LONG: /* size is already checked in hidpp_raw_event. 
*/ if ((report->fap.feature_index != wd->mt_feature_index) || (report->fap.funcindex_clientid != EVENT_TOUCHPAD_RAW_XY)) return 1; hidpp_touchpad_raw_xy_event(hidpp, data + 4, &raw); wtp_send_raw_xy_event(hidpp, &raw); return 0; } return 0; } static int wtp_get_config(struct hidpp_device *hidpp) { struct wtp_data *wd = hidpp->private_data; struct hidpp_touchpad_raw_info raw_info = {0}; int ret; ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_TOUCHPAD_RAW_XY, &wd->mt_feature_index); if (ret) /* means that the device is not powered up */ return ret; ret = hidpp_touchpad_get_raw_info(hidpp, wd->mt_feature_index, &raw_info); if (ret) return ret; wd->x_size = raw_info.x_size; wd->y_size = raw_info.y_size; wd->maxcontacts = raw_info.maxcontacts; wd->flip_y = raw_info.origin == TOUCHPAD_RAW_XY_ORIGIN_LOWER_LEFT; wd->resolution = raw_info.res; if (!wd->resolution) wd->resolution = WTP_MANUAL_RESOLUTION; return 0; } static int wtp_allocate(struct hid_device *hdev, const struct hid_device_id *id) { struct hidpp_device *hidpp = hid_get_drvdata(hdev); struct wtp_data *wd; wd = devm_kzalloc(&hdev->dev, sizeof(struct wtp_data), GFP_KERNEL); if (!wd) return -ENOMEM; hidpp->private_data = wd; return 0; }; static int wtp_connect(struct hid_device *hdev) { struct hidpp_device *hidpp = hid_get_drvdata(hdev); struct wtp_data *wd = hidpp->private_data; int ret; if (!wd->x_size) { ret = wtp_get_config(hidpp); if (ret) { hid_err(hdev, "Cannot get wtp config: %d\n", ret); return ret; } } return hidpp_touchpad_set_raw_report_state(hidpp, wd->mt_feature_index, true, true); } /* ------------------------------------------------------------------------- */ /* Logitech M560 devices */ /* ------------------------------------------------------------------------- */ /* * Logitech M560 protocol overview * * The Logitech M560 mouse is designed for Windows 8. When the middle and/or * the side buttons are pressed, it sends keyboard key events * instead of button events. * To complicate things further, the key sequence sent for the middle * button differs between odd and even presses. * * forward button -> Super_R * backward button -> Super_L+'d' (press only) * middle button -> 1st time: Alt_L+SuperL+XF86TouchpadOff (press only) * 2nd time: left-click (press only) * NB: press-only means that the KeyPress/ButtonPress and * KeyRelease/ButtonRelease events are generated back to back when the * button is pressed; no event at all is generated when the button is * released. * * With the command * 10<xx>0a 3500af03 (where <xx> is the mouse id), * the mouse reacts differently: * - it never sends a keyboard key event * - for the three mouse buttons it sends: * middle button press 11<xx>0a 3500af00... * side 1 button (forward) press 11<xx>0a 3500b000... * side 2 button (backward) press 11<xx>0a 3500ae00... * middle/side1/side2 button release 11<xx>0a 35000000... 
*/ static const u8 m560_config_parameter[] = {0x00, 0xaf, 0x03}; /* how buttons are mapped in the report */ #define M560_MOUSE_BTN_LEFT 0x01 #define M560_MOUSE_BTN_RIGHT 0x02 #define M560_MOUSE_BTN_WHEEL_LEFT 0x08 #define M560_MOUSE_BTN_WHEEL_RIGHT 0x10 #define M560_SUB_ID 0x0a #define M560_BUTTON_MODE_REGISTER 0x35 static int m560_send_config_command(struct hid_device *hdev) { struct hidpp_report response; struct hidpp_device *hidpp_dev; hidpp_dev = hid_get_drvdata(hdev); return hidpp_send_rap_command_sync( hidpp_dev, REPORT_ID_HIDPP_SHORT, M560_SUB_ID, M560_BUTTON_MODE_REGISTER, (u8 *)m560_config_parameter, sizeof(m560_config_parameter), &response ); } static int m560_raw_event(struct hid_device *hdev, u8 *data, int size) { struct hidpp_device *hidpp = hid_get_drvdata(hdev); /* sanity check */ if (!hidpp->input) { hid_err(hdev, "error in parameter\n"); return -EINVAL; } if (size < 7) { hid_err(hdev, "error in report\n"); return 0; } if (data[0] == REPORT_ID_HIDPP_LONG && data[2] == M560_SUB_ID && data[6] == 0x00) { /* * m560 mouse report for middle, forward and backward button * * data[0] = 0x11 * data[1] = device-id * data[2] = 0x0a * data[5] = 0xaf -> middle * 0xb0 -> forward * 0xae -> backward * 0x00 -> release all * data[6] = 0x00 */ switch (data[5]) { case 0xaf: input_report_key(hidpp->input, BTN_MIDDLE, 1); break; case 0xb0: input_report_key(hidpp->input, BTN_FORWARD, 1); break; case 0xae: input_report_key(hidpp->input, BTN_BACK, 1); break; case 0x00: input_report_key(hidpp->input, BTN_BACK, 0); input_report_key(hidpp->input, BTN_FORWARD, 0); input_report_key(hidpp->input, BTN_MIDDLE, 0); break; default: hid_err(hdev, "error in report\n"); return 0; } input_sync(hidpp->input); } else if (data[0] == 0x02) { /* * Logitech M560 mouse report * * data[0] = type (0x02) * data[1..2] = buttons * data[3..5] = xy * data[6] = wheel */ int v; input_report_key(hidpp->input, BTN_LEFT, !!(data[1] & M560_MOUSE_BTN_LEFT)); input_report_key(hidpp->input, BTN_RIGHT, !!(data[1] & M560_MOUSE_BTN_RIGHT)); if (data[1] & M560_MOUSE_BTN_WHEEL_LEFT) { input_report_rel(hidpp->input, REL_HWHEEL, -1); input_report_rel(hidpp->input, REL_HWHEEL_HI_RES, -120); } else if (data[1] & M560_MOUSE_BTN_WHEEL_RIGHT) { input_report_rel(hidpp->input, REL_HWHEEL, 1); input_report_rel(hidpp->input, REL_HWHEEL_HI_RES, 120); } v = sign_extend32(hid_field_extract(hdev, data + 3, 0, 12), 11); input_report_rel(hidpp->input, REL_X, v); v = sign_extend32(hid_field_extract(hdev, data + 3, 12, 12), 11); input_report_rel(hidpp->input, REL_Y, v); v = sign_extend32(data[6], 7); if (v != 0) hidpp_scroll_counter_handle_scroll(hidpp->input, &hidpp->vertical_wheel_counter, v); input_sync(hidpp->input); } return 1; } static void m560_populate_input(struct hidpp_device *hidpp, struct input_dev *input_dev) { __set_bit(EV_KEY, input_dev->evbit); __set_bit(BTN_MIDDLE, input_dev->keybit); __set_bit(BTN_RIGHT, input_dev->keybit); __set_bit(BTN_LEFT, input_dev->keybit); __set_bit(BTN_BACK, input_dev->keybit); __set_bit(BTN_FORWARD, input_dev->keybit); __set_bit(EV_REL, input_dev->evbit); __set_bit(REL_X, input_dev->relbit); __set_bit(REL_Y, input_dev->relbit); __set_bit(REL_WHEEL, input_dev->relbit); __set_bit(REL_HWHEEL, input_dev->relbit); __set_bit(REL_WHEEL_HI_RES, input_dev->relbit); __set_bit(REL_HWHEEL_HI_RES, input_dev->relbit); } static int m560_input_mapping(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) { return -1; } /* 
------------------------------------------------------------------------- */ /* Logitech K400 devices */ /* ------------------------------------------------------------------------- */ /* * The Logitech K400 keyboard has an embedded touchpad which is seen * as a mouse from the OS point of view. There is a hardware shortcut to disable * tap-to-click but the setting is not remembered across resets, annoying some * users. * * We can toggle this feature from the host by using the feature 0x6010: * Touchpad FW items */ struct k400_private_data { u8 feature_index; }; static int k400_disable_tap_to_click(struct hidpp_device *hidpp) { struct k400_private_data *k400 = hidpp->private_data; struct hidpp_touchpad_fw_items items = {}; int ret; if (!k400->feature_index) { ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_TOUCHPAD_FW_ITEMS, &k400->feature_index); if (ret) /* means that the device is not powered up */ return ret; } ret = hidpp_touchpad_fw_items_set(hidpp, k400->feature_index, &items); if (ret) return ret; return 0; } static int k400_allocate(struct hid_device *hdev) { struct hidpp_device *hidpp = hid_get_drvdata(hdev); struct k400_private_data *k400; k400 = devm_kzalloc(&hdev->dev, sizeof(struct k400_private_data), GFP_KERNEL); if (!k400) return -ENOMEM; hidpp->private_data = k400; return 0; }; static int k400_connect(struct hid_device *hdev) { struct hidpp_device *hidpp = hid_get_drvdata(hdev); if (!disable_tap_to_click) return 0; return k400_disable_tap_to_click(hidpp); } /* ------------------------------------------------------------------------- */ /* Logitech G920 Driving Force Racing Wheel for Xbox One */ /* ------------------------------------------------------------------------- */ #define HIDPP_PAGE_G920_FORCE_FEEDBACK 0x8123 static int g920_ff_set_autocenter(struct hidpp_device *hidpp, struct hidpp_ff_private_data *data) { struct hidpp_report response; u8 params[HIDPP_AUTOCENTER_PARAMS_LENGTH] = { [1] = HIDPP_FF_EFFECT_SPRING | HIDPP_FF_EFFECT_AUTOSTART, }; int ret; /* initialize with zero autocenter to get wheel in usable state */ dbg_hid("Setting autocenter to 0.\n"); ret = hidpp_send_fap_command_sync(hidpp, data->feature_index, HIDPP_FF_DOWNLOAD_EFFECT, params, ARRAY_SIZE(params), &response); if (ret) hid_warn(hidpp->hid_dev, "Failed to autocenter device!\n"); else data->slot_autocenter = response.fap.params[0]; return ret; } static int g920_get_config(struct hidpp_device *hidpp, struct hidpp_ff_private_data *data) { struct hidpp_report response; int ret; memset(data, 0, sizeof(*data)); /* Find feature and store for later use */ ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_G920_FORCE_FEEDBACK, &data->feature_index); if (ret) return ret; /* Read number of slots available in device */ ret = hidpp_send_fap_command_sync(hidpp, data->feature_index, HIDPP_FF_GET_INFO, NULL, 0, &response); if (ret) { if (ret < 0) return ret; hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n", __func__, ret); return -EPROTO; } data->num_effects = response.fap.params[0] - HIDPP_FF_RESERVED_SLOTS; /* reset all forces */ ret = hidpp_send_fap_command_sync(hidpp, data->feature_index, HIDPP_FF_RESET_ALL, NULL, 0, &response); if (ret) hid_warn(hidpp->hid_dev, "Failed to reset all forces!\n"); ret = hidpp_send_fap_command_sync(hidpp, data->feature_index, HIDPP_FF_GET_APERTURE, NULL, 0, &response); if (ret) { hid_warn(hidpp->hid_dev, "Failed to read range from device!\n"); } data->range = ret ? 
900 : get_unaligned_be16(&response.fap.params[0]); /* Read the current gain values */ ret = hidpp_send_fap_command_sync(hidpp, data->feature_index, HIDPP_FF_GET_GLOBAL_GAINS, NULL, 0, &response); if (ret) hid_warn(hidpp->hid_dev, "Failed to read gain values from device!\n"); data->gain = ret ? 0xffff : get_unaligned_be16(&response.fap.params[0]); /* ignore boost value at response.fap.params[2] */ return g920_ff_set_autocenter(hidpp, data); } /* -------------------------------------------------------------------------- */ /* Logitech Dinovo Mini keyboard with builtin touchpad */ /* -------------------------------------------------------------------------- */ #define DINOVO_MINI_PRODUCT_ID 0xb30c static int lg_dinovo_input_mapping(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) { if ((usage->hid & HID_USAGE_PAGE) != HID_UP_LOGIVENDOR) return 0; switch (usage->hid & HID_USAGE) { case 0x00d: lg_map_key_clear(KEY_MEDIA); break; default: return 0; } return 1; } /* -------------------------------------------------------------------------- */ /* HID++1.0 devices which use HID++ reports for their wheels */ /* -------------------------------------------------------------------------- */ static int hidpp10_wheel_connect(struct hidpp_device *hidpp) { return hidpp10_set_register(hidpp, HIDPP_REG_ENABLE_REPORTS, 0, HIDPP_ENABLE_WHEEL_REPORT | HIDPP_ENABLE_HWHEEL_REPORT, HIDPP_ENABLE_WHEEL_REPORT | HIDPP_ENABLE_HWHEEL_REPORT); } static int hidpp10_wheel_raw_event(struct hidpp_device *hidpp, u8 *data, int size) { s8 value, hvalue; if (!hidpp->input) return -EINVAL; if (size < 7) return 0; if (data[0] != REPORT_ID_HIDPP_SHORT || data[2] != HIDPP_SUB_ID_ROLLER) return 0; value = data[3]; hvalue = data[4]; input_report_rel(hidpp->input, REL_WHEEL, value); input_report_rel(hidpp->input, REL_WHEEL_HI_RES, value * 120); input_report_rel(hidpp->input, REL_HWHEEL, hvalue); input_report_rel(hidpp->input, REL_HWHEEL_HI_RES, hvalue * 120); input_sync(hidpp->input); return 1; } static void hidpp10_wheel_populate_input(struct hidpp_device *hidpp, struct input_dev *input_dev) { __set_bit(EV_REL, input_dev->evbit); __set_bit(REL_WHEEL, input_dev->relbit); __set_bit(REL_WHEEL_HI_RES, input_dev->relbit); __set_bit(REL_HWHEEL, input_dev->relbit); __set_bit(REL_HWHEEL_HI_RES, input_dev->relbit); } /* -------------------------------------------------------------------------- */ /* HID++1.0 mice which use HID++ reports for extra mouse buttons */ /* -------------------------------------------------------------------------- */ static int hidpp10_extra_mouse_buttons_connect(struct hidpp_device *hidpp) { return hidpp10_set_register(hidpp, HIDPP_REG_ENABLE_REPORTS, 0, HIDPP_ENABLE_MOUSE_EXTRA_BTN_REPORT, HIDPP_ENABLE_MOUSE_EXTRA_BTN_REPORT); } static int hidpp10_extra_mouse_buttons_raw_event(struct hidpp_device *hidpp, u8 *data, int size) { int i; if (!hidpp->input) return -EINVAL; if (size < 7) return 0; if (data[0] != REPORT_ID_HIDPP_SHORT || data[2] != HIDPP_SUB_ID_MOUSE_EXTRA_BTNS) return 0; /* * Buttons are either delivered through the regular mouse report *or* * through the extra buttons report. At least for button 6 how it is * delivered differs per receiver firmware version. Even receivers with * the same usb-id show different behavior, so we handle both cases. 
*/ for (i = 0; i < 8; i++) input_report_key(hidpp->input, BTN_MOUSE + i, (data[3] & (1 << i))); /* Some mice report events on button 9+, use BTN_MISC */ for (i = 0; i < 8; i++) input_report_key(hidpp->input, BTN_MISC + i, (data[4] & (1 << i))); input_sync(hidpp->input); return 1; } static void hidpp10_extra_mouse_buttons_populate_input( struct hidpp_device *hidpp, struct input_dev *input_dev) { /* BTN_MOUSE - BTN_MOUSE+7 are set already by the descriptor */ __set_bit(BTN_0, input_dev->keybit); __set_bit(BTN_1, input_dev->keybit); __set_bit(BTN_2, input_dev->keybit); __set_bit(BTN_3, input_dev->keybit); __set_bit(BTN_4, input_dev->keybit); __set_bit(BTN_5, input_dev->keybit); __set_bit(BTN_6, input_dev->keybit); __set_bit(BTN_7, input_dev->keybit); } /* -------------------------------------------------------------------------- */ /* HID++1.0 kbds which only report 0x10xx consumer usages through sub-id 0x03 */ /* -------------------------------------------------------------------------- */ /* Find the consumer-page input report desc and change Maximums to 0x107f */ static u8 *hidpp10_consumer_keys_report_fixup(struct hidpp_device *hidpp, u8 *_rdesc, unsigned int *rsize) { /* Note 0 terminated so we can use strnstr to search for this. */ static const char consumer_rdesc_start[] = { 0x05, 0x0C, /* USAGE_PAGE (Consumer Devices) */ 0x09, 0x01, /* USAGE (Consumer Control) */ 0xA1, 0x01, /* COLLECTION (Application) */ 0x85, 0x03, /* REPORT_ID = 3 */ 0x75, 0x10, /* REPORT_SIZE (16) */ 0x95, 0x02, /* REPORT_COUNT (2) */ 0x15, 0x01, /* LOGICAL_MIN (1) */ 0x26, 0x00 /* LOGICAL_MAX (... */ }; char *consumer_rdesc, *rdesc = (char *)_rdesc; unsigned int size; consumer_rdesc = strnstr(rdesc, consumer_rdesc_start, *rsize); size = *rsize - (consumer_rdesc - rdesc); if (consumer_rdesc && size >= 25) { consumer_rdesc[15] = 0x7f; consumer_rdesc[16] = 0x10; consumer_rdesc[20] = 0x7f; consumer_rdesc[21] = 0x10; } return _rdesc; } static int hidpp10_consumer_keys_connect(struct hidpp_device *hidpp) { return hidpp10_set_register(hidpp, HIDPP_REG_ENABLE_REPORTS, 0, HIDPP_ENABLE_CONSUMER_REPORT, HIDPP_ENABLE_CONSUMER_REPORT); } static int hidpp10_consumer_keys_raw_event(struct hidpp_device *hidpp, u8 *data, int size) { u8 consumer_report[5]; if (size < 7) return 0; if (data[0] != REPORT_ID_HIDPP_SHORT || data[2] != HIDPP_SUB_ID_CONSUMER_VENDOR_KEYS) return 0; /* * Build a normal consumer report (3) out of the data, this detour * is necessary to get some keyboards to report their 0x10xx usages. 
*/ consumer_report[0] = 0x03; memcpy(&consumer_report[1], &data[3], 4); /* We are called from atomic context */ hid_report_raw_event(hidpp->hid_dev, HID_INPUT_REPORT, consumer_report, 5, 1); return 1; } /* -------------------------------------------------------------------------- */ /* High-resolution scroll wheels */ /* -------------------------------------------------------------------------- */ static int hi_res_scroll_enable(struct hidpp_device *hidpp) { int ret; u8 multiplier = 1; if (hidpp->capabilities & HIDPP_CAPABILITY_HIDPP20_HI_RES_WHEEL) { ret = hidpp_hrw_set_wheel_mode(hidpp, false, true, false); if (ret == 0) ret = hidpp_hrw_get_wheel_capability(hidpp, &multiplier); } else if (hidpp->capabilities & HIDPP_CAPABILITY_HIDPP20_HI_RES_SCROLL) { ret = hidpp_hrs_set_highres_scrolling_mode(hidpp, true, &multiplier); } else /* if (hidpp->capabilities & HIDPP_CAPABILITY_HIDPP10_FAST_SCROLL) */ { ret = hidpp10_enable_scrolling_acceleration(hidpp); multiplier = 8; } if (ret) { hid_dbg(hidpp->hid_dev, "Could not enable hi-res scrolling: %d\n", ret); return ret; } if (multiplier == 0) { hid_dbg(hidpp->hid_dev, "Invalid multiplier 0 from device, setting it to 1\n"); multiplier = 1; } hidpp->vertical_wheel_counter.wheel_multiplier = multiplier; hid_dbg(hidpp->hid_dev, "wheel multiplier = %d\n", multiplier); return 0; } static int hidpp_initialize_hires_scroll(struct hidpp_device *hidpp) { int ret; unsigned long capabilities; capabilities = hidpp->capabilities; if (hidpp->protocol_major >= 2) { u8 feature_index; ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_HIRES_WHEEL, &feature_index); if (!ret) { hidpp->capabilities |= HIDPP_CAPABILITY_HIDPP20_HI_RES_WHEEL; hid_dbg(hidpp->hid_dev, "Detected HID++ 2.0 hi-res scroll wheel\n"); return 0; } ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_HI_RESOLUTION_SCROLLING, &feature_index); if (!ret) { hidpp->capabilities |= HIDPP_CAPABILITY_HIDPP20_HI_RES_SCROLL; hid_dbg(hidpp->hid_dev, "Detected HID++ 2.0 hi-res scrolling\n"); } } else { /* We cannot detect fast scrolling support on HID++ 1.0 devices */ if (hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL_1P0) { hidpp->capabilities |= HIDPP_CAPABILITY_HIDPP10_FAST_SCROLL; hid_dbg(hidpp->hid_dev, "Detected HID++ 1.0 fast scroll\n"); } } if (hidpp->capabilities == capabilities) hid_dbg(hidpp->hid_dev, "Did not detect HID++ hi-res scrolling hardware support\n"); return 0; } /* -------------------------------------------------------------------------- */ /* Generic HID++ devices */ /* -------------------------------------------------------------------------- */ static const u8 *hidpp_report_fixup(struct hid_device *hdev, u8 *rdesc, unsigned int *rsize) { struct hidpp_device *hidpp = hid_get_drvdata(hdev); if (!hidpp) return rdesc; /* For 27 MHz keyboards the quirk gets set after hid_parse. 
*/ if (hdev->group == HID_GROUP_LOGITECH_27MHZ_DEVICE || (hidpp->quirks & HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS)) rdesc = hidpp10_consumer_keys_report_fixup(hidpp, rdesc, rsize); return rdesc; } static int hidpp_input_mapping(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) { struct hidpp_device *hidpp = hid_get_drvdata(hdev); if (!hidpp) return 0; if (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP) return wtp_input_mapping(hdev, hi, field, usage, bit, max); else if (hidpp->quirks & HIDPP_QUIRK_CLASS_M560 && field->application != HID_GD_MOUSE) return m560_input_mapping(hdev, hi, field, usage, bit, max); if (hdev->product == DINOVO_MINI_PRODUCT_ID) return lg_dinovo_input_mapping(hdev, hi, field, usage, bit, max); return 0; } static int hidpp_input_mapped(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) { struct hidpp_device *hidpp = hid_get_drvdata(hdev); if (!hidpp) return 0; /* Ensure that Logitech G920 is not given a default fuzz/flat value */ if (hidpp->quirks & HIDPP_QUIRK_CLASS_G920) { if (usage->type == EV_ABS && (usage->code == ABS_X || usage->code == ABS_Y || usage->code == ABS_Z || usage->code == ABS_RZ)) { field->application = HID_GD_MULTIAXIS; } } return 0; } static void hidpp_populate_input(struct hidpp_device *hidpp, struct input_dev *input) { hidpp->input = input; if (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP) wtp_populate_input(hidpp, input); else if (hidpp->quirks & HIDPP_QUIRK_CLASS_M560) m560_populate_input(hidpp, input); if (hidpp->quirks & HIDPP_QUIRK_HIDPP_WHEELS) hidpp10_wheel_populate_input(hidpp, input); if (hidpp->quirks & HIDPP_QUIRK_HIDPP_EXTRA_MOUSE_BTNS) hidpp10_extra_mouse_buttons_populate_input(hidpp, input); } static int hidpp_input_configured(struct hid_device *hdev, struct hid_input *hidinput) { struct hidpp_device *hidpp = hid_get_drvdata(hdev); struct input_dev *input = hidinput->input; if (!hidpp) return 0; hidpp_populate_input(hidpp, input); return 0; } static int hidpp_raw_hidpp_event(struct hidpp_device *hidpp, u8 *data, int size) { struct hidpp_report *question = hidpp->send_receive_buf; struct hidpp_report *answer = hidpp->send_receive_buf; struct hidpp_report *report = (struct hidpp_report *)data; int ret; /* * If the mutex is locked then we have a pending answer from a * previously sent command. 
*/ if (unlikely(mutex_is_locked(&hidpp->send_mutex))) { /* * Check for a correct hidpp20 answer or the corresponding * error */ if (hidpp_match_answer(question, report) || hidpp_match_error(question, report)) { *answer = *report; hidpp->answer_available = true; wake_up(&hidpp->wait); /* * This was an answer to a command that this driver sent * We return 1 to hid-core to avoid forwarding the * command upstream as it has been treated by the driver */ return 1; } } if (unlikely(hidpp_report_is_connect_event(hidpp, report))) { if (schedule_work(&hidpp->work) == 0) dbg_hid("%s: connect event already queued\n", __func__); return 1; } if (hidpp->hid_dev->group == HID_GROUP_LOGITECH_27MHZ_DEVICE && data[0] == REPORT_ID_HIDPP_SHORT && data[2] == HIDPP_SUB_ID_USER_IFACE_EVENT && (data[3] & HIDPP_USER_IFACE_EVENT_ENCRYPTION_KEY_LOST)) { dev_err_ratelimited(&hidpp->hid_dev->dev, "Error the keyboard's wireless encryption key has been lost, your keyboard will not work unless you re-configure encryption.\n"); dev_err_ratelimited(&hidpp->hid_dev->dev, "See: https://gitlab.freedesktop.org/jwrdegoede/logitech-27mhz-keyboard-encryption-setup/\n"); } if (hidpp->capabilities & HIDPP_CAPABILITY_HIDPP20_BATTERY) { ret = hidpp20_battery_event_1000(hidpp, data, size); if (ret != 0) return ret; ret = hidpp20_battery_event_1004(hidpp, data, size); if (ret != 0) return ret; ret = hidpp_solar_battery_event(hidpp, data, size); if (ret != 0) return ret; ret = hidpp20_battery_voltage_event(hidpp, data, size); if (ret != 0) return ret; ret = hidpp20_adc_measurement_event_1f20(hidpp, data, size); if (ret != 0) return ret; } if (hidpp->capabilities & HIDPP_CAPABILITY_HIDPP10_BATTERY) { ret = hidpp10_battery_event(hidpp, data, size); if (ret != 0) return ret; } if (hidpp->quirks & HIDPP_QUIRK_HIDPP_WHEELS) { ret = hidpp10_wheel_raw_event(hidpp, data, size); if (ret != 0) return ret; } if (hidpp->quirks & HIDPP_QUIRK_HIDPP_EXTRA_MOUSE_BTNS) { ret = hidpp10_extra_mouse_buttons_raw_event(hidpp, data, size); if (ret != 0) return ret; } if (hidpp->quirks & HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS) { ret = hidpp10_consumer_keys_raw_event(hidpp, data, size); if (ret != 0) return ret; } return 0; } static int hidpp_raw_event(struct hid_device *hdev, struct hid_report *report, u8 *data, int size) { struct hidpp_device *hidpp = hid_get_drvdata(hdev); int ret = 0; if (!hidpp) return 0; /* Generic HID++ processing. */ switch (data[0]) { case REPORT_ID_HIDPP_VERY_LONG: if (size != hidpp->very_long_report_length) { hid_err(hdev, "received hid++ report of bad size (%d)", size); return 1; } ret = hidpp_raw_hidpp_event(hidpp, data, size); break; case REPORT_ID_HIDPP_LONG: if (size != HIDPP_REPORT_LONG_LENGTH) { hid_err(hdev, "received hid++ report of bad size (%d)", size); return 1; } ret = hidpp_raw_hidpp_event(hidpp, data, size); break; case REPORT_ID_HIDPP_SHORT: if (size != HIDPP_REPORT_SHORT_LENGTH) { hid_err(hdev, "received hid++ report of bad size (%d)", size); return 1; } ret = hidpp_raw_hidpp_event(hidpp, data, size); break; } /* If no report is available for further processing, skip calling * raw_event of subclasses. 
*/ if (ret != 0) return ret; if (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP) return wtp_raw_event(hdev, data, size); else if (hidpp->quirks & HIDPP_QUIRK_CLASS_M560) return m560_raw_event(hdev, data, size); return 0; } static int hidpp_event(struct hid_device *hdev, struct hid_field *field, struct hid_usage *usage, __s32 value) { /* This function will only be called for scroll events, due to the * restriction imposed in hidpp_usages. */ struct hidpp_device *hidpp = hid_get_drvdata(hdev); struct hidpp_scroll_counter *counter; if (!hidpp) return 0; counter = &hidpp->vertical_wheel_counter; /* A scroll event may occur before the multiplier has been retrieved or * the input device set, or high-res scroll enabling may fail. In such * cases we must return early (falling back to default behaviour) to * avoid a crash in hidpp_scroll_counter_handle_scroll. */ if (!(hidpp->capabilities & HIDPP_CAPABILITY_HI_RES_SCROLL) || value == 0 || hidpp->input == NULL || counter->wheel_multiplier == 0) return 0; hidpp_scroll_counter_handle_scroll(hidpp->input, counter, value); return 1; } static int hidpp_initialize_battery(struct hidpp_device *hidpp) { static atomic_t battery_no = ATOMIC_INIT(0); struct power_supply_config cfg = { .drv_data = hidpp }; struct power_supply_desc *desc = &hidpp->battery.desc; enum power_supply_property *battery_props; struct hidpp_battery *battery; unsigned int num_battery_props; unsigned long n; int ret; if (hidpp->battery.ps) return 0; hidpp->battery.feature_index = 0xff; hidpp->battery.solar_feature_index = 0xff; hidpp->battery.voltage_feature_index = 0xff; hidpp->battery.adc_measurement_feature_index = 0xff; if (hidpp->protocol_major >= 2) { if (hidpp->quirks & HIDPP_QUIRK_CLASS_K750) ret = hidpp_solar_request_battery_event(hidpp); else { /* we only support one battery feature right now, so let's first check the ones that support battery level first and leave voltage for last */ ret = hidpp20_query_battery_info_1000(hidpp); if (ret) ret = hidpp20_query_battery_info_1004(hidpp); if (ret) ret = hidpp20_query_battery_voltage_info(hidpp); if (ret) ret = hidpp20_query_adc_measurement_info_1f20(hidpp); } if (ret) return ret; hidpp->capabilities |= HIDPP_CAPABILITY_HIDPP20_BATTERY; } else { ret = hidpp10_query_battery_status(hidpp); if (ret) { ret = hidpp10_query_battery_mileage(hidpp); if (ret) return -ENOENT; hidpp->capabilities |= HIDPP_CAPABILITY_BATTERY_MILEAGE; } else { hidpp->capabilities |= HIDPP_CAPABILITY_BATTERY_LEVEL_STATUS; } hidpp->capabilities |= HIDPP_CAPABILITY_HIDPP10_BATTERY; } battery_props = devm_kmemdup(&hidpp->hid_dev->dev, hidpp_battery_props, sizeof(hidpp_battery_props), GFP_KERNEL); if (!battery_props) return -ENOMEM; num_battery_props = ARRAY_SIZE(hidpp_battery_props) - 3; if (hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_MILEAGE || hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_PERCENTAGE || hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_VOLTAGE || hidpp->capabilities & HIDPP_CAPABILITY_ADC_MEASUREMENT) battery_props[num_battery_props++] = POWER_SUPPLY_PROP_CAPACITY; if (hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_LEVEL_STATUS) battery_props[num_battery_props++] = POWER_SUPPLY_PROP_CAPACITY_LEVEL; if (hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_VOLTAGE || hidpp->capabilities & HIDPP_CAPABILITY_ADC_MEASUREMENT) battery_props[num_battery_props++] = POWER_SUPPLY_PROP_VOLTAGE_NOW; battery = &hidpp->battery; n = atomic_inc_return(&battery_no) - 1; desc->properties = battery_props; desc->num_properties = num_battery_props; desc->get_property = 
hidpp_battery_get_property; sprintf(battery->name, "hidpp_battery_%ld", n); desc->name = battery->name; desc->type = POWER_SUPPLY_TYPE_BATTERY; desc->use_for_apm = 0; battery->ps = devm_power_supply_register(&hidpp->hid_dev->dev, &battery->desc, &cfg); if (IS_ERR(battery->ps)) return PTR_ERR(battery->ps); power_supply_powers(battery->ps, &hidpp->hid_dev->dev); return ret; } /* Get name + serial for USB and Bluetooth HID++ devices */ static void hidpp_non_unifying_init(struct hidpp_device *hidpp) { struct hid_device *hdev = hidpp->hid_dev; char *name; /* Bluetooth devices already have their serialnr set */ if (hid_is_usb(hdev)) hidpp_serial_init(hidpp); name = hidpp_get_device_name(hidpp); if (name) { dbg_hid("HID++: Got name: %s\n", name); snprintf(hdev->name, sizeof(hdev->name), "%s", name); kfree(name); } } static int hidpp_input_open(struct input_dev *dev) { struct hid_device *hid = input_get_drvdata(dev); return hid_hw_open(hid); } static void hidpp_input_close(struct input_dev *dev) { struct hid_device *hid = input_get_drvdata(dev); hid_hw_close(hid); } static struct input_dev *hidpp_allocate_input(struct hid_device *hdev) { struct input_dev *input_dev = devm_input_allocate_device(&hdev->dev); struct hidpp_device *hidpp = hid_get_drvdata(hdev); if (!input_dev) return NULL; input_set_drvdata(input_dev, hdev); input_dev->open = hidpp_input_open; input_dev->close = hidpp_input_close; input_dev->name = hidpp->name; input_dev->phys = hdev->phys; input_dev->uniq = hdev->uniq; input_dev->id.bustype = hdev->bus; input_dev->id.vendor = hdev->vendor; input_dev->id.product = hdev->product; input_dev->id.version = hdev->version; input_dev->dev.parent = &hdev->dev; return input_dev; } static void hidpp_connect_event(struct work_struct *work) { struct hidpp_device *hidpp = container_of(work, struct hidpp_device, work); struct hid_device *hdev = hidpp->hid_dev; struct input_dev *input; char *name, *devm_name; int ret; /* Get device version to check if it is connected */ ret = hidpp_root_get_protocol_version(hidpp); if (ret) { hid_dbg(hidpp->hid_dev, "Disconnected\n"); if (hidpp->battery.ps) { hidpp->battery.online = false; hidpp->battery.status = POWER_SUPPLY_STATUS_UNKNOWN; hidpp->battery.level = POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN; power_supply_changed(hidpp->battery.ps); } return; } if (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP) { ret = wtp_connect(hdev); if (ret) return; } else if (hidpp->quirks & HIDPP_QUIRK_CLASS_M560) { ret = m560_send_config_command(hdev); if (ret) return; } else if (hidpp->quirks & HIDPP_QUIRK_CLASS_K400) { ret = k400_connect(hdev); if (ret) return; } if (hidpp->quirks & HIDPP_QUIRK_HIDPP_WHEELS) { ret = hidpp10_wheel_connect(hidpp); if (ret) return; } if (hidpp->quirks & HIDPP_QUIRK_HIDPP_EXTRA_MOUSE_BTNS) { ret = hidpp10_extra_mouse_buttons_connect(hidpp); if (ret) return; } if (hidpp->quirks & HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS) { ret = hidpp10_consumer_keys_connect(hidpp); if (ret) return; } if (hidpp->protocol_major >= 2) { u8 feature_index; if (!hidpp_get_wireless_feature_index(hidpp, &feature_index)) hidpp->wireless_feature_index = feature_index; } if (hidpp->name == hdev->name && hidpp->protocol_major >= 2) { name = hidpp_get_device_name(hidpp); if (name) { devm_name = devm_kasprintf(&hdev->dev, GFP_KERNEL, "%s", name); kfree(name); if (!devm_name) return; hidpp->name = devm_name; } } hidpp_initialize_battery(hidpp); if (!hid_is_usb(hidpp->hid_dev)) hidpp_initialize_hires_scroll(hidpp); /* forward current battery state */ if (hidpp->capabilities & 
HIDPP_CAPABILITY_HIDPP10_BATTERY) { hidpp10_enable_battery_reporting(hidpp); if (hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_MILEAGE) hidpp10_query_battery_mileage(hidpp); else hidpp10_query_battery_status(hidpp); } else if (hidpp->capabilities & HIDPP_CAPABILITY_HIDPP20_BATTERY) { if (hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_VOLTAGE) hidpp20_query_battery_voltage_info(hidpp); else if (hidpp->capabilities & HIDPP_CAPABILITY_UNIFIED_BATTERY) hidpp20_query_battery_info_1004(hidpp); else if (hidpp->capabilities & HIDPP_CAPABILITY_ADC_MEASUREMENT) hidpp20_query_adc_measurement_info_1f20(hidpp); else hidpp20_query_battery_info_1000(hidpp); } if (hidpp->battery.ps) power_supply_changed(hidpp->battery.ps); if (hidpp->capabilities & HIDPP_CAPABILITY_HI_RES_SCROLL) hi_res_scroll_enable(hidpp); if (!(hidpp->quirks & HIDPP_QUIRK_DELAYED_INIT) || hidpp->delayed_input) /* if the input nodes are already created, we can stop now */ return; input = hidpp_allocate_input(hdev); if (!input) { hid_err(hdev, "cannot allocate new input device: %d\n", ret); return; } hidpp_populate_input(hidpp, input); ret = input_register_device(input); if (ret) { input_free_device(input); return; } hidpp->delayed_input = input; } static DEVICE_ATTR(builtin_power_supply, 0000, NULL, NULL); static struct attribute *sysfs_attrs[] = { &dev_attr_builtin_power_supply.attr, NULL }; static const struct attribute_group ps_attribute_group = { .attrs = sysfs_attrs }; static int hidpp_get_report_length(struct hid_device *hdev, int id) { struct hid_report_enum *re; struct hid_report *report; re = &(hdev->report_enum[HID_OUTPUT_REPORT]); report = re->report_id_hash[id]; if (!report) return 0; return report->field[0]->report_count + 1; } static u8 hidpp_validate_device(struct hid_device *hdev) { struct hidpp_device *hidpp = hid_get_drvdata(hdev); int id, report_length; u8 supported_reports = 0; id = REPORT_ID_HIDPP_SHORT; report_length = hidpp_get_report_length(hdev, id); if (report_length) { if (report_length < HIDPP_REPORT_SHORT_LENGTH) goto bad_device; supported_reports |= HIDPP_REPORT_SHORT_SUPPORTED; } id = REPORT_ID_HIDPP_LONG; report_length = hidpp_get_report_length(hdev, id); if (report_length) { if (report_length < HIDPP_REPORT_LONG_LENGTH) goto bad_device; supported_reports |= HIDPP_REPORT_LONG_SUPPORTED; } id = REPORT_ID_HIDPP_VERY_LONG; report_length = hidpp_get_report_length(hdev, id); if (report_length) { if (report_length < HIDPP_REPORT_LONG_LENGTH || report_length > HIDPP_REPORT_VERY_LONG_MAX_LENGTH) goto bad_device; supported_reports |= HIDPP_REPORT_VERY_LONG_SUPPORTED; hidpp->very_long_report_length = report_length; } return supported_reports; bad_device: hid_warn(hdev, "not enough values in hidpp report %d\n", id); return false; } static bool hidpp_application_equals(struct hid_device *hdev, unsigned int application) { struct list_head *report_list; struct hid_report *report; report_list = &hdev->report_enum[HID_INPUT_REPORT].report_list; report = list_first_entry_or_null(report_list, struct hid_report, list); return report && report->application == application; } static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id) { struct hidpp_device *hidpp; int ret; unsigned int connect_mask = HID_CONNECT_DEFAULT; /* report_fixup needs drvdata to be set before we call hid_parse */ hidpp = devm_kzalloc(&hdev->dev, sizeof(*hidpp), GFP_KERNEL); if (!hidpp) return -ENOMEM; hidpp->hid_dev = hdev; hidpp->name = hdev->name; hidpp->quirks = id->driver_data; hid_set_drvdata(hdev, hidpp); ret = 
hid_parse(hdev); if (ret) { hid_err(hdev, "%s:parse failed\n", __func__); return ret; } /* * Make sure the device is HID++ capable, otherwise treat as generic HID */ hidpp->supported_reports = hidpp_validate_device(hdev); if (!hidpp->supported_reports) { hid_set_drvdata(hdev, NULL); devm_kfree(&hdev->dev, hidpp); return hid_hw_start(hdev, HID_CONNECT_DEFAULT); } if (id->group == HID_GROUP_LOGITECH_27MHZ_DEVICE && hidpp_application_equals(hdev, HID_GD_MOUSE)) hidpp->quirks |= HIDPP_QUIRK_HIDPP_WHEELS | HIDPP_QUIRK_HIDPP_EXTRA_MOUSE_BTNS; if (id->group == HID_GROUP_LOGITECH_27MHZ_DEVICE && hidpp_application_equals(hdev, HID_GD_KEYBOARD)) hidpp->quirks |= HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS; if (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP) { ret = wtp_allocate(hdev, id); if (ret) return ret; } else if (hidpp->quirks & HIDPP_QUIRK_CLASS_K400) { ret = k400_allocate(hdev); if (ret) return ret; } INIT_WORK(&hidpp->work, hidpp_connect_event); mutex_init(&hidpp->send_mutex); init_waitqueue_head(&hidpp->wait); /* indicates we are handling the battery properties in the kernel */ ret = sysfs_create_group(&hdev->dev.kobj, &ps_attribute_group); if (ret) hid_warn(hdev, "Cannot allocate sysfs group for %s\n", hdev->name); /* * First call hid_hw_start(hdev, 0) to allow IO without connecting any * hid subdrivers (hid-input, hidraw). This allows retrieving the dev's * name and serial number and store these in hdev->name and hdev->uniq, * before the hid-input and hidraw drivers expose these to userspace. */ ret = hid_hw_start(hdev, 0); if (ret) { hid_err(hdev, "hw start failed\n"); goto hid_hw_start_fail; } ret = hid_hw_open(hdev); if (ret < 0) { dev_err(&hdev->dev, "%s:hid_hw_open returned error:%d\n", __func__, ret); goto hid_hw_open_fail; } /* Allow incoming packets */ hid_device_io_start(hdev); /* Get name + serial, store in hdev->name + hdev->uniq */ if (id->group == HID_GROUP_LOGITECH_DJ_DEVICE) hidpp_unifying_init(hidpp); else hidpp_non_unifying_init(hidpp); if (hidpp->quirks & HIDPP_QUIRK_DELAYED_INIT) connect_mask &= ~HID_CONNECT_HIDINPUT; /* Now export the actual inputs and hidraw nodes to the world */ hid_device_io_stop(hdev); ret = hid_connect(hdev, connect_mask); if (ret) { hid_err(hdev, "%s:hid_connect returned error %d\n", __func__, ret); goto hid_hw_init_fail; } /* Check for connected devices now that incoming packets will not be disabled again */ hid_device_io_start(hdev); schedule_work(&hidpp->work); flush_work(&hidpp->work); if (hidpp->quirks & HIDPP_QUIRK_CLASS_G920) { struct hidpp_ff_private_data data; ret = g920_get_config(hidpp, &data); if (!ret) ret = hidpp_ff_init(hidpp, &data); if (ret) hid_warn(hidpp->hid_dev, "Unable to initialize force feedback support, errno %d\n", ret); } /* * This relies on logi_dj_ll_close() being a no-op so that DJ connection * events will still be received. 
*/ hid_hw_close(hdev); return ret; hid_hw_init_fail: hid_hw_close(hdev); hid_hw_open_fail: hid_hw_stop(hdev); hid_hw_start_fail: sysfs_remove_group(&hdev->dev.kobj, &ps_attribute_group); cancel_work_sync(&hidpp->work); mutex_destroy(&hidpp->send_mutex); return ret; } static void hidpp_remove(struct hid_device *hdev) { struct hidpp_device *hidpp = hid_get_drvdata(hdev); if (!hidpp) return hid_hw_stop(hdev); sysfs_remove_group(&hdev->dev.kobj, &ps_attribute_group); hid_hw_stop(hdev); cancel_work_sync(&hidpp->work); mutex_destroy(&hidpp->send_mutex); } #define LDJ_DEVICE(product) \ HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE, \ USB_VENDOR_ID_LOGITECH, (product)) #define L27MHZ_DEVICE(product) \ HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_27MHZ_DEVICE, \ USB_VENDOR_ID_LOGITECH, (product)) static const struct hid_device_id hidpp_devices[] = { { /* wireless touchpad */ LDJ_DEVICE(0x4011), .driver_data = HIDPP_QUIRK_CLASS_WTP | HIDPP_QUIRK_DELAYED_INIT | HIDPP_QUIRK_WTP_PHYSICAL_BUTTONS }, { /* wireless touchpad T650 */ LDJ_DEVICE(0x4101), .driver_data = HIDPP_QUIRK_CLASS_WTP | HIDPP_QUIRK_DELAYED_INIT }, { /* wireless touchpad T651 */ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_T651), .driver_data = HIDPP_QUIRK_CLASS_WTP | HIDPP_QUIRK_DELAYED_INIT }, { /* Mouse Logitech Anywhere MX */ LDJ_DEVICE(0x1017), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_1P0 }, { /* Mouse logitech M560 */ LDJ_DEVICE(0x402d), .driver_data = HIDPP_QUIRK_DELAYED_INIT | HIDPP_QUIRK_CLASS_M560 }, { /* Mouse Logitech M705 (firmware RQM17) */ LDJ_DEVICE(0x101b), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_1P0 }, { /* Mouse Logitech Performance MX */ LDJ_DEVICE(0x101a), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_1P0 }, { /* Keyboard logitech K400 */ LDJ_DEVICE(0x4024), .driver_data = HIDPP_QUIRK_CLASS_K400 }, { /* Solar Keyboard Logitech K750 */ LDJ_DEVICE(0x4002), .driver_data = HIDPP_QUIRK_CLASS_K750 }, { /* Keyboard MX5000 (Bluetooth-receiver in HID proxy mode) */ LDJ_DEVICE(0xb305), .driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS }, { /* Dinovo Edge (Bluetooth-receiver in HID proxy mode) */ LDJ_DEVICE(0xb309), .driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS }, { /* Keyboard MX5500 (Bluetooth-receiver in HID proxy mode) */ LDJ_DEVICE(0xb30b), .driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS }, { LDJ_DEVICE(HID_ANY_ID) }, { /* Keyboard LX501 (Y-RR53) */ L27MHZ_DEVICE(0x0049), .driver_data = HIDPP_QUIRK_KBD_ZOOM_WHEEL }, { /* Keyboard MX3000 (Y-RAM74) */ L27MHZ_DEVICE(0x0057), .driver_data = HIDPP_QUIRK_KBD_SCROLL_WHEEL }, { /* Keyboard MX3200 (Y-RAV80) */ L27MHZ_DEVICE(0x005c), .driver_data = HIDPP_QUIRK_KBD_ZOOM_WHEEL }, { /* S510 Media Remote */ L27MHZ_DEVICE(0x00fe), .driver_data = HIDPP_QUIRK_KBD_SCROLL_WHEEL }, { L27MHZ_DEVICE(HID_ANY_ID) }, { /* Logitech G403 Wireless Gaming Mouse over USB */ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC082) }, { /* Logitech G502 Lightspeed Wireless Gaming Mouse over USB */ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC08D) }, { /* Logitech G703 Gaming Mouse over USB */ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC087) }, { /* Logitech G703 Hero Gaming Mouse over USB */ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC090) }, { /* Logitech G900 Gaming Mouse over USB */ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC081) }, { /* Logitech G903 Gaming Mouse over USB */ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC086) }, { /* Logitech G Pro Gaming Mouse over USB */ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC088) }, { /* MX Vertical over USB */ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 
0xC08A) }, { /* Logitech G903 Hero Gaming Mouse over USB */ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC091) }, { /* Logitech G915 TKL Keyboard over USB */ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC343) }, { /* Logitech G920 Wheel over USB */ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G920_WHEEL), .driver_data = HIDPP_QUIRK_CLASS_G920 | HIDPP_QUIRK_FORCE_OUTPUT_REPORTS}, { /* Logitech G923 Wheel (Xbox version) over USB */ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G923_XBOX_WHEEL), .driver_data = HIDPP_QUIRK_CLASS_G920 | HIDPP_QUIRK_FORCE_OUTPUT_REPORTS }, { /* Logitech G Pro X Superlight Gaming Mouse over USB */ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC094) }, { /* Logitech G Pro X Superlight 2 Gaming Mouse over USB */ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC09b) }, { /* G935 Gaming Headset */ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0x0a87), .driver_data = HIDPP_QUIRK_WIRELESS_STATUS }, { /* MX5000 keyboard over Bluetooth */ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb305), .driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS }, { /* Dinovo Edge keyboard over Bluetooth */ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb309), .driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS }, { /* MX5500 keyboard over Bluetooth */ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb30b), .driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS }, { /* Logitech G915 TKL keyboard over Bluetooth */ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb35f) }, { /* M-RCQ142 V470 Cordless Laser Mouse over Bluetooth */ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb008) }, { /* MX Master mouse over Bluetooth */ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb012) }, { /* M720 Triathlon mouse over Bluetooth */ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb015) }, { /* MX Master 2S mouse over Bluetooth */ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb019) }, { /* MX Ergo trackball over Bluetooth */ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb01d) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb01e) }, { /* MX Vertical mouse over Bluetooth */ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb020) }, { /* Signature M650 over Bluetooth */ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb02a) }, { /* MX Master 3 mouse over Bluetooth */ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb023) }, { /* MX Anywhere 3 mouse over Bluetooth */ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb025) }, { /* MX Master 3S mouse over Bluetooth */ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb034) }, { /* MX Anywhere 3SB mouse over Bluetooth */ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb038) }, {} }; MODULE_DEVICE_TABLE(hid, hidpp_devices); static const struct hid_usage_id hidpp_usages[] = { { HID_GD_WHEEL, EV_REL, REL_WHEEL_HI_RES }, { HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1} }; static struct hid_driver hidpp_driver = { .name = "logitech-hidpp-device", .id_table = hidpp_devices, .report_fixup = hidpp_report_fixup, .probe = hidpp_probe, .remove = hidpp_remove, .raw_event = hidpp_raw_event, .usage_table = hidpp_usages, .event = hidpp_event, .input_configured = hidpp_input_configured, .input_mapping = hidpp_input_mapping, .input_mapped = hidpp_input_mapped, }; module_hid_driver(hidpp_driver);
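/* Illustrative sketch only, not an entry from the table above: support for an additional device is added by extending hidpp_devices[]. For example, a hypothetical Unifying mouse that only needs HID++ 1.0 hi-res scrolling could be declared as { LDJ_DEVICE(0x4fff), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_1P0 }, where 0x4fff is a placeholder product id; devices that need no quirks are already matched by the catch-all { LDJ_DEVICE(HID_ANY_ID) } entry. */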
// SPDX-License-Identifier: GPL-2.0-or-later /* * HID driver for Sony DualSense(TM) controller. * * Copyright (c) 2020-2022 Sony Interactive Entertainment */ #include <linux/bits.h> #include <linux/crc32.h> #include <linux/device.h> #include <linux/hid.h> #include <linux/idr.h> #include <linux/input/mt.h> #include <linux/leds.h> #include <linux/led-class-multicolor.h> #include <linux/module.h> #include <linux/unaligned.h> #include "hid-ids.h" /* List of connected playstation devices. */ static DEFINE_MUTEX(ps_devices_lock); static LIST_HEAD(ps_devices_list); static DEFINE_IDA(ps_player_id_allocator); #define HID_PLAYSTATION_VERSION_PATCH 0x8000 enum PS_TYPE { PS_TYPE_PS4_DUALSHOCK4, PS_TYPE_PS5_DUALSENSE, }; /* Base class for playstation devices. */ struct ps_device { struct list_head list; struct hid_device *hdev; spinlock_t lock; uint32_t player_id; struct power_supply_desc battery_desc; struct power_supply *battery; uint8_t battery_capacity; int battery_status; const char *input_dev_name; /* Name of primary input device. */ uint8_t mac_address[6]; /* Note: stored in little endian order.
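 * For illustration (not part of the driver): a controller whose Bluetooth
 * address reads AA:BB:CC:DD:EE:FF is stored here as
 *   mac_address[6] = { 0xFF, 0xEE, 0xDD, 0xCC, 0xBB, 0xAA };
 * which is why the code below prints it with the reversed-order %pMR
 * format specifier to get the familiar representation back.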
*/ uint32_t hw_version; uint32_t fw_version; int (*parse_report)(struct ps_device *dev, struct hid_report *report, u8 *data, int size); void (*remove)(struct ps_device *dev); }; /* Calibration data for playstation motion sensors. */ struct ps_calibration_data { int abs_code; short bias; int sens_numer; int sens_denom; }; struct ps_led_info { const char *name; const char *color; int max_brightness; enum led_brightness (*brightness_get)(struct led_classdev *cdev); int (*brightness_set)(struct led_classdev *cdev, enum led_brightness); int (*blink_set)(struct led_classdev *led, unsigned long *on, unsigned long *off); }; /* Seed values for DualShock4 / DualSense CRC32 for different report types. */ #define PS_INPUT_CRC32_SEED 0xA1 #define PS_OUTPUT_CRC32_SEED 0xA2 #define PS_FEATURE_CRC32_SEED 0xA3 #define DS_INPUT_REPORT_USB 0x01 #define DS_INPUT_REPORT_USB_SIZE 64 #define DS_INPUT_REPORT_BT 0x31 #define DS_INPUT_REPORT_BT_SIZE 78 #define DS_OUTPUT_REPORT_USB 0x02 #define DS_OUTPUT_REPORT_USB_SIZE 63 #define DS_OUTPUT_REPORT_BT 0x31 #define DS_OUTPUT_REPORT_BT_SIZE 78 #define DS_FEATURE_REPORT_CALIBRATION 0x05 #define DS_FEATURE_REPORT_CALIBRATION_SIZE 41 #define DS_FEATURE_REPORT_PAIRING_INFO 0x09 #define DS_FEATURE_REPORT_PAIRING_INFO_SIZE 20 #define DS_FEATURE_REPORT_FIRMWARE_INFO 0x20 #define DS_FEATURE_REPORT_FIRMWARE_INFO_SIZE 64 /* Button masks for DualSense input report. */ #define DS_BUTTONS0_HAT_SWITCH GENMASK(3, 0) #define DS_BUTTONS0_SQUARE BIT(4) #define DS_BUTTONS0_CROSS BIT(5) #define DS_BUTTONS0_CIRCLE BIT(6) #define DS_BUTTONS0_TRIANGLE BIT(7) #define DS_BUTTONS1_L1 BIT(0) #define DS_BUTTONS1_R1 BIT(1) #define DS_BUTTONS1_L2 BIT(2) #define DS_BUTTONS1_R2 BIT(3) #define DS_BUTTONS1_CREATE BIT(4) #define DS_BUTTONS1_OPTIONS BIT(5) #define DS_BUTTONS1_L3 BIT(6) #define DS_BUTTONS1_R3 BIT(7) #define DS_BUTTONS2_PS_HOME BIT(0) #define DS_BUTTONS2_TOUCHPAD BIT(1) #define DS_BUTTONS2_MIC_MUTE BIT(2) /* Status field of DualSense input report. */ #define DS_STATUS_BATTERY_CAPACITY GENMASK(3, 0) #define DS_STATUS_CHARGING GENMASK(7, 4) #define DS_STATUS_CHARGING_SHIFT 4 /* Feature version from DualSense Firmware Info report. */ #define DS_FEATURE_VERSION(major, minor) ((major & 0xff) << 8 | (minor & 0xff)) /* * Status of a DualSense touch point contact. * Contact IDs, with highest bit set are 'inactive' * and any associated data is then invalid. */ #define DS_TOUCH_POINT_INACTIVE BIT(7) /* Magic value required in tag field of Bluetooth output report. */ #define DS_OUTPUT_TAG 0x10 /* Flags for DualSense output report. 
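 * Roughly speaking, each valid_flag bit tells the firmware which fields of
 * the (otherwise zeroed) output report to act on; fields whose flag is not
 * set are expected to be ignored. For example, a lightbar-only update, as
 * built by dualsense_output_worker() further down, looks like:
 *   common->valid_flag1 |= DS_OUTPUT_VALID_FLAG1_LIGHTBAR_CONTROL_ENABLE;
 *   common->lightbar_red   = ds->lightbar_red;
 *   common->lightbar_green = ds->lightbar_green;
 *   common->lightbar_blue  = ds->lightbar_blue;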
*/ #define DS_OUTPUT_VALID_FLAG0_COMPATIBLE_VIBRATION BIT(0) #define DS_OUTPUT_VALID_FLAG0_HAPTICS_SELECT BIT(1) #define DS_OUTPUT_VALID_FLAG1_MIC_MUTE_LED_CONTROL_ENABLE BIT(0) #define DS_OUTPUT_VALID_FLAG1_POWER_SAVE_CONTROL_ENABLE BIT(1) #define DS_OUTPUT_VALID_FLAG1_LIGHTBAR_CONTROL_ENABLE BIT(2) #define DS_OUTPUT_VALID_FLAG1_RELEASE_LEDS BIT(3) #define DS_OUTPUT_VALID_FLAG1_PLAYER_INDICATOR_CONTROL_ENABLE BIT(4) #define DS_OUTPUT_VALID_FLAG2_LIGHTBAR_SETUP_CONTROL_ENABLE BIT(1) #define DS_OUTPUT_VALID_FLAG2_COMPATIBLE_VIBRATION2 BIT(2) #define DS_OUTPUT_POWER_SAVE_CONTROL_MIC_MUTE BIT(4) #define DS_OUTPUT_LIGHTBAR_SETUP_LIGHT_OUT BIT(1) /* DualSense hardware limits */ #define DS_ACC_RES_PER_G 8192 #define DS_ACC_RANGE (4*DS_ACC_RES_PER_G) #define DS_GYRO_RES_PER_DEG_S 1024 #define DS_GYRO_RANGE (2048*DS_GYRO_RES_PER_DEG_S) #define DS_TOUCHPAD_WIDTH 1920 #define DS_TOUCHPAD_HEIGHT 1080 struct dualsense { struct ps_device base; struct input_dev *gamepad; struct input_dev *sensors; struct input_dev *touchpad; /* Update version is used as a feature/capability version. */ uint16_t update_version; /* Calibration data for accelerometer and gyroscope. */ struct ps_calibration_data accel_calib_data[3]; struct ps_calibration_data gyro_calib_data[3]; /* Timestamp for sensor data */ bool sensor_timestamp_initialized; uint32_t prev_sensor_timestamp; uint32_t sensor_timestamp_us; /* Compatible rumble state */ bool use_vibration_v2; bool update_rumble; uint8_t motor_left; uint8_t motor_right; /* RGB lightbar */ struct led_classdev_mc lightbar; bool update_lightbar; uint8_t lightbar_red; uint8_t lightbar_green; uint8_t lightbar_blue; /* Microphone */ bool update_mic_mute; bool mic_muted; bool last_btn_mic_state; /* Player leds */ bool update_player_leds; uint8_t player_leds_state; struct led_classdev player_leds[5]; struct work_struct output_worker; bool output_worker_initialized; void *output_report_dmabuf; uint8_t output_seq; /* Sequence number for output report. */ }; struct dualsense_touch_point { uint8_t contact; uint8_t x_lo; uint8_t x_hi:4, y_lo:4; uint8_t y_hi; } __packed; static_assert(sizeof(struct dualsense_touch_point) == 4); /* Main DualSense input report excluding any BT/USB specific headers. */ struct dualsense_input_report { uint8_t x, y; uint8_t rx, ry; uint8_t z, rz; uint8_t seq_number; uint8_t buttons[4]; uint8_t reserved[4]; /* Motion sensors */ __le16 gyro[3]; /* x, y, z */ __le16 accel[3]; /* x, y, z */ __le32 sensor_timestamp; uint8_t reserved2; /* Touchpad */ struct dualsense_touch_point points[2]; uint8_t reserved3[12]; uint8_t status; uint8_t reserved4[10]; } __packed; /* Common input report size shared equals the size of the USB report minus 1 byte for ReportID. */ static_assert(sizeof(struct dualsense_input_report) == DS_INPUT_REPORT_USB_SIZE - 1); /* Common data between DualSense BT/USB main output report. */ struct dualsense_output_report_common { uint8_t valid_flag0; uint8_t valid_flag1; /* For DualShock 4 compatibility mode. 
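 * These two motor fields take 0-255 values. As a sketch of the scaling used
 * later in dualsense_play_effect(), the 16-bit memless force-feedback
 * magnitudes are simply divided down:
 *   motor_left  = effect->u.rumble.strong_magnitude / 256;
 *   motor_right = effect->u.rumble.weak_magnitude / 256;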
*/ uint8_t motor_right; uint8_t motor_left; /* Audio controls */ uint8_t reserved[4]; uint8_t mute_button_led; uint8_t power_save_control; uint8_t reserved2[28]; /* LEDs and lightbar */ uint8_t valid_flag2; uint8_t reserved3[2]; uint8_t lightbar_setup; uint8_t led_brightness; uint8_t player_leds; uint8_t lightbar_red; uint8_t lightbar_green; uint8_t lightbar_blue; } __packed; static_assert(sizeof(struct dualsense_output_report_common) == 47); struct dualsense_output_report_bt { uint8_t report_id; /* 0x31 */ uint8_t seq_tag; uint8_t tag; struct dualsense_output_report_common common; uint8_t reserved[24]; __le32 crc32; } __packed; static_assert(sizeof(struct dualsense_output_report_bt) == DS_OUTPUT_REPORT_BT_SIZE); struct dualsense_output_report_usb { uint8_t report_id; /* 0x02 */ struct dualsense_output_report_common common; uint8_t reserved[15]; } __packed; static_assert(sizeof(struct dualsense_output_report_usb) == DS_OUTPUT_REPORT_USB_SIZE); /* * The DualSense has a main output report used to control most features. It is * largely the same between Bluetooth and USB except for different headers and CRC. * This structure hides the differences between the two to simplify sending output reports. */ struct dualsense_output_report { uint8_t *data; /* Start of data */ uint8_t len; /* Size of output report */ /* Points to Bluetooth data payload in the case of a Bluetooth report, else NULL. */ struct dualsense_output_report_bt *bt; /* Points to USB data payload in the case of a USB report, else NULL. */ struct dualsense_output_report_usb *usb; /* Points to common section of report, so past any headers. */ struct dualsense_output_report_common *common; }; #define DS4_INPUT_REPORT_USB 0x01 #define DS4_INPUT_REPORT_USB_SIZE 64 #define DS4_INPUT_REPORT_BT_MINIMAL 0x01 #define DS4_INPUT_REPORT_BT_MINIMAL_SIZE 10 #define DS4_INPUT_REPORT_BT 0x11 #define DS4_INPUT_REPORT_BT_SIZE 78 #define DS4_OUTPUT_REPORT_USB 0x05 #define DS4_OUTPUT_REPORT_USB_SIZE 32 #define DS4_OUTPUT_REPORT_BT 0x11 #define DS4_OUTPUT_REPORT_BT_SIZE 78 #define DS4_FEATURE_REPORT_CALIBRATION 0x02 #define DS4_FEATURE_REPORT_CALIBRATION_SIZE 37 #define DS4_FEATURE_REPORT_CALIBRATION_BT 0x05 #define DS4_FEATURE_REPORT_CALIBRATION_BT_SIZE 41 #define DS4_FEATURE_REPORT_FIRMWARE_INFO 0xa3 #define DS4_FEATURE_REPORT_FIRMWARE_INFO_SIZE 49 #define DS4_FEATURE_REPORT_PAIRING_INFO 0x12 #define DS4_FEATURE_REPORT_PAIRING_INFO_SIZE 16 /* * Status of a DualShock4 touch point contact. * Contact IDs with the highest bit set are 'inactive' * and any associated data is then invalid. */ #define DS4_TOUCH_POINT_INACTIVE BIT(7) /* Status field of DualShock4 input report. */ #define DS4_STATUS0_BATTERY_CAPACITY GENMASK(3, 0) #define DS4_STATUS0_CABLE_STATE BIT(4) /* Battery status within battery_status field. */ #define DS4_BATTERY_STATUS_FULL 11 /* Status1 bit2 contains dongle connection state: * 0 = connected * 1 = disconnected */ #define DS4_STATUS1_DONGLE_STATE BIT(2) /* The lower 6 bits of hw_control of the Bluetooth main output report * control the interval at which DualShock 4 reports data: * 0x00 - 1ms * 0x01 - 1ms * 0x02 - 2ms * 0x3E - 62ms * 0x3F - disabled */ #define DS4_OUTPUT_HWCTL_BT_POLL_MASK 0x3F /* Default to 4ms poll interval, which is the same as USB (not adjustable). */ #define DS4_BT_DEFAULT_POLL_INTERVAL_MS 4 #define DS4_OUTPUT_HWCTL_CRC32 0x40 #define DS4_OUTPUT_HWCTL_HID 0x80 /* Flags for DualShock4 output report.
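 * As with the DualSense flags above, these bits select which parts of a
 * DualShock4 output report the firmware should apply. A sketch of typical
 * combinations (assumed, based on the fields defined below): a rumble update
 * sets DS4_OUTPUT_VALID_FLAG0_MOTOR together with motor_left/motor_right, a
 * lightbar change sets DS4_OUTPUT_VALID_FLAG0_LED with the RGB bytes, and
 * blinking additionally needs DS4_OUTPUT_VALID_FLAG0_LED_BLINK with the
 * on/off times given in 10ms units.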
*/ #define DS4_OUTPUT_VALID_FLAG0_MOTOR 0x01 #define DS4_OUTPUT_VALID_FLAG0_LED 0x02 #define DS4_OUTPUT_VALID_FLAG0_LED_BLINK 0x04 /* DualShock4 hardware limits */ #define DS4_ACC_RES_PER_G 8192 #define DS4_ACC_RANGE (4*DS_ACC_RES_PER_G) #define DS4_GYRO_RES_PER_DEG_S 1024 #define DS4_GYRO_RANGE (2048*DS_GYRO_RES_PER_DEG_S) #define DS4_LIGHTBAR_MAX_BLINK 255 /* 255 centiseconds */ #define DS4_TOUCHPAD_WIDTH 1920 #define DS4_TOUCHPAD_HEIGHT 942 enum dualshock4_dongle_state { DONGLE_DISCONNECTED, DONGLE_CALIBRATING, DONGLE_CONNECTED, DONGLE_DISABLED }; struct dualshock4 { struct ps_device base; struct input_dev *gamepad; struct input_dev *sensors; struct input_dev *touchpad; /* Calibration data for accelerometer and gyroscope. */ struct ps_calibration_data accel_calib_data[3]; struct ps_calibration_data gyro_calib_data[3]; /* Only used on dongle to track state transitions. */ enum dualshock4_dongle_state dongle_state; /* Used during calibration. */ struct work_struct dongle_hotplug_worker; /* Timestamp for sensor data */ bool sensor_timestamp_initialized; uint32_t prev_sensor_timestamp; uint32_t sensor_timestamp_us; /* Bluetooth poll interval */ bool update_bt_poll_interval; uint8_t bt_poll_interval; bool update_rumble; uint8_t motor_left; uint8_t motor_right; /* Lightbar leds */ bool update_lightbar; bool update_lightbar_blink; bool lightbar_enabled; /* For use by global LED control. */ uint8_t lightbar_red; uint8_t lightbar_green; uint8_t lightbar_blue; uint8_t lightbar_blink_on; /* In increments of 10ms. */ uint8_t lightbar_blink_off; /* In increments of 10ms. */ struct led_classdev lightbar_leds[4]; struct work_struct output_worker; bool output_worker_initialized; void *output_report_dmabuf; }; struct dualshock4_touch_point { uint8_t contact; uint8_t x_lo; uint8_t x_hi:4, y_lo:4; uint8_t y_hi; } __packed; static_assert(sizeof(struct dualshock4_touch_point) == 4); struct dualshock4_touch_report { uint8_t timestamp; struct dualshock4_touch_point points[2]; } __packed; static_assert(sizeof(struct dualshock4_touch_report) == 9); /* Main DualShock4 input report excluding any BT/USB specific headers. */ struct dualshock4_input_report_common { uint8_t x, y; uint8_t rx, ry; uint8_t buttons[3]; uint8_t z, rz; /* Motion sensors */ __le16 sensor_timestamp; uint8_t sensor_temperature; __le16 gyro[3]; /* x, y, z */ __le16 accel[3]; /* x, y, z */ uint8_t reserved2[5]; uint8_t status[2]; uint8_t reserved3; } __packed; static_assert(sizeof(struct dualshock4_input_report_common) == 32); struct dualshock4_input_report_usb { uint8_t report_id; /* 0x01 */ struct dualshock4_input_report_common common; uint8_t num_touch_reports; struct dualshock4_touch_report touch_reports[3]; uint8_t reserved[3]; } __packed; static_assert(sizeof(struct dualshock4_input_report_usb) == DS4_INPUT_REPORT_USB_SIZE); struct dualshock4_input_report_bt { uint8_t report_id; /* 0x11 */ uint8_t reserved[2]; struct dualshock4_input_report_common common; uint8_t num_touch_reports; struct dualshock4_touch_report touch_reports[4]; /* BT has 4 compared to 3 for USB */ uint8_t reserved2[2]; __le32 crc32; } __packed; static_assert(sizeof(struct dualshock4_input_report_bt) == DS4_INPUT_REPORT_BT_SIZE); /* Common data between Bluetooth and USB DualShock4 output reports. 
*/ struct dualshock4_output_report_common { uint8_t valid_flag0; uint8_t valid_flag1; uint8_t reserved; uint8_t motor_right; uint8_t motor_left; uint8_t lightbar_red; uint8_t lightbar_green; uint8_t lightbar_blue; uint8_t lightbar_blink_on; uint8_t lightbar_blink_off; } __packed; struct dualshock4_output_report_usb { uint8_t report_id; /* 0x5 */ struct dualshock4_output_report_common common; uint8_t reserved[21]; } __packed; static_assert(sizeof(struct dualshock4_output_report_usb) == DS4_OUTPUT_REPORT_USB_SIZE); struct dualshock4_output_report_bt { uint8_t report_id; /* 0x11 */ uint8_t hw_control; uint8_t audio_control; struct dualshock4_output_report_common common; uint8_t reserved[61]; __le32 crc32; } __packed; static_assert(sizeof(struct dualshock4_output_report_bt) == DS4_OUTPUT_REPORT_BT_SIZE); /* * The DualShock4 has a main output report used to control most features. It is * largely the same between Bluetooth and USB except for different headers and CRC. * This structure hide the differences between the two to simplify sending output reports. */ struct dualshock4_output_report { uint8_t *data; /* Start of data */ uint8_t len; /* Size of output report */ /* Points to Bluetooth data payload in case for a Bluetooth report else NULL. */ struct dualshock4_output_report_bt *bt; /* Points to USB data payload in case for a USB report else NULL. */ struct dualshock4_output_report_usb *usb; /* Points to common section of report, so past any headers. */ struct dualshock4_output_report_common *common; }; /* * Common gamepad buttons across DualShock 3 / 4 and DualSense. * Note: for device with a touchpad, touchpad button is not included * as it will be part of the touchpad device. */ static const int ps_gamepad_buttons[] = { BTN_WEST, /* Square */ BTN_NORTH, /* Triangle */ BTN_EAST, /* Circle */ BTN_SOUTH, /* Cross */ BTN_TL, /* L1 */ BTN_TR, /* R1 */ BTN_TL2, /* L2 */ BTN_TR2, /* R2 */ BTN_SELECT, /* Create (PS5) / Share (PS4) */ BTN_START, /* Option */ BTN_THUMBL, /* L3 */ BTN_THUMBR, /* R3 */ BTN_MODE, /* PS Home */ }; static const struct {int x; int y; } ps_gamepad_hat_mapping[] = { {0, -1}, {1, -1}, {1, 0}, {1, 1}, {0, 1}, {-1, 1}, {-1, 0}, {-1, -1}, {0, 0}, }; static int dualshock4_get_calibration_data(struct dualshock4 *ds4); static inline void dualsense_schedule_work(struct dualsense *ds); static inline void dualshock4_schedule_work(struct dualshock4 *ds4); static void dualsense_set_lightbar(struct dualsense *ds, uint8_t red, uint8_t green, uint8_t blue); static void dualshock4_set_default_lightbar_colors(struct dualshock4 *ds4); /* * Add a new ps_device to ps_devices if it doesn't exist. * Return error on duplicate device, which can happen if the same * device is connected using both Bluetooth and USB. 
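 * Example scenario: a pad already connected over Bluetooth is then plugged
 * in over USB. Both transports probe a hid_device reporting the same MAC
 * address, so the memcmp() based lookup below finds the existing entry and
 * the second probe is rejected with -EEXIST instead of exposing a duplicate
 * controller.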
*/ static int ps_devices_list_add(struct ps_device *dev) { struct ps_device *entry; mutex_lock(&ps_devices_lock); list_for_each_entry(entry, &ps_devices_list, list) { if (!memcmp(entry->mac_address, dev->mac_address, sizeof(dev->mac_address))) { hid_err(dev->hdev, "Duplicate device found for MAC address %pMR.\n", dev->mac_address); mutex_unlock(&ps_devices_lock); return -EEXIST; } } list_add_tail(&dev->list, &ps_devices_list); mutex_unlock(&ps_devices_lock); return 0; } static int ps_devices_list_remove(struct ps_device *dev) { mutex_lock(&ps_devices_lock); list_del(&dev->list); mutex_unlock(&ps_devices_lock); return 0; } static int ps_device_set_player_id(struct ps_device *dev) { int ret = ida_alloc(&ps_player_id_allocator, GFP_KERNEL); if (ret < 0) return ret; dev->player_id = ret; return 0; } static void ps_device_release_player_id(struct ps_device *dev) { ida_free(&ps_player_id_allocator, dev->player_id); dev->player_id = U32_MAX; } static struct input_dev *ps_allocate_input_dev(struct hid_device *hdev, const char *name_suffix) { struct input_dev *input_dev; input_dev = devm_input_allocate_device(&hdev->dev); if (!input_dev) return ERR_PTR(-ENOMEM); input_dev->id.bustype = hdev->bus; input_dev->id.vendor = hdev->vendor; input_dev->id.product = hdev->product; input_dev->id.version = hdev->version; input_dev->uniq = hdev->uniq; if (name_suffix) { input_dev->name = devm_kasprintf(&hdev->dev, GFP_KERNEL, "%s %s", hdev->name, name_suffix); if (!input_dev->name) return ERR_PTR(-ENOMEM); } else { input_dev->name = hdev->name; } input_set_drvdata(input_dev, hdev); return input_dev; } static enum power_supply_property ps_power_supply_props[] = { POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_PRESENT, POWER_SUPPLY_PROP_CAPACITY, POWER_SUPPLY_PROP_SCOPE, }; static int ps_battery_get_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) { struct ps_device *dev = power_supply_get_drvdata(psy); uint8_t battery_capacity; int battery_status; unsigned long flags; int ret = 0; spin_lock_irqsave(&dev->lock, flags); battery_capacity = dev->battery_capacity; battery_status = dev->battery_status; spin_unlock_irqrestore(&dev->lock, flags); switch (psp) { case POWER_SUPPLY_PROP_STATUS: val->intval = battery_status; break; case POWER_SUPPLY_PROP_PRESENT: val->intval = 1; break; case POWER_SUPPLY_PROP_CAPACITY: val->intval = battery_capacity; break; case POWER_SUPPLY_PROP_SCOPE: val->intval = POWER_SUPPLY_SCOPE_DEVICE; break; default: ret = -EINVAL; break; } return ret; } static int ps_device_register_battery(struct ps_device *dev) { struct power_supply *battery; struct power_supply_config battery_cfg = { .drv_data = dev }; int ret; dev->battery_desc.type = POWER_SUPPLY_TYPE_BATTERY; dev->battery_desc.properties = ps_power_supply_props; dev->battery_desc.num_properties = ARRAY_SIZE(ps_power_supply_props); dev->battery_desc.get_property = ps_battery_get_property; dev->battery_desc.name = devm_kasprintf(&dev->hdev->dev, GFP_KERNEL, "ps-controller-battery-%pMR", dev->mac_address); if (!dev->battery_desc.name) return -ENOMEM; battery = devm_power_supply_register(&dev->hdev->dev, &dev->battery_desc, &battery_cfg); if (IS_ERR(battery)) { ret = PTR_ERR(battery); hid_err(dev->hdev, "Unable to register battery device: %d\n", ret); return ret; } dev->battery = battery; ret = power_supply_powers(dev->battery, &dev->hdev->dev); if (ret) { hid_err(dev->hdev, "Unable to activate battery device: %d\n", ret); return ret; } return 0; } /* Compute crc32 of HID data and compare against 
expected CRC. */ static bool ps_check_crc32(uint8_t seed, uint8_t *data, size_t len, uint32_t report_crc) { uint32_t crc; crc = crc32_le(0xFFFFFFFF, &seed, 1); crc = ~crc32_le(crc, data, len); return crc == report_crc; } static struct input_dev *ps_gamepad_create(struct hid_device *hdev, int (*play_effect)(struct input_dev *, void *, struct ff_effect *)) { struct input_dev *gamepad; unsigned int i; int ret; gamepad = ps_allocate_input_dev(hdev, NULL); if (IS_ERR(gamepad)) return ERR_CAST(gamepad); input_set_abs_params(gamepad, ABS_X, 0, 255, 0, 0); input_set_abs_params(gamepad, ABS_Y, 0, 255, 0, 0); input_set_abs_params(gamepad, ABS_Z, 0, 255, 0, 0); input_set_abs_params(gamepad, ABS_RX, 0, 255, 0, 0); input_set_abs_params(gamepad, ABS_RY, 0, 255, 0, 0); input_set_abs_params(gamepad, ABS_RZ, 0, 255, 0, 0); input_set_abs_params(gamepad, ABS_HAT0X, -1, 1, 0, 0); input_set_abs_params(gamepad, ABS_HAT0Y, -1, 1, 0, 0); for (i = 0; i < ARRAY_SIZE(ps_gamepad_buttons); i++) input_set_capability(gamepad, EV_KEY, ps_gamepad_buttons[i]); #if IS_ENABLED(CONFIG_PLAYSTATION_FF) if (play_effect) { input_set_capability(gamepad, EV_FF, FF_RUMBLE); input_ff_create_memless(gamepad, NULL, play_effect); } #endif ret = input_register_device(gamepad); if (ret) return ERR_PTR(ret); return gamepad; } static int ps_get_report(struct hid_device *hdev, uint8_t report_id, uint8_t *buf, size_t size, bool check_crc) { int ret; ret = hid_hw_raw_request(hdev, report_id, buf, size, HID_FEATURE_REPORT, HID_REQ_GET_REPORT); if (ret < 0) { hid_err(hdev, "Failed to retrieve feature with reportID %d: %d\n", report_id, ret); return ret; } if (ret != size) { hid_err(hdev, "Invalid byte count transferred, expected %zu got %d\n", size, ret); return -EINVAL; } if (buf[0] != report_id) { hid_err(hdev, "Invalid reportID received, expected %d got %d\n", report_id, buf[0]); return -EINVAL; } if (hdev->bus == BUS_BLUETOOTH && check_crc) { /* Last 4 bytes contains crc32. */ uint8_t crc_offset = size - 4; uint32_t report_crc = get_unaligned_le32(&buf[crc_offset]); if (!ps_check_crc32(PS_FEATURE_CRC32_SEED, buf, crc_offset, report_crc)) { hid_err(hdev, "CRC check failed for reportID=%d\n", report_id); return -EILSEQ; } } return 0; } static int ps_led_register(struct ps_device *ps_dev, struct led_classdev *led, const struct ps_led_info *led_info) { int ret; if (led_info->name) { led->name = devm_kasprintf(&ps_dev->hdev->dev, GFP_KERNEL, "%s:%s:%s", ps_dev->input_dev_name, led_info->color, led_info->name); } else { /* Backwards compatible mode for hid-sony, but not compliant with LED class spec. */ led->name = devm_kasprintf(&ps_dev->hdev->dev, GFP_KERNEL, "%s:%s", ps_dev->input_dev_name, led_info->color); } if (!led->name) return -ENOMEM; led->brightness = 0; led->max_brightness = led_info->max_brightness; led->flags = LED_CORE_SUSPENDRESUME; led->brightness_get = led_info->brightness_get; led->brightness_set_blocking = led_info->brightness_set; led->blink_set = led_info->blink_set; ret = devm_led_classdev_register(&ps_dev->hdev->dev, led); if (ret) { hid_err(ps_dev->hdev, "Failed to register LED %s: %d\n", led_info->name, ret); return ret; } return 0; } /* Register a DualSense/DualShock4 RGB lightbar represented by a multicolor LED. 
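 * Once registered, the lightbar shows up as a standard multicolor LED class
 * device named "<input device>:rgb:indicator". A hypothetical userspace
 * example (the "input8" path component is made up and depends on the system):
 *   echo "255 0 0" > /sys/class/leds/input8:rgb:indicator/multi_intensity
 *   echo 255 > /sys/class/leds/input8:rgb:indicator/brightness
 * would request a red lightbar via the brightness_set callback, e.g.
 * dualsense_lightbar_set_brightness().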
*/ static int ps_lightbar_register(struct ps_device *ps_dev, struct led_classdev_mc *lightbar_mc_dev, int (*brightness_set)(struct led_classdev *, enum led_brightness)) { struct hid_device *hdev = ps_dev->hdev; struct mc_subled *mc_led_info; struct led_classdev *led_cdev; int ret; mc_led_info = devm_kmalloc_array(&hdev->dev, 3, sizeof(*mc_led_info), GFP_KERNEL | __GFP_ZERO); if (!mc_led_info) return -ENOMEM; mc_led_info[0].color_index = LED_COLOR_ID_RED; mc_led_info[1].color_index = LED_COLOR_ID_GREEN; mc_led_info[2].color_index = LED_COLOR_ID_BLUE; lightbar_mc_dev->subled_info = mc_led_info; lightbar_mc_dev->num_colors = 3; led_cdev = &lightbar_mc_dev->led_cdev; led_cdev->name = devm_kasprintf(&hdev->dev, GFP_KERNEL, "%s:rgb:indicator", ps_dev->input_dev_name); if (!led_cdev->name) return -ENOMEM; led_cdev->brightness = 255; led_cdev->max_brightness = 255; led_cdev->brightness_set_blocking = brightness_set; ret = devm_led_classdev_multicolor_register(&hdev->dev, lightbar_mc_dev); if (ret < 0) { hid_err(hdev, "Cannot register multicolor LED device\n"); return ret; } return 0; } static struct input_dev *ps_sensors_create(struct hid_device *hdev, int accel_range, int accel_res, int gyro_range, int gyro_res) { struct input_dev *sensors; int ret; sensors = ps_allocate_input_dev(hdev, "Motion Sensors"); if (IS_ERR(sensors)) return ERR_CAST(sensors); __set_bit(INPUT_PROP_ACCELEROMETER, sensors->propbit); __set_bit(EV_MSC, sensors->evbit); __set_bit(MSC_TIMESTAMP, sensors->mscbit); /* Accelerometer */ input_set_abs_params(sensors, ABS_X, -accel_range, accel_range, 16, 0); input_set_abs_params(sensors, ABS_Y, -accel_range, accel_range, 16, 0); input_set_abs_params(sensors, ABS_Z, -accel_range, accel_range, 16, 0); input_abs_set_res(sensors, ABS_X, accel_res); input_abs_set_res(sensors, ABS_Y, accel_res); input_abs_set_res(sensors, ABS_Z, accel_res); /* Gyroscope */ input_set_abs_params(sensors, ABS_RX, -gyro_range, gyro_range, 16, 0); input_set_abs_params(sensors, ABS_RY, -gyro_range, gyro_range, 16, 0); input_set_abs_params(sensors, ABS_RZ, -gyro_range, gyro_range, 16, 0); input_abs_set_res(sensors, ABS_RX, gyro_res); input_abs_set_res(sensors, ABS_RY, gyro_res); input_abs_set_res(sensors, ABS_RZ, gyro_res); ret = input_register_device(sensors); if (ret) return ERR_PTR(ret); return sensors; } static struct input_dev *ps_touchpad_create(struct hid_device *hdev, int width, int height, unsigned int num_contacts) { struct input_dev *touchpad; int ret; touchpad = ps_allocate_input_dev(hdev, "Touchpad"); if (IS_ERR(touchpad)) return ERR_CAST(touchpad); /* Map button underneath touchpad to BTN_LEFT. 
*/ input_set_capability(touchpad, EV_KEY, BTN_LEFT); __set_bit(INPUT_PROP_BUTTONPAD, touchpad->propbit); input_set_abs_params(touchpad, ABS_MT_POSITION_X, 0, width - 1, 0, 0); input_set_abs_params(touchpad, ABS_MT_POSITION_Y, 0, height - 1, 0, 0); ret = input_mt_init_slots(touchpad, num_contacts, INPUT_MT_POINTER); if (ret) return ERR_PTR(ret); ret = input_register_device(touchpad); if (ret) return ERR_PTR(ret); return touchpad; } static ssize_t firmware_version_show(struct device *dev, struct device_attribute *attr, char *buf) { struct hid_device *hdev = to_hid_device(dev); struct ps_device *ps_dev = hid_get_drvdata(hdev); return sysfs_emit(buf, "0x%08x\n", ps_dev->fw_version); } static DEVICE_ATTR_RO(firmware_version); static ssize_t hardware_version_show(struct device *dev, struct device_attribute *attr, char *buf) { struct hid_device *hdev = to_hid_device(dev); struct ps_device *ps_dev = hid_get_drvdata(hdev); return sysfs_emit(buf, "0x%08x\n", ps_dev->hw_version); } static DEVICE_ATTR_RO(hardware_version); static struct attribute *ps_device_attrs[] = { &dev_attr_firmware_version.attr, &dev_attr_hardware_version.attr, NULL }; ATTRIBUTE_GROUPS(ps_device); static int dualsense_get_calibration_data(struct dualsense *ds) { struct hid_device *hdev = ds->base.hdev; short gyro_pitch_bias, gyro_pitch_plus, gyro_pitch_minus; short gyro_yaw_bias, gyro_yaw_plus, gyro_yaw_minus; short gyro_roll_bias, gyro_roll_plus, gyro_roll_minus; short gyro_speed_plus, gyro_speed_minus; short acc_x_plus, acc_x_minus; short acc_y_plus, acc_y_minus; short acc_z_plus, acc_z_minus; int speed_2x; int range_2g; int ret = 0; int i; uint8_t *buf; buf = kzalloc(DS_FEATURE_REPORT_CALIBRATION_SIZE, GFP_KERNEL); if (!buf) return -ENOMEM; ret = ps_get_report(ds->base.hdev, DS_FEATURE_REPORT_CALIBRATION, buf, DS_FEATURE_REPORT_CALIBRATION_SIZE, true); if (ret) { hid_err(ds->base.hdev, "Failed to retrieve DualSense calibration info: %d\n", ret); goto err_free; } gyro_pitch_bias = get_unaligned_le16(&buf[1]); gyro_yaw_bias = get_unaligned_le16(&buf[3]); gyro_roll_bias = get_unaligned_le16(&buf[5]); gyro_pitch_plus = get_unaligned_le16(&buf[7]); gyro_pitch_minus = get_unaligned_le16(&buf[9]); gyro_yaw_plus = get_unaligned_le16(&buf[11]); gyro_yaw_minus = get_unaligned_le16(&buf[13]); gyro_roll_plus = get_unaligned_le16(&buf[15]); gyro_roll_minus = get_unaligned_le16(&buf[17]); gyro_speed_plus = get_unaligned_le16(&buf[19]); gyro_speed_minus = get_unaligned_le16(&buf[21]); acc_x_plus = get_unaligned_le16(&buf[23]); acc_x_minus = get_unaligned_le16(&buf[25]); acc_y_plus = get_unaligned_le16(&buf[27]); acc_y_minus = get_unaligned_le16(&buf[29]); acc_z_plus = get_unaligned_le16(&buf[31]); acc_z_minus = get_unaligned_le16(&buf[33]); /* * Set gyroscope calibration and normalization parameters. * Data values will be normalized to 1/DS_GYRO_RES_PER_DEG_S degree/s. 
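 * A sketch of the math, matching the assignments below and the use of
 * mult_frac() in dualsense_parse_report(): for each axis,
 *   sens_numer = (gyro_speed_plus + gyro_speed_minus) * DS_GYRO_RES_PER_DEG_S;
 *   sens_denom = abs(plus - bias) + abs(minus - bias);
 *   calibrated = mult_frac(sens_numer, raw, sens_denom);
 * so a raw reading at the calibrated positive range ends up at roughly
 * gyro_speed * DS_GYRO_RES_PER_DEG_S units.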
*/ speed_2x = (gyro_speed_plus + gyro_speed_minus); ds->gyro_calib_data[0].abs_code = ABS_RX; ds->gyro_calib_data[0].bias = 0; ds->gyro_calib_data[0].sens_numer = speed_2x*DS_GYRO_RES_PER_DEG_S; ds->gyro_calib_data[0].sens_denom = abs(gyro_pitch_plus - gyro_pitch_bias) + abs(gyro_pitch_minus - gyro_pitch_bias); ds->gyro_calib_data[1].abs_code = ABS_RY; ds->gyro_calib_data[1].bias = 0; ds->gyro_calib_data[1].sens_numer = speed_2x*DS_GYRO_RES_PER_DEG_S; ds->gyro_calib_data[1].sens_denom = abs(gyro_yaw_plus - gyro_yaw_bias) + abs(gyro_yaw_minus - gyro_yaw_bias); ds->gyro_calib_data[2].abs_code = ABS_RZ; ds->gyro_calib_data[2].bias = 0; ds->gyro_calib_data[2].sens_numer = speed_2x*DS_GYRO_RES_PER_DEG_S; ds->gyro_calib_data[2].sens_denom = abs(gyro_roll_plus - gyro_roll_bias) + abs(gyro_roll_minus - gyro_roll_bias); /* * Sanity check gyro calibration data. This is needed to prevent crashes * during report handling of virtual, clone or broken devices not implementing * calibration data properly. */ for (i = 0; i < ARRAY_SIZE(ds->gyro_calib_data); i++) { if (ds->gyro_calib_data[i].sens_denom == 0) { hid_warn(hdev, "Invalid gyro calibration data for axis (%d), disabling calibration.", ds->gyro_calib_data[i].abs_code); ds->gyro_calib_data[i].bias = 0; ds->gyro_calib_data[i].sens_numer = DS_GYRO_RANGE; ds->gyro_calib_data[i].sens_denom = S16_MAX; } } /* * Set accelerometer calibration and normalization parameters. * Data values will be normalized to 1/DS_ACC_RES_PER_G g. */ range_2g = acc_x_plus - acc_x_minus; ds->accel_calib_data[0].abs_code = ABS_X; ds->accel_calib_data[0].bias = acc_x_plus - range_2g / 2; ds->accel_calib_data[0].sens_numer = 2*DS_ACC_RES_PER_G; ds->accel_calib_data[0].sens_denom = range_2g; range_2g = acc_y_plus - acc_y_minus; ds->accel_calib_data[1].abs_code = ABS_Y; ds->accel_calib_data[1].bias = acc_y_plus - range_2g / 2; ds->accel_calib_data[1].sens_numer = 2*DS_ACC_RES_PER_G; ds->accel_calib_data[1].sens_denom = range_2g; range_2g = acc_z_plus - acc_z_minus; ds->accel_calib_data[2].abs_code = ABS_Z; ds->accel_calib_data[2].bias = acc_z_plus - range_2g / 2; ds->accel_calib_data[2].sens_numer = 2*DS_ACC_RES_PER_G; ds->accel_calib_data[2].sens_denom = range_2g; /* * Sanity check accelerometer calibration data. This is needed to prevent crashes * during report handling of virtual, clone or broken devices not implementing calibration * data properly. */ for (i = 0; i < ARRAY_SIZE(ds->accel_calib_data); i++) { if (ds->accel_calib_data[i].sens_denom == 0) { hid_warn(hdev, "Invalid accelerometer calibration data for axis (%d), disabling calibration.", ds->accel_calib_data[i].abs_code); ds->accel_calib_data[i].bias = 0; ds->accel_calib_data[i].sens_numer = DS_ACC_RANGE; ds->accel_calib_data[i].sens_denom = S16_MAX; } } err_free: kfree(buf); return ret; } static int dualsense_get_firmware_info(struct dualsense *ds) { uint8_t *buf; int ret; buf = kzalloc(DS_FEATURE_REPORT_FIRMWARE_INFO_SIZE, GFP_KERNEL); if (!buf) return -ENOMEM; ret = ps_get_report(ds->base.hdev, DS_FEATURE_REPORT_FIRMWARE_INFO, buf, DS_FEATURE_REPORT_FIRMWARE_INFO_SIZE, true); if (ret) { hid_err(ds->base.hdev, "Failed to retrieve DualSense firmware info: %d\n", ret); goto err_free; } ds->base.hw_version = get_unaligned_le32(&buf[24]); ds->base.fw_version = get_unaligned_le32(&buf[28]); /* Update version is some kind of feature version. 
It is distinct from * the firmware version as there can be many different variations of a * controller over time with the same physical shell, but with different * PCBs and other internal changes. The update version (internal name) is * used as a means to detect what features are available and change behavior. * Note: the version is different between DualSense and DualSense Edge. */ ds->update_version = get_unaligned_le16(&buf[44]); err_free: kfree(buf); return ret; } static int dualsense_get_mac_address(struct dualsense *ds) { uint8_t *buf; int ret = 0; buf = kzalloc(DS_FEATURE_REPORT_PAIRING_INFO_SIZE, GFP_KERNEL); if (!buf) return -ENOMEM; ret = ps_get_report(ds->base.hdev, DS_FEATURE_REPORT_PAIRING_INFO, buf, DS_FEATURE_REPORT_PAIRING_INFO_SIZE, true); if (ret) { hid_err(ds->base.hdev, "Failed to retrieve DualSense pairing info: %d\n", ret); goto err_free; } memcpy(ds->base.mac_address, &buf[1], sizeof(ds->base.mac_address)); err_free: kfree(buf); return ret; } static int dualsense_lightbar_set_brightness(struct led_classdev *cdev, enum led_brightness brightness) { struct led_classdev_mc *mc_cdev = lcdev_to_mccdev(cdev); struct dualsense *ds = container_of(mc_cdev, struct dualsense, lightbar); uint8_t red, green, blue; led_mc_calc_color_components(mc_cdev, brightness); red = mc_cdev->subled_info[0].brightness; green = mc_cdev->subled_info[1].brightness; blue = mc_cdev->subled_info[2].brightness; dualsense_set_lightbar(ds, red, green, blue); return 0; } static enum led_brightness dualsense_player_led_get_brightness(struct led_classdev *led) { struct hid_device *hdev = to_hid_device(led->dev->parent); struct dualsense *ds = hid_get_drvdata(hdev); return !!(ds->player_leds_state & BIT(led - ds->player_leds)); } static int dualsense_player_led_set_brightness(struct led_classdev *led, enum led_brightness value) { struct hid_device *hdev = to_hid_device(led->dev->parent); struct dualsense *ds = hid_get_drvdata(hdev); unsigned long flags; unsigned int led_index; spin_lock_irqsave(&ds->base.lock, flags); led_index = led - ds->player_leds; if (value == LED_OFF) ds->player_leds_state &= ~BIT(led_index); else ds->player_leds_state |= BIT(led_index); ds->update_player_leds = true; spin_unlock_irqrestore(&ds->base.lock, flags); dualsense_schedule_work(ds); return 0; } static void dualsense_init_output_report(struct dualsense *ds, struct dualsense_output_report *rp, void *buf) { struct hid_device *hdev = ds->base.hdev; if (hdev->bus == BUS_BLUETOOTH) { struct dualsense_output_report_bt *bt = buf; memset(bt, 0, sizeof(*bt)); bt->report_id = DS_OUTPUT_REPORT_BT; bt->tag = DS_OUTPUT_TAG; /* Tag must be set. Exact meaning is unclear. */ /* * Highest 4-bit is a sequence number, which needs to be increased * every report. Lowest 4-bit is tag and can be zero for now. */ bt->seq_tag = (ds->output_seq << 4) | 0x0; if (++ds->output_seq == 16) ds->output_seq = 0; rp->data = buf; rp->len = sizeof(*bt); rp->bt = bt; rp->usb = NULL; rp->common = &bt->common; } else { /* USB */ struct dualsense_output_report_usb *usb = buf; memset(usb, 0, sizeof(*usb)); usb->report_id = DS_OUTPUT_REPORT_USB; rp->data = buf; rp->len = sizeof(*usb); rp->bt = NULL; rp->usb = usb; rp->common = &usb->common; } } static inline void dualsense_schedule_work(struct dualsense *ds) { unsigned long flags; spin_lock_irqsave(&ds->base.lock, flags); if (ds->output_worker_initialized) schedule_work(&ds->output_worker); spin_unlock_irqrestore(&ds->base.lock, flags); } /* * Helper function to send DualSense output reports. 
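 * (Typical call sequence, as seen in dualsense_output_worker() and
 * dualsense_reset_leds(): dualsense_init_output_report() fills in the
 * transport specific header, the caller sets valid_flag bits and payload
 * fields through report->common, and this function then transmits it.)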
Applies a CRC at the end of a report * for Bluetooth reports. */ static void dualsense_send_output_report(struct dualsense *ds, struct dualsense_output_report *report) { struct hid_device *hdev = ds->base.hdev; /* Bluetooth packets need to be signed with a CRC in the last 4 bytes. */ if (report->bt) { uint32_t crc; uint8_t seed = PS_OUTPUT_CRC32_SEED; crc = crc32_le(0xFFFFFFFF, &seed, 1); crc = ~crc32_le(crc, report->data, report->len - 4); report->bt->crc32 = cpu_to_le32(crc); } hid_hw_output_report(hdev, report->data, report->len); } static void dualsense_output_worker(struct work_struct *work) { struct dualsense *ds = container_of(work, struct dualsense, output_worker); struct dualsense_output_report report; struct dualsense_output_report_common *common; unsigned long flags; dualsense_init_output_report(ds, &report, ds->output_report_dmabuf); common = report.common; spin_lock_irqsave(&ds->base.lock, flags); if (ds->update_rumble) { /* Select classic rumble style haptics and enable it. */ common->valid_flag0 |= DS_OUTPUT_VALID_FLAG0_HAPTICS_SELECT; if (ds->use_vibration_v2) common->valid_flag2 |= DS_OUTPUT_VALID_FLAG2_COMPATIBLE_VIBRATION2; else common->valid_flag0 |= DS_OUTPUT_VALID_FLAG0_COMPATIBLE_VIBRATION; common->motor_left = ds->motor_left; common->motor_right = ds->motor_right; ds->update_rumble = false; } if (ds->update_lightbar) { common->valid_flag1 |= DS_OUTPUT_VALID_FLAG1_LIGHTBAR_CONTROL_ENABLE; common->lightbar_red = ds->lightbar_red; common->lightbar_green = ds->lightbar_green; common->lightbar_blue = ds->lightbar_blue; ds->update_lightbar = false; } if (ds->update_player_leds) { common->valid_flag1 |= DS_OUTPUT_VALID_FLAG1_PLAYER_INDICATOR_CONTROL_ENABLE; common->player_leds = ds->player_leds_state; ds->update_player_leds = false; } if (ds->update_mic_mute) { common->valid_flag1 |= DS_OUTPUT_VALID_FLAG1_MIC_MUTE_LED_CONTROL_ENABLE; common->mute_button_led = ds->mic_muted; if (ds->mic_muted) { /* Disable microphone */ common->valid_flag1 |= DS_OUTPUT_VALID_FLAG1_POWER_SAVE_CONTROL_ENABLE; common->power_save_control |= DS_OUTPUT_POWER_SAVE_CONTROL_MIC_MUTE; } else { /* Enable microphone */ common->valid_flag1 |= DS_OUTPUT_VALID_FLAG1_POWER_SAVE_CONTROL_ENABLE; common->power_save_control &= ~DS_OUTPUT_POWER_SAVE_CONTROL_MIC_MUTE; } ds->update_mic_mute = false; } spin_unlock_irqrestore(&ds->base.lock, flags); dualsense_send_output_report(ds, &report); } static int dualsense_parse_report(struct ps_device *ps_dev, struct hid_report *report, u8 *data, int size) { struct hid_device *hdev = ps_dev->hdev; struct dualsense *ds = container_of(ps_dev, struct dualsense, base); struct dualsense_input_report *ds_report; uint8_t battery_data, battery_capacity, charging_status, value; int battery_status; uint32_t sensor_timestamp; bool btn_mic_state; unsigned long flags; int i; /* * DualSense in USB uses the full HID report for reportID 1, but * Bluetooth uses a minimal HID report for reportID 1 and reports * the full report using reportID 49. 
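 * In other words (layout as handled just below): over USB the common
 * dualsense_input_report starts right after the 1-byte report ID, at
 * data[1]; over Bluetooth report 0x31 it starts at data[2] because of an
 * extra header byte, and the final 4 bytes of the 78-byte report carry a
 * little-endian CRC32 seeded with PS_INPUT_CRC32_SEED, which is verified
 * before the data is trusted.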
*/ if (hdev->bus == BUS_USB && report->id == DS_INPUT_REPORT_USB && size == DS_INPUT_REPORT_USB_SIZE) { ds_report = (struct dualsense_input_report *)&data[1]; } else if (hdev->bus == BUS_BLUETOOTH && report->id == DS_INPUT_REPORT_BT && size == DS_INPUT_REPORT_BT_SIZE) { /* Last 4 bytes of input report contain crc32 */ uint32_t report_crc = get_unaligned_le32(&data[size - 4]); if (!ps_check_crc32(PS_INPUT_CRC32_SEED, data, size - 4, report_crc)) { hid_err(hdev, "DualSense input CRC's check failed\n"); return -EILSEQ; } ds_report = (struct dualsense_input_report *)&data[2]; } else { hid_err(hdev, "Unhandled reportID=%d\n", report->id); return -1; } input_report_abs(ds->gamepad, ABS_X, ds_report->x); input_report_abs(ds->gamepad, ABS_Y, ds_report->y); input_report_abs(ds->gamepad, ABS_RX, ds_report->rx); input_report_abs(ds->gamepad, ABS_RY, ds_report->ry); input_report_abs(ds->gamepad, ABS_Z, ds_report->z); input_report_abs(ds->gamepad, ABS_RZ, ds_report->rz); value = ds_report->buttons[0] & DS_BUTTONS0_HAT_SWITCH; if (value >= ARRAY_SIZE(ps_gamepad_hat_mapping)) value = 8; /* center */ input_report_abs(ds->gamepad, ABS_HAT0X, ps_gamepad_hat_mapping[value].x); input_report_abs(ds->gamepad, ABS_HAT0Y, ps_gamepad_hat_mapping[value].y); input_report_key(ds->gamepad, BTN_WEST, ds_report->buttons[0] & DS_BUTTONS0_SQUARE); input_report_key(ds->gamepad, BTN_SOUTH, ds_report->buttons[0] & DS_BUTTONS0_CROSS); input_report_key(ds->gamepad, BTN_EAST, ds_report->buttons[0] & DS_BUTTONS0_CIRCLE); input_report_key(ds->gamepad, BTN_NORTH, ds_report->buttons[0] & DS_BUTTONS0_TRIANGLE); input_report_key(ds->gamepad, BTN_TL, ds_report->buttons[1] & DS_BUTTONS1_L1); input_report_key(ds->gamepad, BTN_TR, ds_report->buttons[1] & DS_BUTTONS1_R1); input_report_key(ds->gamepad, BTN_TL2, ds_report->buttons[1] & DS_BUTTONS1_L2); input_report_key(ds->gamepad, BTN_TR2, ds_report->buttons[1] & DS_BUTTONS1_R2); input_report_key(ds->gamepad, BTN_SELECT, ds_report->buttons[1] & DS_BUTTONS1_CREATE); input_report_key(ds->gamepad, BTN_START, ds_report->buttons[1] & DS_BUTTONS1_OPTIONS); input_report_key(ds->gamepad, BTN_THUMBL, ds_report->buttons[1] & DS_BUTTONS1_L3); input_report_key(ds->gamepad, BTN_THUMBR, ds_report->buttons[1] & DS_BUTTONS1_R3); input_report_key(ds->gamepad, BTN_MODE, ds_report->buttons[2] & DS_BUTTONS2_PS_HOME); input_sync(ds->gamepad); /* * The DualSense has an internal microphone, which can be muted through a mute button * on the device. The driver is expected to read the button state and program the device * to mute/unmute audio at the hardware level. */ btn_mic_state = !!(ds_report->buttons[2] & DS_BUTTONS2_MIC_MUTE); if (btn_mic_state && !ds->last_btn_mic_state) { spin_lock_irqsave(&ps_dev->lock, flags); ds->update_mic_mute = true; ds->mic_muted = !ds->mic_muted; /* toggle */ spin_unlock_irqrestore(&ps_dev->lock, flags); /* Schedule updating of microphone state at hardware level. */ dualsense_schedule_work(ds); } ds->last_btn_mic_state = btn_mic_state; /* Parse and calibrate gyroscope data. */ for (i = 0; i < ARRAY_SIZE(ds_report->gyro); i++) { int raw_data = (short)le16_to_cpu(ds_report->gyro[i]); int calib_data = mult_frac(ds->gyro_calib_data[i].sens_numer, raw_data, ds->gyro_calib_data[i].sens_denom); input_report_abs(ds->sensors, ds->gyro_calib_data[i].abs_code, calib_data); } /* Parse and calibrate accelerometer data. 
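 * Same idea as the gyroscope above, except the per-axis bias is subtracted
 * first. As a sketch, a well-calibrated part has range_2g close to
 * 2 * DS_ACC_RES_PER_G, so
 *   calibrated = mult_frac(2 * DS_ACC_RES_PER_G, raw - bias, range_2g)
 * comes out near the raw value, i.e. roughly DS_ACC_RES_PER_G units per g.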
*/ for (i = 0; i < ARRAY_SIZE(ds_report->accel); i++) { int raw_data = (short)le16_to_cpu(ds_report->accel[i]); int calib_data = mult_frac(ds->accel_calib_data[i].sens_numer, raw_data - ds->accel_calib_data[i].bias, ds->accel_calib_data[i].sens_denom); input_report_abs(ds->sensors, ds->accel_calib_data[i].abs_code, calib_data); } /* Convert timestamp (in 0.33us unit) to timestamp_us */ sensor_timestamp = le32_to_cpu(ds_report->sensor_timestamp); if (!ds->sensor_timestamp_initialized) { ds->sensor_timestamp_us = DIV_ROUND_CLOSEST(sensor_timestamp, 3); ds->sensor_timestamp_initialized = true; } else { uint32_t delta; if (ds->prev_sensor_timestamp > sensor_timestamp) delta = (U32_MAX - ds->prev_sensor_timestamp + sensor_timestamp + 1); else delta = sensor_timestamp - ds->prev_sensor_timestamp; ds->sensor_timestamp_us += DIV_ROUND_CLOSEST(delta, 3); } ds->prev_sensor_timestamp = sensor_timestamp; input_event(ds->sensors, EV_MSC, MSC_TIMESTAMP, ds->sensor_timestamp_us); input_sync(ds->sensors); for (i = 0; i < ARRAY_SIZE(ds_report->points); i++) { struct dualsense_touch_point *point = &ds_report->points[i]; bool active = (point->contact & DS_TOUCH_POINT_INACTIVE) ? false : true; input_mt_slot(ds->touchpad, i); input_mt_report_slot_state(ds->touchpad, MT_TOOL_FINGER, active); if (active) { int x = (point->x_hi << 8) | point->x_lo; int y = (point->y_hi << 4) | point->y_lo; input_report_abs(ds->touchpad, ABS_MT_POSITION_X, x); input_report_abs(ds->touchpad, ABS_MT_POSITION_Y, y); } } input_mt_sync_frame(ds->touchpad); input_report_key(ds->touchpad, BTN_LEFT, ds_report->buttons[2] & DS_BUTTONS2_TOUCHPAD); input_sync(ds->touchpad); battery_data = ds_report->status & DS_STATUS_BATTERY_CAPACITY; charging_status = (ds_report->status & DS_STATUS_CHARGING) >> DS_STATUS_CHARGING_SHIFT; switch (charging_status) { case 0x0: /* * Each unit of battery data corresponds to 10% * 0 = 0-9%, 1 = 10-19%, .. 
and 10 = 100% */ battery_capacity = min(battery_data * 10 + 5, 100); battery_status = POWER_SUPPLY_STATUS_DISCHARGING; break; case 0x1: battery_capacity = min(battery_data * 10 + 5, 100); battery_status = POWER_SUPPLY_STATUS_CHARGING; break; case 0x2: battery_capacity = 100; battery_status = POWER_SUPPLY_STATUS_FULL; break; case 0xa: /* voltage or temperature out of range */ case 0xb: /* temperature error */ battery_capacity = 0; battery_status = POWER_SUPPLY_STATUS_NOT_CHARGING; break; case 0xf: /* charging error */ default: battery_capacity = 0; battery_status = POWER_SUPPLY_STATUS_UNKNOWN; } spin_lock_irqsave(&ps_dev->lock, flags); ps_dev->battery_capacity = battery_capacity; ps_dev->battery_status = battery_status; spin_unlock_irqrestore(&ps_dev->lock, flags); return 0; } static int dualsense_play_effect(struct input_dev *dev, void *data, struct ff_effect *effect) { struct hid_device *hdev = input_get_drvdata(dev); struct dualsense *ds = hid_get_drvdata(hdev); unsigned long flags; if (effect->type != FF_RUMBLE) return 0; spin_lock_irqsave(&ds->base.lock, flags); ds->update_rumble = true; ds->motor_left = effect->u.rumble.strong_magnitude / 256; ds->motor_right = effect->u.rumble.weak_magnitude / 256; spin_unlock_irqrestore(&ds->base.lock, flags); dualsense_schedule_work(ds); return 0; } static void dualsense_remove(struct ps_device *ps_dev) { struct dualsense *ds = container_of(ps_dev, struct dualsense, base); unsigned long flags; spin_lock_irqsave(&ds->base.lock, flags); ds->output_worker_initialized = false; spin_unlock_irqrestore(&ds->base.lock, flags); cancel_work_sync(&ds->output_worker); } static int dualsense_reset_leds(struct dualsense *ds) { struct dualsense_output_report report; uint8_t *buf; buf = kzalloc(sizeof(struct dualsense_output_report_bt), GFP_KERNEL); if (!buf) return -ENOMEM; dualsense_init_output_report(ds, &report, buf); /* * On Bluetooth the DualSense outputs an animation on the lightbar * during startup and maintains a color afterwards. We need to explicitly * reconfigure the lightbar before we can do any programming later on. * In USB the lightbar is not on by default, but redoing the setup there * doesn't hurt. */ report.common->valid_flag2 = DS_OUTPUT_VALID_FLAG2_LIGHTBAR_SETUP_CONTROL_ENABLE; report.common->lightbar_setup = DS_OUTPUT_LIGHTBAR_SETUP_LIGHT_OUT; /* Fade light out. */ dualsense_send_output_report(ds, &report); kfree(buf); return 0; } static void dualsense_set_lightbar(struct dualsense *ds, uint8_t red, uint8_t green, uint8_t blue) { unsigned long flags; spin_lock_irqsave(&ds->base.lock, flags); ds->update_lightbar = true; ds->lightbar_red = red; ds->lightbar_green = green; ds->lightbar_blue = blue; spin_unlock_irqrestore(&ds->base.lock, flags); dualsense_schedule_work(ds); } static void dualsense_set_player_leds(struct dualsense *ds) { /* * The DualSense controller has a row of 5 LEDs used for player ids. * Behavior on the PlayStation 5 console is to center the player id * across the LEDs, so e.g. player 1 would be "--x--" with x being 'on'. * Follow a similar mapping here. 
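 * For illustration, the resulting patterns over the 5 LEDs (x = on) are:
 *   player 1: --x--
 *   player 2: -x-x-
 *   player 3: x-x-x
 *   player 4: xx-xx
 *   player 5: xxxxx
 * matching the player_ids bitmasks defined right below.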
*/ static const int player_ids[5] = { BIT(2), BIT(3) | BIT(1), BIT(4) | BIT(2) | BIT(0), BIT(4) | BIT(3) | BIT(1) | BIT(0), BIT(4) | BIT(3) | BIT(2) | BIT(1) | BIT(0) }; uint8_t player_id = ds->base.player_id % ARRAY_SIZE(player_ids); ds->update_player_leds = true; ds->player_leds_state = player_ids[player_id]; dualsense_schedule_work(ds); } static struct ps_device *dualsense_create(struct hid_device *hdev) { struct dualsense *ds; struct ps_device *ps_dev; uint8_t max_output_report_size; int i, ret; static const struct ps_led_info player_leds_info[] = { { LED_FUNCTION_PLAYER1, "white", 1, dualsense_player_led_get_brightness, dualsense_player_led_set_brightness }, { LED_FUNCTION_PLAYER2, "white", 1, dualsense_player_led_get_brightness, dualsense_player_led_set_brightness }, { LED_FUNCTION_PLAYER3, "white", 1, dualsense_player_led_get_brightness, dualsense_player_led_set_brightness }, { LED_FUNCTION_PLAYER4, "white", 1, dualsense_player_led_get_brightness, dualsense_player_led_set_brightness }, { LED_FUNCTION_PLAYER5, "white", 1, dualsense_player_led_get_brightness, dualsense_player_led_set_brightness } }; ds = devm_kzalloc(&hdev->dev, sizeof(*ds), GFP_KERNEL); if (!ds) return ERR_PTR(-ENOMEM); /* * Patch version to allow userspace to distinguish between * hid-generic vs hid-playstation axis and button mapping. */ hdev->version |= HID_PLAYSTATION_VERSION_PATCH; ps_dev = &ds->base; ps_dev->hdev = hdev; spin_lock_init(&ps_dev->lock); ps_dev->battery_capacity = 100; /* initial value until parse_report. */ ps_dev->battery_status = POWER_SUPPLY_STATUS_UNKNOWN; ps_dev->parse_report = dualsense_parse_report; ps_dev->remove = dualsense_remove; INIT_WORK(&ds->output_worker, dualsense_output_worker); ds->output_worker_initialized = true; hid_set_drvdata(hdev, ds); max_output_report_size = sizeof(struct dualsense_output_report_bt); ds->output_report_dmabuf = devm_kzalloc(&hdev->dev, max_output_report_size, GFP_KERNEL); if (!ds->output_report_dmabuf) return ERR_PTR(-ENOMEM); ret = dualsense_get_mac_address(ds); if (ret) { hid_err(hdev, "Failed to get MAC address from DualSense\n"); return ERR_PTR(ret); } snprintf(hdev->uniq, sizeof(hdev->uniq), "%pMR", ds->base.mac_address); ret = dualsense_get_firmware_info(ds); if (ret) { hid_err(hdev, "Failed to get firmware info from DualSense\n"); return ERR_PTR(ret); } /* Original DualSense firmware simulated classic controller rumble through * its new haptics hardware. It felt different from classic rumble users * were used to. Since then new firmwares were introduced to change behavior * and make this new 'v2' behavior default on PlayStation and other platforms. * The original DualSense requires a new enough firmware as bundled with PS5 * software released in 2021. DualSense edge supports it out of the box. * Both devices also support the old mode, but it is not really used. */ if (hdev->product == USB_DEVICE_ID_SONY_PS5_CONTROLLER) { /* Feature version 2.21 introduced new vibration method. */ ds->use_vibration_v2 = ds->update_version >= DS_FEATURE_VERSION(2, 21); } else if (hdev->product == USB_DEVICE_ID_SONY_PS5_CONTROLLER_2) { ds->use_vibration_v2 = true; } ret = ps_devices_list_add(ps_dev); if (ret) return ERR_PTR(ret); ret = dualsense_get_calibration_data(ds); if (ret) { hid_err(hdev, "Failed to get calibration data from DualSense\n"); goto err; } ds->gamepad = ps_gamepad_create(hdev, dualsense_play_effect); if (IS_ERR(ds->gamepad)) { ret = PTR_ERR(ds->gamepad); goto err; } /* Use gamepad input device name as primary device name for e.g. 
LEDs */ ps_dev->input_dev_name = dev_name(&ds->gamepad->dev); ds->sensors = ps_sensors_create(hdev, DS_ACC_RANGE, DS_ACC_RES_PER_G, DS_GYRO_RANGE, DS_GYRO_RES_PER_DEG_S); if (IS_ERR(ds->sensors)) { ret = PTR_ERR(ds->sensors); goto err; } ds->touchpad = ps_touchpad_create(hdev, DS_TOUCHPAD_WIDTH, DS_TOUCHPAD_HEIGHT, 2); if (IS_ERR(ds->touchpad)) { ret = PTR_ERR(ds->touchpad); goto err; } ret = ps_device_register_battery(ps_dev); if (ret) goto err; /* * The hardware may have control over the LEDs (e.g. in Bluetooth on startup). * Reset the LEDs (lightbar, mute, player leds), so we can control them * from software. */ ret = dualsense_reset_leds(ds); if (ret) goto err; ret = ps_lightbar_register(ps_dev, &ds->lightbar, dualsense_lightbar_set_brightness); if (ret) goto err; /* Set default lightbar color. */ dualsense_set_lightbar(ds, 0, 0, 128); /* blue */ for (i = 0; i < ARRAY_SIZE(player_leds_info); i++) { const struct ps_led_info *led_info = &player_leds_info[i]; ret = ps_led_register(ps_dev, &ds->player_leds[i], led_info); if (ret < 0) goto err; } ret = ps_device_set_player_id(ps_dev); if (ret) { hid_err(hdev, "Failed to assign player id for DualSense: %d\n", ret); goto err; } /* Set player LEDs to our player id. */ dualsense_set_player_leds(ds); /* * Reporting hardware and firmware is important as there are frequent updates, which * can change behavior. */ hid_info(hdev, "Registered DualSense controller hw_version=0x%08x fw_version=0x%08x\n", ds->base.hw_version, ds->base.fw_version); return &ds->base; err: ps_devices_list_remove(ps_dev); return ERR_PTR(ret); } static void dualshock4_dongle_calibration_work(struct work_struct *work) { struct dualshock4 *ds4 = container_of(work, struct dualshock4, dongle_hotplug_worker); unsigned long flags; enum dualshock4_dongle_state dongle_state; int ret; ret = dualshock4_get_calibration_data(ds4); if (ret < 0) { /* This call is very unlikely to fail for the dongle. When it * fails we are probably in a very bad state, so mark the * dongle as disabled. We will re-enable the dongle if a new * DS4 hotplug is detect from sony_raw_event as any issues * are likely resolved then (the dongle is quite stupid). */ hid_err(ds4->base.hdev, "DualShock 4 USB dongle: calibration failed, disabling device\n"); dongle_state = DONGLE_DISABLED; } else { hid_info(ds4->base.hdev, "DualShock 4 USB dongle: calibration completed\n"); dongle_state = DONGLE_CONNECTED; } spin_lock_irqsave(&ds4->base.lock, flags); ds4->dongle_state = dongle_state; spin_unlock_irqrestore(&ds4->base.lock, flags); } static int dualshock4_get_calibration_data(struct dualshock4 *ds4) { struct hid_device *hdev = ds4->base.hdev; short gyro_pitch_bias, gyro_pitch_plus, gyro_pitch_minus; short gyro_yaw_bias, gyro_yaw_plus, gyro_yaw_minus; short gyro_roll_bias, gyro_roll_plus, gyro_roll_minus; short gyro_speed_plus, gyro_speed_minus; short acc_x_plus, acc_x_minus; short acc_y_plus, acc_y_minus; short acc_z_plus, acc_z_minus; int speed_2x; int range_2g; int ret = 0; int i; uint8_t *buf; if (ds4->base.hdev->bus == BUS_USB) { int retries; buf = kzalloc(DS4_FEATURE_REPORT_CALIBRATION_SIZE, GFP_KERNEL); if (!buf) { ret = -ENOMEM; goto transfer_failed; } /* We should normally receive the feature report data we asked * for, but hidraw applications such as Steam can issue feature * reports as well. In particular for Dongle reconnects, Steam * and this function are competing resulting in often receiving * data for a different HID report, so retry a few times. 
*/ for (retries = 0; retries < 3; retries++) { ret = ps_get_report(hdev, DS4_FEATURE_REPORT_CALIBRATION, buf, DS4_FEATURE_REPORT_CALIBRATION_SIZE, true); if (ret) { if (retries < 2) { hid_warn(hdev, "Retrying DualShock 4 get calibration report (0x02) request\n"); continue; } hid_warn(hdev, "Failed to retrieve DualShock4 calibration info: %d\n", ret); ret = -EILSEQ; goto transfer_failed; } else { break; } } } else { /* Bluetooth */ buf = kzalloc(DS4_FEATURE_REPORT_CALIBRATION_BT_SIZE, GFP_KERNEL); if (!buf) { ret = -ENOMEM; goto transfer_failed; } ret = ps_get_report(hdev, DS4_FEATURE_REPORT_CALIBRATION_BT, buf, DS4_FEATURE_REPORT_CALIBRATION_BT_SIZE, true); if (ret) { hid_warn(hdev, "Failed to retrieve DualShock4 calibration info: %d\n", ret); goto transfer_failed; } } /* Transfer succeeded - parse the calibration data received. */ gyro_pitch_bias = get_unaligned_le16(&buf[1]); gyro_yaw_bias = get_unaligned_le16(&buf[3]); gyro_roll_bias = get_unaligned_le16(&buf[5]); if (ds4->base.hdev->bus == BUS_USB) { gyro_pitch_plus = get_unaligned_le16(&buf[7]); gyro_pitch_minus = get_unaligned_le16(&buf[9]); gyro_yaw_plus = get_unaligned_le16(&buf[11]); gyro_yaw_minus = get_unaligned_le16(&buf[13]); gyro_roll_plus = get_unaligned_le16(&buf[15]); gyro_roll_minus = get_unaligned_le16(&buf[17]); } else { /* BT + Dongle */ gyro_pitch_plus = get_unaligned_le16(&buf[7]); gyro_yaw_plus = get_unaligned_le16(&buf[9]); gyro_roll_plus = get_unaligned_le16(&buf[11]); gyro_pitch_minus = get_unaligned_le16(&buf[13]); gyro_yaw_minus = get_unaligned_le16(&buf[15]); gyro_roll_minus = get_unaligned_le16(&buf[17]); } gyro_speed_plus = get_unaligned_le16(&buf[19]); gyro_speed_minus = get_unaligned_le16(&buf[21]); acc_x_plus = get_unaligned_le16(&buf[23]); acc_x_minus = get_unaligned_le16(&buf[25]); acc_y_plus = get_unaligned_le16(&buf[27]); acc_y_minus = get_unaligned_le16(&buf[29]); acc_z_plus = get_unaligned_le16(&buf[31]); acc_z_minus = get_unaligned_le16(&buf[33]); /* Done parsing the buffer, so let's free it. */ kfree(buf); /* * Set gyroscope calibration and normalization parameters. * Data values will be normalized to 1/DS4_GYRO_RES_PER_DEG_S degree/s. */ speed_2x = (gyro_speed_plus + gyro_speed_minus); ds4->gyro_calib_data[0].abs_code = ABS_RX; ds4->gyro_calib_data[0].bias = 0; ds4->gyro_calib_data[0].sens_numer = speed_2x*DS4_GYRO_RES_PER_DEG_S; ds4->gyro_calib_data[0].sens_denom = abs(gyro_pitch_plus - gyro_pitch_bias) + abs(gyro_pitch_minus - gyro_pitch_bias); ds4->gyro_calib_data[1].abs_code = ABS_RY; ds4->gyro_calib_data[1].bias = 0; ds4->gyro_calib_data[1].sens_numer = speed_2x*DS4_GYRO_RES_PER_DEG_S; ds4->gyro_calib_data[1].sens_denom = abs(gyro_yaw_plus - gyro_yaw_bias) + abs(gyro_yaw_minus - gyro_yaw_bias); ds4->gyro_calib_data[2].abs_code = ABS_RZ; ds4->gyro_calib_data[2].bias = 0; ds4->gyro_calib_data[2].sens_numer = speed_2x*DS4_GYRO_RES_PER_DEG_S; ds4->gyro_calib_data[2].sens_denom = abs(gyro_roll_plus - gyro_roll_bias) + abs(gyro_roll_minus - gyro_roll_bias); /* * Set accelerometer calibration and normalization parameters. * Data values will be normalized to 1/DS4_ACC_RES_PER_G g. 
*/ range_2g = acc_x_plus - acc_x_minus; ds4->accel_calib_data[0].abs_code = ABS_X; ds4->accel_calib_data[0].bias = acc_x_plus - range_2g / 2; ds4->accel_calib_data[0].sens_numer = 2*DS4_ACC_RES_PER_G; ds4->accel_calib_data[0].sens_denom = range_2g; range_2g = acc_y_plus - acc_y_minus; ds4->accel_calib_data[1].abs_code = ABS_Y; ds4->accel_calib_data[1].bias = acc_y_plus - range_2g / 2; ds4->accel_calib_data[1].sens_numer = 2*DS4_ACC_RES_PER_G; ds4->accel_calib_data[1].sens_denom = range_2g; range_2g = acc_z_plus - acc_z_minus; ds4->accel_calib_data[2].abs_code = ABS_Z; ds4->accel_calib_data[2].bias = acc_z_plus - range_2g / 2; ds4->accel_calib_data[2].sens_numer = 2*DS4_ACC_RES_PER_G; ds4->accel_calib_data[2].sens_denom = range_2g; transfer_failed: /* * Sanity check gyro calibration data. This is needed to prevent crashes * during report handling of virtual, clone or broken devices not implementing * calibration data properly. */ for (i = 0; i < ARRAY_SIZE(ds4->gyro_calib_data); i++) { if (ds4->gyro_calib_data[i].sens_denom == 0) { ds4->gyro_calib_data[i].abs_code = ABS_RX + i; hid_warn(hdev, "Invalid gyro calibration data for axis (%d), disabling calibration.", ds4->gyro_calib_data[i].abs_code); ds4->gyro_calib_data[i].bias = 0; ds4->gyro_calib_data[i].sens_numer = DS4_GYRO_RANGE; ds4->gyro_calib_data[i].sens_denom = S16_MAX; } } /* * Sanity check accelerometer calibration data. This is needed to prevent crashes * during report handling of virtual, clone or broken devices not implementing calibration * data properly. */ for (i = 0; i < ARRAY_SIZE(ds4->accel_calib_data); i++) { if (ds4->accel_calib_data[i].sens_denom == 0) { ds4->accel_calib_data[i].abs_code = ABS_X + i; hid_warn(hdev, "Invalid accelerometer calibration data for axis (%d), disabling calibration.", ds4->accel_calib_data[i].abs_code); ds4->accel_calib_data[i].bias = 0; ds4->accel_calib_data[i].sens_numer = DS4_ACC_RANGE; ds4->accel_calib_data[i].sens_denom = S16_MAX; } } return ret; } static int dualshock4_get_firmware_info(struct dualshock4 *ds4) { uint8_t *buf; int ret; buf = kzalloc(DS4_FEATURE_REPORT_FIRMWARE_INFO_SIZE, GFP_KERNEL); if (!buf) return -ENOMEM; /* Note USB and BT support the same feature report, but this report * lacks CRC support, so must be disabled in ps_get_report. 
*/ ret = ps_get_report(ds4->base.hdev, DS4_FEATURE_REPORT_FIRMWARE_INFO, buf, DS4_FEATURE_REPORT_FIRMWARE_INFO_SIZE, false); if (ret) { hid_err(ds4->base.hdev, "Failed to retrieve DualShock4 firmware info: %d\n", ret); goto err_free; } ds4->base.hw_version = get_unaligned_le16(&buf[35]); ds4->base.fw_version = get_unaligned_le16(&buf[41]); err_free: kfree(buf); return ret; } static int dualshock4_get_mac_address(struct dualshock4 *ds4) { struct hid_device *hdev = ds4->base.hdev; uint8_t *buf; int ret = 0; if (hdev->bus == BUS_USB) { buf = kzalloc(DS4_FEATURE_REPORT_PAIRING_INFO_SIZE, GFP_KERNEL); if (!buf) return -ENOMEM; ret = ps_get_report(hdev, DS4_FEATURE_REPORT_PAIRING_INFO, buf, DS4_FEATURE_REPORT_PAIRING_INFO_SIZE, false); if (ret) { hid_err(hdev, "Failed to retrieve DualShock4 pairing info: %d\n", ret); goto err_free; } memcpy(ds4->base.mac_address, &buf[1], sizeof(ds4->base.mac_address)); } else { /* Rely on HIDP for Bluetooth */ if (strlen(hdev->uniq) != 17) return -EINVAL; ret = sscanf(hdev->uniq, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx", &ds4->base.mac_address[5], &ds4->base.mac_address[4], &ds4->base.mac_address[3], &ds4->base.mac_address[2], &ds4->base.mac_address[1], &ds4->base.mac_address[0]); if (ret != sizeof(ds4->base.mac_address)) return -EINVAL; return 0; } err_free: kfree(buf); return ret; } static enum led_brightness dualshock4_led_get_brightness(struct led_classdev *led) { struct hid_device *hdev = to_hid_device(led->dev->parent); struct dualshock4 *ds4 = hid_get_drvdata(hdev); unsigned int led_index; led_index = led - ds4->lightbar_leds; switch (led_index) { case 0: return ds4->lightbar_red; case 1: return ds4->lightbar_green; case 2: return ds4->lightbar_blue; case 3: return ds4->lightbar_enabled; } return -1; } static int dualshock4_led_set_blink(struct led_classdev *led, unsigned long *delay_on, unsigned long *delay_off) { struct hid_device *hdev = to_hid_device(led->dev->parent); struct dualshock4 *ds4 = hid_get_drvdata(hdev); unsigned long flags; spin_lock_irqsave(&ds4->base.lock, flags); if (!*delay_on && !*delay_off) { /* Default to 1 Hz (50 centiseconds on, 50 centiseconds off). */ ds4->lightbar_blink_on = 50; ds4->lightbar_blink_off = 50; } else { /* Blink delays in centiseconds. */ ds4->lightbar_blink_on = min_t(unsigned long, *delay_on/10, DS4_LIGHTBAR_MAX_BLINK); ds4->lightbar_blink_off = min_t(unsigned long, *delay_off/10, DS4_LIGHTBAR_MAX_BLINK); } ds4->update_lightbar_blink = true; spin_unlock_irqrestore(&ds4->base.lock, flags); dualshock4_schedule_work(ds4); /* Report scaled values back to LED subsystem */ *delay_on = ds4->lightbar_blink_on * 10; *delay_off = ds4->lightbar_blink_off * 10; return 0; } static int dualshock4_led_set_brightness(struct led_classdev *led, enum led_brightness value) { struct hid_device *hdev = to_hid_device(led->dev->parent); struct dualshock4 *ds4 = hid_get_drvdata(hdev); unsigned long flags; unsigned int led_index; spin_lock_irqsave(&ds4->base.lock, flags); led_index = led - ds4->lightbar_leds; switch (led_index) { case 0: ds4->lightbar_red = value; break; case 1: ds4->lightbar_green = value; break; case 2: ds4->lightbar_blue = value; break; case 3: ds4->lightbar_enabled = !!value; /* brightness = 0 also cancels blinking in Linux. 
*/ if (!ds4->lightbar_enabled) { ds4->lightbar_blink_off = 0; ds4->lightbar_blink_on = 0; ds4->update_lightbar_blink = true; } } ds4->update_lightbar = true; spin_unlock_irqrestore(&ds4->base.lock, flags); dualshock4_schedule_work(ds4); return 0; } static void dualshock4_init_output_report(struct dualshock4 *ds4, struct dualshock4_output_report *rp, void *buf) { struct hid_device *hdev = ds4->base.hdev; if (hdev->bus == BUS_BLUETOOTH) { struct dualshock4_output_report_bt *bt = buf; memset(bt, 0, sizeof(*bt)); bt->report_id = DS4_OUTPUT_REPORT_BT; rp->data = buf; rp->len = sizeof(*bt); rp->bt = bt; rp->usb = NULL; rp->common = &bt->common; } else { /* USB */ struct dualshock4_output_report_usb *usb = buf; memset(usb, 0, sizeof(*usb)); usb->report_id = DS4_OUTPUT_REPORT_USB; rp->data = buf; rp->len = sizeof(*usb); rp->bt = NULL; rp->usb = usb; rp->common = &usb->common; } } static void dualshock4_output_worker(struct work_struct *work) { struct dualshock4 *ds4 = container_of(work, struct dualshock4, output_worker); struct dualshock4_output_report report; struct dualshock4_output_report_common *common; unsigned long flags; dualshock4_init_output_report(ds4, &report, ds4->output_report_dmabuf); common = report.common; spin_lock_irqsave(&ds4->base.lock, flags); /* * Some 3rd party gamepads expect updates to rumble and lightbar * together, and setting one may cancel the other. * * Let's maximise compatibility by always sending rumble and lightbar * updates together, even when only one has been scheduled, resulting * in: * * ds4->valid_flag0 >= 0x03 * * Hopefully this will maximise compatibility with third-party pads. * * Any further update bits, such as 0x04 for lightbar blinking, will * be or'd on top of this like before. */ if (ds4->update_rumble || ds4->update_lightbar) { ds4->update_rumble = true; /* 0x01 */ ds4->update_lightbar = true; /* 0x02 */ } if (ds4->update_rumble) { /* Select classic rumble style haptics and enable it. */ common->valid_flag0 |= DS4_OUTPUT_VALID_FLAG0_MOTOR; common->motor_left = ds4->motor_left; common->motor_right = ds4->motor_right; ds4->update_rumble = false; } if (ds4->update_lightbar) { common->valid_flag0 |= DS4_OUTPUT_VALID_FLAG0_LED; /* Comptabile behavior with hid-sony, which used a dummy global LED to * allow enabling/disabling the lightbar. The global LED maps to * lightbar_enabled. */ common->lightbar_red = ds4->lightbar_enabled ? ds4->lightbar_red : 0; common->lightbar_green = ds4->lightbar_enabled ? ds4->lightbar_green : 0; common->lightbar_blue = ds4->lightbar_enabled ? ds4->lightbar_blue : 0; ds4->update_lightbar = false; } if (ds4->update_lightbar_blink) { common->valid_flag0 |= DS4_OUTPUT_VALID_FLAG0_LED_BLINK; common->lightbar_blink_on = ds4->lightbar_blink_on; common->lightbar_blink_off = ds4->lightbar_blink_off; ds4->update_lightbar_blink = false; } spin_unlock_irqrestore(&ds4->base.lock, flags); /* Bluetooth packets need additional flags as well as a CRC in the last 4 bytes. */ if (report.bt) { uint32_t crc; uint8_t seed = PS_OUTPUT_CRC32_SEED; /* Hardware control flags need to set to let the device know * there is HID data as well as CRC. 
*/ report.bt->hw_control = DS4_OUTPUT_HWCTL_HID | DS4_OUTPUT_HWCTL_CRC32; if (ds4->update_bt_poll_interval) { report.bt->hw_control |= ds4->bt_poll_interval; ds4->update_bt_poll_interval = false; } crc = crc32_le(0xFFFFFFFF, &seed, 1); crc = ~crc32_le(crc, report.data, report.len - 4); report.bt->crc32 = cpu_to_le32(crc); } hid_hw_output_report(ds4->base.hdev, report.data, report.len); } static int dualshock4_parse_report(struct ps_device *ps_dev, struct hid_report *report, u8 *data, int size) { struct hid_device *hdev = ps_dev->hdev; struct dualshock4 *ds4 = container_of(ps_dev, struct dualshock4, base); struct dualshock4_input_report_common *ds4_report; struct dualshock4_touch_report *touch_reports; uint8_t battery_capacity, num_touch_reports, value; int battery_status, i, j; uint16_t sensor_timestamp; unsigned long flags; bool is_minimal = false; /* * DualShock4 in USB uses the full HID report for reportID 1, but * Bluetooth uses a minimal HID report for reportID 1 and reports * the full report using reportID 17. */ if (hdev->bus == BUS_USB && report->id == DS4_INPUT_REPORT_USB && size == DS4_INPUT_REPORT_USB_SIZE) { struct dualshock4_input_report_usb *usb = (struct dualshock4_input_report_usb *)data; ds4_report = &usb->common; num_touch_reports = usb->num_touch_reports; touch_reports = usb->touch_reports; } else if (hdev->bus == BUS_BLUETOOTH && report->id == DS4_INPUT_REPORT_BT && size == DS4_INPUT_REPORT_BT_SIZE) { struct dualshock4_input_report_bt *bt = (struct dualshock4_input_report_bt *)data; uint32_t report_crc = get_unaligned_le32(&bt->crc32); /* Last 4 bytes of input report contains CRC. */ if (!ps_check_crc32(PS_INPUT_CRC32_SEED, data, size - 4, report_crc)) { hid_err(hdev, "DualShock4 input CRC's check failed\n"); return -EILSEQ; } ds4_report = &bt->common; num_touch_reports = bt->num_touch_reports; touch_reports = bt->touch_reports; } else if (hdev->bus == BUS_BLUETOOTH && report->id == DS4_INPUT_REPORT_BT_MINIMAL && size == DS4_INPUT_REPORT_BT_MINIMAL_SIZE) { /* Some third-party pads never switch to the full 0x11 report. * The short 0x01 report is 10 bytes long: * u8 report_id == 0x01 * u8 first_bytes_of_full_report[9] * So let's reuse the full report parser, and stop it after * parsing the buttons. 
*/ ds4_report = (struct dualshock4_input_report_common *)&data[1]; is_minimal = true; } else { hid_err(hdev, "Unhandled reportID=%d\n", report->id); return -1; } input_report_abs(ds4->gamepad, ABS_X, ds4_report->x); input_report_abs(ds4->gamepad, ABS_Y, ds4_report->y); input_report_abs(ds4->gamepad, ABS_RX, ds4_report->rx); input_report_abs(ds4->gamepad, ABS_RY, ds4_report->ry); input_report_abs(ds4->gamepad, ABS_Z, ds4_report->z); input_report_abs(ds4->gamepad, ABS_RZ, ds4_report->rz); value = ds4_report->buttons[0] & DS_BUTTONS0_HAT_SWITCH; if (value >= ARRAY_SIZE(ps_gamepad_hat_mapping)) value = 8; /* center */ input_report_abs(ds4->gamepad, ABS_HAT0X, ps_gamepad_hat_mapping[value].x); input_report_abs(ds4->gamepad, ABS_HAT0Y, ps_gamepad_hat_mapping[value].y); input_report_key(ds4->gamepad, BTN_WEST, ds4_report->buttons[0] & DS_BUTTONS0_SQUARE); input_report_key(ds4->gamepad, BTN_SOUTH, ds4_report->buttons[0] & DS_BUTTONS0_CROSS); input_report_key(ds4->gamepad, BTN_EAST, ds4_report->buttons[0] & DS_BUTTONS0_CIRCLE); input_report_key(ds4->gamepad, BTN_NORTH, ds4_report->buttons[0] & DS_BUTTONS0_TRIANGLE); input_report_key(ds4->gamepad, BTN_TL, ds4_report->buttons[1] & DS_BUTTONS1_L1); input_report_key(ds4->gamepad, BTN_TR, ds4_report->buttons[1] & DS_BUTTONS1_R1); input_report_key(ds4->gamepad, BTN_TL2, ds4_report->buttons[1] & DS_BUTTONS1_L2); input_report_key(ds4->gamepad, BTN_TR2, ds4_report->buttons[1] & DS_BUTTONS1_R2); input_report_key(ds4->gamepad, BTN_SELECT, ds4_report->buttons[1] & DS_BUTTONS1_CREATE); input_report_key(ds4->gamepad, BTN_START, ds4_report->buttons[1] & DS_BUTTONS1_OPTIONS); input_report_key(ds4->gamepad, BTN_THUMBL, ds4_report->buttons[1] & DS_BUTTONS1_L3); input_report_key(ds4->gamepad, BTN_THUMBR, ds4_report->buttons[1] & DS_BUTTONS1_R3); input_report_key(ds4->gamepad, BTN_MODE, ds4_report->buttons[2] & DS_BUTTONS2_PS_HOME); input_sync(ds4->gamepad); if (is_minimal) return 0; /* Parse and calibrate gyroscope data. */ for (i = 0; i < ARRAY_SIZE(ds4_report->gyro); i++) { int raw_data = (short)le16_to_cpu(ds4_report->gyro[i]); int calib_data = mult_frac(ds4->gyro_calib_data[i].sens_numer, raw_data, ds4->gyro_calib_data[i].sens_denom); input_report_abs(ds4->sensors, ds4->gyro_calib_data[i].abs_code, calib_data); } /* Parse and calibrate accelerometer data. 
*/ for (i = 0; i < ARRAY_SIZE(ds4_report->accel); i++) { int raw_data = (short)le16_to_cpu(ds4_report->accel[i]); int calib_data = mult_frac(ds4->accel_calib_data[i].sens_numer, raw_data - ds4->accel_calib_data[i].bias, ds4->accel_calib_data[i].sens_denom); input_report_abs(ds4->sensors, ds4->accel_calib_data[i].abs_code, calib_data); } /* Convert timestamp (in 5.33us unit) to timestamp_us */ sensor_timestamp = le16_to_cpu(ds4_report->sensor_timestamp); if (!ds4->sensor_timestamp_initialized) { ds4->sensor_timestamp_us = DIV_ROUND_CLOSEST(sensor_timestamp*16, 3); ds4->sensor_timestamp_initialized = true; } else { uint16_t delta; if (ds4->prev_sensor_timestamp > sensor_timestamp) delta = (U16_MAX - ds4->prev_sensor_timestamp + sensor_timestamp + 1); else delta = sensor_timestamp - ds4->prev_sensor_timestamp; ds4->sensor_timestamp_us += DIV_ROUND_CLOSEST(delta*16, 3); } ds4->prev_sensor_timestamp = sensor_timestamp; input_event(ds4->sensors, EV_MSC, MSC_TIMESTAMP, ds4->sensor_timestamp_us); input_sync(ds4->sensors); for (i = 0; i < num_touch_reports; i++) { struct dualshock4_touch_report *touch_report = &touch_reports[i]; for (j = 0; j < ARRAY_SIZE(touch_report->points); j++) { struct dualshock4_touch_point *point = &touch_report->points[j]; bool active = (point->contact & DS4_TOUCH_POINT_INACTIVE) ? false : true; input_mt_slot(ds4->touchpad, j); input_mt_report_slot_state(ds4->touchpad, MT_TOOL_FINGER, active); if (active) { int x = (point->x_hi << 8) | point->x_lo; int y = (point->y_hi << 4) | point->y_lo; input_report_abs(ds4->touchpad, ABS_MT_POSITION_X, x); input_report_abs(ds4->touchpad, ABS_MT_POSITION_Y, y); } } input_mt_sync_frame(ds4->touchpad); input_sync(ds4->touchpad); } input_report_key(ds4->touchpad, BTN_LEFT, ds4_report->buttons[2] & DS_BUTTONS2_TOUCHPAD); /* * Interpretation of the battery_capacity data depends on the cable state. * When no cable is connected (bit4 is 0): * - 0:10: percentage in units of 10%. * When a cable is plugged in: * - 0-10: percentage in units of 10%. * - 11: battery is full * - 14: not charging due to Voltage or temperature error * - 15: charge error */ if (ds4_report->status[0] & DS4_STATUS0_CABLE_STATE) { uint8_t battery_data = ds4_report->status[0] & DS4_STATUS0_BATTERY_CAPACITY; if (battery_data < 10) { /* Take the mid-point for each battery capacity value, * because on the hardware side 0 = 0-9%, 1=10-19%, etc. * This matches official platform behavior, which does * the same. 
*/ battery_capacity = battery_data * 10 + 5; battery_status = POWER_SUPPLY_STATUS_CHARGING; } else if (battery_data == 10) { battery_capacity = 100; battery_status = POWER_SUPPLY_STATUS_CHARGING; } else if (battery_data == DS4_BATTERY_STATUS_FULL) { battery_capacity = 100; battery_status = POWER_SUPPLY_STATUS_FULL; } else { /* 14, 15 and undefined values */ battery_capacity = 0; battery_status = POWER_SUPPLY_STATUS_UNKNOWN; } } else { uint8_t battery_data = ds4_report->status[0] & DS4_STATUS0_BATTERY_CAPACITY; if (battery_data < 10) battery_capacity = battery_data * 10 + 5; else /* 10 */ battery_capacity = 100; battery_status = POWER_SUPPLY_STATUS_DISCHARGING; } spin_lock_irqsave(&ps_dev->lock, flags); ps_dev->battery_capacity = battery_capacity; ps_dev->battery_status = battery_status; spin_unlock_irqrestore(&ps_dev->lock, flags); return 0; } static int dualshock4_dongle_parse_report(struct ps_device *ps_dev, struct hid_report *report, u8 *data, int size) { struct dualshock4 *ds4 = container_of(ps_dev, struct dualshock4, base); bool connected = false; /* The dongle reports data using the main USB report (0x1) no matter whether a controller * is connected with mostly zeros. The report does contain dongle status, which we use to * determine if a controller is connected and if so we forward to the regular DualShock4 * parsing code. */ if (data[0] == DS4_INPUT_REPORT_USB && size == DS4_INPUT_REPORT_USB_SIZE) { struct dualshock4_input_report_common *ds4_report = (struct dualshock4_input_report_common *)&data[1]; unsigned long flags; connected = ds4_report->status[1] & DS4_STATUS1_DONGLE_STATE ? false : true; if (ds4->dongle_state == DONGLE_DISCONNECTED && connected) { hid_info(ps_dev->hdev, "DualShock 4 USB dongle: controller connected\n"); dualshock4_set_default_lightbar_colors(ds4); spin_lock_irqsave(&ps_dev->lock, flags); ds4->dongle_state = DONGLE_CALIBRATING; spin_unlock_irqrestore(&ps_dev->lock, flags); schedule_work(&ds4->dongle_hotplug_worker); /* Don't process the report since we don't have * calibration data, but let hidraw have it anyway. */ return 0; } else if ((ds4->dongle_state == DONGLE_CONNECTED || ds4->dongle_state == DONGLE_DISABLED) && !connected) { hid_info(ps_dev->hdev, "DualShock 4 USB dongle: controller disconnected\n"); spin_lock_irqsave(&ps_dev->lock, flags); ds4->dongle_state = DONGLE_DISCONNECTED; spin_unlock_irqrestore(&ps_dev->lock, flags); /* Return 0, so hidraw can get the report. */ return 0; } else if (ds4->dongle_state == DONGLE_CALIBRATING || ds4->dongle_state == DONGLE_DISABLED || ds4->dongle_state == DONGLE_DISCONNECTED) { /* Return 0, so hidraw can get the report. 
*/ return 0; } } if (connected) return dualshock4_parse_report(ps_dev, report, data, size); return 0; } static int dualshock4_play_effect(struct input_dev *dev, void *data, struct ff_effect *effect) { struct hid_device *hdev = input_get_drvdata(dev); struct dualshock4 *ds4 = hid_get_drvdata(hdev); unsigned long flags; if (effect->type != FF_RUMBLE) return 0; spin_lock_irqsave(&ds4->base.lock, flags); ds4->update_rumble = true; ds4->motor_left = effect->u.rumble.strong_magnitude / 256; ds4->motor_right = effect->u.rumble.weak_magnitude / 256; spin_unlock_irqrestore(&ds4->base.lock, flags); dualshock4_schedule_work(ds4); return 0; } static void dualshock4_remove(struct ps_device *ps_dev) { struct dualshock4 *ds4 = container_of(ps_dev, struct dualshock4, base); unsigned long flags; spin_lock_irqsave(&ds4->base.lock, flags); ds4->output_worker_initialized = false; spin_unlock_irqrestore(&ds4->base.lock, flags); cancel_work_sync(&ds4->output_worker); if (ps_dev->hdev->product == USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE) cancel_work_sync(&ds4->dongle_hotplug_worker); } static inline void dualshock4_schedule_work(struct dualshock4 *ds4) { unsigned long flags; spin_lock_irqsave(&ds4->base.lock, flags); if (ds4->output_worker_initialized) schedule_work(&ds4->output_worker); spin_unlock_irqrestore(&ds4->base.lock, flags); } static void dualshock4_set_bt_poll_interval(struct dualshock4 *ds4, uint8_t interval) { ds4->bt_poll_interval = interval; ds4->update_bt_poll_interval = true; dualshock4_schedule_work(ds4); } /* Set default lightbar color based on player. */ static void dualshock4_set_default_lightbar_colors(struct dualshock4 *ds4) { /* Use same player colors as PlayStation 4. * Array of colors is in RGB. */ static const int player_colors[4][3] = { { 0x00, 0x00, 0x40 }, /* Blue */ { 0x40, 0x00, 0x00 }, /* Red */ { 0x00, 0x40, 0x00 }, /* Green */ { 0x20, 0x00, 0x20 } /* Pink */ }; uint8_t player_id = ds4->base.player_id % ARRAY_SIZE(player_colors); ds4->lightbar_enabled = true; ds4->lightbar_red = player_colors[player_id][0]; ds4->lightbar_green = player_colors[player_id][1]; ds4->lightbar_blue = player_colors[player_id][2]; ds4->update_lightbar = true; dualshock4_schedule_work(ds4); } static struct ps_device *dualshock4_create(struct hid_device *hdev) { struct dualshock4 *ds4; struct ps_device *ps_dev; uint8_t max_output_report_size; int i, ret; /* The DualShock4 has an RGB lightbar, which the original hid-sony driver * exposed as a set of 4 LEDs for the 3 color channels and a global control. * Ideally this should have used the multi-color LED class, which didn't exist * yet. In addition the driver used a naming scheme not compliant with the LED * naming spec by using "<mac_address>:<color>", which contained many colons. * We use a more compliant by using "<device_name>:<color>" name now. Ideally * would have been "<device_name>:<color>:indicator", but that would break * existing applications (e.g. Android). Nothing matches against MAC address. 
*/ static const struct ps_led_info lightbar_leds_info[] = { { NULL, "red", 255, dualshock4_led_get_brightness, dualshock4_led_set_brightness }, { NULL, "green", 255, dualshock4_led_get_brightness, dualshock4_led_set_brightness }, { NULL, "blue", 255, dualshock4_led_get_brightness, dualshock4_led_set_brightness }, { NULL, "global", 1, dualshock4_led_get_brightness, dualshock4_led_set_brightness, dualshock4_led_set_blink }, }; ds4 = devm_kzalloc(&hdev->dev, sizeof(*ds4), GFP_KERNEL); if (!ds4) return ERR_PTR(-ENOMEM); /* * Patch version to allow userspace to distinguish between * hid-generic vs hid-playstation axis and button mapping. */ hdev->version |= HID_PLAYSTATION_VERSION_PATCH; ps_dev = &ds4->base; ps_dev->hdev = hdev; spin_lock_init(&ps_dev->lock); ps_dev->battery_capacity = 100; /* initial value until parse_report. */ ps_dev->battery_status = POWER_SUPPLY_STATUS_UNKNOWN; ps_dev->parse_report = dualshock4_parse_report; ps_dev->remove = dualshock4_remove; INIT_WORK(&ds4->output_worker, dualshock4_output_worker); ds4->output_worker_initialized = true; hid_set_drvdata(hdev, ds4); max_output_report_size = sizeof(struct dualshock4_output_report_bt); ds4->output_report_dmabuf = devm_kzalloc(&hdev->dev, max_output_report_size, GFP_KERNEL); if (!ds4->output_report_dmabuf) return ERR_PTR(-ENOMEM); if (hdev->product == USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE) { ds4->dongle_state = DONGLE_DISCONNECTED; INIT_WORK(&ds4->dongle_hotplug_worker, dualshock4_dongle_calibration_work); /* Override parse report for dongle specific hotplug handling. */ ps_dev->parse_report = dualshock4_dongle_parse_report; } ret = dualshock4_get_mac_address(ds4); if (ret) { hid_err(hdev, "Failed to get MAC address from DualShock4\n"); return ERR_PTR(ret); } snprintf(hdev->uniq, sizeof(hdev->uniq), "%pMR", ds4->base.mac_address); ret = dualshock4_get_firmware_info(ds4); if (ret) { hid_warn(hdev, "Failed to get firmware info from DualShock4\n"); hid_warn(hdev, "HW/FW version data in sysfs will be invalid.\n"); } ret = ps_devices_list_add(ps_dev); if (ret) return ERR_PTR(ret); ret = dualshock4_get_calibration_data(ds4); if (ret) { hid_warn(hdev, "Failed to get calibration data from DualShock4\n"); hid_warn(hdev, "Gyroscope and accelerometer will be inaccurate.\n"); } ds4->gamepad = ps_gamepad_create(hdev, dualshock4_play_effect); if (IS_ERR(ds4->gamepad)) { ret = PTR_ERR(ds4->gamepad); goto err; } /* Use gamepad input device name as primary device name for e.g. LEDs */ ps_dev->input_dev_name = dev_name(&ds4->gamepad->dev); ds4->sensors = ps_sensors_create(hdev, DS4_ACC_RANGE, DS4_ACC_RES_PER_G, DS4_GYRO_RANGE, DS4_GYRO_RES_PER_DEG_S); if (IS_ERR(ds4->sensors)) { ret = PTR_ERR(ds4->sensors); goto err; } ds4->touchpad = ps_touchpad_create(hdev, DS4_TOUCHPAD_WIDTH, DS4_TOUCHPAD_HEIGHT, 2); if (IS_ERR(ds4->touchpad)) { ret = PTR_ERR(ds4->touchpad); goto err; } ret = ps_device_register_battery(ps_dev); if (ret) goto err; for (i = 0; i < ARRAY_SIZE(lightbar_leds_info); i++) { const struct ps_led_info *led_info = &lightbar_leds_info[i]; ret = ps_led_register(ps_dev, &ds4->lightbar_leds[i], led_info); if (ret < 0) goto err; } dualshock4_set_bt_poll_interval(ds4, DS4_BT_DEFAULT_POLL_INTERVAL_MS); ret = ps_device_set_player_id(ps_dev); if (ret) { hid_err(hdev, "Failed to assign player id for DualShock4: %d\n", ret); goto err; } dualshock4_set_default_lightbar_colors(ds4); /* * Reporting hardware and firmware is important as there are frequent updates, which * can change behavior. 
*/ hid_info(hdev, "Registered DualShock4 controller hw_version=0x%08x fw_version=0x%08x\n", ds4->base.hw_version, ds4->base.fw_version); return &ds4->base; err: ps_devices_list_remove(ps_dev); return ERR_PTR(ret); } static int ps_raw_event(struct hid_device *hdev, struct hid_report *report, u8 *data, int size) { struct ps_device *dev = hid_get_drvdata(hdev); if (dev && dev->parse_report) return dev->parse_report(dev, report, data, size); return 0; } static int ps_probe(struct hid_device *hdev, const struct hid_device_id *id) { struct ps_device *dev; int ret; ret = hid_parse(hdev); if (ret) { hid_err(hdev, "Parse failed\n"); return ret; } ret = hid_hw_start(hdev, HID_CONNECT_HIDRAW); if (ret) { hid_err(hdev, "Failed to start HID device\n"); return ret; } ret = hid_hw_open(hdev); if (ret) { hid_err(hdev, "Failed to open HID device\n"); goto err_stop; } if (id->driver_data == PS_TYPE_PS4_DUALSHOCK4) { dev = dualshock4_create(hdev); if (IS_ERR(dev)) { hid_err(hdev, "Failed to create dualshock4.\n"); ret = PTR_ERR(dev); goto err_close; } } else if (id->driver_data == PS_TYPE_PS5_DUALSENSE) { dev = dualsense_create(hdev); if (IS_ERR(dev)) { hid_err(hdev, "Failed to create dualsense.\n"); ret = PTR_ERR(dev); goto err_close; } } return ret; err_close: hid_hw_close(hdev); err_stop: hid_hw_stop(hdev); return ret; } static void ps_remove(struct hid_device *hdev) { struct ps_device *dev = hid_get_drvdata(hdev); ps_devices_list_remove(dev); ps_device_release_player_id(dev); if (dev->remove) dev->remove(dev); hid_hw_close(hdev); hid_hw_stop(hdev); } static const struct hid_device_id ps_devices[] = { /* Sony DualShock 4 controllers for PS4 */ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER), .driver_data = PS_TYPE_PS4_DUALSHOCK4 }, { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER), .driver_data = PS_TYPE_PS4_DUALSHOCK4 }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2), .driver_data = PS_TYPE_PS4_DUALSHOCK4 }, { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2), .driver_data = PS_TYPE_PS4_DUALSHOCK4 }, { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE), .driver_data = PS_TYPE_PS4_DUALSHOCK4 }, /* Sony DualSense controllers for PS5 */ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS5_CONTROLLER), .driver_data = PS_TYPE_PS5_DUALSENSE }, { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS5_CONTROLLER), .driver_data = PS_TYPE_PS5_DUALSENSE }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS5_CONTROLLER_2), .driver_data = PS_TYPE_PS5_DUALSENSE }, { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS5_CONTROLLER_2), .driver_data = PS_TYPE_PS5_DUALSENSE }, { } }; MODULE_DEVICE_TABLE(hid, ps_devices); static struct hid_driver ps_driver = { .name = "playstation", .id_table = ps_devices, .probe = ps_probe, .remove = ps_remove, .raw_event = ps_raw_event, .driver = { .dev_groups = ps_device_groups, }, }; static int __init ps_init(void) { return hid_register_driver(&ps_driver); } static void __exit ps_exit(void) { hid_unregister_driver(&ps_driver); ida_destroy(&ps_player_id_allocator); } module_init(ps_init); module_exit(ps_exit); MODULE_AUTHOR("Sony Interactive Entertainment"); MODULE_DESCRIPTION("HID Driver for PlayStation peripherals."); MODULE_LICENSE("GPL"); |
// SPDX-License-Identifier: GPL-2.0-only

#include "netlink.h"
#include "common.h"
#include "bitset.h"

struct eee_req_info {
	struct ethnl_req_info		base;
};

struct eee_reply_data {
	struct ethnl_reply_data		base;
	struct ethtool_keee		eee;
};

#define EEE_REPDATA(__reply_base) \
	container_of(__reply_base, struct eee_reply_data, base)

const struct nla_policy ethnl_eee_get_policy[] = {
	[ETHTOOL_A_EEE_HEADER]		=
		NLA_POLICY_NESTED(ethnl_header_policy),
};

static int eee_prepare_data(const struct ethnl_req_info *req_base,
			    struct ethnl_reply_data *reply_base,
			    const struct genl_info *info)
{
	struct eee_reply_data *data = EEE_REPDATA(reply_base);
	struct net_device *dev = reply_base->dev;
	struct ethtool_keee *eee = &data->eee;
	int ret;

	if (!dev->ethtool_ops->get_eee)
		return -EOPNOTSUPP;
	ret = ethnl_ops_begin(dev);
	if (ret < 0)
		return ret;
	ret = dev->ethtool_ops->get_eee(dev, eee);
	ethnl_ops_complete(dev);

	return ret;
}

static int eee_reply_size(const struct ethnl_req_info *req_base,
			  const struct ethnl_reply_data *reply_base)
{
	bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS;
	const struct eee_reply_data *data = EEE_REPDATA(reply_base);
	const struct ethtool_keee *eee = &data->eee;
	int len = 0;
	int ret;

	/* MODES_OURS */
	ret = ethnl_bitset_size(eee->advertised, eee->supported,
				__ETHTOOL_LINK_MODE_MASK_NBITS,
				link_mode_names, compact);
	if (ret < 0)
		return ret;
	len += ret;
	/* MODES_PEERS */
	ret = ethnl_bitset_size(eee->lp_advertised, NULL,
				__ETHTOOL_LINK_MODE_MASK_NBITS,
				link_mode_names, compact);
	if (ret < 0)
		return ret;
	len += ret;

	len += nla_total_size(sizeof(u8)) +	/* _EEE_ACTIVE */
	       nla_total_size(sizeof(u8)) +	/* _EEE_ENABLED */
	       nla_total_size(sizeof(u8)) +	/* _EEE_TX_LPI_ENABLED */
	       nla_total_size(sizeof(u32));	/* _EEE_TX_LPI_TIMER */

	return len;
}

static int eee_fill_reply(struct sk_buff *skb,
			  const struct ethnl_req_info *req_base,
			  const struct ethnl_reply_data *reply_base)
{
	bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS;
	const struct eee_reply_data *data = EEE_REPDATA(reply_base);
	const struct ethtool_keee *eee = &data->eee;
	int ret;

	ret = ethnl_put_bitset(skb, ETHTOOL_A_EEE_MODES_OURS,
			       eee->advertised, eee->supported,
			       __ETHTOOL_LINK_MODE_MASK_NBITS,
			       link_mode_names, compact);
	if (ret < 0)
		return ret;
	ret = ethnl_put_bitset(skb, ETHTOOL_A_EEE_MODES_PEER,
			       eee->lp_advertised, NULL,
			       __ETHTOOL_LINK_MODE_MASK_NBITS,
			       link_mode_names, compact);
	if (ret < 0)
		return ret;

	if (nla_put_u8(skb, ETHTOOL_A_EEE_ACTIVE, eee->eee_active) ||
	    nla_put_u8(skb, ETHTOOL_A_EEE_ENABLED, eee->eee_enabled) ||
	    nla_put_u8(skb, ETHTOOL_A_EEE_TX_LPI_ENABLED, eee->tx_lpi_enabled) ||
	    nla_put_u32(skb, ETHTOOL_A_EEE_TX_LPI_TIMER, eee->tx_lpi_timer))
		return -EMSGSIZE;

	return 0;
}

/* EEE_SET */

const struct nla_policy ethnl_eee_set_policy[] = {
	[ETHTOOL_A_EEE_HEADER]		=
		NLA_POLICY_NESTED(ethnl_header_policy),
	[ETHTOOL_A_EEE_MODES_OURS]	= { .type = NLA_NESTED },
	[ETHTOOL_A_EEE_ENABLED]		= { .type = NLA_U8 },
	[ETHTOOL_A_EEE_TX_LPI_ENABLED]	= { .type = NLA_U8 },
	[ETHTOOL_A_EEE_TX_LPI_TIMER]	= { .type = NLA_U32 },
};

static int
ethnl_set_eee_validate(struct ethnl_req_info *req_info, struct genl_info *info)
{
	const struct ethtool_ops *ops = req_info->dev->ethtool_ops;

	return ops->get_eee && ops->set_eee ? 1 : -EOPNOTSUPP;
}

static int
ethnl_set_eee(struct ethnl_req_info *req_info, struct genl_info *info)
{
	struct net_device *dev = req_info->dev;
	struct nlattr **tb = info->attrs;
	struct ethtool_keee eee = {};
	bool mod = false;
	int ret;

	ret = dev->ethtool_ops->get_eee(dev, &eee);
	if (ret < 0)
		return ret;

	ret = ethnl_update_bitset(eee.advertised,
				  __ETHTOOL_LINK_MODE_MASK_NBITS,
				  tb[ETHTOOL_A_EEE_MODES_OURS],
				  link_mode_names, info->extack, &mod);
	if (ret < 0)
		return ret;
	ethnl_update_bool(&eee.eee_enabled, tb[ETHTOOL_A_EEE_ENABLED], &mod);
	ethnl_update_bool(&eee.tx_lpi_enabled,
			  tb[ETHTOOL_A_EEE_TX_LPI_ENABLED], &mod);
	ethnl_update_u32(&eee.tx_lpi_timer, tb[ETHTOOL_A_EEE_TX_LPI_TIMER],
			 &mod);
	if (!mod)
		return 0;

	ret = dev->ethtool_ops->set_eee(dev, &eee);
	return ret < 0 ? ret : 1;
}

const struct ethnl_request_ops ethnl_eee_request_ops = {
	.request_cmd		= ETHTOOL_MSG_EEE_GET,
	.reply_cmd		= ETHTOOL_MSG_EEE_GET_REPLY,
	.hdr_attr		= ETHTOOL_A_EEE_HEADER,
	.req_info_size		= sizeof(struct eee_req_info),
	.reply_data_size	= sizeof(struct eee_reply_data),
	.prepare_data		= eee_prepare_data,
	.reply_size		= eee_reply_size,
	.fill_reply		= eee_fill_reply,

	.set_validate		= ethnl_set_eee_validate,
	.set			= ethnl_set_eee,
	.set_ntf_cmd		= ETHTOOL_MSG_EEE_NTF,
};
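/*
 * Illustrative sketch, not part of net/ethtool/eee.c above: a minimal
 * driver-side get_eee/set_eee pair of the kind the ETHTOOL_MSG_EEE_GET/SET
 * handlers above reach through dev->ethtool_ops. The foo_priv structure and
 * the simplifications (eee_active mirrors eee_enabled, set_eee only caches
 * the request) are hypothetical; only ethtool_keee fields used by the code
 * above are touched.
 */
#include <linux/ethtool.h>
#include <linux/linkmode.h>
#include <linux/netdevice.h>

struct foo_priv {
	struct ethtool_keee eee;	/* last configuration accepted */
};

static int foo_get_eee(struct net_device *dev, struct ethtool_keee *eee)
{
	struct foo_priv *priv = netdev_priv(dev);

	/* Report what the hardware could advertise and what is configured. */
	linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, eee->supported);
	linkmode_copy(eee->advertised, priv->eee.advertised);
	eee->eee_enabled = priv->eee.eee_enabled;
	eee->tx_lpi_enabled = priv->eee.tx_lpi_enabled;
	eee->tx_lpi_timer = priv->eee.tx_lpi_timer;
	eee->eee_active = priv->eee.eee_enabled;	/* simplification */

	return 0;
}

static int foo_set_eee(struct net_device *dev, struct ethtool_keee *eee)
{
	struct foo_priv *priv = netdev_priv(dev);

	/* A real driver would program the MAC/PHY here. */
	priv->eee = *eee;

	return 0;
}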
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2017 Intel Corporation. All rights reserved.
*/ #include <linux/pagemap.h> #include <linux/module.h> #include <linux/mount.h> #include <linux/pseudo_fs.h> #include <linux/magic.h> #include <linux/pfn_t.h> #include <linux/cdev.h> #include <linux/slab.h> #include <linux/uio.h> #include <linux/dax.h> #include <linux/fs.h> #include <linux/cacheinfo.h> #include "dax-private.h" /** * struct dax_device - anchor object for dax services * @inode: core vfs * @cdev: optional character interface for "device dax" * @private: dax driver private data * @flags: state and boolean properties * @ops: operations for this device * @holder_data: holder of a dax_device: could be filesystem or mapped device * @holder_ops: operations for the inner holder */ struct dax_device { struct inode inode; struct cdev cdev; void *private; unsigned long flags; const struct dax_operations *ops; void *holder_data; const struct dax_holder_operations *holder_ops; }; static dev_t dax_devt; DEFINE_STATIC_SRCU(dax_srcu); static struct vfsmount *dax_mnt; static DEFINE_IDA(dax_minor_ida); static struct kmem_cache *dax_cache __read_mostly; static struct super_block *dax_superblock __read_mostly; int dax_read_lock(void) { return srcu_read_lock(&dax_srcu); } EXPORT_SYMBOL_GPL(dax_read_lock); void dax_read_unlock(int id) { srcu_read_unlock(&dax_srcu, id); } EXPORT_SYMBOL_GPL(dax_read_unlock); #if defined(CONFIG_BLOCK) && defined(CONFIG_FS_DAX) #include <linux/blkdev.h> static DEFINE_XARRAY(dax_hosts); int dax_add_host(struct dax_device *dax_dev, struct gendisk *disk) { return xa_insert(&dax_hosts, (unsigned long)disk, dax_dev, GFP_KERNEL); } EXPORT_SYMBOL_GPL(dax_add_host); void dax_remove_host(struct gendisk *disk) { xa_erase(&dax_hosts, (unsigned long)disk); } EXPORT_SYMBOL_GPL(dax_remove_host); /** * fs_dax_get_by_bdev() - temporary lookup mechanism for filesystem-dax * @bdev: block device to find a dax_device for * @start_off: returns the byte offset into the dax_device that @bdev starts * @holder: filesystem or mapped device inside the dax_device * @ops: operations for the inner holder */ struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev, u64 *start_off, void *holder, const struct dax_holder_operations *ops) { struct dax_device *dax_dev; u64 part_size; int id; if (!blk_queue_dax(bdev->bd_disk->queue)) return NULL; *start_off = get_start_sect(bdev) * SECTOR_SIZE; part_size = bdev_nr_sectors(bdev) * SECTOR_SIZE; if (*start_off % PAGE_SIZE || part_size % PAGE_SIZE) { pr_info("%pg: error: unaligned partition for dax\n", bdev); return NULL; } id = dax_read_lock(); dax_dev = xa_load(&dax_hosts, (unsigned long)bdev->bd_disk); if (!dax_dev || !dax_alive(dax_dev) || !igrab(&dax_dev->inode)) dax_dev = NULL; else if (holder) { if (!cmpxchg(&dax_dev->holder_data, NULL, holder)) dax_dev->holder_ops = ops; else dax_dev = NULL; } dax_read_unlock(id); return dax_dev; } EXPORT_SYMBOL_GPL(fs_dax_get_by_bdev); void fs_put_dax(struct dax_device *dax_dev, void *holder) { if (dax_dev && holder && cmpxchg(&dax_dev->holder_data, holder, NULL) == holder) dax_dev->holder_ops = NULL; put_dax(dax_dev); } EXPORT_SYMBOL_GPL(fs_put_dax); #endif /* CONFIG_BLOCK && CONFIG_FS_DAX */ enum dax_device_flags { /* !alive + rcu grace period == no new operations / mappings */ DAXDEV_ALIVE, /* gate whether dax_flush() calls the low level flush routine */ DAXDEV_WRITE_CACHE, /* flag to check if device supports synchronous flush */ DAXDEV_SYNC, /* do not leave the caches dirty after writes */ DAXDEV_NOCACHE, /* handle CPU fetch exceptions during reads */ DAXDEV_NOMC, }; /** * dax_direct_access() - 
translate a device pgoff to an absolute pfn * @dax_dev: a dax_device instance representing the logical memory range * @pgoff: offset in pages from the start of the device to translate * @nr_pages: number of consecutive pages caller can handle relative to @pfn * @mode: indicator on normal access or recovery write * @kaddr: output parameter that returns a virtual address mapping of pfn * @pfn: output parameter that returns an absolute pfn translation of @pgoff * * Return: negative errno if an error occurs, otherwise the number of * pages accessible at the device relative @pgoff. */ long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages, enum dax_access_mode mode, void **kaddr, pfn_t *pfn) { long avail; if (!dax_dev) return -EOPNOTSUPP; if (!dax_alive(dax_dev)) return -ENXIO; if (nr_pages < 0) return -EINVAL; avail = dax_dev->ops->direct_access(dax_dev, pgoff, nr_pages, mode, kaddr, pfn); if (!avail) return -ERANGE; return min(avail, nr_pages); } EXPORT_SYMBOL_GPL(dax_direct_access); size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i) { if (!dax_alive(dax_dev)) return 0; /* * The userspace address for the memory copy has already been validated * via access_ok() in vfs_write, so use the 'no check' version to bypass * the HARDENED_USERCOPY overhead. */ if (test_bit(DAXDEV_NOCACHE, &dax_dev->flags)) return _copy_from_iter_flushcache(addr, bytes, i); return _copy_from_iter(addr, bytes, i); } size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i) { if (!dax_alive(dax_dev)) return 0; /* * The userspace address for the memory copy has already been validated * via access_ok() in vfs_red, so use the 'no check' version to bypass * the HARDENED_USERCOPY overhead. */ if (test_bit(DAXDEV_NOMC, &dax_dev->flags)) return _copy_mc_to_iter(addr, bytes, i); return _copy_to_iter(addr, bytes, i); } int dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff, size_t nr_pages) { int ret; if (!dax_alive(dax_dev)) return -ENXIO; /* * There are no callers that want to zero more than one page as of now. * Once users are there, this check can be removed after the * device mapper code has been updated to split ranges across targets. 
*/ if (nr_pages != 1) return -EIO; ret = dax_dev->ops->zero_page_range(dax_dev, pgoff, nr_pages); return dax_mem2blk_err(ret); } EXPORT_SYMBOL_GPL(dax_zero_page_range); size_t dax_recovery_write(struct dax_device *dax_dev, pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *iter) { if (!dax_dev->ops->recovery_write) return 0; return dax_dev->ops->recovery_write(dax_dev, pgoff, addr, bytes, iter); } EXPORT_SYMBOL_GPL(dax_recovery_write); int dax_holder_notify_failure(struct dax_device *dax_dev, u64 off, u64 len, int mf_flags) { int rc, id; id = dax_read_lock(); if (!dax_alive(dax_dev)) { rc = -ENXIO; goto out; } if (!dax_dev->holder_ops) { rc = -EOPNOTSUPP; goto out; } rc = dax_dev->holder_ops->notify_failure(dax_dev, off, len, mf_flags); out: dax_read_unlock(id); return rc; } EXPORT_SYMBOL_GPL(dax_holder_notify_failure); #ifdef CONFIG_ARCH_HAS_PMEM_API void arch_wb_cache_pmem(void *addr, size_t size); void dax_flush(struct dax_device *dax_dev, void *addr, size_t size) { if (unlikely(!dax_write_cache_enabled(dax_dev))) return; arch_wb_cache_pmem(addr, size); } #else void dax_flush(struct dax_device *dax_dev, void *addr, size_t size) { } #endif EXPORT_SYMBOL_GPL(dax_flush); void dax_write_cache(struct dax_device *dax_dev, bool wc) { if (wc) set_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags); else clear_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags); } EXPORT_SYMBOL_GPL(dax_write_cache); bool dax_write_cache_enabled(struct dax_device *dax_dev) { return test_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags); } EXPORT_SYMBOL_GPL(dax_write_cache_enabled); bool dax_synchronous(struct dax_device *dax_dev) { return test_bit(DAXDEV_SYNC, &dax_dev->flags); } EXPORT_SYMBOL_GPL(dax_synchronous); void set_dax_synchronous(struct dax_device *dax_dev) { set_bit(DAXDEV_SYNC, &dax_dev->flags); } EXPORT_SYMBOL_GPL(set_dax_synchronous); void set_dax_nocache(struct dax_device *dax_dev) { set_bit(DAXDEV_NOCACHE, &dax_dev->flags); } EXPORT_SYMBOL_GPL(set_dax_nocache); void set_dax_nomc(struct dax_device *dax_dev) { set_bit(DAXDEV_NOMC, &dax_dev->flags); } EXPORT_SYMBOL_GPL(set_dax_nomc); bool dax_alive(struct dax_device *dax_dev) { lockdep_assert_held(&dax_srcu); return test_bit(DAXDEV_ALIVE, &dax_dev->flags); } EXPORT_SYMBOL_GPL(dax_alive); /* * Note, rcu is not protecting the liveness of dax_dev, rcu is ensuring * that any fault handlers or operations that might have seen * dax_alive(), have completed. Any operations that start after * synchronize_srcu() has run will abort upon seeing !dax_alive(). * * Note, because alloc_dax() returns an ERR_PTR() on error, callers * typically store its result into a local variable in order to check * the result. Therefore, care must be taken to populate the struct * device dax_dev field make sure the dax_dev is not leaked. 
*/ void kill_dax(struct dax_device *dax_dev) { if (!dax_dev) return; if (dax_dev->holder_data != NULL) dax_holder_notify_failure(dax_dev, 0, U64_MAX, MF_MEM_PRE_REMOVE); clear_bit(DAXDEV_ALIVE, &dax_dev->flags); synchronize_srcu(&dax_srcu); /* clear holder data */ dax_dev->holder_ops = NULL; dax_dev->holder_data = NULL; } EXPORT_SYMBOL_GPL(kill_dax); void run_dax(struct dax_device *dax_dev) { set_bit(DAXDEV_ALIVE, &dax_dev->flags); } EXPORT_SYMBOL_GPL(run_dax); static struct inode *dax_alloc_inode(struct super_block *sb) { struct dax_device *dax_dev; struct inode *inode; dax_dev = alloc_inode_sb(sb, dax_cache, GFP_KERNEL); if (!dax_dev) return NULL; inode = &dax_dev->inode; inode->i_rdev = 0; return inode; } static struct dax_device *to_dax_dev(struct inode *inode) { return container_of(inode, struct dax_device, inode); } static void dax_free_inode(struct inode *inode) { struct dax_device *dax_dev = to_dax_dev(inode); if (inode->i_rdev) ida_free(&dax_minor_ida, iminor(inode)); kmem_cache_free(dax_cache, dax_dev); } static void dax_destroy_inode(struct inode *inode) { struct dax_device *dax_dev = to_dax_dev(inode); WARN_ONCE(test_bit(DAXDEV_ALIVE, &dax_dev->flags), "kill_dax() must be called before final iput()\n"); } static const struct super_operations dax_sops = { .statfs = simple_statfs, .alloc_inode = dax_alloc_inode, .destroy_inode = dax_destroy_inode, .free_inode = dax_free_inode, .drop_inode = generic_delete_inode, }; static int dax_init_fs_context(struct fs_context *fc) { struct pseudo_fs_context *ctx = init_pseudo(fc, DAXFS_MAGIC); if (!ctx) return -ENOMEM; ctx->ops = &dax_sops; return 0; } static struct file_system_type dax_fs_type = { .name = "dax", .init_fs_context = dax_init_fs_context, .kill_sb = kill_anon_super, }; static int dax_test(struct inode *inode, void *data) { dev_t devt = *(dev_t *) data; return inode->i_rdev == devt; } static int dax_set(struct inode *inode, void *data) { dev_t devt = *(dev_t *) data; inode->i_rdev = devt; return 0; } static struct dax_device *dax_dev_get(dev_t devt) { struct dax_device *dax_dev; struct inode *inode; inode = iget5_locked(dax_superblock, hash_32(devt + DAXFS_MAGIC, 31), dax_test, dax_set, &devt); if (!inode) return NULL; dax_dev = to_dax_dev(inode); if (inode->i_state & I_NEW) { set_bit(DAXDEV_ALIVE, &dax_dev->flags); inode->i_cdev = &dax_dev->cdev; inode->i_mode = S_IFCHR; inode->i_flags = S_DAX; mapping_set_gfp_mask(&inode->i_data, GFP_USER); unlock_new_inode(inode); } return dax_dev; } struct dax_device *alloc_dax(void *private, const struct dax_operations *ops) { struct dax_device *dax_dev; dev_t devt; int minor; /* * Unavailable on architectures with virtually aliased data caches, * except for device-dax (NULL operations pointer), which does * not use aliased mappings from the kernel. 
*/ if (ops && cpu_dcache_is_aliasing()) return ERR_PTR(-EOPNOTSUPP); if (WARN_ON_ONCE(ops && !ops->zero_page_range)) return ERR_PTR(-EINVAL); minor = ida_alloc_max(&dax_minor_ida, MINORMASK, GFP_KERNEL); if (minor < 0) return ERR_PTR(-ENOMEM); devt = MKDEV(MAJOR(dax_devt), minor); dax_dev = dax_dev_get(devt); if (!dax_dev) goto err_dev; dax_dev->ops = ops; dax_dev->private = private; return dax_dev; err_dev: ida_free(&dax_minor_ida, minor); return ERR_PTR(-ENOMEM); } EXPORT_SYMBOL_GPL(alloc_dax); void put_dax(struct dax_device *dax_dev) { if (!dax_dev) return; iput(&dax_dev->inode); } EXPORT_SYMBOL_GPL(put_dax); /** * dax_holder() - obtain the holder of a dax device * @dax_dev: a dax_device instance * * Return: the holder's data which represents the holder if registered, * otherwize NULL. */ void *dax_holder(struct dax_device *dax_dev) { return dax_dev->holder_data; } EXPORT_SYMBOL_GPL(dax_holder); /** * inode_dax: convert a public inode into its dax_dev * @inode: An inode with i_cdev pointing to a dax_dev * * Note this is not equivalent to to_dax_dev() which is for private * internal use where we know the inode filesystem type == dax_fs_type. */ struct dax_device *inode_dax(struct inode *inode) { struct cdev *cdev = inode->i_cdev; return container_of(cdev, struct dax_device, cdev); } EXPORT_SYMBOL_GPL(inode_dax); struct inode *dax_inode(struct dax_device *dax_dev) { return &dax_dev->inode; } EXPORT_SYMBOL_GPL(dax_inode); void *dax_get_private(struct dax_device *dax_dev) { if (!test_bit(DAXDEV_ALIVE, &dax_dev->flags)) return NULL; return dax_dev->private; } EXPORT_SYMBOL_GPL(dax_get_private); static void init_once(void *_dax_dev) { struct dax_device *dax_dev = _dax_dev; struct inode *inode = &dax_dev->inode; memset(dax_dev, 0, sizeof(*dax_dev)); inode_init_once(inode); } static int dax_fs_init(void) { int rc; dax_cache = kmem_cache_create("dax_cache", sizeof(struct dax_device), 0, SLAB_HWCACHE_ALIGN | SLAB_RECLAIM_ACCOUNT | SLAB_ACCOUNT, init_once); if (!dax_cache) return -ENOMEM; dax_mnt = kern_mount(&dax_fs_type); if (IS_ERR(dax_mnt)) { rc = PTR_ERR(dax_mnt); goto err_mount; } dax_superblock = dax_mnt->mnt_sb; return 0; err_mount: kmem_cache_destroy(dax_cache); return rc; } static void dax_fs_exit(void) { kern_unmount(dax_mnt); rcu_barrier(); kmem_cache_destroy(dax_cache); } static int __init dax_core_init(void) { int rc; rc = dax_fs_init(); if (rc) return rc; rc = alloc_chrdev_region(&dax_devt, 0, MINORMASK+1, "dax"); if (rc) goto err_chrdev; rc = dax_bus_init(); if (rc) goto err_bus; return 0; err_bus: unregister_chrdev_region(dax_devt, MINORMASK+1); err_chrdev: dax_fs_exit(); return 0; } static void __exit dax_core_exit(void) { dax_bus_exit(); unregister_chrdev_region(dax_devt, MINORMASK+1); ida_destroy(&dax_minor_ida); dax_fs_exit(); } MODULE_AUTHOR("Intel Corporation"); MODULE_DESCRIPTION("DAX: direct access to differentiated memory"); MODULE_LICENSE("GPL v2"); subsys_initcall(dax_core_init); module_exit(dax_core_exit); |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2015, Marvell International Ltd.
 *
 * Inspired (hugely) by HCI LDISC implementation in Bluetooth.
* * Copyright (C) 2000-2001 Qualcomm Incorporated * Copyright (C) 2002-2003 Maxim Krasnyansky <maxk@qualcomm.com> * Copyright (C) 2004-2005 Marcel Holtmann <marcel@holtmann.org> */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/types.h> #include <linux/fcntl.h> #include <linux/interrupt.h> #include <linux/ptrace.h> #include <linux/poll.h> #include <linux/slab.h> #include <linux/tty.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/signal.h> #include <linux/ioctl.h> #include <linux/skbuff.h> #include <net/nfc/nci.h> #include <net/nfc/nci_core.h> /* TX states */ #define NCI_UART_SENDING 1 #define NCI_UART_TX_WAKEUP 2 static struct nci_uart *nci_uart_drivers[NCI_UART_DRIVER_MAX]; static inline struct sk_buff *nci_uart_dequeue(struct nci_uart *nu) { struct sk_buff *skb = nu->tx_skb; if (!skb) skb = skb_dequeue(&nu->tx_q); else nu->tx_skb = NULL; return skb; } static inline int nci_uart_queue_empty(struct nci_uart *nu) { if (nu->tx_skb) return 0; return skb_queue_empty(&nu->tx_q); } static int nci_uart_tx_wakeup(struct nci_uart *nu) { if (test_and_set_bit(NCI_UART_SENDING, &nu->tx_state)) { set_bit(NCI_UART_TX_WAKEUP, &nu->tx_state); return 0; } schedule_work(&nu->write_work); return 0; } static void nci_uart_write_work(struct work_struct *work) { struct nci_uart *nu = container_of(work, struct nci_uart, write_work); struct tty_struct *tty = nu->tty; struct sk_buff *skb; restart: clear_bit(NCI_UART_TX_WAKEUP, &nu->tx_state); if (nu->ops.tx_start) nu->ops.tx_start(nu); while ((skb = nci_uart_dequeue(nu))) { int len; set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); len = tty->ops->write(tty, skb->data, skb->len); skb_pull(skb, len); if (skb->len) { nu->tx_skb = skb; break; } kfree_skb(skb); } if (test_bit(NCI_UART_TX_WAKEUP, &nu->tx_state)) goto restart; if (nu->ops.tx_done && nci_uart_queue_empty(nu)) nu->ops.tx_done(nu); clear_bit(NCI_UART_SENDING, &nu->tx_state); } static int nci_uart_set_driver(struct tty_struct *tty, unsigned int driver) { struct nci_uart *nu = NULL; int ret; if (driver >= NCI_UART_DRIVER_MAX) return -EINVAL; if (!nci_uart_drivers[driver]) return -ENOENT; nu = kzalloc(sizeof(*nu), GFP_KERNEL); if (!nu) return -ENOMEM; memcpy(nu, nci_uart_drivers[driver], sizeof(struct nci_uart)); nu->tty = tty; tty->disc_data = nu; skb_queue_head_init(&nu->tx_q); INIT_WORK(&nu->write_work, nci_uart_write_work); spin_lock_init(&nu->rx_lock); ret = nu->ops.open(nu); if (ret) { tty->disc_data = NULL; kfree(nu); } else if (!try_module_get(nu->owner)) { nu->ops.close(nu); tty->disc_data = NULL; kfree(nu); return -ENOENT; } return ret; } /* ------ LDISC part ------ */ /* nci_uart_tty_open * * Called when line discipline changed to NCI_UART. * * Arguments: * tty pointer to tty info structure * Return Value: * 0 if success, otherwise error code */ static int nci_uart_tty_open(struct tty_struct *tty) { /* Error if the tty has no write op instead of leaving an exploitable * hole */ if (!tty->ops->write) return -EOPNOTSUPP; tty->disc_data = NULL; tty->receive_room = 65536; /* Flush any pending characters in the driver */ tty_driver_flush_buffer(tty); return 0; } /* nci_uart_tty_close() * * Called when the line discipline is changed to something * else, the tty is closed, or the tty detects a hangup. 
*/ static void nci_uart_tty_close(struct tty_struct *tty) { struct nci_uart *nu = tty->disc_data; /* Detach from the tty */ tty->disc_data = NULL; if (!nu) return; kfree_skb(nu->tx_skb); kfree_skb(nu->rx_skb); skb_queue_purge(&nu->tx_q); nu->ops.close(nu); nu->tty = NULL; module_put(nu->owner); cancel_work_sync(&nu->write_work); kfree(nu); } /* nci_uart_tty_wakeup() * * Callback for transmit wakeup. Called when the low-level * device driver can accept more send data. * * Arguments: tty pointer to associated tty instance data * Return Value: None */ static void nci_uart_tty_wakeup(struct tty_struct *tty) { struct nci_uart *nu = tty->disc_data; if (!nu) return; clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); if (tty != nu->tty) return; nci_uart_tx_wakeup(nu); } /* -- Default recv_buf handler -- * * This handler assumes that NCI frames are sent over the UART link without any * framing. It reads the NCI header, retrieves the packet size and, once all * packet bytes are received, passes the packet to the nci_uart driver for processing. */ static int nci_uart_default_recv_buf(struct nci_uart *nu, const u8 *data, int count) { int chunk_len; if (!nu->ndev) { nfc_err(nu->tty->dev, "receive data from tty but no NCI dev is attached yet, drop buffer\n"); return 0; } /* Decode all incoming data into packets * and enqueue them for processing. */ while (count > 0) { /* If this is the first data of a packet, allocate a buffer */ if (!nu->rx_skb) { nu->rx_packet_len = -1; nu->rx_skb = nci_skb_alloc(nu->ndev, NCI_MAX_PACKET_SIZE, GFP_ATOMIC); if (!nu->rx_skb) return -ENOMEM; } /* Eat byte after byte until the full packet header is received */ if (nu->rx_skb->len < NCI_CTRL_HDR_SIZE) { skb_put_u8(nu->rx_skb, *data++); --count; continue; } /* Header was received but packet len was not read */ if (nu->rx_packet_len < 0) nu->rx_packet_len = NCI_CTRL_HDR_SIZE + nci_plen(nu->rx_skb->data); /* Compute how many bytes are missing and how many bytes can * be consumed. */ chunk_len = nu->rx_packet_len - nu->rx_skb->len; if (count < chunk_len) chunk_len = count; skb_put_data(nu->rx_skb, data, chunk_len); data += chunk_len; count -= chunk_len; /* Check if packet is fully received */ if (nu->rx_packet_len == nu->rx_skb->len) { /* Pass RX packet to driver */ if (nu->ops.recv(nu, nu->rx_skb) != 0) nfc_err(nu->tty->dev, "corrupted RX packet\n"); /* Next packet will be a new one */ nu->rx_skb = NULL; } } return 0; } /* nci_uart_tty_receive() * * Called by the tty low-level driver when receive data is * available. * * Arguments: tty pointer to tty instance data * data pointer to received data * flags pointer to flags for data * count count of received data in bytes * * Return Value: None */ static void nci_uart_tty_receive(struct tty_struct *tty, const u8 *data, const u8 *flags, size_t count) { struct nci_uart *nu = tty->disc_data; if (!nu || tty != nu->tty) return; spin_lock(&nu->rx_lock); nci_uart_default_recv_buf(nu, data, count); spin_unlock(&nu->rx_lock); tty_unthrottle(tty); } /* nci_uart_tty_ioctl() * * Process IOCTL system call for the tty device.
* * Arguments: * * tty pointer to tty instance data * cmd IOCTL command code * arg argument for IOCTL call (cmd dependent) * * Return Value: Command dependent */ static int nci_uart_tty_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) { struct nci_uart *nu = tty->disc_data; int err = 0; switch (cmd) { case NCIUARTSETDRIVER: if (!nu) return nci_uart_set_driver(tty, (unsigned int)arg); else return -EBUSY; break; default: err = n_tty_ioctl_helper(tty, cmd, arg); break; } return err; } /* We don't provide read/write/poll interface for user space. */ static ssize_t nci_uart_tty_read(struct tty_struct *tty, struct file *file, u8 *buf, size_t nr, void **cookie, unsigned long offset) { return 0; } static ssize_t nci_uart_tty_write(struct tty_struct *tty, struct file *file, const u8 *data, size_t count) { return 0; } static int nci_uart_send(struct nci_uart *nu, struct sk_buff *skb) { /* Queue TX packet */ skb_queue_tail(&nu->tx_q, skb); /* Try to start TX (if possible) */ nci_uart_tx_wakeup(nu); return 0; } int nci_uart_register(struct nci_uart *nu) { if (!nu || !nu->ops.open || !nu->ops.recv || !nu->ops.close) return -EINVAL; /* Set the send callback */ nu->ops.send = nci_uart_send; /* Add this driver in the driver list */ if (nci_uart_drivers[nu->driver]) { pr_err("driver %d is already registered\n", nu->driver); return -EBUSY; } nci_uart_drivers[nu->driver] = nu; pr_info("NCI uart driver '%s [%d]' registered\n", nu->name, nu->driver); return 0; } EXPORT_SYMBOL_GPL(nci_uart_register); void nci_uart_unregister(struct nci_uart *nu) { pr_info("NCI uart driver '%s [%d]' unregistered\n", nu->name, nu->driver); /* Remove this driver from the driver list */ nci_uart_drivers[nu->driver] = NULL; } EXPORT_SYMBOL_GPL(nci_uart_unregister); void nci_uart_set_config(struct nci_uart *nu, int baudrate, int flow_ctrl) { struct ktermios new_termios; if (!nu->tty) return; down_read(&nu->tty->termios_rwsem); new_termios = nu->tty->termios; up_read(&nu->tty->termios_rwsem); tty_termios_encode_baud_rate(&new_termios, baudrate, baudrate); if (flow_ctrl) new_termios.c_cflag |= CRTSCTS; else new_termios.c_cflag &= ~CRTSCTS; tty_set_termios(nu->tty, &new_termios); } EXPORT_SYMBOL_GPL(nci_uart_set_config); static struct tty_ldisc_ops nci_uart_ldisc = { .owner = THIS_MODULE, .num = N_NCI, .name = "n_nci", .open = nci_uart_tty_open, .close = nci_uart_tty_close, .read = nci_uart_tty_read, .write = nci_uart_tty_write, .receive_buf = nci_uart_tty_receive, .write_wakeup = nci_uart_tty_wakeup, .ioctl = nci_uart_tty_ioctl, .compat_ioctl = nci_uart_tty_ioctl, }; static int __init nci_uart_init(void) { return tty_register_ldisc(&nci_uart_ldisc); } static void __exit nci_uart_exit(void) { tty_unregister_ldisc(&nci_uart_ldisc); } module_init(nci_uart_init); module_exit(nci_uart_exit); MODULE_AUTHOR("Marvell International Ltd."); MODULE_DESCRIPTION("NFC NCI UART driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS_LDISC(N_NCI); |
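/*
 * Illustrative sketch (not part of the file above): a minimal vendor driver
 * registering with this NCI UART framework. The names demo_uart_open(),
 * demo_uart_close(), demo_uart_recv() and demo_uart are hypothetical; the
 * framework only requires that .ops.open, .ops.recv and .ops.close are set
 * and that .driver selects a slot below NCI_UART_DRIVER_MAX (the existing
 * NCI_UART_DRIVER_MARVELL slot is reused here purely for illustration).
 */
static int demo_uart_open(struct nci_uart *nu)
{
	/* Allocate and register an nci_dev here; nu->ndev must be set before
	 * the default recv_buf handler above will deliver frames.
	 */
	return 0;
}

static void demo_uart_close(struct nci_uart *nu)
{
}

static int demo_uart_recv(struct nci_uart *nu, struct sk_buff *skb)
{
	/* A fully reassembled NCI packet: hand it to the NCI core */
	return nci_recv_frame(nu->ndev, skb);
}

static struct nci_uart demo_uart = {
	.owner	= THIS_MODULE,
	.name	= "demo_uart",
	.driver	= NCI_UART_DRIVER_MARVELL,
	.ops	= {
		.open	= demo_uart_open,
		.close	= demo_uart_close,
		.recv	= demo_uart_recv,
	},
};

/* A vendor module would typically call nci_uart_register(&demo_uart) from its
 * init hook and nci_uart_unregister(&demo_uart) from its exit hook. Userspace
 * then binds a tty to the N_NCI line discipline and selects the driver slot
 * with the NCIUARTSETDRIVER ioctl handled in nci_uart_tty_ioctl() above.
 */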
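/*
 * Orientation sketch for the XDR stream helpers implemented in the
 * net/sunrpc/xdr.c listing that follows (illustrative only, not part of that
 * file): encode a 32-bit word and a variable-length opaque into a one-kvec
 * xdr_buf, then decode them back. The function and variable names
 * (xdr_demo_roundtrip, scratch, blob) are invented for the example; it relies
 * only on helpers defined below plus XDR_QUADLEN and xdr_align_size from
 * <linux/sunrpc/xdr.h>.
 */
static int __maybe_unused xdr_demo_roundtrip(void)
{
	static u8 scratch[64];		/* backing store for head[0] */
	static const u8 blob[] = { 0xde, 0xad, 0xbe, 0xef, 0x01 };
	struct xdr_buf buf = {
		.head[0]	= { .iov_base = scratch, .iov_len = 0 },
		.buflen		= sizeof(scratch),
	};
	struct xdr_stream xdr;
	__be32 *p;
	u32 len;

	/* Encode: reserve space for one u32 plus the length-prefixed,
	 * quad-aligned opaque, then fill it in.
	 */
	xdr_init_encode(&xdr, &buf, NULL, NULL);
	p = xdr_reserve_space(&xdr, 4 + 4 + XDR_QUADLEN(sizeof(blob)) * 4);
	if (!p)
		return -EMSGSIZE;
	*p++ = cpu_to_be32(42);
	xdr_encode_opaque(p, blob, sizeof(blob));

	/* Decode the same buffer back */
	xdr_init_decode(&xdr, &buf, NULL, NULL);
	p = xdr_inline_decode(&xdr, 4);
	if (!p || be32_to_cpup(p) != 42)
		return -EINVAL;
	p = xdr_inline_decode(&xdr, 4);
	if (!p)
		return -EINVAL;
	len = be32_to_cpup(p);
	p = xdr_inline_decode(&xdr, xdr_align_size(len));
	if (!p || len != sizeof(blob) || memcmp(p, blob, len) != 0)
		return -EINVAL;
	return 0;
}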
// SPDX-License-Identifier: GPL-2.0-only /* * linux/net/sunrpc/xdr.c * * Generic XDR support. * * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de> */ #include <linux/module.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/string.h> #include <linux/kernel.h> #include <linux/pagemap.h> #include <linux/errno.h> #include <linux/sunrpc/xdr.h> #include <linux/sunrpc/msg_prot.h> #include <linux/bvec.h> #include <trace/events/sunrpc.h> static void _copy_to_pages(struct page **, size_t, const char *, size_t); /* * XDR functions for basic NFS types */ __be32 * xdr_encode_netobj(__be32 *p, const struct xdr_netobj *obj) { unsigned int quadlen = XDR_QUADLEN(obj->len); p[quadlen] = 0; /* zero trailing bytes */ *p++ = cpu_to_be32(obj->len); memcpy(p, obj->data, obj->len); return p + XDR_QUADLEN(obj->len); } EXPORT_SYMBOL_GPL(xdr_encode_netobj); __be32 * xdr_decode_netobj(__be32 *p, struct xdr_netobj *obj) { unsigned int len; if ((len = be32_to_cpu(*p++)) > XDR_MAX_NETOBJ) return NULL; obj->len = len; obj->data = (u8 *) p; return p + XDR_QUADLEN(len); } EXPORT_SYMBOL_GPL(xdr_decode_netobj); /** * xdr_encode_opaque_fixed - Encode fixed length opaque data * @p: pointer to current position in XDR buffer. * @ptr: pointer to data to encode (or NULL) * @nbytes: size of data. * * Copy the array of data of length nbytes at ptr to the XDR buffer * at position p, then align to the next 32-bit boundary by padding * with zero bytes (see RFC1832). * Note: if ptr is NULL, only the padding is performed. * * Returns the updated current XDR buffer position * */ __be32 *xdr_encode_opaque_fixed(__be32 *p, const void *ptr, unsigned int nbytes) { if (likely(nbytes != 0)) { unsigned int quadlen = XDR_QUADLEN(nbytes); unsigned int padding = (quadlen << 2) - nbytes; if (ptr != NULL) memcpy(p, ptr, nbytes); if (padding != 0) memset((char *)p + nbytes, 0, padding); p += quadlen; } return p; } EXPORT_SYMBOL_GPL(xdr_encode_opaque_fixed); /** * xdr_encode_opaque - Encode variable length opaque data * @p: pointer to current position in XDR buffer. * @ptr: pointer to data to encode (or NULL) * @nbytes: size of data.
* * Returns the updated current XDR buffer position */ __be32 *xdr_encode_opaque(__be32 *p, const void *ptr, unsigned int nbytes) { *p++ = cpu_to_be32(nbytes); return xdr_encode_opaque_fixed(p, ptr, nbytes); } EXPORT_SYMBOL_GPL(xdr_encode_opaque); __be32 * xdr_encode_string(__be32 *p, const char *string) { return xdr_encode_array(p, string, strlen(string)); } EXPORT_SYMBOL_GPL(xdr_encode_string); __be32 * xdr_decode_string_inplace(__be32 *p, char **sp, unsigned int *lenp, unsigned int maxlen) { u32 len; len = be32_to_cpu(*p++); if (len > maxlen) return NULL; *lenp = len; *sp = (char *) p; return p + XDR_QUADLEN(len); } EXPORT_SYMBOL_GPL(xdr_decode_string_inplace); /** * xdr_terminate_string - '\0'-terminate a string residing in an xdr_buf * @buf: XDR buffer where string resides * @len: length of string, in bytes * */ void xdr_terminate_string(const struct xdr_buf *buf, const u32 len) { char *kaddr; kaddr = kmap_atomic(buf->pages[0]); kaddr[buf->page_base + len] = '\0'; kunmap_atomic(kaddr); } EXPORT_SYMBOL_GPL(xdr_terminate_string); size_t xdr_buf_pagecount(const struct xdr_buf *buf) { if (!buf->page_len) return 0; return (buf->page_base + buf->page_len + PAGE_SIZE - 1) >> PAGE_SHIFT; } int xdr_alloc_bvec(struct xdr_buf *buf, gfp_t gfp) { size_t i, n = xdr_buf_pagecount(buf); if (n != 0 && buf->bvec == NULL) { buf->bvec = kmalloc_array(n, sizeof(buf->bvec[0]), gfp); if (!buf->bvec) return -ENOMEM; for (i = 0; i < n; i++) { bvec_set_page(&buf->bvec[i], buf->pages[i], PAGE_SIZE, 0); } } return 0; } void xdr_free_bvec(struct xdr_buf *buf) { kfree(buf->bvec); buf->bvec = NULL; } /** * xdr_buf_to_bvec - Copy components of an xdr_buf into a bio_vec array * @bvec: bio_vec array to populate * @bvec_size: element count of @bio_vec * @xdr: xdr_buf to be copied * * Returns the number of entries consumed in @bvec. 
*/ unsigned int xdr_buf_to_bvec(struct bio_vec *bvec, unsigned int bvec_size, const struct xdr_buf *xdr) { const struct kvec *head = xdr->head; const struct kvec *tail = xdr->tail; unsigned int count = 0; if (head->iov_len) { bvec_set_virt(bvec++, head->iov_base, head->iov_len); ++count; } if (xdr->page_len) { unsigned int offset, len, remaining; struct page **pages = xdr->pages; offset = offset_in_page(xdr->page_base); remaining = xdr->page_len; while (remaining > 0) { len = min_t(unsigned int, remaining, PAGE_SIZE - offset); bvec_set_page(bvec++, *pages++, len, offset); remaining -= len; offset = 0; if (unlikely(++count > bvec_size)) goto bvec_overflow; } } if (tail->iov_len) { bvec_set_virt(bvec, tail->iov_base, tail->iov_len); if (unlikely(++count > bvec_size)) goto bvec_overflow; } return count; bvec_overflow: pr_warn_once("%s: bio_vec array overflow\n", __func__); return count - 1; } EXPORT_SYMBOL_GPL(xdr_buf_to_bvec); /** * xdr_inline_pages - Prepare receive buffer for a large reply * @xdr: xdr_buf into which reply will be placed * @offset: expected offset where data payload will start, in bytes * @pages: vector of struct page pointers * @base: offset in first page where receive should start, in bytes * @len: expected size of the upper layer data payload, in bytes * */ void xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset, struct page **pages, unsigned int base, unsigned int len) { struct kvec *head = xdr->head; struct kvec *tail = xdr->tail; char *buf = (char *)head->iov_base; unsigned int buflen = head->iov_len; head->iov_len = offset; xdr->pages = pages; xdr->page_base = base; xdr->page_len = len; tail->iov_base = buf + offset; tail->iov_len = buflen - offset; xdr->buflen += len; } EXPORT_SYMBOL_GPL(xdr_inline_pages); /* * Helper routines for doing 'memmove' like operations on a struct xdr_buf */ /** * _shift_data_left_pages * @pages: vector of pages containing both the source and dest memory area. * @pgto_base: page vector address of destination * @pgfrom_base: page vector address of source * @len: number of bytes to copy * * Note: the addresses pgto_base and pgfrom_base are both calculated in * the same way: * if a memory area starts at byte 'base' in page 'pages[i]', * then its address is given as (i << PAGE_SHIFT) + base * Also note: pgto_base must be < pgfrom_base, but the memory areas * they point to may overlap. */ static void _shift_data_left_pages(struct page **pages, size_t pgto_base, size_t pgfrom_base, size_t len) { struct page **pgfrom, **pgto; char *vfrom, *vto; size_t copy; BUG_ON(pgfrom_base <= pgto_base); if (!len) return; pgto = pages + (pgto_base >> PAGE_SHIFT); pgfrom = pages + (pgfrom_base >> PAGE_SHIFT); pgto_base &= ~PAGE_MASK; pgfrom_base &= ~PAGE_MASK; do { if (pgto_base >= PAGE_SIZE) { pgto_base = 0; pgto++; } if (pgfrom_base >= PAGE_SIZE) { pgfrom_base = 0; pgfrom++; } copy = len; if (copy > (PAGE_SIZE - pgto_base)) copy = PAGE_SIZE - pgto_base; if (copy > (PAGE_SIZE - pgfrom_base)) copy = PAGE_SIZE - pgfrom_base; vto = kmap_atomic(*pgto); if (*pgto != *pgfrom) { vfrom = kmap_atomic(*pgfrom); memcpy(vto + pgto_base, vfrom + pgfrom_base, copy); kunmap_atomic(vfrom); } else memmove(vto + pgto_base, vto + pgfrom_base, copy); flush_dcache_page(*pgto); kunmap_atomic(vto); pgto_base += copy; pgfrom_base += copy; } while ((len -= copy) != 0); } /** * _shift_data_right_pages * @pages: vector of pages containing both the source and dest memory area.
* @pgto_base: page vector address of destination * @pgfrom_base: page vector address of source * @len: number of bytes to copy * * Note: the addresses pgto_base and pgfrom_base are both calculated in * the same way: * if a memory area starts at byte 'base' in page 'pages[i]', * then its address is given as (i << PAGE_SHIFT) + base * Also note: pgfrom_base must be < pgto_base, but the memory areas * they point to may overlap. */ static void _shift_data_right_pages(struct page **pages, size_t pgto_base, size_t pgfrom_base, size_t len) { struct page **pgfrom, **pgto; char *vfrom, *vto; size_t copy; BUG_ON(pgto_base <= pgfrom_base); if (!len) return; pgto_base += len; pgfrom_base += len; pgto = pages + (pgto_base >> PAGE_SHIFT); pgfrom = pages + (pgfrom_base >> PAGE_SHIFT); pgto_base &= ~PAGE_MASK; pgfrom_base &= ~PAGE_MASK; do { /* Are any pointers crossing a page boundary? */ if (pgto_base == 0) { pgto_base = PAGE_SIZE; pgto--; } if (pgfrom_base == 0) { pgfrom_base = PAGE_SIZE; pgfrom--; } copy = len; if (copy > pgto_base) copy = pgto_base; if (copy > pgfrom_base) copy = pgfrom_base; pgto_base -= copy; pgfrom_base -= copy; vto = kmap_atomic(*pgto); if (*pgto != *pgfrom) { vfrom = kmap_atomic(*pgfrom); memcpy(vto + pgto_base, vfrom + pgfrom_base, copy); kunmap_atomic(vfrom); } else memmove(vto + pgto_base, vto + pgfrom_base, copy); flush_dcache_page(*pgto); kunmap_atomic(vto); } while ((len -= copy) != 0); } /** * _copy_to_pages * @pages: array of pages * @pgbase: page vector address of destination * @p: pointer to source data * @len: length * * Copies data from an arbitrary memory location into an array of pages * The copy is assumed to be non-overlapping. */ static void _copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len) { struct page **pgto; char *vto; size_t copy; if (!len) return; pgto = pages + (pgbase >> PAGE_SHIFT); pgbase &= ~PAGE_MASK; for (;;) { copy = PAGE_SIZE - pgbase; if (copy > len) copy = len; vto = kmap_atomic(*pgto); memcpy(vto + pgbase, p, copy); kunmap_atomic(vto); len -= copy; if (len == 0) break; pgbase += copy; if (pgbase == PAGE_SIZE) { flush_dcache_page(*pgto); pgbase = 0; pgto++; } p += copy; } flush_dcache_page(*pgto); } /** * _copy_from_pages * @p: pointer to destination * @pages: array of pages * @pgbase: offset of source data * @len: length * * Copies data into an arbitrary memory location from an array of pages * The copy is assumed to be non-overlapping. 
*/ void _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len) { struct page **pgfrom; char *vfrom; size_t copy; if (!len) return; pgfrom = pages + (pgbase >> PAGE_SHIFT); pgbase &= ~PAGE_MASK; do { copy = PAGE_SIZE - pgbase; if (copy > len) copy = len; vfrom = kmap_atomic(*pgfrom); memcpy(p, vfrom + pgbase, copy); kunmap_atomic(vfrom); pgbase += copy; if (pgbase == PAGE_SIZE) { pgbase = 0; pgfrom++; } p += copy; } while ((len -= copy) != 0); } EXPORT_SYMBOL_GPL(_copy_from_pages); static void xdr_buf_iov_zero(const struct kvec *iov, unsigned int base, unsigned int len) { if (base >= iov->iov_len) return; if (len > iov->iov_len - base) len = iov->iov_len - base; memset(iov->iov_base + base, 0, len); } /** * xdr_buf_pages_zero * @buf: xdr_buf * @pgbase: beginning offset * @len: length */ static void xdr_buf_pages_zero(const struct xdr_buf *buf, unsigned int pgbase, unsigned int len) { struct page **pages = buf->pages; struct page **page; char *vpage; unsigned int zero; if (!len) return; if (pgbase >= buf->page_len) { xdr_buf_iov_zero(buf->tail, pgbase - buf->page_len, len); return; } if (pgbase + len > buf->page_len) { xdr_buf_iov_zero(buf->tail, 0, pgbase + len - buf->page_len); len = buf->page_len - pgbase; } pgbase += buf->page_base; page = pages + (pgbase >> PAGE_SHIFT); pgbase &= ~PAGE_MASK; do { zero = PAGE_SIZE - pgbase; if (zero > len) zero = len; vpage = kmap_atomic(*page); memset(vpage + pgbase, 0, zero); kunmap_atomic(vpage); flush_dcache_page(*page); pgbase = 0; page++; } while ((len -= zero) != 0); } static unsigned int xdr_buf_pages_fill_sparse(const struct xdr_buf *buf, unsigned int buflen, gfp_t gfp) { unsigned int i, npages, pagelen; if (!(buf->flags & XDRBUF_SPARSE_PAGES)) return buflen; if (buflen <= buf->head->iov_len) return buflen; pagelen = buflen - buf->head->iov_len; if (pagelen > buf->page_len) pagelen = buf->page_len; npages = (pagelen + buf->page_base + PAGE_SIZE - 1) >> PAGE_SHIFT; for (i = 0; i < npages; i++) { if (buf->pages[i]) continue; buf->pages[i] = alloc_page(gfp); if (likely(buf->pages[i])) continue; buflen -= pagelen; pagelen = i << PAGE_SHIFT; if (pagelen > buf->page_base) buflen += pagelen - buf->page_base; break; } return buflen; } static void xdr_buf_try_expand(struct xdr_buf *buf, unsigned int len) { struct kvec *head = buf->head; struct kvec *tail = buf->tail; unsigned int sum = head->iov_len + buf->page_len + tail->iov_len; unsigned int free_space, newlen; if (sum > buf->len) { free_space = min_t(unsigned int, sum - buf->len, len); newlen = xdr_buf_pages_fill_sparse(buf, buf->len + free_space, GFP_KERNEL); free_space = newlen - buf->len; buf->len = newlen; len -= free_space; if (!len) return; } if (buf->buflen > sum) { /* Expand the tail buffer */ free_space = min_t(unsigned int, buf->buflen - sum, len); tail->iov_len += free_space; buf->len += free_space; } } static void xdr_buf_tail_copy_right(const struct xdr_buf *buf, unsigned int base, unsigned int len, unsigned int shift) { const struct kvec *tail = buf->tail; unsigned int to = base + shift; if (to >= tail->iov_len) return; if (len + to > tail->iov_len) len = tail->iov_len - to; memmove(tail->iov_base + to, tail->iov_base + base, len); } static void xdr_buf_pages_copy_right(const struct xdr_buf *buf, unsigned int base, unsigned int len, unsigned int shift) { const struct kvec *tail = buf->tail; unsigned int to = base + shift; unsigned int pglen = 0; unsigned int talen = 0, tato = 0; if (base >= buf->page_len) return; if (len > buf->page_len - base) len = buf->page_len - base;
if (to >= buf->page_len) { tato = to - buf->page_len; if (tail->iov_len >= len + tato) talen = len; else if (tail->iov_len > tato) talen = tail->iov_len - tato; } else if (len + to >= buf->page_len) { pglen = buf->page_len - to; talen = len - pglen; if (talen > tail->iov_len) talen = tail->iov_len; } else pglen = len; _copy_from_pages(tail->iov_base + tato, buf->pages, buf->page_base + base + pglen, talen); _shift_data_right_pages(buf->pages, buf->page_base + to, buf->page_base + base, pglen); } static void xdr_buf_head_copy_right(const struct xdr_buf *buf, unsigned int base, unsigned int len, unsigned int shift) { const struct kvec *head = buf->head; const struct kvec *tail = buf->tail; unsigned int to = base + shift; unsigned int pglen = 0, pgto = 0; unsigned int talen = 0, tato = 0; if (base >= head->iov_len) return; if (len > head->iov_len - base) len = head->iov_len - base; if (to >= buf->page_len + head->iov_len) { tato = to - buf->page_len - head->iov_len; talen = len; } else if (to >= head->iov_len) { pgto = to - head->iov_len; pglen = len; if (pgto + pglen > buf->page_len) { talen = pgto + pglen - buf->page_len; pglen -= talen; } } else { pglen = len - to; if (pglen > buf->page_len) { talen = pglen - buf->page_len; pglen = buf->page_len; } } len -= talen; base += len; if (talen + tato > tail->iov_len) talen = tail->iov_len > tato ? tail->iov_len - tato : 0; memcpy(tail->iov_base + tato, head->iov_base + base, talen); len -= pglen; base -= pglen; _copy_to_pages(buf->pages, buf->page_base + pgto, head->iov_base + base, pglen); base -= len; memmove(head->iov_base + to, head->iov_base + base, len); } static void xdr_buf_tail_shift_right(const struct xdr_buf *buf, unsigned int base, unsigned int len, unsigned int shift) { const struct kvec *tail = buf->tail; if (base >= tail->iov_len || !shift || !len) return; xdr_buf_tail_copy_right(buf, base, len, shift); } static void xdr_buf_pages_shift_right(const struct xdr_buf *buf, unsigned int base, unsigned int len, unsigned int shift) { if (!shift || !len) return; if (base >= buf->page_len) { xdr_buf_tail_shift_right(buf, base - buf->page_len, len, shift); return; } if (base + len > buf->page_len) xdr_buf_tail_shift_right(buf, 0, base + len - buf->page_len, shift); xdr_buf_pages_copy_right(buf, base, len, shift); } static void xdr_buf_head_shift_right(const struct xdr_buf *buf, unsigned int base, unsigned int len, unsigned int shift) { const struct kvec *head = buf->head; if (!shift) return; if (base >= head->iov_len) { xdr_buf_pages_shift_right(buf, base - head->iov_len, len, shift); return; } if (base + len > head->iov_len) xdr_buf_pages_shift_right(buf, 0, base + len - head->iov_len, shift); xdr_buf_head_copy_right(buf, base, len, shift); } static void xdr_buf_tail_copy_left(const struct xdr_buf *buf, unsigned int base, unsigned int len, unsigned int shift) { const struct kvec *tail = buf->tail; if (base >= tail->iov_len) return; if (len > tail->iov_len - base) len = tail->iov_len - base; /* Shift data into head */ if (shift > buf->page_len + base) { const struct kvec *head = buf->head; unsigned int hdto = head->iov_len + buf->page_len + base - shift; unsigned int hdlen = len; if (WARN_ONCE(shift > head->iov_len + buf->page_len + base, "SUNRPC: Misaligned data.\n")) return; if (hdto + hdlen > head->iov_len) hdlen = head->iov_len - hdto; memcpy(head->iov_base + hdto, tail->iov_base + base, hdlen); base += hdlen; len -= hdlen; if (!len) return; } /* Shift data into pages */ if (shift > base) { unsigned int pgto = buf->page_len + base
- shift; unsigned int pglen = len; if (pgto + pglen > buf->page_len) pglen = buf->page_len - pgto; _copy_to_pages(buf->pages, buf->page_base + pgto, tail->iov_base + base, pglen); base += pglen; len -= pglen; if (!len) return; } memmove(tail->iov_base + base - shift, tail->iov_base + base, len); } static void xdr_buf_pages_copy_left(const struct xdr_buf *buf, unsigned int base, unsigned int len, unsigned int shift) { unsigned int pgto; if (base >= buf->page_len) return; if (len > buf->page_len - base) len = buf->page_len - base; /* Shift data into head */ if (shift > base) { const struct kvec *head = buf->head; unsigned int hdto = head->iov_len + base - shift; unsigned int hdlen = len; if (WARN_ONCE(shift > head->iov_len + base, "SUNRPC: Misaligned data.\n")) return; if (hdto + hdlen > head->iov_len) hdlen = head->iov_len - hdto; _copy_from_pages(head->iov_base + hdto, buf->pages, buf->page_base + base, hdlen); base += hdlen; len -= hdlen; if (!len) return; } pgto = base - shift; _shift_data_left_pages(buf->pages, buf->page_base + pgto, buf->page_base + base, len); } static void xdr_buf_tail_shift_left(const struct xdr_buf *buf, unsigned int base, unsigned int len, unsigned int shift) { if (!shift || !len) return; xdr_buf_tail_copy_left(buf, base, len, shift); } static void xdr_buf_pages_shift_left(const struct xdr_buf *buf, unsigned int base, unsigned int len, unsigned int shift) { if (!shift || !len) return; if (base >= buf->page_len) { xdr_buf_tail_shift_left(buf, base - buf->page_len, len, shift); return; } xdr_buf_pages_copy_left(buf, base, len, shift); len += base; if (len <= buf->page_len) return; xdr_buf_tail_copy_left(buf, 0, len - buf->page_len, shift); } static void xdr_buf_head_shift_left(const struct xdr_buf *buf, unsigned int base, unsigned int len, unsigned int shift) { const struct kvec *head = buf->head; unsigned int bytes; if (!shift || !len) return; if (shift > base) { bytes = (shift - base); if (bytes >= len) return; base += bytes; len -= bytes; } if (base < head->iov_len) { bytes = min_t(unsigned int, len, head->iov_len - base); memmove(head->iov_base + (base - shift), head->iov_base + base, bytes); base += bytes; len -= bytes; } xdr_buf_pages_shift_left(buf, base - head->iov_len, len, shift); } /** * xdr_shrink_bufhead * @buf: xdr_buf * @len: new length of buf->head[0] * * Shrinks XDR buffer's header kvec buf->head[0], setting it to * 'len' bytes. The extra data is not lost, but is instead * moved into the inlined pages and/or the tail. */ static unsigned int xdr_shrink_bufhead(struct xdr_buf *buf, unsigned int len) { struct kvec *head = buf->head; unsigned int shift, buflen = max(buf->len, len); WARN_ON_ONCE(len > head->iov_len); if (head->iov_len > buflen) { buf->buflen -= head->iov_len - buflen; head->iov_len = buflen; } if (len >= head->iov_len) return 0; shift = head->iov_len - len; xdr_buf_try_expand(buf, shift); xdr_buf_head_shift_right(buf, len, buflen - len, shift); head->iov_len = len; buf->buflen -= shift; buf->len -= shift; return shift; } /** * xdr_shrink_pagelen - shrinks buf->pages to @len bytes * @buf: xdr_buf * @len: new page buffer length * * The extra data is not lost, but is instead moved into buf->tail. * Returns the actual number of bytes moved. 
*/ static unsigned int xdr_shrink_pagelen(struct xdr_buf *buf, unsigned int len) { unsigned int shift, buflen = buf->len - buf->head->iov_len; WARN_ON_ONCE(len > buf->page_len); if (buf->head->iov_len >= buf->len || len > buflen) buflen = len; if (buf->page_len > buflen) { buf->buflen -= buf->page_len - buflen; buf->page_len = buflen; } if (len >= buf->page_len) return 0; shift = buf->page_len - len; xdr_buf_try_expand(buf, shift); xdr_buf_pages_shift_right(buf, len, buflen - len, shift); buf->page_len = len; buf->len -= shift; buf->buflen -= shift; return shift; } /** * xdr_stream_pos - Return the current offset from the start of the xdr_stream * @xdr: pointer to struct xdr_stream */ unsigned int xdr_stream_pos(const struct xdr_stream *xdr) { return (unsigned int)(XDR_QUADLEN(xdr->buf->len) - xdr->nwords) << 2; } EXPORT_SYMBOL_GPL(xdr_stream_pos); static void xdr_stream_set_pos(struct xdr_stream *xdr, unsigned int pos) { unsigned int blen = xdr->buf->len; xdr->nwords = blen > pos ? XDR_QUADLEN(blen) - XDR_QUADLEN(pos) : 0; } static void xdr_stream_page_set_pos(struct xdr_stream *xdr, unsigned int pos) { xdr_stream_set_pos(xdr, pos + xdr->buf->head[0].iov_len); } /** * xdr_page_pos - Return the current offset from the start of the xdr pages * @xdr: pointer to struct xdr_stream */ unsigned int xdr_page_pos(const struct xdr_stream *xdr) { unsigned int pos = xdr_stream_pos(xdr); WARN_ON(pos < xdr->buf->head[0].iov_len); return pos - xdr->buf->head[0].iov_len; } EXPORT_SYMBOL_GPL(xdr_page_pos); /** * xdr_init_encode - Initialize a struct xdr_stream for sending data. * @xdr: pointer to xdr_stream struct * @buf: pointer to XDR buffer in which to encode data * @p: current pointer inside XDR buffer * @rqst: pointer to controlling rpc_rqst, for debugging * * Note: at the moment the RPC client only passes the length of our * scratch buffer in the xdr_buf's header kvec. Previously this * meant we needed to call xdr_adjust_iovec() after encoding the * data. With the new scheme, the xdr_stream manages the details * of the buffer length, and takes care of adjusting the kvec * length for us. 
*/ void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p, struct rpc_rqst *rqst) { struct kvec *iov = buf->head; int scratch_len = buf->buflen - buf->page_len - buf->tail[0].iov_len; xdr_reset_scratch_buffer(xdr); BUG_ON(scratch_len < 0); xdr->buf = buf; xdr->iov = iov; xdr->p = (__be32 *)((char *)iov->iov_base + iov->iov_len); xdr->end = (__be32 *)((char *)iov->iov_base + scratch_len); BUG_ON(iov->iov_len > scratch_len); if (p != xdr->p && p != NULL) { size_t len; BUG_ON(p < xdr->p || p > xdr->end); len = (char *)p - (char *)xdr->p; xdr->p = p; buf->len += len; iov->iov_len += len; } xdr->rqst = rqst; } EXPORT_SYMBOL_GPL(xdr_init_encode); /** * xdr_init_encode_pages - Initialize an xdr_stream for encoding into pages * @xdr: pointer to xdr_stream struct * @buf: pointer to XDR buffer into which to encode data * @pages: list of pages to decode into * @rqst: pointer to controlling rpc_rqst, for debugging * */ void xdr_init_encode_pages(struct xdr_stream *xdr, struct xdr_buf *buf, struct page **pages, struct rpc_rqst *rqst) { xdr_reset_scratch_buffer(xdr); xdr->buf = buf; xdr->page_ptr = pages; xdr->iov = NULL; xdr->p = page_address(*pages); xdr->end = (void *)xdr->p + min_t(u32, buf->buflen, PAGE_SIZE); xdr->rqst = rqst; } EXPORT_SYMBOL_GPL(xdr_init_encode_pages); /** * __xdr_commit_encode - Ensure all data is written to buffer * @xdr: pointer to xdr_stream * * We handle encoding across page boundaries by giving the caller a * temporary location to write to, then later copying the data into * place; xdr_commit_encode does that copying. * * Normally the caller doesn't need to call this directly, as the * following xdr_reserve_space will do it. But an explicit call may be * required at the end of encoding, or any other time when the xdr_buf * data might be read. */ void __xdr_commit_encode(struct xdr_stream *xdr) { size_t shift = xdr->scratch.iov_len; void *page; page = page_address(*xdr->page_ptr); memcpy(xdr->scratch.iov_base, page, shift); memmove(page, page + shift, (void *)xdr->p - page); xdr_reset_scratch_buffer(xdr); } EXPORT_SYMBOL_GPL(__xdr_commit_encode); /* * The buffer space to be reserved crosses the boundary between * xdr->buf->head and xdr->buf->pages, or between two pages * in xdr->buf->pages. */ static noinline __be32 *xdr_get_next_encode_buffer(struct xdr_stream *xdr, size_t nbytes) { int space_left; int frag1bytes, frag2bytes; void *p; if (nbytes > PAGE_SIZE) goto out_overflow; /* Bigger buffers require special handling */ if (xdr->buf->len + nbytes > xdr->buf->buflen) goto out_overflow; /* Sorry, we're totally out of space */ frag1bytes = (xdr->end - xdr->p) << 2; frag2bytes = nbytes - frag1bytes; if (xdr->iov) xdr->iov->iov_len += frag1bytes; else xdr->buf->page_len += frag1bytes; xdr->page_ptr++; xdr->iov = NULL; /* * If the last encode didn't end exactly on a page boundary, the * next one will straddle boundaries. Encode into the next * page, then copy it back later in xdr_commit_encode. 
We use * the "scratch" iov to track any temporarily unused fragment of * space at the end of the previous buffer: */ xdr_set_scratch_buffer(xdr, xdr->p, frag1bytes); /* * xdr->p is where the next encode will start after * xdr_commit_encode() has shifted this one back: */ p = page_address(*xdr->page_ptr); xdr->p = p + frag2bytes; space_left = xdr->buf->buflen - xdr->buf->len; if (space_left - frag1bytes >= PAGE_SIZE) xdr->end = p + PAGE_SIZE; else xdr->end = p + space_left - frag1bytes; xdr->buf->page_len += frag2bytes; xdr->buf->len += nbytes; return p; out_overflow: trace_rpc_xdr_overflow(xdr, nbytes); return NULL; } /** * xdr_reserve_space - Reserve buffer space for sending * @xdr: pointer to xdr_stream * @nbytes: number of bytes to reserve * * Checks that we have enough buffer space to encode 'nbytes' more * bytes of data. If so, update the total xdr_buf length, and * adjust the length of the current kvec. * * The returned pointer is valid only until the next call to * xdr_reserve_space() or xdr_commit_encode() on @xdr. The current * implementation of this API guarantees that space reserved for a * four-byte data item remains valid until @xdr is destroyed, but * that might not always be true in the future. */ __be32 * xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes) { __be32 *p = xdr->p; __be32 *q; xdr_commit_encode(xdr); /* align nbytes on the next 32-bit boundary */ nbytes += 3; nbytes &= ~3; q = p + (nbytes >> 2); if (unlikely(q > xdr->end || q < p)) return xdr_get_next_encode_buffer(xdr, nbytes); xdr->p = q; if (xdr->iov) xdr->iov->iov_len += nbytes; else xdr->buf->page_len += nbytes; xdr->buf->len += nbytes; return p; } EXPORT_SYMBOL_GPL(xdr_reserve_space); /** * xdr_reserve_space_vec - Reserves a large amount of buffer space for sending * @xdr: pointer to xdr_stream * @nbytes: number of bytes to reserve * * The size argument passed to xdr_reserve_space() is determined based * on the number of bytes remaining in the current page to avoid * invalidating iov_base pointers when xdr_commit_encode() is called. * * Return values: * %0: success * %-EMSGSIZE: not enough space is available in @xdr */ int xdr_reserve_space_vec(struct xdr_stream *xdr, size_t nbytes) { size_t thislen; __be32 *p; /* * svcrdma requires every READ payload to start somewhere * in xdr->pages. */ if (xdr->iov == xdr->buf->head) { xdr->iov = NULL; xdr->end = xdr->p; } /* XXX: Let's find a way to make this more efficient */ while (nbytes) { thislen = xdr->buf->page_len % PAGE_SIZE; thislen = min_t(size_t, nbytes, PAGE_SIZE - thislen); p = xdr_reserve_space(xdr, thislen); if (!p) return -EMSGSIZE; nbytes -= thislen; } return 0; } EXPORT_SYMBOL_GPL(xdr_reserve_space_vec); /** * xdr_truncate_encode - truncate an encode buffer * @xdr: pointer to xdr_stream * @len: new length of buffer * * Truncates the xdr stream, so that xdr->buf->len == len, * and xdr->p points at offset len from the start of the buffer, and * head, tail, and page lengths are adjusted to correspond. * * If this means moving xdr->p to a different buffer, we assume that * the end pointer should be set to the end of the current page, * except in the case of the head buffer when we assume the head * buffer's current length represents the end of the available buffer. * * This is *not* safe to use on a buffer that already has inlined page * cache pages (as in a zero-copy server read reply), except for the * simple case of truncating from one position in the tail to another. 
* */ void xdr_truncate_encode(struct xdr_stream *xdr, size_t len) { struct xdr_buf *buf = xdr->buf; struct kvec *head = buf->head; struct kvec *tail = buf->tail; int fraglen; int new; if (len > buf->len) { WARN_ON_ONCE(1); return; } xdr_commit_encode(xdr); fraglen = min_t(int, buf->len - len, tail->iov_len); tail->iov_len -= fraglen; buf->len -= fraglen; if (tail->iov_len) { xdr->p = tail->iov_base + tail->iov_len; WARN_ON_ONCE(!xdr->end); WARN_ON_ONCE(!xdr->iov); return; } WARN_ON_ONCE(fraglen); fraglen = min_t(int, buf->len - len, buf->page_len); buf->page_len -= fraglen; buf->len -= fraglen; new = buf->page_base + buf->page_len; xdr->page_ptr = buf->pages + (new >> PAGE_SHIFT); if (buf->page_len) { xdr->p = page_address(*xdr->page_ptr); xdr->end = (void *)xdr->p + PAGE_SIZE; xdr->p = (void *)xdr->p + (new % PAGE_SIZE); WARN_ON_ONCE(xdr->iov); return; } if (fraglen) xdr->end = head->iov_base + head->iov_len; /* (otherwise assume xdr->end is already set) */ xdr->page_ptr--; head->iov_len = len; buf->len = len; xdr->p = head->iov_base + head->iov_len; xdr->iov = buf->head; } EXPORT_SYMBOL(xdr_truncate_encode); /** * xdr_truncate_decode - Truncate a decoding stream * @xdr: pointer to struct xdr_stream * @len: Number of bytes to remove * */ void xdr_truncate_decode(struct xdr_stream *xdr, size_t len) { unsigned int nbytes = xdr_align_size(len); xdr->buf->len -= nbytes; xdr->nwords -= XDR_QUADLEN(nbytes); } EXPORT_SYMBOL_GPL(xdr_truncate_decode); /** * xdr_restrict_buflen - decrease available buffer space * @xdr: pointer to xdr_stream * @newbuflen: new maximum number of bytes available * * Adjust our idea of how much space is available in the buffer. * If we've already used too much space in the buffer, returns -1. * If the available space is already smaller than newbuflen, returns 0 * and does nothing. Otherwise, adjusts xdr->buf->buflen to newbuflen * and ensures xdr->end is set at most offset newbuflen from the start * of the buffer. */ int xdr_restrict_buflen(struct xdr_stream *xdr, int newbuflen) { struct xdr_buf *buf = xdr->buf; int left_in_this_buf = (void *)xdr->end - (void *)xdr->p; int end_offset = buf->len + left_in_this_buf; if (newbuflen < 0 || newbuflen < buf->len) return -1; if (newbuflen > buf->buflen) return 0; if (newbuflen < end_offset) xdr->end = (void *)xdr->end + newbuflen - end_offset; buf->buflen = newbuflen; return 0; } EXPORT_SYMBOL(xdr_restrict_buflen); /** * xdr_write_pages - Insert a list of pages into an XDR buffer for sending * @xdr: pointer to xdr_stream * @pages: array of pages to insert * @base: starting offset of first data byte in @pages * @len: number of data bytes in @pages to insert * * After the @pages are added, the tail iovec is instantiated pointing to * end of the head buffer, and the stream is set up to encode subsequent * items into the tail. 
*/ void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base, unsigned int len) { struct xdr_buf *buf = xdr->buf; struct kvec *tail = buf->tail; buf->pages = pages; buf->page_base = base; buf->page_len = len; tail->iov_base = xdr->p; tail->iov_len = 0; xdr->iov = tail; if (len & 3) { unsigned int pad = 4 - (len & 3); BUG_ON(xdr->p >= xdr->end); tail->iov_base = (char *)xdr->p + (len & 3); tail->iov_len += pad; len += pad; *xdr->p++ = 0; } buf->buflen += len; buf->len += len; } EXPORT_SYMBOL_GPL(xdr_write_pages); static unsigned int xdr_set_iov(struct xdr_stream *xdr, struct kvec *iov, unsigned int base, unsigned int len) { if (len > iov->iov_len) len = iov->iov_len; if (unlikely(base > len)) base = len; xdr->p = (__be32*)(iov->iov_base + base); xdr->end = (__be32*)(iov->iov_base + len); xdr->iov = iov; xdr->page_ptr = NULL; return len - base; } static unsigned int xdr_set_tail_base(struct xdr_stream *xdr, unsigned int base, unsigned int len) { struct xdr_buf *buf = xdr->buf; xdr_stream_set_pos(xdr, base + buf->page_len + buf->head->iov_len); return xdr_set_iov(xdr, buf->tail, base, len); } static void xdr_stream_unmap_current_page(struct xdr_stream *xdr) { if (xdr->page_kaddr) { kunmap_local(xdr->page_kaddr); xdr->page_kaddr = NULL; } } static unsigned int xdr_set_page_base(struct xdr_stream *xdr, unsigned int base, unsigned int len) { unsigned int pgnr; unsigned int maxlen; unsigned int pgoff; unsigned int pgend; void *kaddr; maxlen = xdr->buf->page_len; if (base >= maxlen) return 0; else maxlen -= base; if (len > maxlen) len = maxlen; xdr_stream_unmap_current_page(xdr); xdr_stream_page_set_pos(xdr, base); base += xdr->buf->page_base; pgnr = base >> PAGE_SHIFT; xdr->page_ptr = &xdr->buf->pages[pgnr]; if (PageHighMem(*xdr->page_ptr)) { xdr->page_kaddr = kmap_local_page(*xdr->page_ptr); kaddr = xdr->page_kaddr; } else kaddr = page_address(*xdr->page_ptr); pgoff = base & ~PAGE_MASK; xdr->p = (__be32*)(kaddr + pgoff); pgend = pgoff + len; if (pgend > PAGE_SIZE) pgend = PAGE_SIZE; xdr->end = (__be32*)(kaddr + pgend); xdr->iov = NULL; return len; } static void xdr_set_page(struct xdr_stream *xdr, unsigned int base, unsigned int len) { if (xdr_set_page_base(xdr, base, len) == 0) { base -= xdr->buf->page_len; xdr_set_tail_base(xdr, base, len); } } static void xdr_set_next_page(struct xdr_stream *xdr) { unsigned int newbase; newbase = (1 + xdr->page_ptr - xdr->buf->pages) << PAGE_SHIFT; newbase -= xdr->buf->page_base; if (newbase < xdr->buf->page_len) xdr_set_page_base(xdr, newbase, xdr_stream_remaining(xdr)); else xdr_set_tail_base(xdr, 0, xdr_stream_remaining(xdr)); } static bool xdr_set_next_buffer(struct xdr_stream *xdr) { if (xdr->page_ptr != NULL) xdr_set_next_page(xdr); else if (xdr->iov == xdr->buf->head) xdr_set_page(xdr, 0, xdr_stream_remaining(xdr)); return xdr->p != xdr->end; } /** * xdr_init_decode - Initialize an xdr_stream for decoding data. 
* @xdr: pointer to xdr_stream struct * @buf: pointer to XDR buffer from which to decode data * @p: current pointer inside XDR buffer * @rqst: pointer to controlling rpc_rqst, for debugging */ void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p, struct rpc_rqst *rqst) { xdr->buf = buf; xdr->page_kaddr = NULL; xdr_reset_scratch_buffer(xdr); xdr->nwords = XDR_QUADLEN(buf->len); if (xdr_set_iov(xdr, buf->head, 0, buf->len) == 0 && xdr_set_page_base(xdr, 0, buf->len) == 0) xdr_set_iov(xdr, buf->tail, 0, buf->len); if (p != NULL && p > xdr->p && xdr->end >= p) { xdr->nwords -= p - xdr->p; xdr->p = p; } xdr->rqst = rqst; } EXPORT_SYMBOL_GPL(xdr_init_decode); /** * xdr_init_decode_pages - Initialize an xdr_stream for decoding into pages * @xdr: pointer to xdr_stream struct * @buf: pointer to XDR buffer from which to decode data * @pages: list of pages to decode into * @len: length in bytes of buffer in pages */ void xdr_init_decode_pages(struct xdr_stream *xdr, struct xdr_buf *buf, struct page **pages, unsigned int len) { memset(buf, 0, sizeof(*buf)); buf->pages = pages; buf->page_len = len; buf->buflen = len; buf->len = len; xdr_init_decode(xdr, buf, NULL, NULL); } EXPORT_SYMBOL_GPL(xdr_init_decode_pages); /** * xdr_finish_decode - Clean up the xdr_stream after decoding data. * @xdr: pointer to xdr_stream struct */ void xdr_finish_decode(struct xdr_stream *xdr) { xdr_stream_unmap_current_page(xdr); } EXPORT_SYMBOL(xdr_finish_decode); static __be32 * __xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes) { unsigned int nwords = XDR_QUADLEN(nbytes); __be32 *p = xdr->p; __be32 *q = p + nwords; if (unlikely(nwords > xdr->nwords || q > xdr->end || q < p)) return NULL; xdr->p = q; xdr->nwords -= nwords; return p; } static __be32 *xdr_copy_to_scratch(struct xdr_stream *xdr, size_t nbytes) { __be32 *p; char *cpdest = xdr->scratch.iov_base; size_t cplen = (char *)xdr->end - (char *)xdr->p; if (nbytes > xdr->scratch.iov_len) goto out_overflow; p = __xdr_inline_decode(xdr, cplen); if (p == NULL) return NULL; memcpy(cpdest, p, cplen); if (!xdr_set_next_buffer(xdr)) goto out_overflow; cpdest += cplen; nbytes -= cplen; p = __xdr_inline_decode(xdr, nbytes); if (p == NULL) return NULL; memcpy(cpdest, p, nbytes); return xdr->scratch.iov_base; out_overflow: trace_rpc_xdr_overflow(xdr, nbytes); return NULL; } /** * xdr_inline_decode - Retrieve XDR data to decode * @xdr: pointer to xdr_stream struct * @nbytes: number of bytes of data to decode * * Check if the input buffer is long enough to enable us to decode * 'nbytes' more bytes of data starting at the current position. * If so return the current pointer, then update the current * pointer position. 
*/ __be32 * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes) { __be32 *p; if (unlikely(nbytes == 0)) return xdr->p; if (xdr->p == xdr->end && !xdr_set_next_buffer(xdr)) goto out_overflow; p = __xdr_inline_decode(xdr, nbytes); if (p != NULL) return p; return xdr_copy_to_scratch(xdr, nbytes); out_overflow: trace_rpc_xdr_overflow(xdr, nbytes); return NULL; } EXPORT_SYMBOL_GPL(xdr_inline_decode); static void xdr_realign_pages(struct xdr_stream *xdr) { struct xdr_buf *buf = xdr->buf; struct kvec *iov = buf->head; unsigned int cur = xdr_stream_pos(xdr); unsigned int copied; /* Realign pages to current pointer position */ if (iov->iov_len > cur) { copied = xdr_shrink_bufhead(buf, cur); trace_rpc_xdr_alignment(xdr, cur, copied); xdr_set_page(xdr, 0, buf->page_len); } } static unsigned int xdr_align_pages(struct xdr_stream *xdr, unsigned int len) { struct xdr_buf *buf = xdr->buf; unsigned int nwords = XDR_QUADLEN(len); unsigned int copied; if (xdr->nwords == 0) return 0; xdr_realign_pages(xdr); if (nwords > xdr->nwords) { nwords = xdr->nwords; len = nwords << 2; } if (buf->page_len <= len) len = buf->page_len; else if (nwords < xdr->nwords) { /* Truncate page data and move it into the tail */ copied = xdr_shrink_pagelen(buf, len); trace_rpc_xdr_alignment(xdr, len, copied); } return len; } /** * xdr_read_pages - align page-based XDR data to current pointer position * @xdr: pointer to xdr_stream struct * @len: number of bytes of page data * * Moves data beyond the current pointer position from the XDR head[] buffer * into the page list. Any data that lies beyond current position + @len * bytes is moved into the XDR tail[]. The xdr_stream current position is * then advanced past that data to align to the next XDR object in the tail. * * Returns the number of XDR encoded bytes now contained in the pages */ unsigned int xdr_read_pages(struct xdr_stream *xdr, unsigned int len) { unsigned int nwords = XDR_QUADLEN(len); unsigned int base, end, pglen; pglen = xdr_align_pages(xdr, nwords << 2); if (pglen == 0) return 0; base = (nwords << 2) - pglen; end = xdr_stream_remaining(xdr) - pglen; xdr_set_tail_base(xdr, base, end); return len <= pglen ? len : pglen; } EXPORT_SYMBOL_GPL(xdr_read_pages); /** * xdr_set_pagelen - Sets the length of the XDR pages * @xdr: pointer to xdr_stream struct * @len: new length of the XDR page data * * Either grows or shrinks the length of the xdr pages by setting pagelen to * @len bytes. When shrinking, any extra data is moved into buf->tail, whereas * when growing any data beyond the current pointer is moved into the tail. * * Returns True if the operation was successful, and False otherwise. */ void xdr_set_pagelen(struct xdr_stream *xdr, unsigned int len) { struct xdr_buf *buf = xdr->buf; size_t remaining = xdr_stream_remaining(xdr); size_t base = 0; if (len < buf->page_len) { base = buf->page_len - len; xdr_shrink_pagelen(buf, len); } else { xdr_buf_head_shift_right(buf, xdr_stream_pos(xdr), buf->page_len, remaining); if (len > buf->page_len) xdr_buf_try_expand(buf, len - buf->page_len); } xdr_set_tail_base(xdr, base, remaining); } EXPORT_SYMBOL_GPL(xdr_set_pagelen); /** * xdr_enter_page - decode data from the XDR page * @xdr: pointer to xdr_stream struct * @len: number of bytes of page data * * Moves data beyond the current pointer position from the XDR head[] buffer * into the page list. Any data that lies beyond current position + "len" * bytes is moved into the XDR tail[]. The current pointer is then * repositioned at the beginning of the first XDR page. 
*/ void xdr_enter_page(struct xdr_stream *xdr, unsigned int len) { len = xdr_align_pages(xdr, len); /* * Position current pointer at beginning of tail, and * set remaining message length. */ if (len != 0) xdr_set_page_base(xdr, 0, len); } EXPORT_SYMBOL_GPL(xdr_enter_page); static const struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0}; void xdr_buf_from_iov(const struct kvec *iov, struct xdr_buf *buf) { buf->head[0] = *iov; buf->tail[0] = empty_iov; buf->page_len = 0; buf->buflen = buf->len = iov->iov_len; } EXPORT_SYMBOL_GPL(xdr_buf_from_iov); /** * xdr_buf_subsegment - set subbuf to a portion of buf * @buf: an xdr buffer * @subbuf: the result buffer * @base: beginning of range in bytes * @len: length of range in bytes * * sets @subbuf to an xdr buffer representing the portion of @buf of * length @len starting at offset @base. * * @buf and @subbuf may be pointers to the same struct xdr_buf. * * Returns -1 if base or length are out of bounds. */ int xdr_buf_subsegment(const struct xdr_buf *buf, struct xdr_buf *subbuf, unsigned int base, unsigned int len) { subbuf->buflen = subbuf->len = len; if (base < buf->head[0].iov_len) { subbuf->head[0].iov_base = buf->head[0].iov_base + base; subbuf->head[0].iov_len = min_t(unsigned int, len, buf->head[0].iov_len - base); len -= subbuf->head[0].iov_len; base = 0; } else { base -= buf->head[0].iov_len; subbuf->head[0].iov_base = buf->head[0].iov_base; subbuf->head[0].iov_len = 0; } if (base < buf->page_len) { subbuf->page_len = min(buf->page_len - base, len); base += buf->page_base; subbuf->page_base = base & ~PAGE_MASK; subbuf->pages = &buf->pages[base >> PAGE_SHIFT]; len -= subbuf->page_len; base = 0; } else { base -= buf->page_len; subbuf->pages = buf->pages; subbuf->page_base = 0; subbuf->page_len = 0; } if (base < buf->tail[0].iov_len) { subbuf->tail[0].iov_base = buf->tail[0].iov_base + base; subbuf->tail[0].iov_len = min_t(unsigned int, len, buf->tail[0].iov_len - base); len -= subbuf->tail[0].iov_len; base = 0; } else { base -= buf->tail[0].iov_len; subbuf->tail[0].iov_base = buf->tail[0].iov_base; subbuf->tail[0].iov_len = 0; } if (base || len) return -1; return 0; } EXPORT_SYMBOL_GPL(xdr_buf_subsegment); /** * xdr_stream_subsegment - set @subbuf to a portion of @xdr * @xdr: an xdr_stream set up for decoding * @subbuf: the result buffer * @nbytes: length of @xdr to extract, in bytes * * Sets up @subbuf to represent a portion of @xdr. The portion * starts at the current offset in @xdr, and extends for a length * of @nbytes. If this is successful, @xdr is advanced to the next * XDR data item following that portion. * * Return values: * %true: @subbuf has been initialized, and @xdr has been advanced. 
* %false: a bounds error has occurred */ bool xdr_stream_subsegment(struct xdr_stream *xdr, struct xdr_buf *subbuf, unsigned int nbytes) { unsigned int start = xdr_stream_pos(xdr); unsigned int remaining, len; /* Extract @subbuf and bounds-check the fn arguments */ if (xdr_buf_subsegment(xdr->buf, subbuf, start, nbytes)) return false; /* Advance @xdr by @nbytes */ for (remaining = nbytes; remaining;) { if (xdr->p == xdr->end && !xdr_set_next_buffer(xdr)) return false; len = (char *)xdr->end - (char *)xdr->p; if (remaining <= len) { xdr->p = (__be32 *)((char *)xdr->p + (remaining + xdr_pad_size(nbytes))); break; } xdr->p = (__be32 *)((char *)xdr->p + len); xdr->end = xdr->p; remaining -= len; } xdr_stream_set_pos(xdr, start + nbytes); return true; } EXPORT_SYMBOL_GPL(xdr_stream_subsegment); /** * xdr_stream_move_subsegment - Move part of a stream to another position * @xdr: the source xdr_stream * @offset: the source offset of the segment * @target: the target offset of the segment * @length: the number of bytes to move * * Moves @length bytes from @offset to @target in the xdr_stream, overwriting * anything in its space. Returns the number of bytes in the segment. */ unsigned int xdr_stream_move_subsegment(struct xdr_stream *xdr, unsigned int offset, unsigned int target, unsigned int length) { struct xdr_buf buf; unsigned int shift; if (offset < target) { shift = target - offset; if (xdr_buf_subsegment(xdr->buf, &buf, offset, shift + length) < 0) return 0; xdr_buf_head_shift_right(&buf, 0, length, shift); } else if (offset > target) { shift = offset - target; if (xdr_buf_subsegment(xdr->buf, &buf, target, shift + length) < 0) return 0; xdr_buf_head_shift_left(&buf, shift, length, shift); } return length; } EXPORT_SYMBOL_GPL(xdr_stream_move_subsegment); /** * xdr_stream_zero - zero out a portion of an xdr_stream * @xdr: an xdr_stream to zero out * @offset: the starting point in the stream * @length: the number of bytes to zero */ unsigned int xdr_stream_zero(struct xdr_stream *xdr, unsigned int offset, unsigned int length) { struct xdr_buf buf; if (xdr_buf_subsegment(xdr->buf, &buf, offset, length) < 0) return 0; if (buf.head[0].iov_len) xdr_buf_iov_zero(buf.head, 0, buf.head[0].iov_len); if (buf.page_len > 0) xdr_buf_pages_zero(&buf, 0, buf.page_len); if (buf.tail[0].iov_len) xdr_buf_iov_zero(buf.tail, 0, buf.tail[0].iov_len); return length; } EXPORT_SYMBOL_GPL(xdr_stream_zero); /** * xdr_buf_trim - lop at most "len" bytes off the end of "buf" * @buf: buf to be trimmed * @len: number of bytes to reduce "buf" by * * Trim an xdr_buf by the given number of bytes by fixing up the lengths. Note * that it's possible that we'll trim less than that amount if the xdr_buf is * too small, or if (for instance) it's all in the head and the parser has * already read too far into it. 
*/ void xdr_buf_trim(struct xdr_buf *buf, unsigned int len) { size_t cur; unsigned int trim = len; if (buf->tail[0].iov_len) { cur = min_t(size_t, buf->tail[0].iov_len, trim); buf->tail[0].iov_len -= cur; trim -= cur; if (!trim) goto fix_len; } if (buf->page_len) { cur = min_t(unsigned int, buf->page_len, trim); buf->page_len -= cur; trim -= cur; if (!trim) goto fix_len; } if (buf->head[0].iov_len) { cur = min_t(size_t, buf->head[0].iov_len, trim); buf->head[0].iov_len -= cur; trim -= cur; } fix_len: buf->len -= (len - trim); } EXPORT_SYMBOL_GPL(xdr_buf_trim); static void __read_bytes_from_xdr_buf(const struct xdr_buf *subbuf, void *obj, unsigned int len) { unsigned int this_len; this_len = min_t(unsigned int, len, subbuf->head[0].iov_len); memcpy(obj, subbuf->head[0].iov_base, this_len); len -= this_len; obj += this_len; this_len = min_t(unsigned int, len, subbuf->page_len); _copy_from_pages(obj, subbuf->pages, subbuf->page_base, this_len); len -= this_len; obj += this_len; this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len); memcpy(obj, subbuf->tail[0].iov_base, this_len); } /* obj is assumed to point to allocated memory of size at least len: */ int read_bytes_from_xdr_buf(const struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len) { struct xdr_buf subbuf; int status; status = xdr_buf_subsegment(buf, &subbuf, base, len); if (status != 0) return status; __read_bytes_from_xdr_buf(&subbuf, obj, len); return 0; } EXPORT_SYMBOL_GPL(read_bytes_from_xdr_buf); static void __write_bytes_to_xdr_buf(const struct xdr_buf *subbuf, void *obj, unsigned int len) { unsigned int this_len; this_len = min_t(unsigned int, len, subbuf->head[0].iov_len); memcpy(subbuf->head[0].iov_base, obj, this_len); len -= this_len; obj += this_len; this_len = min_t(unsigned int, len, subbuf->page_len); _copy_to_pages(subbuf->pages, subbuf->page_base, obj, this_len); len -= this_len; obj += this_len; this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len); memcpy(subbuf->tail[0].iov_base, obj, this_len); } /* obj is assumed to point to allocated memory of size at least len: */ int write_bytes_to_xdr_buf(const struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len) { struct xdr_buf subbuf; int status; status = xdr_buf_subsegment(buf, &subbuf, base, len); if (status != 0) return status; __write_bytes_to_xdr_buf(&subbuf, obj, len); return 0; } EXPORT_SYMBOL_GPL(write_bytes_to_xdr_buf); int xdr_decode_word(const struct xdr_buf *buf, unsigned int base, u32 *obj) { __be32 raw; int status; status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj)); if (status) return status; *obj = be32_to_cpu(raw); return 0; } EXPORT_SYMBOL_GPL(xdr_decode_word); int xdr_encode_word(const struct xdr_buf *buf, unsigned int base, u32 obj) { __be32 raw = cpu_to_be32(obj); return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj)); } EXPORT_SYMBOL_GPL(xdr_encode_word); /* Returns 0 on success, or else a negative error code. 
*/ static int xdr_xcode_array2(const struct xdr_buf *buf, unsigned int base, struct xdr_array2_desc *desc, int encode) { char *elem = NULL, *c; unsigned int copied = 0, todo, avail_here; struct page **ppages = NULL; int err; if (encode) { if (xdr_encode_word(buf, base, desc->array_len) != 0) return -EINVAL; } else { if (xdr_decode_word(buf, base, &desc->array_len) != 0 || desc->array_len > desc->array_maxlen || (unsigned long) base + 4 + desc->array_len * desc->elem_size > buf->len) return -EINVAL; } base += 4; if (!desc->xcode) return 0; todo = desc->array_len * desc->elem_size; /* process head */ if (todo && base < buf->head->iov_len) { c = buf->head->iov_base + base; avail_here = min_t(unsigned int, todo, buf->head->iov_len - base); todo -= avail_here; while (avail_here >= desc->elem_size) { err = desc->xcode(desc, c); if (err) goto out; c += desc->elem_size; avail_here -= desc->elem_size; } if (avail_here) { if (!elem) { elem = kmalloc(desc->elem_size, GFP_KERNEL); err = -ENOMEM; if (!elem) goto out; } if (encode) { err = desc->xcode(desc, elem); if (err) goto out; memcpy(c, elem, avail_here); } else memcpy(elem, c, avail_here); copied = avail_here; } base = buf->head->iov_len; /* align to start of pages */ } /* process pages array */ base -= buf->head->iov_len; if (todo && base < buf->page_len) { unsigned int avail_page; avail_here = min(todo, buf->page_len - base); todo -= avail_here; base += buf->page_base; ppages = buf->pages + (base >> PAGE_SHIFT); base &= ~PAGE_MASK; avail_page = min_t(unsigned int, PAGE_SIZE - base, avail_here); c = kmap(*ppages) + base; while (avail_here) { avail_here -= avail_page; if (copied || avail_page < desc->elem_size) { unsigned int l = min(avail_page, desc->elem_size - copied); if (!elem) { elem = kmalloc(desc->elem_size, GFP_KERNEL); err = -ENOMEM; if (!elem) goto out; } if (encode) { if (!copied) { err = desc->xcode(desc, elem); if (err) goto out; } memcpy(c, elem + copied, l); copied += l; if (copied == desc->elem_size) copied = 0; } else { memcpy(elem + copied, c, l); copied += l; if (copied == desc->elem_size) { err = desc->xcode(desc, elem); if (err) goto out; copied = 0; } } avail_page -= l; c += l; } while (avail_page >= desc->elem_size) { err = desc->xcode(desc, c); if (err) goto out; c += desc->elem_size; avail_page -= desc->elem_size; } if (avail_page) { unsigned int l = min(avail_page, desc->elem_size - copied); if (!elem) { elem = kmalloc(desc->elem_size, GFP_KERNEL); err = -ENOMEM; if (!elem) goto out; } if (encode) { if (!copied) { err = desc->xcode(desc, elem); if (err) goto out; } memcpy(c, elem + copied, l); copied += l; if (copied == desc->elem_size) copied = 0; } else { memcpy(elem + copied, c, l); copied += l; if (copied == desc->elem_size) { err = desc->xcode(desc, elem); if (err) goto out; copied = 0; } } } if (avail_here) { kunmap(*ppages); ppages++; c = kmap(*ppages); } avail_page = min(avail_here, (unsigned int) PAGE_SIZE); } base = buf->page_len; /* align to start of tail */ } /* process tail */ base -= buf->page_len; if (todo) { c = buf->tail->iov_base + base; if (copied) { unsigned int l = desc->elem_size - copied; if (encode) memcpy(c, elem + copied, l); else { memcpy(elem + copied, c, l); err = desc->xcode(desc, elem); if (err) goto out; } todo -= l; c += l; } while (todo) { err = desc->xcode(desc, c); if (err) goto out; c += desc->elem_size; todo -= desc->elem_size; } } err = 0; out: kfree(elem); if (ppages) kunmap(*ppages); return err; } int xdr_decode_array2(const struct xdr_buf *buf, unsigned int base, struct 
xdr_array2_desc *desc) { if (base >= buf->len) return -EINVAL; return xdr_xcode_array2(buf, base, desc, 0); } EXPORT_SYMBOL_GPL(xdr_decode_array2); int xdr_encode_array2(const struct xdr_buf *buf, unsigned int base, struct xdr_array2_desc *desc) { if ((unsigned long) base + 4 + desc->array_len * desc->elem_size > buf->head->iov_len + buf->page_len + buf->tail->iov_len) return -EINVAL; return xdr_xcode_array2(buf, base, desc, 1); } EXPORT_SYMBOL_GPL(xdr_encode_array2); int xdr_process_buf(const struct xdr_buf *buf, unsigned int offset, unsigned int len, int (*actor)(struct scatterlist *, void *), void *data) { int i, ret = 0; unsigned int page_len, thislen, page_offset; struct scatterlist sg[1]; sg_init_table(sg, 1); if (offset >= buf->head[0].iov_len) { offset -= buf->head[0].iov_len; } else { thislen = buf->head[0].iov_len - offset; if (thislen > len) thislen = len; sg_set_buf(sg, buf->head[0].iov_base + offset, thislen); ret = actor(sg, data); if (ret) goto out; offset = 0; len -= thislen; } if (len == 0) goto out; if (offset >= buf->page_len) { offset -= buf->page_len; } else { page_len = buf->page_len - offset; if (page_len > len) page_len = len; len -= page_len; page_offset = (offset + buf->page_base) & (PAGE_SIZE - 1); i = (offset + buf->page_base) >> PAGE_SHIFT; thislen = PAGE_SIZE - page_offset; do { if (thislen > page_len) thislen = page_len; sg_set_page(sg, buf->pages[i], thislen, page_offset); ret = actor(sg, data); if (ret) goto out; page_len -= thislen; i++; page_offset = 0; thislen = PAGE_SIZE; } while (page_len != 0); offset = 0; } if (len == 0) goto out; if (offset < buf->tail[0].iov_len) { thislen = buf->tail[0].iov_len - offset; if (thislen > len) thislen = len; sg_set_buf(sg, buf->tail[0].iov_base + offset, thislen); ret = actor(sg, data); len -= thislen; } if (len != 0) ret = -EINVAL; out: return ret; } EXPORT_SYMBOL_GPL(xdr_process_buf); /** * xdr_stream_decode_opaque - Decode variable length opaque * @xdr: pointer to xdr_stream * @ptr: location to store opaque data * @size: size of storage buffer @ptr * * Return values: * On success, returns size of object stored in *@ptr * %-EBADMSG on XDR buffer overflow * %-EMSGSIZE on overflow of storage buffer @ptr */ ssize_t xdr_stream_decode_opaque(struct xdr_stream *xdr, void *ptr, size_t size) { ssize_t ret; void *p; ret = xdr_stream_decode_opaque_inline(xdr, &p, size); if (ret <= 0) return ret; memcpy(ptr, p, ret); return ret; } EXPORT_SYMBOL_GPL(xdr_stream_decode_opaque); /** * xdr_stream_decode_opaque_dup - Decode and duplicate variable length opaque * @xdr: pointer to xdr_stream * @ptr: location to store pointer to opaque data * @maxlen: maximum acceptable object size * @gfp_flags: GFP mask to use * * Return values: * On success, returns size of object stored in *@ptr * %-EBADMSG on XDR buffer overflow * %-EMSGSIZE if the size of the object would exceed @maxlen * %-ENOMEM on memory allocation failure */ ssize_t xdr_stream_decode_opaque_dup(struct xdr_stream *xdr, void **ptr, size_t maxlen, gfp_t gfp_flags) { ssize_t ret; void *p; ret = xdr_stream_decode_opaque_inline(xdr, &p, maxlen); if (ret > 0) { *ptr = kmemdup(p, ret, gfp_flags); if (*ptr != NULL) return ret; ret = -ENOMEM; } *ptr = NULL; return ret; } EXPORT_SYMBOL_GPL(xdr_stream_decode_opaque_dup); /** * xdr_stream_decode_string - Decode variable length string * @xdr: pointer to xdr_stream * @str: location to store string * @size: size of storage buffer @str * * Return values: * On success, returns length of NUL-terminated string stored in *@str * %-EBADMSG on XDR 
buffer overflow * %-EMSGSIZE on overflow of storage buffer @str */ ssize_t xdr_stream_decode_string(struct xdr_stream *xdr, char *str, size_t size) { ssize_t ret; void *p; ret = xdr_stream_decode_opaque_inline(xdr, &p, size); if (ret > 0) { memcpy(str, p, ret); str[ret] = '\0'; return strlen(str); } *str = '\0'; return ret; } EXPORT_SYMBOL_GPL(xdr_stream_decode_string); /** * xdr_stream_decode_string_dup - Decode and duplicate variable length string * @xdr: pointer to xdr_stream * @str: location to store pointer to string * @maxlen: maximum acceptable string length * @gfp_flags: GFP mask to use * * Return values: * On success, returns length of NUL-terminated string stored in *@str * %-EBADMSG on XDR buffer overflow * %-EMSGSIZE if the size of the string would exceed @maxlen * %-ENOMEM on memory allocation failure */ ssize_t xdr_stream_decode_string_dup(struct xdr_stream *xdr, char **str, size_t maxlen, gfp_t gfp_flags) { void *p; ssize_t ret; ret = xdr_stream_decode_opaque_inline(xdr, &p, maxlen); if (ret > 0) { char *s = kmemdup_nul(p, ret, gfp_flags); if (s != NULL) { *str = s; return strlen(s); } ret = -ENOMEM; } *str = NULL; return ret; } EXPORT_SYMBOL_GPL(xdr_stream_decode_string_dup); /** * xdr_stream_decode_opaque_auth - Decode struct opaque_auth (RFC5531 S8.2) * @xdr: pointer to xdr_stream * @flavor: location to store decoded flavor * @body: location to store pointer to decoded body * @body_len: location to store length of decoded body * * Return values: * On success, returns the number of buffer bytes consumed * %-EBADMSG on XDR buffer overflow * %-EMSGSIZE if the decoded size of the body field exceeds 400 octets */ ssize_t xdr_stream_decode_opaque_auth(struct xdr_stream *xdr, u32 *flavor, void **body, unsigned int *body_len) { ssize_t ret, len; len = xdr_stream_decode_u32(xdr, flavor); if (unlikely(len < 0)) return len; ret = xdr_stream_decode_opaque_inline(xdr, body, RPC_MAX_AUTH_SIZE); if (unlikely(ret < 0)) return ret; *body_len = ret; return len + ret; } EXPORT_SYMBOL_GPL(xdr_stream_decode_opaque_auth); /** * xdr_stream_encode_opaque_auth - Encode struct opaque_auth (RFC5531 S8.2) * @xdr: pointer to xdr_stream * @flavor: verifier flavor to encode * @body: content of body to encode * @body_len: length of body to encode * * Return values: * On success, returns length in bytes of XDR buffer consumed * %-EBADMSG on XDR buffer overflow * %-EMSGSIZE if the size of @body exceeds 400 octets */ ssize_t xdr_stream_encode_opaque_auth(struct xdr_stream *xdr, u32 flavor, void *body, unsigned int body_len) { ssize_t ret, len; if (unlikely(body_len > RPC_MAX_AUTH_SIZE)) return -EMSGSIZE; len = xdr_stream_encode_u32(xdr, flavor); if (unlikely(len < 0)) return len; ret = xdr_stream_encode_opaque(xdr, body, body_len); if (unlikely(ret < 0)) return ret; return len + ret; } EXPORT_SYMBOL_GPL(xdr_stream_encode_opaque_auth);
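/*
 * Example (illustrative sketch, not part of xdr.c): a typical caller of the
 * decode helpers above.  The wire layout assumed here (a u32 "type" word
 * followed by a variable-length opaque), the names demo_blob and
 * demo_decode_blob, and the 1024-byte cap are hypothetical; only the
 * xdr_stream_* calls are real and come from <linux/sunrpc/xdr.h>.
 */
struct demo_blob {
	u32 type;	/* fixed-length field */
	void *data;	/* kmalloc'd copy of the opaque body */
	size_t len;
};

static int demo_decode_blob(struct xdr_stream *xdr, struct demo_blob *blob)
{
	ssize_t ret;

	/* One XDR word: returns 0 on success, -EBADMSG on buffer overflow. */
	if (xdr_stream_decode_u32(xdr, &blob->type) < 0)
		return -EBADMSG;

	/* Variable-length opaque, duplicated into a freshly allocated buffer. */
	ret = xdr_stream_decode_opaque_dup(xdr, &blob->data, 1024, GFP_KERNEL);
	if (ret < 0)
		return ret;

	blob->len = ret;
	return 0;
}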
// SPDX-License-Identifier: GPL-2.0 /* * BlueZ - Bluetooth protocol stack for Linux * * Copyright (C) 2022 Intel Corporation * Copyright 2023-2024 NXP */ #include <linux/module.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #include <linux/sched/signal.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include <net/bluetooth/iso.h> #include "eir.h" static const struct proto_ops iso_sock_ops; static struct bt_sock_list iso_sk_list = { .lock = __RW_LOCK_UNLOCKED(iso_sk_list.lock) }; /* ---- ISO connections ---- */ struct iso_conn { struct hci_conn *hcon; /* @lock: spinlock protecting changes to iso_conn fields */ spinlock_t lock; struct sock *sk; struct delayed_work timeout_work; struct sk_buff *rx_skb; __u32 rx_len; __u16 tx_sn; struct kref ref; }; #define iso_conn_lock(c) spin_lock(&(c)->lock) #define iso_conn_unlock(c) spin_unlock(&(c)->lock) static void iso_sock_close(struct sock *sk); static void iso_sock_kill(struct sock *sk); /* ----- ISO socket info ----- */ #define iso_pi(sk) ((struct iso_pinfo *)sk) #define EIR_SERVICE_DATA_LENGTH 4 #define BASE_MAX_LENGTH (HCI_MAX_PER_AD_LENGTH - EIR_SERVICE_DATA_LENGTH) #define EIR_BAA_SERVICE_UUID 0x1851 /* iso_pinfo flags values */ enum { BT_SK_BIG_SYNC, BT_SK_PA_SYNC, }; struct iso_pinfo { struct bt_sock bt; bdaddr_t src; __u8 src_type; bdaddr_t dst; __u8 dst_type; __u8 bc_sid; __u8 bc_num_bis; __u8 bc_bis[ISO_MAX_NUM_BIS]; __u16 sync_handle; unsigned long flags; struct bt_iso_qos qos; bool qos_user_set; __u8 base_len; __u8 base[BASE_MAX_LENGTH]; struct iso_conn *conn; }; static struct bt_iso_qos default_qos; static bool check_ucast_qos(struct bt_iso_qos *qos); static bool check_bcast_qos(struct bt_iso_qos *qos); static bool iso_match_sid(struct sock *sk, void *data); static bool iso_match_sync_handle(struct sock *sk, void *data); static bool iso_match_sync_handle_pa_report(struct sock *sk, void *data); static void iso_sock_disconn(struct sock *sk); typedef bool (*iso_sock_match_t)(struct sock *sk, void *data); static struct sock *iso_get_sock(bdaddr_t *src, bdaddr_t *dst, enum bt_sock_state state, iso_sock_match_t match, void *data); /* ---- ISO timers ---- */ #define ISO_CONN_TIMEOUT (HZ * 40) #define ISO_DISCONN_TIMEOUT (HZ * 2) static void iso_conn_free(struct kref *ref) { struct iso_conn *conn = container_of(ref, struct iso_conn, ref); BT_DBG("conn %p", conn); if (conn->sk) iso_pi(conn->sk)->conn = NULL; if (conn->hcon) { conn->hcon->iso_data = NULL; hci_conn_drop(conn->hcon); } /* Ensure no more work items will run since hci_conn has been dropped */ disable_delayed_work_sync(&conn->timeout_work); kfree(conn); } static
void iso_conn_put(struct iso_conn *conn) { if (!conn) return; BT_DBG("conn %p refcnt %d", conn, kref_read(&conn->ref)); kref_put(&conn->ref, iso_conn_free); } static struct iso_conn *iso_conn_hold_unless_zero(struct iso_conn *conn) { if (!conn) return NULL; BT_DBG("conn %p refcnt %u", conn, kref_read(&conn->ref)); if (!kref_get_unless_zero(&conn->ref)) return NULL; return conn; } static struct sock *iso_sock_hold(struct iso_conn *conn) { if (!conn || !bt_sock_linked(&iso_sk_list, conn->sk)) return NULL; sock_hold(conn->sk); return conn->sk; } static void iso_sock_timeout(struct work_struct *work) { struct iso_conn *conn = container_of(work, struct iso_conn, timeout_work.work); struct sock *sk; conn = iso_conn_hold_unless_zero(conn); if (!conn) return; iso_conn_lock(conn); sk = iso_sock_hold(conn); iso_conn_unlock(conn); iso_conn_put(conn); if (!sk) return; BT_DBG("sock %p state %d", sk, sk->sk_state); lock_sock(sk); sk->sk_err = ETIMEDOUT; sk->sk_state_change(sk); release_sock(sk); sock_put(sk); } static void iso_sock_set_timer(struct sock *sk, long timeout) { if (!iso_pi(sk)->conn) return; BT_DBG("sock %p state %d timeout %ld", sk, sk->sk_state, timeout); cancel_delayed_work(&iso_pi(sk)->conn->timeout_work); schedule_delayed_work(&iso_pi(sk)->conn->timeout_work, timeout); } static void iso_sock_clear_timer(struct sock *sk) { if (!iso_pi(sk)->conn) return; BT_DBG("sock %p state %d", sk, sk->sk_state); cancel_delayed_work(&iso_pi(sk)->conn->timeout_work); } /* ---- ISO connections ---- */ static struct iso_conn *iso_conn_add(struct hci_conn *hcon) { struct iso_conn *conn = hcon->iso_data; conn = iso_conn_hold_unless_zero(conn); if (conn) { if (!conn->hcon) { iso_conn_lock(conn); conn->hcon = hcon; iso_conn_unlock(conn); } iso_conn_put(conn); return conn; } conn = kzalloc(sizeof(*conn), GFP_KERNEL); if (!conn) return NULL; kref_init(&conn->ref); spin_lock_init(&conn->lock); INIT_DELAYED_WORK(&conn->timeout_work, iso_sock_timeout); hcon->iso_data = conn; conn->hcon = hcon; conn->tx_sn = 0; BT_DBG("hcon %p conn %p", hcon, conn); return conn; } /* Delete channel. Must be called on the locked socket. 
*/ static void iso_chan_del(struct sock *sk, int err) { struct iso_conn *conn; struct sock *parent; conn = iso_pi(sk)->conn; iso_pi(sk)->conn = NULL; BT_DBG("sk %p, conn %p, err %d", sk, conn, err); if (conn) { iso_conn_lock(conn); conn->sk = NULL; iso_conn_unlock(conn); iso_conn_put(conn); } sk->sk_state = BT_CLOSED; sk->sk_err = err; parent = bt_sk(sk)->parent; if (parent) { bt_accept_unlink(sk); parent->sk_data_ready(parent); } else { sk->sk_state_change(sk); } sock_set_flag(sk, SOCK_ZAPPED); } static void iso_conn_del(struct hci_conn *hcon, int err) { struct iso_conn *conn = hcon->iso_data; struct sock *sk; conn = iso_conn_hold_unless_zero(conn); if (!conn) return; BT_DBG("hcon %p conn %p, err %d", hcon, conn, err); /* Kill socket */ iso_conn_lock(conn); sk = iso_sock_hold(conn); iso_conn_unlock(conn); iso_conn_put(conn); if (!sk) { iso_conn_put(conn); return; } lock_sock(sk); iso_sock_clear_timer(sk); iso_chan_del(sk, err); release_sock(sk); sock_put(sk); } static int __iso_chan_add(struct iso_conn *conn, struct sock *sk, struct sock *parent) { BT_DBG("conn %p", conn); if (iso_pi(sk)->conn == conn && conn->sk == sk) return 0; if (conn->sk) { BT_ERR("conn->sk already set"); return -EBUSY; } iso_pi(sk)->conn = conn; conn->sk = sk; if (parent) bt_accept_enqueue(parent, sk, true); return 0; } static int iso_chan_add(struct iso_conn *conn, struct sock *sk, struct sock *parent) { int err; iso_conn_lock(conn); err = __iso_chan_add(conn, sk, parent); iso_conn_unlock(conn); return err; } static inline u8 le_addr_type(u8 bdaddr_type) { if (bdaddr_type == BDADDR_LE_PUBLIC) return ADDR_LE_DEV_PUBLIC; else return ADDR_LE_DEV_RANDOM; } static int iso_connect_bis(struct sock *sk) { struct iso_conn *conn; struct hci_conn *hcon; struct hci_dev *hdev; int err; BT_DBG("%pMR (SID 0x%2.2x)", &iso_pi(sk)->src, iso_pi(sk)->bc_sid); hdev = hci_get_route(&iso_pi(sk)->dst, &iso_pi(sk)->src, iso_pi(sk)->src_type); if (!hdev) return -EHOSTUNREACH; hci_dev_lock(hdev); if (!bis_capable(hdev)) { err = -EOPNOTSUPP; goto unlock; } /* Fail if user set invalid QoS */ if (iso_pi(sk)->qos_user_set && !check_bcast_qos(&iso_pi(sk)->qos)) { iso_pi(sk)->qos = default_qos; err = -EINVAL; goto unlock; } /* Fail if out PHYs are marked as disabled */ if (!iso_pi(sk)->qos.bcast.out.phy) { err = -EINVAL; goto unlock; } /* Just bind if DEFER_SETUP has been set */ if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) { hcon = hci_bind_bis(hdev, &iso_pi(sk)->dst, iso_pi(sk)->bc_sid, &iso_pi(sk)->qos, iso_pi(sk)->base_len, iso_pi(sk)->base); if (IS_ERR(hcon)) { err = PTR_ERR(hcon); goto unlock; } } else { hcon = hci_connect_bis(hdev, &iso_pi(sk)->dst, le_addr_type(iso_pi(sk)->dst_type), iso_pi(sk)->bc_sid, &iso_pi(sk)->qos, iso_pi(sk)->base_len, iso_pi(sk)->base); if (IS_ERR(hcon)) { err = PTR_ERR(hcon); goto unlock; } /* Update SID if it was not set */ if (iso_pi(sk)->bc_sid == HCI_SID_INVALID) iso_pi(sk)->bc_sid = hcon->sid; } conn = iso_conn_add(hcon); if (!conn) { hci_conn_drop(hcon); err = -ENOMEM; goto unlock; } lock_sock(sk); err = iso_chan_add(conn, sk, NULL); if (err) { release_sock(sk); goto unlock; } /* Update source addr of the socket */ bacpy(&iso_pi(sk)->src, &hcon->src); if (hcon->state == BT_CONNECTED) { iso_sock_clear_timer(sk); sk->sk_state = BT_CONNECTED; } else if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) { iso_sock_clear_timer(sk); sk->sk_state = BT_CONNECT; } else { sk->sk_state = BT_CONNECT; iso_sock_set_timer(sk, sk->sk_sndtimeo); } release_sock(sk); unlock: hci_dev_unlock(hdev); hci_dev_put(hdev); return 
err; } static int iso_connect_cis(struct sock *sk) { struct iso_conn *conn; struct hci_conn *hcon; struct hci_dev *hdev; int err; BT_DBG("%pMR -> %pMR", &iso_pi(sk)->src, &iso_pi(sk)->dst); hdev = hci_get_route(&iso_pi(sk)->dst, &iso_pi(sk)->src, iso_pi(sk)->src_type); if (!hdev) return -EHOSTUNREACH; hci_dev_lock(hdev); if (!cis_central_capable(hdev)) { err = -EOPNOTSUPP; goto unlock; } /* Fail if user set invalid QoS */ if (iso_pi(sk)->qos_user_set && !check_ucast_qos(&iso_pi(sk)->qos)) { iso_pi(sk)->qos = default_qos; err = -EINVAL; goto unlock; } /* Fail if either PHYs are marked as disabled */ if (!iso_pi(sk)->qos.ucast.in.phy && !iso_pi(sk)->qos.ucast.out.phy) { err = -EINVAL; goto unlock; } /* Just bind if DEFER_SETUP has been set */ if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) { hcon = hci_bind_cis(hdev, &iso_pi(sk)->dst, le_addr_type(iso_pi(sk)->dst_type), &iso_pi(sk)->qos); if (IS_ERR(hcon)) { err = PTR_ERR(hcon); goto unlock; } } else { hcon = hci_connect_cis(hdev, &iso_pi(sk)->dst, le_addr_type(iso_pi(sk)->dst_type), &iso_pi(sk)->qos); if (IS_ERR(hcon)) { err = PTR_ERR(hcon); goto unlock; } } conn = iso_conn_add(hcon); if (!conn) { hci_conn_drop(hcon); err = -ENOMEM; goto unlock; } lock_sock(sk); err = iso_chan_add(conn, sk, NULL); if (err) { release_sock(sk); goto unlock; } /* Update source addr of the socket */ bacpy(&iso_pi(sk)->src, &hcon->src); if (hcon->state == BT_CONNECTED) { iso_sock_clear_timer(sk); sk->sk_state = BT_CONNECTED; } else if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) { iso_sock_clear_timer(sk); sk->sk_state = BT_CONNECT; } else { sk->sk_state = BT_CONNECT; iso_sock_set_timer(sk, sk->sk_sndtimeo); } release_sock(sk); unlock: hci_dev_unlock(hdev); hci_dev_put(hdev); return err; } static struct bt_iso_qos *iso_sock_get_qos(struct sock *sk) { if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONNECT2) return &iso_pi(sk)->conn->hcon->iso_qos; return &iso_pi(sk)->qos; } static int iso_send_frame(struct sock *sk, struct sk_buff *skb, const struct sockcm_cookie *sockc) { struct iso_conn *conn = iso_pi(sk)->conn; struct bt_iso_qos *qos = iso_sock_get_qos(sk); struct hci_iso_data_hdr *hdr; int len = 0; BT_DBG("sk %p len %d", sk, skb->len); if (skb->len > qos->ucast.out.sdu) return -EMSGSIZE; len = skb->len; /* Push ISO data header */ hdr = skb_push(skb, HCI_ISO_DATA_HDR_SIZE); hdr->sn = cpu_to_le16(conn->tx_sn++); hdr->slen = cpu_to_le16(hci_iso_data_len_pack(len, HCI_ISO_STATUS_VALID)); if (sk->sk_state == BT_CONNECTED) { hci_setup_tx_timestamp(skb, 1, sockc); hci_send_iso(conn->hcon, skb); } else { len = -ENOTCONN; } return len; } static void iso_recv_frame(struct iso_conn *conn, struct sk_buff *skb) { struct sock *sk; iso_conn_lock(conn); sk = conn->sk; iso_conn_unlock(conn); if (!sk) goto drop; BT_DBG("sk %p len %d", sk, skb->len); if (sk->sk_state != BT_CONNECTED) goto drop; if (!sock_queue_rcv_skb(sk, skb)) return; drop: kfree_skb(skb); } /* -------- Socket interface ---------- */ static struct sock *__iso_get_sock_listen_by_addr(bdaddr_t *src, bdaddr_t *dst) { struct sock *sk; sk_for_each(sk, &iso_sk_list.head) { if (sk->sk_state != BT_LISTEN) continue; if (bacmp(&iso_pi(sk)->dst, dst)) continue; if (!bacmp(&iso_pi(sk)->src, src)) return sk; } return NULL; } static struct sock *__iso_get_sock_listen_by_sid(bdaddr_t *ba, bdaddr_t *bc, __u8 sid) { struct sock *sk; sk_for_each(sk, &iso_sk_list.head) { if (sk->sk_state != BT_LISTEN) continue; if (bacmp(&iso_pi(sk)->src, ba)) continue; if (bacmp(&iso_pi(sk)->dst, bc)) continue; if 
(iso_pi(sk)->bc_sid == sid) return sk; } return NULL; } /* Find socket in given state: * source bdaddr (Unicast) * destination bdaddr (Broadcast only) * match func - pass NULL to ignore * match func data - pass -1 to ignore * Returns closest match. */ static struct sock *iso_get_sock(bdaddr_t *src, bdaddr_t *dst, enum bt_sock_state state, iso_sock_match_t match, void *data) { struct sock *sk = NULL, *sk1 = NULL; read_lock(&iso_sk_list.lock); sk_for_each(sk, &iso_sk_list.head) { if (sk->sk_state != state) continue; /* Match Broadcast destination */ if (bacmp(dst, BDADDR_ANY) && bacmp(&iso_pi(sk)->dst, dst)) continue; /* Use Match function if provided */ if (match && !match(sk, data)) continue; /* Exact match. */ if (!bacmp(&iso_pi(sk)->src, src)) { sock_hold(sk); break; } /* Closest match */ if (!bacmp(&iso_pi(sk)->src, BDADDR_ANY)) { if (sk1) sock_put(sk1); sk1 = sk; sock_hold(sk1); } } if (sk && sk1) sock_put(sk1); read_unlock(&iso_sk_list.lock); return sk ? sk : sk1; } static struct sock *iso_get_sock_big(struct sock *match_sk, bdaddr_t *src, bdaddr_t *dst, uint8_t big) { struct sock *sk = NULL; read_lock(&iso_sk_list.lock); sk_for_each(sk, &iso_sk_list.head) { if (match_sk == sk) continue; /* Look for sockets that have already been * connected to the BIG */ if (sk->sk_state != BT_CONNECTED && sk->sk_state != BT_CONNECT) continue; /* Match Broadcast destination */ if (bacmp(&iso_pi(sk)->dst, dst)) continue; /* Match BIG handle */ if (iso_pi(sk)->qos.bcast.big != big) continue; /* Match source address */ if (bacmp(&iso_pi(sk)->src, src)) continue; sock_hold(sk); break; } read_unlock(&iso_sk_list.lock); return sk; } static void iso_sock_destruct(struct sock *sk) { BT_DBG("sk %p", sk); iso_conn_put(iso_pi(sk)->conn); skb_queue_purge(&sk->sk_receive_queue); skb_queue_purge(&sk->sk_write_queue); } static void iso_sock_cleanup_listen(struct sock *parent) { struct sock *sk; BT_DBG("parent %p", parent); /* Close not yet accepted channels */ while ((sk = bt_accept_dequeue(parent, NULL))) { iso_sock_close(sk); iso_sock_kill(sk); } /* If listening socket has a hcon, properly disconnect it */ if (iso_pi(parent)->conn && iso_pi(parent)->conn->hcon) { iso_sock_disconn(parent); return; } parent->sk_state = BT_CLOSED; sock_set_flag(parent, SOCK_ZAPPED); } /* Kill socket (only if zapped and orphan) * Must be called on unlocked socket. */ static void iso_sock_kill(struct sock *sk) { if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket || sock_flag(sk, SOCK_DEAD)) return; BT_DBG("sk %p state %d", sk, sk->sk_state); /* Kill poor orphan */ bt_sock_unlink(&iso_sk_list, sk); sock_set_flag(sk, SOCK_DEAD); sock_put(sk); } static void iso_sock_disconn(struct sock *sk) { struct sock *bis_sk; struct hci_conn *hcon = iso_pi(sk)->conn->hcon; if (test_bit(HCI_CONN_BIG_CREATED, &hcon->flags)) { bis_sk = iso_get_sock_big(sk, &iso_pi(sk)->src, &iso_pi(sk)->dst, iso_pi(sk)->qos.bcast.big); /* If there are any other connected sockets for the * same BIG, just delete the sk and leave the bis * hcon active, in case later rebinding is needed. 
*/ if (bis_sk) { hcon->state = BT_OPEN; hcon->iso_data = NULL; iso_pi(sk)->conn->hcon = NULL; iso_sock_clear_timer(sk); iso_chan_del(sk, bt_to_errno(hcon->abort_reason)); sock_put(bis_sk); return; } } sk->sk_state = BT_DISCONN; iso_conn_lock(iso_pi(sk)->conn); hci_conn_drop(iso_pi(sk)->conn->hcon); iso_pi(sk)->conn->hcon = NULL; iso_conn_unlock(iso_pi(sk)->conn); } static void __iso_sock_close(struct sock *sk) { BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket); switch (sk->sk_state) { case BT_LISTEN: iso_sock_cleanup_listen(sk); break; case BT_CONNECT: case BT_CONNECTED: case BT_CONFIG: if (iso_pi(sk)->conn->hcon) iso_sock_disconn(sk); else iso_chan_del(sk, ECONNRESET); break; case BT_CONNECT2: if (iso_pi(sk)->conn->hcon && (test_bit(HCI_CONN_PA_SYNC, &iso_pi(sk)->conn->hcon->flags) || test_bit(HCI_CONN_PA_SYNC_FAILED, &iso_pi(sk)->conn->hcon->flags))) iso_sock_disconn(sk); else iso_chan_del(sk, ECONNRESET); break; case BT_DISCONN: iso_chan_del(sk, ECONNRESET); break; default: sock_set_flag(sk, SOCK_ZAPPED); break; } } /* Must be called on unlocked socket. */ static void iso_sock_close(struct sock *sk) { iso_sock_clear_timer(sk); lock_sock(sk); __iso_sock_close(sk); release_sock(sk); iso_sock_kill(sk); } static void iso_sock_init(struct sock *sk, struct sock *parent) { BT_DBG("sk %p", sk); if (parent) { sk->sk_type = parent->sk_type; bt_sk(sk)->flags = bt_sk(parent)->flags; security_sk_clone(parent, sk); } } static struct proto iso_proto = { .name = "ISO", .owner = THIS_MODULE, .obj_size = sizeof(struct iso_pinfo) }; #define DEFAULT_IO_QOS \ { \ .interval = 10000u, \ .latency = 10u, \ .sdu = 40u, \ .phy = BT_ISO_PHY_2M, \ .rtn = 2u, \ } static struct bt_iso_qos default_qos = { .bcast = { .big = BT_ISO_QOS_BIG_UNSET, .bis = BT_ISO_QOS_BIS_UNSET, .sync_factor = 0x01, .packing = 0x00, .framing = 0x00, .in = DEFAULT_IO_QOS, .out = DEFAULT_IO_QOS, .encryption = 0x00, .bcode = {0x00}, .options = 0x00, .skip = 0x0000, .sync_timeout = BT_ISO_SYNC_TIMEOUT, .sync_cte_type = 0x00, .mse = 0x00, .timeout = BT_ISO_SYNC_TIMEOUT, }, }; static struct sock *iso_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio, int kern) { struct sock *sk; sk = bt_sock_alloc(net, sock, &iso_proto, proto, prio, kern); if (!sk) return NULL; sk->sk_destruct = iso_sock_destruct; sk->sk_sndtimeo = ISO_CONN_TIMEOUT; /* Set address type as public as default src address is BDADDR_ANY */ iso_pi(sk)->src_type = BDADDR_LE_PUBLIC; iso_pi(sk)->qos = default_qos; iso_pi(sk)->sync_handle = -1; bt_sock_link(&iso_sk_list, sk); return sk; } static int iso_sock_create(struct net *net, struct socket *sock, int protocol, int kern) { struct sock *sk; BT_DBG("sock %p", sock); sock->state = SS_UNCONNECTED; if (sock->type != SOCK_SEQPACKET) return -ESOCKTNOSUPPORT; sock->ops = &iso_sock_ops; sk = iso_sock_alloc(net, sock, protocol, GFP_ATOMIC, kern); if (!sk) return -ENOMEM; iso_sock_init(sk, NULL); return 0; } static int iso_sock_bind_bc(struct socket *sock, struct sockaddr *addr, int addr_len) { struct sockaddr_iso *sa = (struct sockaddr_iso *)addr; struct sock *sk = sock->sk; int i; BT_DBG("sk %p bc_sid %u bc_num_bis %u", sk, sa->iso_bc->bc_sid, sa->iso_bc->bc_num_bis); if (addr_len != sizeof(*sa) + sizeof(*sa->iso_bc)) return -EINVAL; bacpy(&iso_pi(sk)->dst, &sa->iso_bc->bc_bdaddr); /* Check if the address type is of LE type */ if (!bdaddr_type_is_le(sa->iso_bc->bc_bdaddr_type)) return -EINVAL; iso_pi(sk)->dst_type = sa->iso_bc->bc_bdaddr_type; if (sa->iso_bc->bc_sid > 0x0f && sa->iso_bc->bc_sid != 
HCI_SID_INVALID) return -EINVAL; iso_pi(sk)->bc_sid = sa->iso_bc->bc_sid; if (sa->iso_bc->bc_num_bis > ISO_MAX_NUM_BIS) return -EINVAL; iso_pi(sk)->bc_num_bis = sa->iso_bc->bc_num_bis; for (i = 0; i < iso_pi(sk)->bc_num_bis; i++) if (sa->iso_bc->bc_bis[i] < 0x01 || sa->iso_bc->bc_bis[i] > 0x1f) return -EINVAL; memcpy(iso_pi(sk)->bc_bis, sa->iso_bc->bc_bis, iso_pi(sk)->bc_num_bis); return 0; } static int iso_sock_bind_pa_sk(struct sock *sk, struct sockaddr_iso *sa, int addr_len) { int err = 0; if (sk->sk_type != SOCK_SEQPACKET) { err = -EINVAL; goto done; } if (addr_len != sizeof(*sa) + sizeof(*sa->iso_bc)) { err = -EINVAL; goto done; } if (sa->iso_bc->bc_num_bis > ISO_MAX_NUM_BIS) { err = -EINVAL; goto done; } iso_pi(sk)->bc_num_bis = sa->iso_bc->bc_num_bis; for (int i = 0; i < iso_pi(sk)->bc_num_bis; i++) if (sa->iso_bc->bc_bis[i] < 0x01 || sa->iso_bc->bc_bis[i] > 0x1f) { err = -EINVAL; goto done; } memcpy(iso_pi(sk)->bc_bis, sa->iso_bc->bc_bis, iso_pi(sk)->bc_num_bis); done: return err; } static int iso_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len) { struct sockaddr_iso *sa = (struct sockaddr_iso *)addr; struct sock *sk = sock->sk; int err = 0; BT_DBG("sk %p %pMR type %u", sk, &sa->iso_bdaddr, sa->iso_bdaddr_type); if (!addr || addr_len < sizeof(struct sockaddr_iso) || addr->sa_family != AF_BLUETOOTH) return -EINVAL; lock_sock(sk); /* Allow the user to bind a PA sync socket to a number * of BISes to sync to. */ if ((sk->sk_state == BT_CONNECT2 || sk->sk_state == BT_CONNECTED) && test_bit(BT_SK_PA_SYNC, &iso_pi(sk)->flags)) { err = iso_sock_bind_pa_sk(sk, sa, addr_len); goto done; } if (sk->sk_state != BT_OPEN) { err = -EBADFD; goto done; } if (sk->sk_type != SOCK_SEQPACKET) { err = -EINVAL; goto done; } /* Check if the address type is of LE type */ if (!bdaddr_type_is_le(sa->iso_bdaddr_type)) { err = -EINVAL; goto done; } bacpy(&iso_pi(sk)->src, &sa->iso_bdaddr); iso_pi(sk)->src_type = sa->iso_bdaddr_type; /* Check for Broadcast address */ if (addr_len > sizeof(*sa)) { err = iso_sock_bind_bc(sock, addr, addr_len); if (err) goto done; } sk->sk_state = BT_BOUND; done: release_sock(sk); return err; } static int iso_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags) { struct sockaddr_iso *sa = (struct sockaddr_iso *)addr; struct sock *sk = sock->sk; int err; BT_DBG("sk %p", sk); if (alen < sizeof(struct sockaddr_iso) || addr->sa_family != AF_BLUETOOTH) return -EINVAL; if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND) return -EBADFD; if (sk->sk_type != SOCK_SEQPACKET) return -EINVAL; /* Check if the address type is of LE type */ if (!bdaddr_type_is_le(sa->iso_bdaddr_type)) return -EINVAL; lock_sock(sk); bacpy(&iso_pi(sk)->dst, &sa->iso_bdaddr); iso_pi(sk)->dst_type = sa->iso_bdaddr_type; release_sock(sk); if (bacmp(&iso_pi(sk)->dst, BDADDR_ANY)) err = iso_connect_cis(sk); else err = iso_connect_bis(sk); if (err) return err; lock_sock(sk); if (!test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) { err = bt_sock_wait_state(sk, BT_CONNECTED, sock_sndtimeo(sk, flags & O_NONBLOCK)); } release_sock(sk); return err; } static int iso_listen_bis(struct sock *sk) { struct hci_dev *hdev; int err = 0; struct iso_conn *conn; struct hci_conn *hcon; BT_DBG("%pMR -> %pMR (SID 0x%2.2x)", &iso_pi(sk)->src, &iso_pi(sk)->dst, iso_pi(sk)->bc_sid); write_lock(&iso_sk_list.lock); if (__iso_get_sock_listen_by_sid(&iso_pi(sk)->src, &iso_pi(sk)->dst, iso_pi(sk)->bc_sid)) err = -EADDRINUSE; write_unlock(&iso_sk_list.lock); if (err) return err; hdev = 
hci_get_route(&iso_pi(sk)->dst, &iso_pi(sk)->src, iso_pi(sk)->src_type); if (!hdev) return -EHOSTUNREACH; hci_dev_lock(hdev); lock_sock(sk); /* Fail if user set invalid QoS */ if (iso_pi(sk)->qos_user_set && !check_bcast_qos(&iso_pi(sk)->qos)) { iso_pi(sk)->qos = default_qos; err = -EINVAL; goto unlock; } hcon = hci_pa_create_sync(hdev, &iso_pi(sk)->dst, le_addr_type(iso_pi(sk)->dst_type), iso_pi(sk)->bc_sid, &iso_pi(sk)->qos); if (IS_ERR(hcon)) { err = PTR_ERR(hcon); goto unlock; } conn = iso_conn_add(hcon); if (!conn) { hci_conn_drop(hcon); err = -ENOMEM; goto unlock; } err = iso_chan_add(conn, sk, NULL); if (err) { hci_conn_drop(hcon); goto unlock; } unlock: release_sock(sk); hci_dev_unlock(hdev); hci_dev_put(hdev); return err; } static int iso_listen_cis(struct sock *sk) { int err = 0; BT_DBG("%pMR", &iso_pi(sk)->src); write_lock(&iso_sk_list.lock); if (__iso_get_sock_listen_by_addr(&iso_pi(sk)->src, &iso_pi(sk)->dst)) err = -EADDRINUSE; write_unlock(&iso_sk_list.lock); return err; } static int iso_sock_listen(struct socket *sock, int backlog) { struct sock *sk = sock->sk; int err = 0; BT_DBG("sk %p backlog %d", sk, backlog); sock_hold(sk); lock_sock(sk); if (sk->sk_state != BT_BOUND) { err = -EBADFD; goto done; } if (sk->sk_type != SOCK_SEQPACKET) { err = -EINVAL; goto done; } if (!bacmp(&iso_pi(sk)->dst, BDADDR_ANY)) { err = iso_listen_cis(sk); } else { /* Drop sock lock to avoid potential * deadlock with the hdev lock. */ release_sock(sk); err = iso_listen_bis(sk); lock_sock(sk); } if (err) goto done; sk->sk_max_ack_backlog = backlog; sk->sk_ack_backlog = 0; sk->sk_state = BT_LISTEN; done: release_sock(sk); sock_put(sk); return err; } static int iso_sock_accept(struct socket *sock, struct socket *newsock, struct proto_accept_arg *arg) { DEFINE_WAIT_FUNC(wait, woken_wake_function); struct sock *sk = sock->sk, *ch; long timeo; int err = 0; /* Use explicit nested locking to avoid lockdep warnings generated * because the parent socket and the child socket are locked on the * same thread. */ lock_sock_nested(sk, SINGLE_DEPTH_NESTING); timeo = sock_rcvtimeo(sk, arg->flags & O_NONBLOCK); BT_DBG("sk %p timeo %ld", sk, timeo); /* Wait for an incoming connection. (wake-one). */ add_wait_queue_exclusive(sk_sleep(sk), &wait); while (1) { if (sk->sk_state != BT_LISTEN) { err = -EBADFD; break; } ch = bt_accept_dequeue(sk, newsock); if (ch) break; if (!timeo) { err = -EAGAIN; break; } if (signal_pending(current)) { err = sock_intr_errno(timeo); break; } release_sock(sk); timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo); lock_sock_nested(sk, SINGLE_DEPTH_NESTING); } remove_wait_queue(sk_sleep(sk), &wait); if (err) goto done; newsock->state = SS_CONNECTED; BT_DBG("new socket %p", ch); /* A Broadcast Sink might require BIG sync to be terminated * and re-established multiple times, while keeping the same * PA sync handle active. To allow this, once all BIS * connections have been accepted on a PA sync parent socket, * "reset" socket state, to allow future BIG re-sync procedures. */ if (test_bit(BT_SK_PA_SYNC, &iso_pi(sk)->flags)) { /* Iterate through the list of bound BIS indices * and clear each BIS as they are accepted by the * user space, one by one. */ for (int i = 0; i < iso_pi(sk)->bc_num_bis; i++) { if (iso_pi(sk)->bc_bis[i] > 0) { iso_pi(sk)->bc_bis[i] = 0; iso_pi(sk)->bc_num_bis--; break; } } if (iso_pi(sk)->bc_num_bis == 0) { /* Once the last BIS was accepted, reset parent * socket parameters to mark that the listening * process for BIS connections has been completed: * * 1. 
Reset the DEFER setup flag on the parent sk. * 2. Clear the flag marking that the BIG create * sync command is pending. * 3. Transition socket state from BT_LISTEN to * BT_CONNECTED. */ set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags); clear_bit(BT_SK_BIG_SYNC, &iso_pi(sk)->flags); sk->sk_state = BT_CONNECTED; } } done: release_sock(sk); return err; } static int iso_sock_getname(struct socket *sock, struct sockaddr *addr, int peer) { struct sockaddr_iso *sa = (struct sockaddr_iso *)addr; struct sock *sk = sock->sk; int len = sizeof(struct sockaddr_iso); BT_DBG("sock %p, sk %p", sock, sk); addr->sa_family = AF_BLUETOOTH; if (peer) { struct hci_conn *hcon = iso_pi(sk)->conn ? iso_pi(sk)->conn->hcon : NULL; bacpy(&sa->iso_bdaddr, &iso_pi(sk)->dst); sa->iso_bdaddr_type = iso_pi(sk)->dst_type; if (hcon && hcon->type == BIS_LINK) { sa->iso_bc->bc_sid = iso_pi(sk)->bc_sid; sa->iso_bc->bc_num_bis = iso_pi(sk)->bc_num_bis; memcpy(sa->iso_bc->bc_bis, iso_pi(sk)->bc_bis, ISO_MAX_NUM_BIS); len += sizeof(struct sockaddr_iso_bc); } } else { bacpy(&sa->iso_bdaddr, &iso_pi(sk)->src); sa->iso_bdaddr_type = iso_pi(sk)->src_type; } return len; } static int iso_sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct sk_buff *skb, **frag; struct sockcm_cookie sockc; size_t mtu; int err; BT_DBG("sock %p, sk %p", sock, sk); err = sock_error(sk); if (err) return err; if (msg->msg_flags & MSG_OOB) return -EOPNOTSUPP; hci_sockcm_init(&sockc, sk); if (msg->msg_controllen) { err = sock_cmsg_send(sk, msg, &sockc); if (err) return err; } lock_sock(sk); if (sk->sk_state != BT_CONNECTED) { release_sock(sk); return -ENOTCONN; } mtu = iso_pi(sk)->conn->hcon->mtu; release_sock(sk); skb = bt_skb_sendmsg(sk, msg, len, mtu, HCI_ISO_DATA_HDR_SIZE, 0); if (IS_ERR(skb)) return PTR_ERR(skb); len -= skb->len; BT_DBG("skb %p len %d", sk, skb->len); /* Continuation fragments */ frag = &skb_shinfo(skb)->frag_list; while (len) { struct sk_buff *tmp; tmp = bt_skb_sendmsg(sk, msg, len, mtu, 0, 0); if (IS_ERR(tmp)) { kfree_skb(skb); return PTR_ERR(tmp); } *frag = tmp; len -= tmp->len; skb->len += tmp->len; skb->data_len += tmp->len; BT_DBG("frag %p len %d", *frag, tmp->len); frag = &(*frag)->next; } lock_sock(sk); if (sk->sk_state == BT_CONNECTED) err = iso_send_frame(sk, skb, &sockc); else err = -ENOTCONN; release_sock(sk); if (err < 0) kfree_skb(skb); return err; } static void iso_conn_defer_accept(struct hci_conn *conn) { struct hci_cp_le_accept_cis cp; struct hci_dev *hdev = conn->hdev; BT_DBG("conn %p", conn); conn->state = BT_CONFIG; cp.handle = cpu_to_le16(conn->handle); hci_send_cmd(hdev, HCI_OP_LE_ACCEPT_CIS, sizeof(cp), &cp); } static void iso_conn_big_sync(struct sock *sk) { int err; struct hci_dev *hdev; hdev = hci_get_route(&iso_pi(sk)->dst, &iso_pi(sk)->src, iso_pi(sk)->src_type); if (!hdev) return; /* hci_le_big_create_sync requires hdev lock to be held, since * it enqueues the HCI LE BIG Create Sync command via * hci_cmd_sync_queue_once, which checks hdev flags that might * change. 
*/ hci_dev_lock(hdev); lock_sock(sk); if (!test_and_set_bit(BT_SK_BIG_SYNC, &iso_pi(sk)->flags)) { err = hci_conn_big_create_sync(hdev, iso_pi(sk)->conn->hcon, &iso_pi(sk)->qos, iso_pi(sk)->sync_handle, iso_pi(sk)->bc_num_bis, iso_pi(sk)->bc_bis); if (err) bt_dev_err(hdev, "hci_big_create_sync: %d", err); } release_sock(sk); hci_dev_unlock(hdev); } static int iso_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, int flags) { struct sock *sk = sock->sk; struct iso_pinfo *pi = iso_pi(sk); bool early_ret = false; int err = 0; BT_DBG("sk %p", sk); if (unlikely(flags & MSG_ERRQUEUE)) return sock_recv_errqueue(sk, msg, len, SOL_BLUETOOTH, BT_SCM_ERROR); if (test_and_clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) { sock_hold(sk); lock_sock(sk); switch (sk->sk_state) { case BT_CONNECT2: if (test_bit(BT_SK_PA_SYNC, &pi->flags)) { release_sock(sk); iso_conn_big_sync(sk); lock_sock(sk); sk->sk_state = BT_LISTEN; } else { iso_conn_defer_accept(pi->conn->hcon); sk->sk_state = BT_CONFIG; } early_ret = true; break; case BT_CONNECTED: if (test_bit(BT_SK_PA_SYNC, &iso_pi(sk)->flags)) { release_sock(sk); iso_conn_big_sync(sk); lock_sock(sk); sk->sk_state = BT_LISTEN; early_ret = true; } break; case BT_CONNECT: release_sock(sk); err = iso_connect_cis(sk); lock_sock(sk); early_ret = true; break; default: break; } release_sock(sk); sock_put(sk); if (early_ret) return err; } return bt_sock_recvmsg(sock, msg, len, flags); } static bool check_io_qos(struct bt_iso_io_qos *qos) { /* If no PHY is enable SDU must be 0 */ if (!qos->phy && qos->sdu) return false; if (qos->interval && (qos->interval < 0xff || qos->interval > 0xfffff)) return false; if (qos->latency && (qos->latency < 0x05 || qos->latency > 0xfa0)) return false; if (qos->phy > BT_ISO_PHY_ANY) return false; return true; } static bool check_ucast_qos(struct bt_iso_qos *qos) { if (qos->ucast.cig > 0xef && qos->ucast.cig != BT_ISO_QOS_CIG_UNSET) return false; if (qos->ucast.cis > 0xef && qos->ucast.cis != BT_ISO_QOS_CIS_UNSET) return false; if (qos->ucast.sca > 0x07) return false; if (qos->ucast.packing > 0x01) return false; if (qos->ucast.framing > 0x01) return false; if (!check_io_qos(&qos->ucast.in)) return false; if (!check_io_qos(&qos->ucast.out)) return false; return true; } static bool check_bcast_qos(struct bt_iso_qos *qos) { if (!qos->bcast.sync_factor) qos->bcast.sync_factor = 0x01; if (qos->bcast.packing > 0x01) return false; if (qos->bcast.framing > 0x01) return false; if (!check_io_qos(&qos->bcast.in)) return false; if (!check_io_qos(&qos->bcast.out)) return false; if (qos->bcast.encryption > 0x01) return false; if (qos->bcast.options > 0x07) return false; if (qos->bcast.skip > 0x01f3) return false; if (!qos->bcast.sync_timeout) qos->bcast.sync_timeout = BT_ISO_SYNC_TIMEOUT; if (qos->bcast.sync_timeout < 0x000a || qos->bcast.sync_timeout > 0x4000) return false; if (qos->bcast.sync_cte_type > 0x1f) return false; if (qos->bcast.mse > 0x1f) return false; if (!qos->bcast.timeout) qos->bcast.sync_timeout = BT_ISO_SYNC_TIMEOUT; if (qos->bcast.timeout < 0x000a || qos->bcast.timeout > 0x4000) return false; return true; } static int iso_sock_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval, unsigned int optlen) { struct sock *sk = sock->sk; int err = 0; struct bt_iso_qos qos = default_qos; u32 opt; BT_DBG("sk %p", sk); lock_sock(sk); switch (optname) { case BT_DEFER_SETUP: if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) { err = -EINVAL; break; } err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, 
optlen); if (err) break; if (opt) set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags); else clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags); break; case BT_PKT_STATUS: err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen); if (err) break; if (opt) set_bit(BT_SK_PKT_STATUS, &bt_sk(sk)->flags); else clear_bit(BT_SK_PKT_STATUS, &bt_sk(sk)->flags); break; case BT_ISO_QOS: if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECT2 && (!test_bit(BT_SK_PA_SYNC, &iso_pi(sk)->flags) || sk->sk_state != BT_CONNECTED)) { err = -EINVAL; break; } err = copy_safe_from_sockptr(&qos, sizeof(qos), optval, optlen); if (err) break; iso_pi(sk)->qos = qos; iso_pi(sk)->qos_user_set = true; break; case BT_ISO_BASE: if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECT2) { err = -EINVAL; break; } if (optlen > sizeof(iso_pi(sk)->base)) { err = -EINVAL; break; } err = copy_safe_from_sockptr(iso_pi(sk)->base, optlen, optval, optlen); if (err) break; iso_pi(sk)->base_len = optlen; break; default: err = -ENOPROTOOPT; break; } release_sock(sk); return err; } static int iso_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { struct sock *sk = sock->sk; int len, err = 0; struct bt_iso_qos *qos; u8 base_len; u8 *base; BT_DBG("sk %p", sk); if (get_user(len, optlen)) return -EFAULT; lock_sock(sk); switch (optname) { case BT_DEFER_SETUP: if (sk->sk_state == BT_CONNECTED) { err = -EINVAL; break; } if (put_user(test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags), (u32 __user *)optval)) err = -EFAULT; break; case BT_PKT_STATUS: if (put_user(test_bit(BT_SK_PKT_STATUS, &bt_sk(sk)->flags), (int __user *)optval)) err = -EFAULT; break; case BT_ISO_QOS: qos = iso_sock_get_qos(sk); len = min_t(unsigned int, len, sizeof(*qos)); if (copy_to_user(optval, qos, len)) err = -EFAULT; break; case BT_ISO_BASE: if (sk->sk_state == BT_CONNECTED && !bacmp(&iso_pi(sk)->dst, BDADDR_ANY)) { base_len = iso_pi(sk)->conn->hcon->le_per_adv_data_len; base = iso_pi(sk)->conn->hcon->le_per_adv_data; } else { base_len = iso_pi(sk)->base_len; base = iso_pi(sk)->base; } len = min_t(unsigned int, len, base_len); if (copy_to_user(optval, base, len)) err = -EFAULT; if (put_user(len, optlen)) err = -EFAULT; break; default: err = -ENOPROTOOPT; break; } release_sock(sk); return err; } static int iso_sock_shutdown(struct socket *sock, int how) { struct sock *sk = sock->sk; int err = 0; BT_DBG("sock %p, sk %p, how %d", sock, sk, how); if (!sk) return 0; sock_hold(sk); lock_sock(sk); switch (how) { case SHUT_RD: if (sk->sk_shutdown & RCV_SHUTDOWN) goto unlock; sk->sk_shutdown |= RCV_SHUTDOWN; break; case SHUT_WR: if (sk->sk_shutdown & SEND_SHUTDOWN) goto unlock; sk->sk_shutdown |= SEND_SHUTDOWN; break; case SHUT_RDWR: if (sk->sk_shutdown & SHUTDOWN_MASK) goto unlock; sk->sk_shutdown |= SHUTDOWN_MASK; break; } iso_sock_clear_timer(sk); __iso_sock_close(sk); if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime && !(current->flags & PF_EXITING)) err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime); unlock: release_sock(sk); sock_put(sk); return err; } static int iso_sock_release(struct socket *sock) { struct sock *sk = sock->sk; int err = 0; BT_DBG("sock %p, sk %p", sock, sk); if (!sk) return 0; iso_sock_close(sk); if (sock_flag(sk, SOCK_LINGER) && READ_ONCE(sk->sk_lingertime) && !(current->flags & PF_EXITING)) { lock_sock(sk); err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime); release_sock(sk); } sock_orphan(sk); iso_sock_kill(sk); return err; } 
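/*
 * Illustrative user-space sketch (not part of this file): configuring
 * unicast QoS through the BT_ISO_QOS option handled by
 * iso_sock_setsockopt() above, then connecting. The option name and the
 * struct field names used below appear in this file; the header paths,
 * the SOL_BLUETOOTH level and the example QoS values are assumptions
 * (user space normally gets these definitions from BlueZ-style headers).
 */
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>	/* assumed: bdaddr_t, bacpy(), AF_BLUETOOTH, BTPROTO_ISO */
#include <bluetooth/iso.h>		/* assumed: struct sockaddr_iso, struct bt_iso_qos, BT_ISO_QOS */

static int iso_cis_client_example(const bdaddr_t *src, const bdaddr_t *dst)
{
	struct sockaddr_iso addr = { .iso_family = AF_BLUETOOTH };
	struct bt_iso_qos qos;
	int fd;

	fd = socket(AF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_ISO);
	if (fd < 0)
		return -1;

	/* Bind to the local controller address. */
	bacpy(&addr.iso_bdaddr, src);
	addr.iso_bdaddr_type = BDADDR_LE_PUBLIC;
	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		goto err;

	/* QoS must be set before the connection exists (BT_OPEN/BT_BOUND,
	 * see the BT_ISO_QOS case above). The values below are examples. */
	memset(&qos, 0, sizeof(qos));
	qos.ucast.cig = 0x01;
	qos.ucast.cis = 0x01;
	qos.ucast.out.interval = 10000;	/* SDU interval in us */
	qos.ucast.out.latency = 10;	/* max transport latency in ms */
	qos.ucast.out.sdu = 40;		/* max SDU size */
	qos.ucast.out.phy = 0x02;	/* LE 2M */
	if (setsockopt(fd, SOL_BLUETOOTH, BT_ISO_QOS, &qos, sizeof(qos)) < 0)
		goto err;

	/* Create the CIS to the remote device. */
	memset(&addr, 0, sizeof(addr));
	addr.iso_family = AF_BLUETOOTH;
	bacpy(&addr.iso_bdaddr, dst);
	addr.iso_bdaddr_type = BDADDR_LE_PUBLIC;
	if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		goto err;

	return fd;

err:
	close(fd);
	return -1;
}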
static void iso_sock_ready(struct sock *sk) { BT_DBG("sk %p", sk); if (!sk) return; lock_sock(sk); iso_sock_clear_timer(sk); sk->sk_state = BT_CONNECTED; sk->sk_state_change(sk); release_sock(sk); } static bool iso_match_big(struct sock *sk, void *data) { struct hci_evt_le_big_sync_estabilished *ev = data; return ev->handle == iso_pi(sk)->qos.bcast.big; } static bool iso_match_big_hcon(struct sock *sk, void *data) { struct hci_conn *hcon = data; return hcon->iso_qos.bcast.big == iso_pi(sk)->qos.bcast.big; } static bool iso_match_pa_sync_flag(struct sock *sk, void *data) { return test_bit(BT_SK_PA_SYNC, &iso_pi(sk)->flags); } static void iso_conn_ready(struct iso_conn *conn) { struct sock *parent = NULL; struct sock *sk = conn->sk; struct hci_ev_le_big_sync_estabilished *ev = NULL; struct hci_ev_le_pa_sync_established *ev2 = NULL; struct hci_ev_le_per_adv_report *ev3 = NULL; struct hci_conn *hcon; BT_DBG("conn %p", conn); if (sk) { iso_sock_ready(conn->sk); } else { hcon = conn->hcon; if (!hcon) return; if (test_bit(HCI_CONN_BIG_SYNC, &hcon->flags)) { /* A BIS slave hcon is notified to the ISO layer * after the Command Complete for the LE Setup * ISO Data Path command is received. Get the * parent socket that matches the hcon BIG handle. */ parent = iso_get_sock(&hcon->src, &hcon->dst, BT_LISTEN, iso_match_big_hcon, hcon); } else if (test_bit(HCI_CONN_BIG_SYNC_FAILED, &hcon->flags)) { ev = hci_recv_event_data(hcon->hdev, HCI_EVT_LE_BIG_SYNC_ESTABLISHED); /* Get reference to PA sync parent socket, if it exists */ parent = iso_get_sock(&hcon->src, &hcon->dst, BT_LISTEN, iso_match_pa_sync_flag, NULL); if (!parent && ev) parent = iso_get_sock(&hcon->src, &hcon->dst, BT_LISTEN, iso_match_big, ev); } else if (test_bit(HCI_CONN_PA_SYNC_FAILED, &hcon->flags)) { ev2 = hci_recv_event_data(hcon->hdev, HCI_EV_LE_PA_SYNC_ESTABLISHED); if (ev2) parent = iso_get_sock(&hcon->src, &hcon->dst, BT_LISTEN, iso_match_sid, ev2); } else if (test_bit(HCI_CONN_PA_SYNC, &hcon->flags)) { ev3 = hci_recv_event_data(hcon->hdev, HCI_EV_LE_PER_ADV_REPORT); if (ev3) parent = iso_get_sock(&hcon->src, &hcon->dst, BT_LISTEN, iso_match_sync_handle_pa_report, ev3); } if (!parent) parent = iso_get_sock(&hcon->src, BDADDR_ANY, BT_LISTEN, NULL, NULL); if (!parent) return; lock_sock(parent); sk = iso_sock_alloc(sock_net(parent), NULL, BTPROTO_ISO, GFP_ATOMIC, 0); if (!sk) { release_sock(parent); return; } iso_sock_init(sk, parent); bacpy(&iso_pi(sk)->src, &hcon->src); /* Convert from HCI to three-value type */ if (hcon->src_type == ADDR_LE_DEV_PUBLIC) iso_pi(sk)->src_type = BDADDR_LE_PUBLIC; else iso_pi(sk)->src_type = BDADDR_LE_RANDOM; /* If hcon has no destination address (BDADDR_ANY) it means it * was created by HCI_EV_LE_BIG_SYNC_ESTABILISHED or * HCI_EV_LE_PA_SYNC_ESTABLISHED so we need to initialize using * the parent socket destination address. 
*/ if (!bacmp(&hcon->dst, BDADDR_ANY)) { bacpy(&hcon->dst, &iso_pi(parent)->dst); hcon->dst_type = iso_pi(parent)->dst_type; } if (test_bit(HCI_CONN_PA_SYNC, &hcon->flags)) { iso_pi(sk)->qos = iso_pi(parent)->qos; hcon->iso_qos = iso_pi(sk)->qos; iso_pi(sk)->bc_sid = iso_pi(parent)->bc_sid; iso_pi(sk)->bc_num_bis = iso_pi(parent)->bc_num_bis; memcpy(iso_pi(sk)->bc_bis, iso_pi(parent)->bc_bis, ISO_MAX_NUM_BIS); set_bit(BT_SK_PA_SYNC, &iso_pi(sk)->flags); } bacpy(&iso_pi(sk)->dst, &hcon->dst); iso_pi(sk)->dst_type = hcon->dst_type; iso_pi(sk)->sync_handle = iso_pi(parent)->sync_handle; memcpy(iso_pi(sk)->base, iso_pi(parent)->base, iso_pi(parent)->base_len); iso_pi(sk)->base_len = iso_pi(parent)->base_len; hci_conn_hold(hcon); iso_chan_add(conn, sk, parent); if ((ev && ((struct hci_evt_le_big_sync_estabilished *)ev)->status) || (ev2 && ev2->status)) { /* Trigger error signal on child socket */ sk->sk_err = ECONNREFUSED; sk->sk_error_report(sk); } if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags)) sk->sk_state = BT_CONNECT2; else sk->sk_state = BT_CONNECTED; /* Wake up parent */ parent->sk_data_ready(parent); release_sock(parent); sock_put(parent); } } static bool iso_match_sid(struct sock *sk, void *data) { struct hci_ev_le_pa_sync_established *ev = data; if (iso_pi(sk)->bc_sid == HCI_SID_INVALID) return true; return ev->sid == iso_pi(sk)->bc_sid; } static bool iso_match_sync_handle(struct sock *sk, void *data) { struct hci_evt_le_big_info_adv_report *ev = data; return le16_to_cpu(ev->sync_handle) == iso_pi(sk)->sync_handle; } static bool iso_match_sync_handle_pa_report(struct sock *sk, void *data) { struct hci_ev_le_per_adv_report *ev = data; return le16_to_cpu(ev->sync_handle) == iso_pi(sk)->sync_handle; } /* ----- ISO interface with lower layer (HCI) ----- */ int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags) { struct hci_ev_le_pa_sync_established *ev1; struct hci_evt_le_big_info_adv_report *ev2; struct hci_ev_le_per_adv_report *ev3; struct sock *sk; bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr); /* Broadcast receiver requires handling of some events before it can * proceed to establishing a BIG sync: * * 1. HCI_EV_LE_PA_SYNC_ESTABLISHED: The socket may specify a specific * SID to listen to and once sync is estabilished its handle needs to * be stored in iso_pi(sk)->sync_handle so it can be matched once * receiving the BIG Info. * 2. HCI_EVT_LE_BIG_INFO_ADV_REPORT: When connect_ind is triggered by a * a BIG Info it attempts to check if there any listening socket with * the same sync_handle and if it does then attempt to create a sync. * 3. HCI_EV_LE_PER_ADV_REPORT: When a PA report is received, it is stored * in iso_pi(sk)->base so it can be passed up to user, in the case of a * broadcast sink. 
*/ ev1 = hci_recv_event_data(hdev, HCI_EV_LE_PA_SYNC_ESTABLISHED); if (ev1) { sk = iso_get_sock(&hdev->bdaddr, bdaddr, BT_LISTEN, iso_match_sid, ev1); if (sk && !ev1->status) { iso_pi(sk)->sync_handle = le16_to_cpu(ev1->handle); iso_pi(sk)->bc_sid = ev1->sid; } goto done; } ev2 = hci_recv_event_data(hdev, HCI_EVT_LE_BIG_INFO_ADV_REPORT); if (ev2) { /* Check if BIGInfo report has already been handled */ sk = iso_get_sock(&hdev->bdaddr, bdaddr, BT_CONNECTED, iso_match_sync_handle, ev2); if (sk) { sock_put(sk); sk = NULL; goto done; } /* Try to get PA sync socket, if it exists */ sk = iso_get_sock(&hdev->bdaddr, bdaddr, BT_CONNECT2, iso_match_sync_handle, ev2); if (!sk) sk = iso_get_sock(&hdev->bdaddr, bdaddr, BT_LISTEN, iso_match_sync_handle, ev2); if (sk) { int err; struct hci_conn *hcon = iso_pi(sk)->conn->hcon; iso_pi(sk)->qos.bcast.encryption = ev2->encryption; if (ev2->num_bis < iso_pi(sk)->bc_num_bis) iso_pi(sk)->bc_num_bis = ev2->num_bis; if (!test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags) && !test_and_set_bit(BT_SK_BIG_SYNC, &iso_pi(sk)->flags)) { err = hci_conn_big_create_sync(hdev, hcon, &iso_pi(sk)->qos, iso_pi(sk)->sync_handle, iso_pi(sk)->bc_num_bis, iso_pi(sk)->bc_bis); if (err) { bt_dev_err(hdev, "hci_le_big_create_sync: %d", err); sock_put(sk); sk = NULL; } } } goto done; } ev3 = hci_recv_event_data(hdev, HCI_EV_LE_PER_ADV_REPORT); if (ev3) { size_t base_len = 0; u8 *base; struct hci_conn *hcon; sk = iso_get_sock(&hdev->bdaddr, bdaddr, BT_LISTEN, iso_match_sync_handle_pa_report, ev3); if (!sk) goto done; hcon = iso_pi(sk)->conn->hcon; if (!hcon) goto done; if (ev3->data_status == LE_PA_DATA_TRUNCATED) { /* The controller was unable to retrieve PA data. */ memset(hcon->le_per_adv_data, 0, HCI_MAX_PER_AD_TOT_LEN); hcon->le_per_adv_data_len = 0; hcon->le_per_adv_data_offset = 0; goto done; } if (hcon->le_per_adv_data_offset + ev3->length > HCI_MAX_PER_AD_TOT_LEN) goto done; memcpy(hcon->le_per_adv_data + hcon->le_per_adv_data_offset, ev3->data, ev3->length); hcon->le_per_adv_data_offset += ev3->length; if (ev3->data_status == LE_PA_DATA_COMPLETE) { /* All PA data has been received. */ hcon->le_per_adv_data_len = hcon->le_per_adv_data_offset; hcon->le_per_adv_data_offset = 0; /* Extract BASE */ base = eir_get_service_data(hcon->le_per_adv_data, hcon->le_per_adv_data_len, EIR_BAA_SERVICE_UUID, &base_len); if (!base || base_len > BASE_MAX_LENGTH) goto done; memcpy(iso_pi(sk)->base, base, base_len); iso_pi(sk)->base_len = base_len; } else { /* This is a PA data fragment. Keep pa_data_len set to 0 * until all data has been reassembled. 
*/ hcon->le_per_adv_data_len = 0; } } else { sk = iso_get_sock(&hdev->bdaddr, BDADDR_ANY, BT_LISTEN, NULL, NULL); } done: if (!sk) return 0; if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) *flags |= HCI_PROTO_DEFER; sock_put(sk); return HCI_LM_ACCEPT; } static void iso_connect_cfm(struct hci_conn *hcon, __u8 status) { if (hcon->type != CIS_LINK && hcon->type != BIS_LINK) { if (hcon->type != LE_LINK) return; /* Check if LE link has failed */ if (status) { struct hci_link *link, *t; list_for_each_entry_safe(link, t, &hcon->link_list, list) iso_conn_del(link->conn, bt_to_errno(status)); return; } /* Create CIS if pending */ hci_le_create_cis_pending(hcon->hdev); return; } BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status); /* Similar to the success case, if HCI_CONN_BIG_SYNC_FAILED or * HCI_CONN_PA_SYNC_FAILED is set, queue the failed connection * into the accept queue of the listening socket and wake up * userspace, to inform the user about the event. */ if (!status || test_bit(HCI_CONN_BIG_SYNC_FAILED, &hcon->flags) || test_bit(HCI_CONN_PA_SYNC_FAILED, &hcon->flags)) { struct iso_conn *conn; conn = iso_conn_add(hcon); if (conn) iso_conn_ready(conn); } else { iso_conn_del(hcon, bt_to_errno(status)); } } static void iso_disconn_cfm(struct hci_conn *hcon, __u8 reason) { if (hcon->type != CIS_LINK && hcon->type != BIS_LINK) return; BT_DBG("hcon %p reason %d", hcon, reason); iso_conn_del(hcon, bt_to_errno(reason)); } void iso_recv(struct hci_conn *hcon, struct sk_buff *skb, u16 flags) { struct iso_conn *conn = hcon->iso_data; __u16 pb, ts, len; if (!conn) goto drop; pb = hci_iso_flags_pb(flags); ts = hci_iso_flags_ts(flags); BT_DBG("conn %p len %d pb 0x%x ts 0x%x", conn, skb->len, pb, ts); switch (pb) { case ISO_START: case ISO_SINGLE: if (conn->rx_len) { BT_ERR("Unexpected start frame (len %d)", skb->len); kfree_skb(conn->rx_skb); conn->rx_skb = NULL; conn->rx_len = 0; } if (ts) { struct hci_iso_ts_data_hdr *hdr; /* TODO: add timestamp to the packet? 
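			 * For now the timestamp is simply stripped along with
			 * the rest of the header by skb_pull_data() below;
			 * only the SDU length from it is used.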
*/ hdr = skb_pull_data(skb, HCI_ISO_TS_DATA_HDR_SIZE); if (!hdr) { BT_ERR("Frame is too short (len %d)", skb->len); goto drop; } len = __le16_to_cpu(hdr->slen); } else { struct hci_iso_data_hdr *hdr; hdr = skb_pull_data(skb, HCI_ISO_DATA_HDR_SIZE); if (!hdr) { BT_ERR("Frame is too short (len %d)", skb->len); goto drop; } len = __le16_to_cpu(hdr->slen); } flags = hci_iso_data_flags(len); len = hci_iso_data_len(len); BT_DBG("Start: total len %d, frag len %d flags 0x%4.4x", len, skb->len, flags); if (len == skb->len) { /* Complete frame received */ hci_skb_pkt_status(skb) = flags & 0x03; iso_recv_frame(conn, skb); return; } if (pb == ISO_SINGLE) { BT_ERR("Frame malformed (len %d, expected len %d)", skb->len, len); goto drop; } if (skb->len > len) { BT_ERR("Frame is too long (len %d, expected len %d)", skb->len, len); goto drop; } /* Allocate skb for the complete frame (with header) */ conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL); if (!conn->rx_skb) goto drop; hci_skb_pkt_status(conn->rx_skb) = flags & 0x03; skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len), skb->len); conn->rx_len = len - skb->len; break; case ISO_CONT: BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len); if (!conn->rx_len) { BT_ERR("Unexpected continuation frame (len %d)", skb->len); goto drop; } if (skb->len > conn->rx_len) { BT_ERR("Fragment is too long (len %d, expected %d)", skb->len, conn->rx_len); kfree_skb(conn->rx_skb); conn->rx_skb = NULL; conn->rx_len = 0; goto drop; } skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len), skb->len); conn->rx_len -= skb->len; return; case ISO_END: skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len), skb->len); conn->rx_len -= skb->len; if (!conn->rx_len) { struct sk_buff *rx_skb = conn->rx_skb; /* Complete frame received. iso_recv_frame * takes ownership of the skb so set the global * rx_skb pointer to NULL first. 
*/ conn->rx_skb = NULL; iso_recv_frame(conn, rx_skb); } break; } drop: kfree_skb(skb); } static struct hci_cb iso_cb = { .name = "ISO", .connect_cfm = iso_connect_cfm, .disconn_cfm = iso_disconn_cfm, }; static int iso_debugfs_show(struct seq_file *f, void *p) { struct sock *sk; read_lock(&iso_sk_list.lock); sk_for_each(sk, &iso_sk_list.head) { seq_printf(f, "%pMR %pMR %d\n", &iso_pi(sk)->src, &iso_pi(sk)->dst, sk->sk_state); } read_unlock(&iso_sk_list.lock); return 0; } DEFINE_SHOW_ATTRIBUTE(iso_debugfs); static struct dentry *iso_debugfs; static const struct proto_ops iso_sock_ops = { .family = PF_BLUETOOTH, .owner = THIS_MODULE, .release = iso_sock_release, .bind = iso_sock_bind, .connect = iso_sock_connect, .listen = iso_sock_listen, .accept = iso_sock_accept, .getname = iso_sock_getname, .sendmsg = iso_sock_sendmsg, .recvmsg = iso_sock_recvmsg, .poll = bt_sock_poll, .ioctl = bt_sock_ioctl, .mmap = sock_no_mmap, .socketpair = sock_no_socketpair, .shutdown = iso_sock_shutdown, .setsockopt = iso_sock_setsockopt, .getsockopt = iso_sock_getsockopt }; static const struct net_proto_family iso_sock_family_ops = { .family = PF_BLUETOOTH, .owner = THIS_MODULE, .create = iso_sock_create, }; static bool iso_inited; bool iso_enabled(void) { return iso_inited; } int iso_init(void) { int err; BUILD_BUG_ON(sizeof(struct sockaddr_iso) > sizeof(struct sockaddr)); if (iso_inited) return -EALREADY; err = proto_register(&iso_proto, 0); if (err < 0) return err; err = bt_sock_register(BTPROTO_ISO, &iso_sock_family_ops); if (err < 0) { BT_ERR("ISO socket registration failed"); goto error; } err = bt_procfs_init(&init_net, "iso", &iso_sk_list, NULL); if (err < 0) { BT_ERR("Failed to create ISO proc file"); bt_sock_unregister(BTPROTO_ISO); goto error; } BT_INFO("ISO socket layer initialized"); hci_register_cb(&iso_cb); if (!IS_ERR_OR_NULL(bt_debugfs)) iso_debugfs = debugfs_create_file("iso", 0444, bt_debugfs, NULL, &iso_debugfs_fops); iso_inited = true; return 0; error: proto_unregister(&iso_proto); return err; } int iso_exit(void) { if (!iso_inited) return -EALREADY; bt_procfs_cleanup(&init_net, "iso"); debugfs_remove(iso_debugfs); iso_debugfs = NULL; hci_unregister_cb(&iso_cb); bt_sock_unregister(BTPROTO_ISO); proto_unregister(&iso_proto); iso_inited = false; return 0; } |
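/*
 * Worked example (illustrative only): how the 16-bit "slen" field that
 * iso_recv() above reads from the ISO data header splits into an SDU
 * length and the Packet_Status_Flag bits. The shifts and masks here
 * restate what hci_iso_data_len()/hci_iso_data_flags() are expected to
 * do (lower 14 bits length, top 2 bits flags); the sample value is made
 * up and the helper is never called.
 */
static inline void iso_slen_decode_example(void)
{
	u16 slen = 0x4028;		/* example value, already in host order */
	u16 sdu_len = slen & 0x3fff;	/* lower 14 bits: 0x0028 -> 40 byte SDU */
	u8 status = slen >> 14;		/* upper 2 bits: 0x1 -> status flags */

	/* iso_recv() stores these status bits via hci_skb_pkt_status() and
	 * only delivers the skb once sdu_len bytes have been reassembled. */
	(void)sdu_len;
	(void)status;
}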
1399 1399 8 8 8 4 3 2 1 1 7 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 | // SPDX-License-Identifier: GPL-2.0-only /* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ #include <linux/bpf.h> #include <linux/btf.h> #include <linux/err.h> #include "linux/filter.h" #include <linux/btf_ids.h> #include <linux/vmalloc.h> #include <linux/pagemap.h> #include "range_tree.h" /* * bpf_arena is a sparsely populated shared memory region between bpf program and * user space process. * * For example on x86-64 the values could be: * user_vm_start 7f7d26200000 // picked by mmap() * kern_vm_start ffffc90001e69000 // picked by get_vm_area() * For user space all pointers within the arena are normal 8-byte addresses. * In this example 7f7d26200000 is the address of the first page (pgoff=0). * The bpf program will access it as: kern_vm_start + lower_32bit_of_user_ptr * (u32)7f7d26200000 -> 26200000 * hence * ffffc90001e69000 + 26200000 == ffffc90028069000 is "pgoff=0" within 4Gb * kernel memory region. * * BPF JITs generate the following code to access arena: * mov eax, eax // eax has lower 32-bit of user pointer * mov word ptr [rax + r12 + off], bx * where r12 == kern_vm_start and off is s16. * Hence allocate 4Gb + GUARD_SZ/2 on each side. * * Initially kernel vm_area and user vma are not populated. 
* User space can fault-in any address which will insert the page * into kernel and user vma. * bpf program can allocate a page via bpf_arena_alloc_pages() kfunc * which will insert it into kernel vm_area. * The later fault-in from user space will populate that page into user vma. */ /* number of bytes addressable by LDX/STX insn with 16-bit 'off' field */ #define GUARD_SZ round_up(1ull << sizeof_field(struct bpf_insn, off) * 8, PAGE_SIZE << 1) #define KERN_VM_SZ (SZ_4G + GUARD_SZ) struct bpf_arena { struct bpf_map map; u64 user_vm_start; u64 user_vm_end; struct vm_struct *kern_vm; struct range_tree rt; struct list_head vma_list; struct mutex lock; }; u64 bpf_arena_get_kern_vm_start(struct bpf_arena *arena) { return arena ? (u64) (long) arena->kern_vm->addr + GUARD_SZ / 2 : 0; } u64 bpf_arena_get_user_vm_start(struct bpf_arena *arena) { return arena ? arena->user_vm_start : 0; } static long arena_map_peek_elem(struct bpf_map *map, void *value) { return -EOPNOTSUPP; } static long arena_map_push_elem(struct bpf_map *map, void *value, u64 flags) { return -EOPNOTSUPP; } static long arena_map_pop_elem(struct bpf_map *map, void *value) { return -EOPNOTSUPP; } static long arena_map_delete_elem(struct bpf_map *map, void *value) { return -EOPNOTSUPP; } static int arena_map_get_next_key(struct bpf_map *map, void *key, void *next_key) { return -EOPNOTSUPP; } static long compute_pgoff(struct bpf_arena *arena, long uaddr) { return (u32)(uaddr - (u32)arena->user_vm_start) >> PAGE_SHIFT; } static struct bpf_map *arena_map_alloc(union bpf_attr *attr) { struct vm_struct *kern_vm; int numa_node = bpf_map_attr_numa_node(attr); struct bpf_arena *arena; u64 vm_range; int err = -ENOMEM; if (!bpf_jit_supports_arena()) return ERR_PTR(-EOPNOTSUPP); if (attr->key_size || attr->value_size || attr->max_entries == 0 || /* BPF_F_MMAPABLE must be set */ !(attr->map_flags & BPF_F_MMAPABLE) || /* No unsupported flags present */ (attr->map_flags & ~(BPF_F_SEGV_ON_FAULT | BPF_F_MMAPABLE | BPF_F_NO_USER_CONV))) return ERR_PTR(-EINVAL); if (attr->map_extra & ~PAGE_MASK) /* If non-zero the map_extra is an expected user VMA start address */ return ERR_PTR(-EINVAL); vm_range = (u64)attr->max_entries * PAGE_SIZE; if (vm_range > SZ_4G) return ERR_PTR(-E2BIG); if ((attr->map_extra >> 32) != ((attr->map_extra + vm_range - 1) >> 32)) /* user vma must not cross 32-bit boundary */ return ERR_PTR(-ERANGE); kern_vm = get_vm_area(KERN_VM_SZ, VM_SPARSE | VM_USERMAP); if (!kern_vm) return ERR_PTR(-ENOMEM); arena = bpf_map_area_alloc(sizeof(*arena), numa_node); if (!arena) goto err; arena->kern_vm = kern_vm; arena->user_vm_start = attr->map_extra; if (arena->user_vm_start) arena->user_vm_end = arena->user_vm_start + vm_range; INIT_LIST_HEAD(&arena->vma_list); bpf_map_init_from_attr(&arena->map, attr); range_tree_init(&arena->rt); err = range_tree_set(&arena->rt, 0, attr->max_entries); if (err) { bpf_map_area_free(arena); goto err; } mutex_init(&arena->lock); return &arena->map; err: free_vm_area(kern_vm); return ERR_PTR(err); } static int existing_page_cb(pte_t *ptep, unsigned long addr, void *data) { struct page *page; pte_t pte; pte = ptep_get(ptep); if (!pte_present(pte)) /* sanity check */ return 0; page = pte_page(pte); /* * We do not update pte here: * 1. Nobody should be accessing bpf_arena's range outside of a kernel bug * 2. TLB flushing is batched or deferred. Even if we clear pte, * the TLB entries can stick around and continue to permit access to * the freed page. So it all relies on 1. 
*/ __free_page(page); return 0; } static void arena_map_free(struct bpf_map *map) { struct bpf_arena *arena = container_of(map, struct bpf_arena, map); /* * Check that user vma-s are not around when bpf map is freed. * mmap() holds vm_file which holds bpf_map refcnt. * munmap() must have happened on vma followed by arena_vm_close() * which would clear arena->vma_list. */ if (WARN_ON_ONCE(!list_empty(&arena->vma_list))) return; /* * free_vm_area() calls remove_vm_area() that calls free_unmap_vmap_area(). * It unmaps everything from vmalloc area and clears pgtables. * Call apply_to_existing_page_range() first to find populated ptes and * free those pages. */ apply_to_existing_page_range(&init_mm, bpf_arena_get_kern_vm_start(arena), KERN_VM_SZ - GUARD_SZ, existing_page_cb, NULL); free_vm_area(arena->kern_vm); range_tree_destroy(&arena->rt); bpf_map_area_free(arena); } static void *arena_map_lookup_elem(struct bpf_map *map, void *key) { return ERR_PTR(-EINVAL); } static long arena_map_update_elem(struct bpf_map *map, void *key, void *value, u64 flags) { return -EOPNOTSUPP; } static int arena_map_check_btf(const struct bpf_map *map, const struct btf *btf, const struct btf_type *key_type, const struct btf_type *value_type) { return 0; } static u64 arena_map_mem_usage(const struct bpf_map *map) { return 0; } struct vma_list { struct vm_area_struct *vma; struct list_head head; refcount_t mmap_count; }; static int remember_vma(struct bpf_arena *arena, struct vm_area_struct *vma) { struct vma_list *vml; vml = kmalloc(sizeof(*vml), GFP_KERNEL); if (!vml) return -ENOMEM; refcount_set(&vml->mmap_count, 1); vma->vm_private_data = vml; vml->vma = vma; list_add(&vml->head, &arena->vma_list); return 0; } static void arena_vm_open(struct vm_area_struct *vma) { struct vma_list *vml = vma->vm_private_data; refcount_inc(&vml->mmap_count); } static void arena_vm_close(struct vm_area_struct *vma) { struct bpf_map *map = vma->vm_file->private_data; struct bpf_arena *arena = container_of(map, struct bpf_arena, map); struct vma_list *vml = vma->vm_private_data; if (!refcount_dec_and_test(&vml->mmap_count)) return; guard(mutex)(&arena->lock); /* update link list under lock */ list_del(&vml->head); vma->vm_private_data = NULL; kfree(vml); } static vm_fault_t arena_vm_fault(struct vm_fault *vmf) { struct bpf_map *map = vmf->vma->vm_file->private_data; struct bpf_arena *arena = container_of(map, struct bpf_arena, map); struct page *page; long kbase, kaddr; int ret; kbase = bpf_arena_get_kern_vm_start(arena); kaddr = kbase + (u32)(vmf->address); guard(mutex)(&arena->lock); page = vmalloc_to_page((void *)kaddr); if (page) /* already have a page vmap-ed */ goto out; if (arena->map.map_flags & BPF_F_SEGV_ON_FAULT) /* User space requested to segfault when page is not allocated by bpf prog */ return VM_FAULT_SIGSEGV; ret = range_tree_clear(&arena->rt, vmf->pgoff, 1); if (ret) return VM_FAULT_SIGSEGV; /* Account into memcg of the process that created bpf_arena */ ret = bpf_map_alloc_pages(map, NUMA_NO_NODE, 1, &page); if (ret) { range_tree_set(&arena->rt, vmf->pgoff, 1); return VM_FAULT_SIGSEGV; } ret = vm_area_map_pages(arena->kern_vm, kaddr, kaddr + PAGE_SIZE, &page); if (ret) { range_tree_set(&arena->rt, vmf->pgoff, 1); __free_page(page); return VM_FAULT_SIGSEGV; } out: page_ref_add(page, 1); vmf->page = page; return 0; } static const struct vm_operations_struct arena_vm_ops = { .open = arena_vm_open, .close = arena_vm_close, .fault = arena_vm_fault, }; static unsigned long arena_get_unmapped_area(struct file *filp, 
unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { struct bpf_map *map = filp->private_data; struct bpf_arena *arena = container_of(map, struct bpf_arena, map); long ret; if (pgoff) return -EINVAL; if (len > SZ_4G) return -E2BIG; /* if user_vm_start was specified at arena creation time */ if (arena->user_vm_start) { if (len > arena->user_vm_end - arena->user_vm_start) return -E2BIG; if (len != arena->user_vm_end - arena->user_vm_start) return -EINVAL; if (addr != arena->user_vm_start) return -EINVAL; } ret = mm_get_unmapped_area(current->mm, filp, addr, len * 2, 0, flags); if (IS_ERR_VALUE(ret)) return ret; if ((ret >> 32) == ((ret + len - 1) >> 32)) return ret; if (WARN_ON_ONCE(arena->user_vm_start)) /* checks at map creation time should prevent this */ return -EFAULT; return round_up(ret, SZ_4G); } static int arena_map_mmap(struct bpf_map *map, struct vm_area_struct *vma) { struct bpf_arena *arena = container_of(map, struct bpf_arena, map); guard(mutex)(&arena->lock); if (arena->user_vm_start && arena->user_vm_start != vma->vm_start) /* * If map_extra was not specified at arena creation time then * 1st user process can do mmap(NULL, ...) to pick user_vm_start * 2nd user process must pass the same addr to mmap(addr, MAP_FIXED..); * or * specify addr in map_extra and * use the same addr later with mmap(addr, MAP_FIXED..); */ return -EBUSY; if (arena->user_vm_end && arena->user_vm_end != vma->vm_end) /* all user processes must have the same size of mmap-ed region */ return -EBUSY; /* Earlier checks should prevent this */ if (WARN_ON_ONCE(vma->vm_end - vma->vm_start > SZ_4G || vma->vm_pgoff)) return -EFAULT; if (remember_vma(arena, vma)) return -ENOMEM; arena->user_vm_start = vma->vm_start; arena->user_vm_end = vma->vm_end; /* * bpf_map_mmap() checks that it's being mmaped as VM_SHARED and * clears VM_MAYEXEC. Set VM_DONTEXPAND as well to avoid * potential change of user_vm_start. */ vm_flags_set(vma, VM_DONTEXPAND); vma->vm_ops = &arena_vm_ops; return 0; } static int arena_map_direct_value_addr(const struct bpf_map *map, u64 *imm, u32 off) { struct bpf_arena *arena = container_of(map, struct bpf_arena, map); if ((u64)off > arena->user_vm_end - arena->user_vm_start) return -ERANGE; *imm = (unsigned long)arena->user_vm_start; return 0; } BTF_ID_LIST_SINGLE(bpf_arena_map_btf_ids, struct, bpf_arena) const struct bpf_map_ops arena_map_ops = { .map_meta_equal = bpf_map_meta_equal, .map_alloc = arena_map_alloc, .map_free = arena_map_free, .map_direct_value_addr = arena_map_direct_value_addr, .map_mmap = arena_map_mmap, .map_get_unmapped_area = arena_get_unmapped_area, .map_get_next_key = arena_map_get_next_key, .map_push_elem = arena_map_push_elem, .map_peek_elem = arena_map_peek_elem, .map_pop_elem = arena_map_pop_elem, .map_lookup_elem = arena_map_lookup_elem, .map_update_elem = arena_map_update_elem, .map_delete_elem = arena_map_delete_elem, .map_check_btf = arena_map_check_btf, .map_mem_usage = arena_map_mem_usage, .map_btf_id = &bpf_arena_map_btf_ids[0], }; static u64 clear_lo32(u64 val) { return val & ~(u64)~0U; } /* * Allocate pages and vmap them into kernel vmalloc area. * Later the pages will be mmaped into user space vma. 
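 *
 * On success the full 64-bit user-space address of the first allocated
 * page is returned (clear_lo32(user_vm_start) plus the lower 32 bits of
 * the target user address); 0 is returned on any failure.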
*/ static long arena_alloc_pages(struct bpf_arena *arena, long uaddr, long page_cnt, int node_id) { /* user_vm_end/start are fixed before bpf prog runs */ long page_cnt_max = (arena->user_vm_end - arena->user_vm_start) >> PAGE_SHIFT; u64 kern_vm_start = bpf_arena_get_kern_vm_start(arena); struct page **pages; long pgoff = 0; u32 uaddr32; int ret, i; if (page_cnt > page_cnt_max) return 0; if (uaddr) { if (uaddr & ~PAGE_MASK) return 0; pgoff = compute_pgoff(arena, uaddr); if (pgoff > page_cnt_max - page_cnt) /* requested address will be outside of user VMA */ return 0; } /* zeroing is needed, since alloc_pages_bulk() only fills in non-zero entries */ pages = kvcalloc(page_cnt, sizeof(struct page *), GFP_KERNEL); if (!pages) return 0; guard(mutex)(&arena->lock); if (uaddr) { ret = is_range_tree_set(&arena->rt, pgoff, page_cnt); if (ret) goto out_free_pages; ret = range_tree_clear(&arena->rt, pgoff, page_cnt); } else { ret = pgoff = range_tree_find(&arena->rt, page_cnt); if (pgoff >= 0) ret = range_tree_clear(&arena->rt, pgoff, page_cnt); } if (ret) goto out_free_pages; ret = bpf_map_alloc_pages(&arena->map, node_id, page_cnt, pages); if (ret) goto out; uaddr32 = (u32)(arena->user_vm_start + pgoff * PAGE_SIZE); /* Earlier checks made sure that uaddr32 + page_cnt * PAGE_SIZE - 1 * will not overflow 32-bit. Lower 32-bit need to represent * contiguous user address range. * Map these pages at kern_vm_start base. * kern_vm_start + uaddr32 + page_cnt * PAGE_SIZE - 1 can overflow * lower 32-bit and it's ok. */ ret = vm_area_map_pages(arena->kern_vm, kern_vm_start + uaddr32, kern_vm_start + uaddr32 + page_cnt * PAGE_SIZE, pages); if (ret) { for (i = 0; i < page_cnt; i++) __free_page(pages[i]); goto out; } kvfree(pages); return clear_lo32(arena->user_vm_start) + uaddr32; out: range_tree_set(&arena->rt, pgoff, page_cnt); out_free_pages: kvfree(pages); return 0; } /* * If page is present in vmalloc area, unmap it from vmalloc area, * unmap it from all user space vma-s, * and free it. */ static void zap_pages(struct bpf_arena *arena, long uaddr, long page_cnt) { struct vma_list *vml; list_for_each_entry(vml, &arena->vma_list, head) zap_page_range_single(vml->vma, uaddr, PAGE_SIZE * page_cnt, NULL); } static void arena_free_pages(struct bpf_arena *arena, long uaddr, long page_cnt) { u64 full_uaddr, uaddr_end; long kaddr, pgoff, i; struct page *page; /* only aligned lower 32-bit are relevant */ uaddr = (u32)uaddr; uaddr &= PAGE_MASK; full_uaddr = clear_lo32(arena->user_vm_start) + uaddr; uaddr_end = min(arena->user_vm_end, full_uaddr + (page_cnt << PAGE_SHIFT)); if (full_uaddr >= uaddr_end) return; page_cnt = (uaddr_end - full_uaddr) >> PAGE_SHIFT; guard(mutex)(&arena->lock); pgoff = compute_pgoff(arena, uaddr); /* clear range */ range_tree_set(&arena->rt, pgoff, page_cnt); if (page_cnt > 1) /* bulk zap if multiple pages being freed */ zap_pages(arena, full_uaddr, page_cnt); kaddr = bpf_arena_get_kern_vm_start(arena) + uaddr; for (i = 0; i < page_cnt; i++, kaddr += PAGE_SIZE, full_uaddr += PAGE_SIZE) { page = vmalloc_to_page((void *)kaddr); if (!page) continue; if (page_cnt == 1 && page_mapped(page)) /* mapped by some user process */ /* Optimization for the common case of page_cnt==1: * If page wasn't mapped into some user vma there * is no need to call zap_pages which is slow. When * page_cnt is big it's faster to do the batched zap. 
*/ zap_pages(arena, full_uaddr, 1); vm_area_unmap_pages(arena->kern_vm, kaddr, kaddr + PAGE_SIZE); __free_page(page); } } __bpf_kfunc_start_defs(); __bpf_kfunc void *bpf_arena_alloc_pages(void *p__map, void *addr__ign, u32 page_cnt, int node_id, u64 flags) { struct bpf_map *map = p__map; struct bpf_arena *arena = container_of(map, struct bpf_arena, map); if (map->map_type != BPF_MAP_TYPE_ARENA || flags || !page_cnt) return NULL; return (void *)arena_alloc_pages(arena, (long)addr__ign, page_cnt, node_id); } __bpf_kfunc void bpf_arena_free_pages(void *p__map, void *ptr__ign, u32 page_cnt) { struct bpf_map *map = p__map; struct bpf_arena *arena = container_of(map, struct bpf_arena, map); if (map->map_type != BPF_MAP_TYPE_ARENA || !page_cnt || !ptr__ign) return; arena_free_pages(arena, (long)ptr__ign, page_cnt); } __bpf_kfunc_end_defs(); BTF_KFUNCS_START(arena_kfuncs) BTF_ID_FLAGS(func, bpf_arena_alloc_pages, KF_TRUSTED_ARGS | KF_SLEEPABLE | KF_ARENA_RET | KF_ARENA_ARG2) BTF_ID_FLAGS(func, bpf_arena_free_pages, KF_TRUSTED_ARGS | KF_SLEEPABLE | KF_ARENA_ARG2) BTF_KFUNCS_END(arena_kfuncs) static const struct btf_kfunc_id_set common_kfunc_set = { .owner = THIS_MODULE, .set = &arena_kfuncs, }; static int __init kfunc_init(void) { return register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC, &common_kfunc_set); } late_initcall(kfunc_init); |
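/*
 * Worked example (illustrative only, reusing the sample addresses from
 * the comment at the top of this file): how a full user-space arena
 * pointer maps to its kernel-side vmalloc address. The helper just
 * restates the arithmetic used by arena_vm_fault() and
 * arena_alloc_pages(); it is not called anywhere.
 */
static inline u64 bpf_arena_uaddr_to_kaddr_example(void)
{
	u64 user_vm_start = 0x7f7d26200000ULL;	   /* picked by mmap() */
	u64 kern_vm_start = 0xffffc90001e69000ULL; /* picked by get_vm_area() */
	u64 user_ptr = user_vm_start + 0x1000;	   /* second page of the arena */

	/* Only the lower 32 bits of the user pointer index the 4GB kernel
	 * region: (u32)0x7f7d26201000 == 0x26201000, so this returns
	 * 0xffffc90001e69000 + 0x26201000 == 0xffffc9002806a000.
	 * arena_alloc_pages() builds its return value the other way
	 * around: clear_lo32(user_vm_start) + uaddr32.
	 */
	return kern_vm_start + (u32)user_ptr;
}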
3 3 6 2 6 1 3 1 1 1 1 1 1 1 1 7 7 7 1 7 8 8 8 3 3 8 6 8 3 8 7 8 7 8 12 9 9 9 9 8 8 12 9 13 12 11 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 | // SPDX-License-Identifier: GPL-2.0 /* XDP sockets monitoring support * * Copyright(c) 2019 Intel Corporation. * * Author: Björn Töpel <bjorn.topel@intel.com> */ #include <linux/module.h> #include <net/xdp_sock.h> #include <linux/xdp_diag.h> #include <linux/sock_diag.h> #include "xsk_queue.h" #include "xsk.h" static int xsk_diag_put_info(const struct xdp_sock *xs, struct sk_buff *nlskb) { struct xdp_diag_info di = {}; di.ifindex = xs->dev ? xs->dev->ifindex : 0; di.queue_id = xs->queue_id; return nla_put(nlskb, XDP_DIAG_INFO, sizeof(di), &di); } static int xsk_diag_put_ring(const struct xsk_queue *queue, int nl_type, struct sk_buff *nlskb) { struct xdp_diag_ring dr = {}; dr.entries = queue->nentries; return nla_put(nlskb, nl_type, sizeof(dr), &dr); } static int xsk_diag_put_rings_cfg(const struct xdp_sock *xs, struct sk_buff *nlskb) { int err = 0; if (xs->rx) err = xsk_diag_put_ring(xs->rx, XDP_DIAG_RX_RING, nlskb); if (!err && xs->tx) err = xsk_diag_put_ring(xs->tx, XDP_DIAG_TX_RING, nlskb); return err; } static int xsk_diag_put_umem(const struct xdp_sock *xs, struct sk_buff *nlskb) { struct xsk_buff_pool *pool = xs->pool; struct xdp_umem *umem = xs->umem; struct xdp_diag_umem du = {}; int err; if (!umem) return 0; du.id = umem->id; du.size = umem->size; du.num_pages = umem->npgs; du.chunk_size = umem->chunk_size; du.headroom = umem->headroom; du.ifindex = (pool && pool->netdev) ? pool->netdev->ifindex : 0; du.queue_id = pool ? pool->queue_id : 0; du.flags = 0; if (umem->zc) du.flags |= XDP_DU_F_ZEROCOPY; du.refs = refcount_read(&umem->users); err = nla_put(nlskb, XDP_DIAG_UMEM, sizeof(du), &du); if (!err && pool && pool->fq) err = xsk_diag_put_ring(pool->fq, XDP_DIAG_UMEM_FILL_RING, nlskb); if (!err && pool && pool->cq) err = xsk_diag_put_ring(pool->cq, XDP_DIAG_UMEM_COMPLETION_RING, nlskb); return err; } static int xsk_diag_put_stats(const struct xdp_sock *xs, struct sk_buff *nlskb) { struct xdp_diag_stats du = {}; du.n_rx_dropped = xs->rx_dropped; du.n_rx_invalid = xskq_nb_invalid_descs(xs->rx); du.n_rx_full = xs->rx_queue_full; du.n_fill_ring_empty = xs->pool ? 
xskq_nb_queue_empty_descs(xs->pool->fq) : 0; du.n_tx_invalid = xskq_nb_invalid_descs(xs->tx); du.n_tx_ring_empty = xskq_nb_queue_empty_descs(xs->tx); return nla_put(nlskb, XDP_DIAG_STATS, sizeof(du), &du); } static int xsk_diag_fill(struct sock *sk, struct sk_buff *nlskb, struct xdp_diag_req *req, struct user_namespace *user_ns, u32 portid, u32 seq, u32 flags, int sk_ino) { struct xdp_sock *xs = xdp_sk(sk); struct xdp_diag_msg *msg; struct nlmsghdr *nlh; nlh = nlmsg_put(nlskb, portid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*msg), flags); if (!nlh) return -EMSGSIZE; msg = nlmsg_data(nlh); memset(msg, 0, sizeof(*msg)); msg->xdiag_family = AF_XDP; msg->xdiag_type = sk->sk_type; msg->xdiag_ino = sk_ino; sock_diag_save_cookie(sk, msg->xdiag_cookie); mutex_lock(&xs->mutex); if (READ_ONCE(xs->state) == XSK_UNBOUND) goto out_nlmsg_trim; if ((req->xdiag_show & XDP_SHOW_INFO) && xsk_diag_put_info(xs, nlskb)) goto out_nlmsg_trim; if ((req->xdiag_show & XDP_SHOW_INFO) && nla_put_u32(nlskb, XDP_DIAG_UID, from_kuid_munged(user_ns, sock_i_uid(sk)))) goto out_nlmsg_trim; if ((req->xdiag_show & XDP_SHOW_RING_CFG) && xsk_diag_put_rings_cfg(xs, nlskb)) goto out_nlmsg_trim; if ((req->xdiag_show & XDP_SHOW_UMEM) && xsk_diag_put_umem(xs, nlskb)) goto out_nlmsg_trim; if ((req->xdiag_show & XDP_SHOW_MEMINFO) && sock_diag_put_meminfo(sk, nlskb, XDP_DIAG_MEMINFO)) goto out_nlmsg_trim; if ((req->xdiag_show & XDP_SHOW_STATS) && xsk_diag_put_stats(xs, nlskb)) goto out_nlmsg_trim; mutex_unlock(&xs->mutex); nlmsg_end(nlskb, nlh); return 0; out_nlmsg_trim: mutex_unlock(&xs->mutex); nlmsg_cancel(nlskb, nlh); return -EMSGSIZE; } static int xsk_diag_dump(struct sk_buff *nlskb, struct netlink_callback *cb) { struct xdp_diag_req *req = nlmsg_data(cb->nlh); struct net *net = sock_net(nlskb->sk); int num = 0, s_num = cb->args[0]; struct sock *sk; mutex_lock(&net->xdp.lock); sk_for_each(sk, &net->xdp.list) { if (!net_eq(sock_net(sk), net)) continue; if (num++ < s_num) continue; if (xsk_diag_fill(sk, nlskb, req, sk_user_ns(NETLINK_CB(cb->skb).sk), NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NLM_F_MULTI, sock_i_ino(sk)) < 0) { num--; break; } } mutex_unlock(&net->xdp.lock); cb->args[0] = num; return nlskb->len; } static int xsk_diag_handler_dump(struct sk_buff *nlskb, struct nlmsghdr *hdr) { struct netlink_dump_control c = { .dump = xsk_diag_dump }; int hdrlen = sizeof(struct xdp_diag_req); struct net *net = sock_net(nlskb->sk); if (nlmsg_len(hdr) < hdrlen) return -EINVAL; if (!(hdr->nlmsg_flags & NLM_F_DUMP)) return -EOPNOTSUPP; return netlink_dump_start(net->diag_nlsk, nlskb, hdr, &c); } static const struct sock_diag_handler xsk_diag_handler = { .owner = THIS_MODULE, .family = AF_XDP, .dump = xsk_diag_handler_dump, }; static int __init xsk_diag_init(void) { return sock_diag_register(&xsk_diag_handler); } static void __exit xsk_diag_exit(void) { sock_diag_unregister(&xsk_diag_handler); } module_init(xsk_diag_init); module_exit(xsk_diag_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("XDP socket monitoring via SOCK_DIAG"); MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, AF_XDP); |
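/*
 * Illustrative user-space sketch (not part of this module): the
 * SOCK_DIAG_BY_FAMILY dump request that xsk_diag_handler_dump() above
 * services. struct xdp_diag_req and the XDP_SHOW_* flags come from
 * <linux/xdp_diag.h>; AF_XDP may need to be defined by hand on older
 * libcs. Parsing the reply attributes (XDP_DIAG_INFO, XDP_DIAG_UMEM,
 * XDP_DIAG_STATS, ...) is left out.
 */
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/sock_diag.h>
#include <linux/xdp_diag.h>

static int xsk_diag_dump_request_example(void)
{
	struct {
		struct nlmsghdr nlh;
		struct xdp_diag_req req;
	} msg = {
		.nlh = {
			.nlmsg_len = sizeof(msg),
			.nlmsg_type = SOCK_DIAG_BY_FAMILY,
			.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
		},
		.req = {
			.sdiag_family = AF_XDP,
			.xdiag_show = XDP_SHOW_INFO | XDP_SHOW_RING_CFG |
				      XDP_SHOW_UMEM | XDP_SHOW_STATS,
		},
	};
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_SOCK_DIAG);

	if (fd < 0)
		return -1;

	if (send(fd, &msg, sizeof(msg), 0) < 0) {
		close(fd);
		return -1;
	}

	/* The kernel replies with one NLM_F_MULTI message per AF_XDP socket,
	 * terminated by NLMSG_DONE; read them back with the usual
	 * recv() + NLMSG_OK()/NLMSG_NEXT() loop. */
	return fd;
}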
907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 1204 1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 1239 1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 1250 1251 1252 1253 1254 1255 1256 1257 1258 1259 1260 1261 1262 1263 1264 1265 1266 1267 1268 1269 1270 1271 1272 1273 1274 1275 1276 1277 1278 1279 1280 1281 1282 1283 1284 1285 1286 1287 1288 1289 1290 1291 1292 1293 1294 1295 1296 1297 1298 1299 1300 1301 1302 1303 1304 1305 1306 1307 1308 1309 1310 1311 1312 1313 1314 1315 1316 1317 1318 1319 1320 1321 1322 1323 1324 1325 1326 1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 1349 1350 1351 1352 1353 | // SPDX-License-Identifier: GPL-2.0-or-later /* SCTP kernel implementation * (C) Copyright Red Hat Inc. 2017 * * This file is part of the SCTP kernel implementation * * These functions implement sctp stream message interleaving, mostly * including I-DATA and I-FORWARD-TSN chunks process. 
* * Please send any bug reports or fixes you make to the * email addresched(es): * lksctp developers <linux-sctp@vger.kernel.org> * * Written or modified by: * Xin Long <lucien.xin@gmail.com> */ #include <net/busy_poll.h> #include <net/sctp/sctp.h> #include <net/sctp/sm.h> #include <net/sctp/ulpevent.h> #include <linux/sctp.h> static struct sctp_chunk *sctp_make_idatafrag_empty( const struct sctp_association *asoc, const struct sctp_sndrcvinfo *sinfo, int len, __u8 flags, gfp_t gfp) { struct sctp_chunk *retval; struct sctp_idatahdr dp; memset(&dp, 0, sizeof(dp)); dp.stream = htons(sinfo->sinfo_stream); if (sinfo->sinfo_flags & SCTP_UNORDERED) flags |= SCTP_DATA_UNORDERED; retval = sctp_make_idata(asoc, flags, sizeof(dp) + len, gfp); if (!retval) return NULL; retval->subh.idata_hdr = sctp_addto_chunk(retval, sizeof(dp), &dp); memcpy(&retval->sinfo, sinfo, sizeof(struct sctp_sndrcvinfo)); return retval; } static void sctp_chunk_assign_mid(struct sctp_chunk *chunk) { struct sctp_stream *stream; struct sctp_chunk *lchunk; __u32 cfsn = 0; __u16 sid; if (chunk->has_mid) return; sid = sctp_chunk_stream_no(chunk); stream = &chunk->asoc->stream; list_for_each_entry(lchunk, &chunk->msg->chunks, frag_list) { struct sctp_idatahdr *hdr; __u32 mid; lchunk->has_mid = 1; hdr = lchunk->subh.idata_hdr; if (lchunk->chunk_hdr->flags & SCTP_DATA_FIRST_FRAG) hdr->ppid = lchunk->sinfo.sinfo_ppid; else hdr->fsn = htonl(cfsn++); if (lchunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) { mid = lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG ? sctp_mid_uo_next(stream, out, sid) : sctp_mid_uo_peek(stream, out, sid); } else { mid = lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG ? sctp_mid_next(stream, out, sid) : sctp_mid_peek(stream, out, sid); } hdr->mid = htonl(mid); } } static bool sctp_validate_data(struct sctp_chunk *chunk) { struct sctp_stream *stream; __u16 sid, ssn; if (chunk->chunk_hdr->type != SCTP_CID_DATA) return false; if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) return true; stream = &chunk->asoc->stream; sid = sctp_chunk_stream_no(chunk); ssn = ntohs(chunk->subh.data_hdr->ssn); return !SSN_lt(ssn, sctp_ssn_peek(stream, in, sid)); } static bool sctp_validate_idata(struct sctp_chunk *chunk) { struct sctp_stream *stream; __u32 mid; __u16 sid; if (chunk->chunk_hdr->type != SCTP_CID_I_DATA) return false; if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) return true; stream = &chunk->asoc->stream; sid = sctp_chunk_stream_no(chunk); mid = ntohl(chunk->subh.idata_hdr->mid); return !MID_lt(mid, sctp_mid_peek(stream, in, sid)); } static void sctp_intl_store_reasm(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event) { struct sctp_ulpevent *cevent; struct sk_buff *pos, *loc; pos = skb_peek_tail(&ulpq->reasm); if (!pos) { __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event)); return; } cevent = sctp_skb2event(pos); if (event->stream == cevent->stream && event->mid == cevent->mid && (cevent->msg_flags & SCTP_DATA_FIRST_FRAG || (!(event->msg_flags & SCTP_DATA_FIRST_FRAG) && event->fsn > cevent->fsn))) { __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event)); return; } if ((event->stream == cevent->stream && MID_lt(cevent->mid, event->mid)) || event->stream > cevent->stream) { __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event)); return; } loc = NULL; skb_queue_walk(&ulpq->reasm, pos) { cevent = sctp_skb2event(pos); if (event->stream < cevent->stream || (event->stream == cevent->stream && MID_lt(event->mid, cevent->mid))) { loc = pos; break; } if (event->stream == cevent->stream && event->mid == cevent->mid && 
!(cevent->msg_flags & SCTP_DATA_FIRST_FRAG) && (event->msg_flags & SCTP_DATA_FIRST_FRAG || event->fsn < cevent->fsn)) { loc = pos; break; } } if (!loc) __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event)); else __skb_queue_before(&ulpq->reasm, loc, sctp_event2skb(event)); } static struct sctp_ulpevent *sctp_intl_retrieve_partial( struct sctp_ulpq *ulpq, struct sctp_ulpevent *event) { struct sk_buff *first_frag = NULL; struct sk_buff *last_frag = NULL; struct sctp_ulpevent *retval; struct sctp_stream_in *sin; struct sk_buff *pos; __u32 next_fsn = 0; int is_last = 0; sin = sctp_stream_in(&ulpq->asoc->stream, event->stream); skb_queue_walk(&ulpq->reasm, pos) { struct sctp_ulpevent *cevent = sctp_skb2event(pos); if (cevent->stream < event->stream) continue; if (cevent->stream > event->stream || cevent->mid != sin->mid) break; switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) { case SCTP_DATA_FIRST_FRAG: goto out; case SCTP_DATA_MIDDLE_FRAG: if (!first_frag) { if (cevent->fsn == sin->fsn) { first_frag = pos; last_frag = pos; next_fsn = cevent->fsn + 1; } } else if (cevent->fsn == next_fsn) { last_frag = pos; next_fsn++; } else { goto out; } break; case SCTP_DATA_LAST_FRAG: if (!first_frag) { if (cevent->fsn == sin->fsn) { first_frag = pos; last_frag = pos; next_fsn = 0; is_last = 1; } } else if (cevent->fsn == next_fsn) { last_frag = pos; next_fsn = 0; is_last = 1; } goto out; default: goto out; } } out: if (!first_frag) return NULL; retval = sctp_make_reassembled_event(ulpq->asoc->base.net, &ulpq->reasm, first_frag, last_frag); if (retval) { sin->fsn = next_fsn; if (is_last) { retval->msg_flags |= MSG_EOR; sin->pd_mode = 0; } } return retval; } static struct sctp_ulpevent *sctp_intl_retrieve_reassembled( struct sctp_ulpq *ulpq, struct sctp_ulpevent *event) { struct sctp_association *asoc = ulpq->asoc; struct sk_buff *pos, *first_frag = NULL; struct sctp_ulpevent *retval = NULL; struct sk_buff *pd_first = NULL; struct sk_buff *pd_last = NULL; struct sctp_stream_in *sin; __u32 next_fsn = 0; __u32 pd_point = 0; __u32 pd_len = 0; __u32 mid = 0; sin = sctp_stream_in(&ulpq->asoc->stream, event->stream); skb_queue_walk(&ulpq->reasm, pos) { struct sctp_ulpevent *cevent = sctp_skb2event(pos); if (cevent->stream < event->stream) continue; if (cevent->stream > event->stream) break; if (MID_lt(cevent->mid, event->mid)) continue; if (MID_lt(event->mid, cevent->mid)) break; switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) { case SCTP_DATA_FIRST_FRAG: if (cevent->mid == sin->mid) { pd_first = pos; pd_last = pos; pd_len = pos->len; } first_frag = pos; next_fsn = 0; mid = cevent->mid; break; case SCTP_DATA_MIDDLE_FRAG: if (first_frag && cevent->mid == mid && cevent->fsn == next_fsn) { next_fsn++; if (pd_first) { pd_last = pos; pd_len += pos->len; } } else { first_frag = NULL; } break; case SCTP_DATA_LAST_FRAG: if (first_frag && cevent->mid == mid && cevent->fsn == next_fsn) goto found; else first_frag = NULL; break; } } if (!pd_first) goto out; pd_point = sctp_sk(asoc->base.sk)->pd_point; if (pd_point && pd_point <= pd_len) { retval = sctp_make_reassembled_event(asoc->base.net, &ulpq->reasm, pd_first, pd_last); if (retval) { sin->fsn = next_fsn; sin->pd_mode = 1; } } goto out; found: retval = sctp_make_reassembled_event(asoc->base.net, &ulpq->reasm, first_frag, pos); if (retval) retval->msg_flags |= MSG_EOR; out: return retval; } static struct sctp_ulpevent *sctp_intl_reasm(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event) { struct sctp_ulpevent *retval = NULL; struct sctp_stream_in *sin; if 
(SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) { event->msg_flags |= MSG_EOR; return event; } sctp_intl_store_reasm(ulpq, event); sin = sctp_stream_in(&ulpq->asoc->stream, event->stream); if (sin->pd_mode && event->mid == sin->mid && event->fsn == sin->fsn) retval = sctp_intl_retrieve_partial(ulpq, event); if (!retval) retval = sctp_intl_retrieve_reassembled(ulpq, event); return retval; } static void sctp_intl_store_ordered(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event) { struct sctp_ulpevent *cevent; struct sk_buff *pos, *loc; pos = skb_peek_tail(&ulpq->lobby); if (!pos) { __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event)); return; } cevent = (struct sctp_ulpevent *)pos->cb; if (event->stream == cevent->stream && MID_lt(cevent->mid, event->mid)) { __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event)); return; } if (event->stream > cevent->stream) { __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event)); return; } loc = NULL; skb_queue_walk(&ulpq->lobby, pos) { cevent = (struct sctp_ulpevent *)pos->cb; if (cevent->stream > event->stream) { loc = pos; break; } if (cevent->stream == event->stream && MID_lt(event->mid, cevent->mid)) { loc = pos; break; } } if (!loc) __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event)); else __skb_queue_before(&ulpq->lobby, loc, sctp_event2skb(event)); } static void sctp_intl_retrieve_ordered(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event) { struct sk_buff_head *event_list; struct sctp_stream *stream; struct sk_buff *pos, *tmp; __u16 sid = event->stream; stream = &ulpq->asoc->stream; event_list = (struct sk_buff_head *)sctp_event2skb(event)->prev; sctp_skb_for_each(pos, &ulpq->lobby, tmp) { struct sctp_ulpevent *cevent = (struct sctp_ulpevent *)pos->cb; if (cevent->stream > sid) break; if (cevent->stream < sid) continue; if (cevent->mid != sctp_mid_peek(stream, in, sid)) break; sctp_mid_next(stream, in, sid); __skb_unlink(pos, &ulpq->lobby); __skb_queue_tail(event_list, pos); } } static struct sctp_ulpevent *sctp_intl_order(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event) { struct sctp_stream *stream; __u16 sid; stream = &ulpq->asoc->stream; sid = event->stream; if (event->mid != sctp_mid_peek(stream, in, sid)) { sctp_intl_store_ordered(ulpq, event); return NULL; } sctp_mid_next(stream, in, sid); sctp_intl_retrieve_ordered(ulpq, event); return event; } static int sctp_enqueue_event(struct sctp_ulpq *ulpq, struct sk_buff_head *skb_list) { struct sock *sk = ulpq->asoc->base.sk; struct sctp_sock *sp = sctp_sk(sk); struct sctp_ulpevent *event; struct sk_buff *skb; skb = __skb_peek(skb_list); event = sctp_skb2event(skb); if (sk->sk_shutdown & RCV_SHUTDOWN && (sk->sk_shutdown & SEND_SHUTDOWN || !sctp_ulpevent_is_notification(event))) goto out_free; if (!sctp_ulpevent_is_notification(event)) { sk_mark_napi_id(sk, skb); sk_incoming_cpu_update(sk); } if (!sctp_ulpevent_is_enabled(event, ulpq->asoc->subscribe)) goto out_free; skb_queue_splice_tail_init(skb_list, &sk->sk_receive_queue); if (!sp->data_ready_signalled) { sp->data_ready_signalled = 1; sk->sk_data_ready(sk); } return 1; out_free: sctp_queue_purge_ulpevents(skb_list); return 0; } static void sctp_intl_store_reasm_uo(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event) { struct sctp_ulpevent *cevent; struct sk_buff *pos; pos = skb_peek_tail(&ulpq->reasm_uo); if (!pos) { __skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event)); return; } cevent = sctp_skb2event(pos); if (event->stream == cevent->stream && event->mid == cevent->mid && (cevent->msg_flags & SCTP_DATA_FIRST_FRAG 
|| (!(event->msg_flags & SCTP_DATA_FIRST_FRAG) && event->fsn > cevent->fsn))) { __skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event)); return; } if ((event->stream == cevent->stream && MID_lt(cevent->mid, event->mid)) || event->stream > cevent->stream) { __skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event)); return; } skb_queue_walk(&ulpq->reasm_uo, pos) { cevent = sctp_skb2event(pos); if (event->stream < cevent->stream || (event->stream == cevent->stream && MID_lt(event->mid, cevent->mid))) break; if (event->stream == cevent->stream && event->mid == cevent->mid && !(cevent->msg_flags & SCTP_DATA_FIRST_FRAG) && (event->msg_flags & SCTP_DATA_FIRST_FRAG || event->fsn < cevent->fsn)) break; } __skb_queue_before(&ulpq->reasm_uo, pos, sctp_event2skb(event)); } static struct sctp_ulpevent *sctp_intl_retrieve_partial_uo( struct sctp_ulpq *ulpq, struct sctp_ulpevent *event) { struct sk_buff *first_frag = NULL; struct sk_buff *last_frag = NULL; struct sctp_ulpevent *retval; struct sctp_stream_in *sin; struct sk_buff *pos; __u32 next_fsn = 0; int is_last = 0; sin = sctp_stream_in(&ulpq->asoc->stream, event->stream); skb_queue_walk(&ulpq->reasm_uo, pos) { struct sctp_ulpevent *cevent = sctp_skb2event(pos); if (cevent->stream < event->stream) continue; if (cevent->stream > event->stream) break; if (MID_lt(cevent->mid, sin->mid_uo)) continue; if (MID_lt(sin->mid_uo, cevent->mid)) break; switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) { case SCTP_DATA_FIRST_FRAG: goto out; case SCTP_DATA_MIDDLE_FRAG: if (!first_frag) { if (cevent->fsn == sin->fsn_uo) { first_frag = pos; last_frag = pos; next_fsn = cevent->fsn + 1; } } else if (cevent->fsn == next_fsn) { last_frag = pos; next_fsn++; } else { goto out; } break; case SCTP_DATA_LAST_FRAG: if (!first_frag) { if (cevent->fsn == sin->fsn_uo) { first_frag = pos; last_frag = pos; next_fsn = 0; is_last = 1; } } else if (cevent->fsn == next_fsn) { last_frag = pos; next_fsn = 0; is_last = 1; } goto out; default: goto out; } } out: if (!first_frag) return NULL; retval = sctp_make_reassembled_event(ulpq->asoc->base.net, &ulpq->reasm_uo, first_frag, last_frag); if (retval) { sin->fsn_uo = next_fsn; if (is_last) { retval->msg_flags |= MSG_EOR; sin->pd_mode_uo = 0; } } return retval; } static struct sctp_ulpevent *sctp_intl_retrieve_reassembled_uo( struct sctp_ulpq *ulpq, struct sctp_ulpevent *event) { struct sctp_association *asoc = ulpq->asoc; struct sk_buff *pos, *first_frag = NULL; struct sctp_ulpevent *retval = NULL; struct sk_buff *pd_first = NULL; struct sk_buff *pd_last = NULL; struct sctp_stream_in *sin; __u32 next_fsn = 0; __u32 pd_point = 0; __u32 pd_len = 0; __u32 mid = 0; sin = sctp_stream_in(&ulpq->asoc->stream, event->stream); skb_queue_walk(&ulpq->reasm_uo, pos) { struct sctp_ulpevent *cevent = sctp_skb2event(pos); if (cevent->stream < event->stream) continue; if (cevent->stream > event->stream) break; if (MID_lt(cevent->mid, event->mid)) continue; if (MID_lt(event->mid, cevent->mid)) break; switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) { case SCTP_DATA_FIRST_FRAG: if (!sin->pd_mode_uo) { sin->mid_uo = cevent->mid; pd_first = pos; pd_last = pos; pd_len = pos->len; } first_frag = pos; next_fsn = 0; mid = cevent->mid; break; case SCTP_DATA_MIDDLE_FRAG: if (first_frag && cevent->mid == mid && cevent->fsn == next_fsn) { next_fsn++; if (pd_first) { pd_last = pos; pd_len += pos->len; } } else { first_frag = NULL; } break; case SCTP_DATA_LAST_FRAG: if (first_frag && cevent->mid == mid && cevent->fsn == next_fsn) goto found; else first_frag = NULL; 
break; } } if (!pd_first) goto out; pd_point = sctp_sk(asoc->base.sk)->pd_point; if (pd_point && pd_point <= pd_len) { retval = sctp_make_reassembled_event(asoc->base.net, &ulpq->reasm_uo, pd_first, pd_last); if (retval) { sin->fsn_uo = next_fsn; sin->pd_mode_uo = 1; } } goto out; found: retval = sctp_make_reassembled_event(asoc->base.net, &ulpq->reasm_uo, first_frag, pos); if (retval) retval->msg_flags |= MSG_EOR; out: return retval; } static struct sctp_ulpevent *sctp_intl_reasm_uo(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event) { struct sctp_ulpevent *retval = NULL; struct sctp_stream_in *sin; if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) { event->msg_flags |= MSG_EOR; return event; } sctp_intl_store_reasm_uo(ulpq, event); sin = sctp_stream_in(&ulpq->asoc->stream, event->stream); if (sin->pd_mode_uo && event->mid == sin->mid_uo && event->fsn == sin->fsn_uo) retval = sctp_intl_retrieve_partial_uo(ulpq, event); if (!retval) retval = sctp_intl_retrieve_reassembled_uo(ulpq, event); return retval; } static struct sctp_ulpevent *sctp_intl_retrieve_first_uo(struct sctp_ulpq *ulpq) { struct sctp_stream_in *csin, *sin = NULL; struct sk_buff *first_frag = NULL; struct sk_buff *last_frag = NULL; struct sctp_ulpevent *retval; struct sk_buff *pos; __u32 next_fsn = 0; __u16 sid = 0; skb_queue_walk(&ulpq->reasm_uo, pos) { struct sctp_ulpevent *cevent = sctp_skb2event(pos); csin = sctp_stream_in(&ulpq->asoc->stream, cevent->stream); if (csin->pd_mode_uo) continue; switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) { case SCTP_DATA_FIRST_FRAG: if (first_frag) goto out; first_frag = pos; last_frag = pos; next_fsn = 0; sin = csin; sid = cevent->stream; sin->mid_uo = cevent->mid; break; case SCTP_DATA_MIDDLE_FRAG: if (!first_frag) break; if (cevent->stream == sid && cevent->mid == sin->mid_uo && cevent->fsn == next_fsn) { next_fsn++; last_frag = pos; } else { goto out; } break; case SCTP_DATA_LAST_FRAG: if (first_frag) goto out; break; default: break; } } if (!first_frag) return NULL; out: retval = sctp_make_reassembled_event(ulpq->asoc->base.net, &ulpq->reasm_uo, first_frag, last_frag); if (retval) { sin->fsn_uo = next_fsn; sin->pd_mode_uo = 1; } return retval; } static int sctp_ulpevent_idata(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk, gfp_t gfp) { struct sctp_ulpevent *event; struct sk_buff_head temp; int event_eor = 0; event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp); if (!event) return -ENOMEM; event->mid = ntohl(chunk->subh.idata_hdr->mid); if (event->msg_flags & SCTP_DATA_FIRST_FRAG) event->ppid = chunk->subh.idata_hdr->ppid; else event->fsn = ntohl(chunk->subh.idata_hdr->fsn); if (!(event->msg_flags & SCTP_DATA_UNORDERED)) { event = sctp_intl_reasm(ulpq, event); if (event) { skb_queue_head_init(&temp); __skb_queue_tail(&temp, sctp_event2skb(event)); if (event->msg_flags & MSG_EOR) event = sctp_intl_order(ulpq, event); } } else { event = sctp_intl_reasm_uo(ulpq, event); if (event) { skb_queue_head_init(&temp); __skb_queue_tail(&temp, sctp_event2skb(event)); } } if (event) { event_eor = (event->msg_flags & MSG_EOR) ? 
1 : 0; sctp_enqueue_event(ulpq, &temp); } return event_eor; } static struct sctp_ulpevent *sctp_intl_retrieve_first(struct sctp_ulpq *ulpq) { struct sctp_stream_in *csin, *sin = NULL; struct sk_buff *first_frag = NULL; struct sk_buff *last_frag = NULL; struct sctp_ulpevent *retval; struct sk_buff *pos; __u32 next_fsn = 0; __u16 sid = 0; skb_queue_walk(&ulpq->reasm, pos) { struct sctp_ulpevent *cevent = sctp_skb2event(pos); csin = sctp_stream_in(&ulpq->asoc->stream, cevent->stream); if (csin->pd_mode) continue; switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) { case SCTP_DATA_FIRST_FRAG: if (first_frag) goto out; if (cevent->mid == csin->mid) { first_frag = pos; last_frag = pos; next_fsn = 0; sin = csin; sid = cevent->stream; } break; case SCTP_DATA_MIDDLE_FRAG: if (!first_frag) break; if (cevent->stream == sid && cevent->mid == sin->mid && cevent->fsn == next_fsn) { next_fsn++; last_frag = pos; } else { goto out; } break; case SCTP_DATA_LAST_FRAG: if (first_frag) goto out; break; default: break; } } if (!first_frag) return NULL; out: retval = sctp_make_reassembled_event(ulpq->asoc->base.net, &ulpq->reasm, first_frag, last_frag); if (retval) { sin->fsn = next_fsn; sin->pd_mode = 1; } return retval; } static void sctp_intl_start_pd(struct sctp_ulpq *ulpq, gfp_t gfp) { struct sctp_ulpevent *event; struct sk_buff_head temp; if (!skb_queue_empty(&ulpq->reasm)) { do { event = sctp_intl_retrieve_first(ulpq); if (event) { skb_queue_head_init(&temp); __skb_queue_tail(&temp, sctp_event2skb(event)); sctp_enqueue_event(ulpq, &temp); } } while (event); } if (!skb_queue_empty(&ulpq->reasm_uo)) { do { event = sctp_intl_retrieve_first_uo(ulpq); if (event) { skb_queue_head_init(&temp); __skb_queue_tail(&temp, sctp_event2skb(event)); sctp_enqueue_event(ulpq, &temp); } } while (event); } } static void sctp_renege_events(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk, gfp_t gfp) { struct sctp_association *asoc = ulpq->asoc; __u32 freed = 0; __u16 needed; needed = ntohs(chunk->chunk_hdr->length) - sizeof(struct sctp_idata_chunk); if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) { freed = sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed); if (freed < needed) freed += sctp_ulpq_renege_list(ulpq, &ulpq->reasm, needed); if (freed < needed) freed += sctp_ulpq_renege_list(ulpq, &ulpq->reasm_uo, needed); } if (freed >= needed && sctp_ulpevent_idata(ulpq, chunk, gfp) <= 0) sctp_intl_start_pd(ulpq, gfp); } static void sctp_intl_stream_abort_pd(struct sctp_ulpq *ulpq, __u16 sid, __u32 mid, __u16 flags, gfp_t gfp) { struct sock *sk = ulpq->asoc->base.sk; struct sctp_ulpevent *ev = NULL; if (!sctp_ulpevent_type_enabled(ulpq->asoc->subscribe, SCTP_PARTIAL_DELIVERY_EVENT)) return; ev = sctp_ulpevent_make_pdapi(ulpq->asoc, SCTP_PARTIAL_DELIVERY_ABORTED, sid, mid, flags, gfp); if (ev) { struct sctp_sock *sp = sctp_sk(sk); __skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev)); if (!sp->data_ready_signalled) { sp->data_ready_signalled = 1; sk->sk_data_ready(sk); } } } static void sctp_intl_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid) { struct sctp_stream *stream = &ulpq->asoc->stream; struct sctp_ulpevent *cevent, *event = NULL; struct sk_buff_head *lobby = &ulpq->lobby; struct sk_buff *pos, *tmp; struct sk_buff_head temp; __u16 csid; __u32 cmid; skb_queue_head_init(&temp); sctp_skb_for_each(pos, lobby, tmp) { cevent = (struct sctp_ulpevent *)pos->cb; csid = cevent->stream; cmid = cevent->mid; if (csid > sid) break; if (csid < sid) continue; if (!MID_lt(cmid, sctp_mid_peek(stream, in, csid))) break; 
__skb_unlink(pos, lobby); if (!event) event = sctp_skb2event(pos); __skb_queue_tail(&temp, pos); } if (!event && pos != (struct sk_buff *)lobby) { cevent = (struct sctp_ulpevent *)pos->cb; csid = cevent->stream; cmid = cevent->mid; if (csid == sid && cmid == sctp_mid_peek(stream, in, csid)) { sctp_mid_next(stream, in, csid); __skb_unlink(pos, lobby); __skb_queue_tail(&temp, pos); event = sctp_skb2event(pos); } } if (event) { sctp_intl_retrieve_ordered(ulpq, event); sctp_enqueue_event(ulpq, &temp); } } static void sctp_intl_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp) { struct sctp_stream *stream = &ulpq->asoc->stream; __u16 sid; for (sid = 0; sid < stream->incnt; sid++) { struct sctp_stream_in *sin = SCTP_SI(stream, sid); __u32 mid; if (sin->pd_mode_uo) { sin->pd_mode_uo = 0; mid = sin->mid_uo; sctp_intl_stream_abort_pd(ulpq, sid, mid, 0x1, gfp); } if (sin->pd_mode) { sin->pd_mode = 0; mid = sin->mid; sctp_intl_stream_abort_pd(ulpq, sid, mid, 0, gfp); sctp_mid_skip(stream, in, sid, mid); sctp_intl_reap_ordered(ulpq, sid); } } /* intl abort pd happens only when all data needs to be cleaned */ sctp_ulpq_flush(ulpq); } static inline int sctp_get_skip_pos(struct sctp_ifwdtsn_skip *skiplist, int nskips, __be16 stream, __u8 flags) { int i; for (i = 0; i < nskips; i++) if (skiplist[i].stream == stream && skiplist[i].flags == flags) return i; return i; } #define SCTP_FTSN_U_BIT 0x1 static void sctp_generate_iftsn(struct sctp_outq *q, __u32 ctsn) { struct sctp_ifwdtsn_skip ftsn_skip_arr[10]; struct sctp_association *asoc = q->asoc; struct sctp_chunk *ftsn_chunk = NULL; struct list_head *lchunk, *temp; int nskips = 0, skip_pos; struct sctp_chunk *chunk; __u32 tsn; if (!asoc->peer.prsctp_capable) return; if (TSN_lt(asoc->adv_peer_ack_point, ctsn)) asoc->adv_peer_ack_point = ctsn; list_for_each_safe(lchunk, temp, &q->abandoned) { chunk = list_entry(lchunk, struct sctp_chunk, transmitted_list); tsn = ntohl(chunk->subh.data_hdr->tsn); if (TSN_lte(tsn, ctsn)) { list_del_init(lchunk); sctp_chunk_free(chunk); } else if (TSN_lte(tsn, asoc->adv_peer_ack_point + 1)) { __be16 sid = chunk->subh.idata_hdr->stream; __be32 mid = chunk->subh.idata_hdr->mid; __u8 flags = 0; if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) flags |= SCTP_FTSN_U_BIT; asoc->adv_peer_ack_point = tsn; skip_pos = sctp_get_skip_pos(&ftsn_skip_arr[0], nskips, sid, flags); ftsn_skip_arr[skip_pos].stream = sid; ftsn_skip_arr[skip_pos].reserved = 0; ftsn_skip_arr[skip_pos].flags = flags; ftsn_skip_arr[skip_pos].mid = mid; if (skip_pos == nskips) nskips++; if (nskips == 10) break; } else { break; } } if (asoc->adv_peer_ack_point > ctsn) ftsn_chunk = sctp_make_ifwdtsn(asoc, asoc->adv_peer_ack_point, nskips, &ftsn_skip_arr[0]); if (ftsn_chunk) { list_add_tail(&ftsn_chunk->list, &q->control_chunk_list); SCTP_INC_STATS(asoc->base.net, SCTP_MIB_OUTCTRLCHUNKS); } } #define _sctp_walk_ifwdtsn(pos, chunk, end) \ for (pos = (void *)(chunk->subh.ifwdtsn_hdr + 1); \ (void *)pos <= (void *)(chunk->subh.ifwdtsn_hdr + 1) + (end) - \ sizeof(struct sctp_ifwdtsn_skip); pos++) #define sctp_walk_ifwdtsn(pos, ch) \ _sctp_walk_ifwdtsn((pos), (ch), ntohs((ch)->chunk_hdr->length) - \ sizeof(struct sctp_ifwdtsn_chunk)) static bool sctp_validate_fwdtsn(struct sctp_chunk *chunk) { struct sctp_fwdtsn_skip *skip; __u16 incnt; if (chunk->chunk_hdr->type != SCTP_CID_FWD_TSN) return false; incnt = chunk->asoc->stream.incnt; sctp_walk_fwdtsn(skip, chunk) if (ntohs(skip->stream) >= incnt) return false; return true; } static bool sctp_validate_iftsn(struct sctp_chunk *chunk) { 
struct sctp_ifwdtsn_skip *skip; __u16 incnt; if (chunk->chunk_hdr->type != SCTP_CID_I_FWD_TSN) return false; incnt = chunk->asoc->stream.incnt; sctp_walk_ifwdtsn(skip, chunk) if (ntohs(skip->stream) >= incnt) return false; return true; } static void sctp_report_fwdtsn(struct sctp_ulpq *ulpq, __u32 ftsn) { /* Move the Cumulattive TSN Ack ahead. */ sctp_tsnmap_skip(&ulpq->asoc->peer.tsn_map, ftsn); /* purge the fragmentation queue */ sctp_ulpq_reasm_flushtsn(ulpq, ftsn); /* Abort any in progress partial delivery. */ sctp_ulpq_abort_pd(ulpq, GFP_ATOMIC); } static void sctp_intl_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 ftsn) { struct sk_buff *pos, *tmp; skb_queue_walk_safe(&ulpq->reasm, pos, tmp) { struct sctp_ulpevent *event = sctp_skb2event(pos); __u32 tsn = event->tsn; if (TSN_lte(tsn, ftsn)) { __skb_unlink(pos, &ulpq->reasm); sctp_ulpevent_free(event); } } skb_queue_walk_safe(&ulpq->reasm_uo, pos, tmp) { struct sctp_ulpevent *event = sctp_skb2event(pos); __u32 tsn = event->tsn; if (TSN_lte(tsn, ftsn)) { __skb_unlink(pos, &ulpq->reasm_uo); sctp_ulpevent_free(event); } } } static void sctp_report_iftsn(struct sctp_ulpq *ulpq, __u32 ftsn) { /* Move the Cumulattive TSN Ack ahead. */ sctp_tsnmap_skip(&ulpq->asoc->peer.tsn_map, ftsn); /* purge the fragmentation queue */ sctp_intl_reasm_flushtsn(ulpq, ftsn); /* abort only when it's for all data */ if (ftsn == sctp_tsnmap_get_max_tsn_seen(&ulpq->asoc->peer.tsn_map)) sctp_intl_abort_pd(ulpq, GFP_ATOMIC); } static void sctp_handle_fwdtsn(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk) { struct sctp_fwdtsn_skip *skip; /* Walk through all the skipped SSNs */ sctp_walk_fwdtsn(skip, chunk) sctp_ulpq_skip(ulpq, ntohs(skip->stream), ntohs(skip->ssn)); } static void sctp_intl_skip(struct sctp_ulpq *ulpq, __u16 sid, __u32 mid, __u8 flags) { struct sctp_stream_in *sin = sctp_stream_in(&ulpq->asoc->stream, sid); struct sctp_stream *stream = &ulpq->asoc->stream; if (flags & SCTP_FTSN_U_BIT) { if (sin->pd_mode_uo && MID_lt(sin->mid_uo, mid)) { sin->pd_mode_uo = 0; sctp_intl_stream_abort_pd(ulpq, sid, mid, 0x1, GFP_ATOMIC); } return; } if (MID_lt(mid, sctp_mid_peek(stream, in, sid))) return; if (sin->pd_mode) { sin->pd_mode = 0; sctp_intl_stream_abort_pd(ulpq, sid, mid, 0x0, GFP_ATOMIC); } sctp_mid_skip(stream, in, sid, mid); sctp_intl_reap_ordered(ulpq, sid); } static void sctp_handle_iftsn(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk) { struct sctp_ifwdtsn_skip *skip; /* Walk through all the skipped MIDs and abort stream pd if possible */ sctp_walk_ifwdtsn(skip, chunk) sctp_intl_skip(ulpq, ntohs(skip->stream), ntohl(skip->mid), skip->flags); } static int do_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event) { struct sk_buff_head temp; skb_queue_head_init(&temp); __skb_queue_tail(&temp, sctp_event2skb(event)); return sctp_ulpq_tail_event(ulpq, &temp); } static struct sctp_stream_interleave sctp_stream_interleave_0 = { .data_chunk_len = sizeof(struct sctp_data_chunk), .ftsn_chunk_len = sizeof(struct sctp_fwdtsn_chunk), /* DATA process functions */ .make_datafrag = sctp_make_datafrag_empty, .assign_number = sctp_chunk_assign_ssn, .validate_data = sctp_validate_data, .ulpevent_data = sctp_ulpq_tail_data, .enqueue_event = do_ulpq_tail_event, .renege_events = sctp_ulpq_renege, .start_pd = sctp_ulpq_partial_delivery, .abort_pd = sctp_ulpq_abort_pd, /* FORWARD-TSN process functions */ .generate_ftsn = sctp_generate_fwdtsn, .validate_ftsn = sctp_validate_fwdtsn, .report_ftsn = sctp_report_fwdtsn, .handle_ftsn = sctp_handle_fwdtsn, }; static 
int do_sctp_enqueue_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event) { struct sk_buff_head temp; skb_queue_head_init(&temp); __skb_queue_tail(&temp, sctp_event2skb(event)); return sctp_enqueue_event(ulpq, &temp); } static struct sctp_stream_interleave sctp_stream_interleave_1 = { .data_chunk_len = sizeof(struct sctp_idata_chunk), .ftsn_chunk_len = sizeof(struct sctp_ifwdtsn_chunk), /* I-DATA process functions */ .make_datafrag = sctp_make_idatafrag_empty, .assign_number = sctp_chunk_assign_mid, .validate_data = sctp_validate_idata, .ulpevent_data = sctp_ulpevent_idata, .enqueue_event = do_sctp_enqueue_event, .renege_events = sctp_renege_events, .start_pd = sctp_intl_start_pd, .abort_pd = sctp_intl_abort_pd, /* I-FORWARD-TSN process functions */ .generate_ftsn = sctp_generate_iftsn, .validate_ftsn = sctp_validate_iftsn, .report_ftsn = sctp_report_iftsn, .handle_ftsn = sctp_handle_iftsn, }; void sctp_stream_interleave_init(struct sctp_stream *stream) { struct sctp_association *asoc; asoc = container_of(stream, struct sctp_association, stream); stream->si = asoc->peer.intl_capable ? &sctp_stream_interleave_1 : &sctp_stream_interleave_0; } |
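Throughout the reassembly and ordering code above, fragment placement hinges on MID_lt()/TSN_lt()-style comparisons of 32-bit serial numbers that may wrap around. The snippet below is a stand-alone user-space sketch (not the kernel macro, which is defined in the SCTP headers) of the usual wraparound-safe signed-difference idiom such comparisons are generally built on; the function name and test values are illustrative only.

#include <stdint.h>
#include <stdio.h>

/* true when a precedes b in serial-number (wraparound-safe) order */
static int mid_lt(uint32_t a, uint32_t b)
{
        return (int32_t)(a - b) < 0;
}

int main(void)
{
        printf("%d\n", mid_lt(1, 2));           /* 1: 1 is before 2 */
        printf("%d\n", mid_lt(0xfffffffeu, 3)); /* 1: still "before" across the wrap */
        printf("%d\n", mid_lt(5, 5));           /* 0: equal is not less-than */
        return 0;
}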
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB /* * Copyright (c) 2017 Mellanox Technologies Ltd. All rights reserved. */ #include "rxe.h" #include "rxe_hw_counters.h" static const struct rdma_stat_desc rxe_counter_descs[] = { [RXE_CNT_SENT_PKTS].name = "sent_pkts", [RXE_CNT_RCVD_PKTS].name = "rcvd_pkts", [RXE_CNT_DUP_REQ].name = "duplicate_request", [RXE_CNT_OUT_OF_SEQ_REQ].name = "out_of_seq_request", [RXE_CNT_RCV_RNR].name = "rcvd_rnr_err", [RXE_CNT_SND_RNR].name = "send_rnr_err", [RXE_CNT_RCV_SEQ_ERR].name = "rcvd_seq_err", [RXE_CNT_SENDER_SCHED].name = "ack_deferred", [RXE_CNT_RETRY_EXCEEDED].name = "retry_exceeded_err", [RXE_CNT_RNR_RETRY_EXCEEDED].name = "retry_rnr_exceeded_err", [RXE_CNT_COMP_RETRY].name = "completer_retry_err", [RXE_CNT_SEND_ERR].name = "send_err", [RXE_CNT_LINK_DOWNED].name = "link_downed", [RXE_CNT_RDMA_SEND].name = "rdma_sends", [RXE_CNT_RDMA_RECV].name = "rdma_recvs", }; int rxe_ib_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats, u32 port, int index) { struct rxe_dev *dev = to_rdev(ibdev); unsigned int cnt; if (!port || !stats) return -EINVAL; for (cnt = 0; cnt < ARRAY_SIZE(rxe_counter_descs); cnt++) stats->value[cnt] = atomic64_read(&dev->stats_counters[cnt]); return ARRAY_SIZE(rxe_counter_descs); } struct rdma_hw_stats *rxe_ib_alloc_hw_port_stats(struct ib_device *ibdev, u32 port_num) { BUILD_BUG_ON(ARRAY_SIZE(rxe_counter_descs) != RXE_NUM_OF_COUNTERS); return rdma_alloc_hw_stats_struct(rxe_counter_descs, ARRAY_SIZE(rxe_counter_descs), RDMA_HW_STATS_DEFAULT_LIFESPAN); }
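The rxe counter code pairs an enum with a descriptor table and reads the per-counter atomics in one loop, while BUILD_BUG_ON() keeps table and enum in sync. Below is a small user-space sketch of that same table-plus-static-assert pattern with made-up DEMO_* names; it is an illustration of the bookkeeping idea, not the rdma_hw_stats API.

#include <assert.h>
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

enum demo_counters {
        DEMO_CNT_SENT_PKTS,
        DEMO_CNT_RCVD_PKTS,
        DEMO_NUM_OF_COUNTERS,
};

static const char *const demo_counter_names[] = {
        [DEMO_CNT_SENT_PKTS] = "sent_pkts",
        [DEMO_CNT_RCVD_PKTS] = "rcvd_pkts",
};

/* compile-time check, analogous to the BUILD_BUG_ON() in the driver */
static_assert(sizeof(demo_counter_names) / sizeof(demo_counter_names[0]) ==
              DEMO_NUM_OF_COUNTERS, "descriptor table out of sync with enum");

static atomic_uint_fast64_t demo_counters[DEMO_NUM_OF_COUNTERS];

int main(void)
{
        atomic_fetch_add(&demo_counters[DEMO_CNT_SENT_PKTS], 3);

        for (size_t i = 0; i < DEMO_NUM_OF_COUNTERS; i++)
                printf("%s = %llu\n", demo_counter_names[i],
                       (unsigned long long)atomic_load(&demo_counters[i]));
        return 0;
}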
// SPDX-License-Identifier: GPL-2.0-only /* Helper handling for netfilter. */ /* (C) 1999-2001 Paul `Rusty' Russell * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org> * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org> * (C) 2006-2012 Patrick McHardy <kaber@trash.net> */ #include <linux/types.h> #include <linux/netfilter.h> #include <linux/module.h> #include <linux/skbuff.h> #include <linux/vmalloc.h> #include <linux/stddef.h> #include <linux/random.h> #include <linux/err.h> #include <linux/kernel.h> #include <linux/netdevice.h> #include <linux/rculist.h> #include <linux/rtnetlink.h> #include <net/netfilter/nf_conntrack.h> #include <net/netfilter/nf_conntrack_core.h> #include <net/netfilter/nf_conntrack_ecache.h> #include <net/netfilter/nf_conntrack_extend.h> #include <net/netfilter/nf_conntrack_helper.h> #include <net/netfilter/nf_conntrack_l4proto.h> #include <net/netfilter/nf_conntrack_seqadj.h> #include <net/netfilter/nf_log.h> #include <net/ip.h> static DEFINE_MUTEX(nf_ct_helper_mutex); struct hlist_head *nf_ct_helper_hash __read_mostly; EXPORT_SYMBOL_GPL(nf_ct_helper_hash); unsigned int nf_ct_helper_hsize __read_mostly; EXPORT_SYMBOL_GPL(nf_ct_helper_hsize); static unsigned int nf_ct_helper_count __read_mostly; static DEFINE_MUTEX(nf_ct_nat_helpers_mutex); static struct list_head nf_ct_nat_helpers __read_mostly; /* Stupid hash, but collision free for the default registrations of the * helpers currently in the kernel.
*/ static unsigned int helper_hash(const struct nf_conntrack_tuple *tuple) { return (((tuple->src.l3num << 8) | tuple->dst.protonum) ^ (__force __u16)tuple->src.u.all) % nf_ct_helper_hsize; } struct nf_conntrack_helper * __nf_conntrack_helper_find(const char *name, u16 l3num, u8 protonum) { struct nf_conntrack_helper *h; unsigned int i; for (i = 0; i < nf_ct_helper_hsize; i++) { hlist_for_each_entry_rcu(h, &nf_ct_helper_hash[i], hnode) { if (strcmp(h->name, name)) continue; if (h->tuple.src.l3num != NFPROTO_UNSPEC && h->tuple.src.l3num != l3num) continue; if (h->tuple.dst.protonum == protonum) return h; } } return NULL; } EXPORT_SYMBOL_GPL(__nf_conntrack_helper_find); struct nf_conntrack_helper * nf_conntrack_helper_try_module_get(const char *name, u16 l3num, u8 protonum) { struct nf_conntrack_helper *h; rcu_read_lock(); h = __nf_conntrack_helper_find(name, l3num, protonum); #ifdef CONFIG_MODULES if (h == NULL) { rcu_read_unlock(); if (request_module("nfct-helper-%s", name) == 0) { rcu_read_lock(); h = __nf_conntrack_helper_find(name, l3num, protonum); } else { return h; } } #endif if (h != NULL && !try_module_get(h->me)) h = NULL; if (h != NULL && !refcount_inc_not_zero(&h->refcnt)) { module_put(h->me); h = NULL; } rcu_read_unlock(); return h; } EXPORT_SYMBOL_GPL(nf_conntrack_helper_try_module_get); void nf_conntrack_helper_put(struct nf_conntrack_helper *helper) { refcount_dec(&helper->refcnt); module_put(helper->me); } EXPORT_SYMBOL_GPL(nf_conntrack_helper_put); static struct nf_conntrack_nat_helper * nf_conntrack_nat_helper_find(const char *mod_name) { struct nf_conntrack_nat_helper *cur; bool found = false; list_for_each_entry_rcu(cur, &nf_ct_nat_helpers, list) { if (!strcmp(cur->mod_name, mod_name)) { found = true; break; } } return found ? cur : NULL; } int nf_nat_helper_try_module_get(const char *name, u16 l3num, u8 protonum) { struct nf_conntrack_helper *h; struct nf_conntrack_nat_helper *nat; char mod_name[NF_CT_HELPER_NAME_LEN]; int ret = 0; rcu_read_lock(); h = __nf_conntrack_helper_find(name, l3num, protonum); if (!h) { rcu_read_unlock(); return -ENOENT; } nat = nf_conntrack_nat_helper_find(h->nat_mod_name); if (!nat) { snprintf(mod_name, sizeof(mod_name), "%s", h->nat_mod_name); rcu_read_unlock(); request_module("%s", mod_name); rcu_read_lock(); nat = nf_conntrack_nat_helper_find(mod_name); if (!nat) { rcu_read_unlock(); return -ENOENT; } } if (!try_module_get(nat->module)) ret = -ENOENT; rcu_read_unlock(); return ret; } EXPORT_SYMBOL_GPL(nf_nat_helper_try_module_get); void nf_nat_helper_put(struct nf_conntrack_helper *helper) { struct nf_conntrack_nat_helper *nat; nat = nf_conntrack_nat_helper_find(helper->nat_mod_name); if (WARN_ON_ONCE(!nat)) return; module_put(nat->module); } EXPORT_SYMBOL_GPL(nf_nat_helper_put); struct nf_conn_help * nf_ct_helper_ext_add(struct nf_conn *ct, gfp_t gfp) { struct nf_conn_help *help; help = nf_ct_ext_add(ct, NF_CT_EXT_HELPER, gfp); if (help) INIT_HLIST_HEAD(&help->expectations); else pr_debug("failed to add helper extension area"); return help; } EXPORT_SYMBOL_GPL(nf_ct_helper_ext_add); int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl, gfp_t flags) { struct nf_conntrack_helper *helper = NULL; struct nf_conn_help *help; /* We already got a helper explicitly attached (e.g. 
nft_ct) */ if (test_bit(IPS_HELPER_BIT, &ct->status)) return 0; if (WARN_ON_ONCE(!tmpl)) return 0; help = nfct_help(tmpl); if (help != NULL) { helper = rcu_dereference(help->helper); set_bit(IPS_HELPER_BIT, &ct->status); } help = nfct_help(ct); if (helper == NULL) { if (help) RCU_INIT_POINTER(help->helper, NULL); return 0; } if (help == NULL) { help = nf_ct_helper_ext_add(ct, flags); if (help == NULL) return -ENOMEM; } else { /* We only allow helper re-assignment of the same sort since * we cannot reallocate the helper extension area. */ struct nf_conntrack_helper *tmp = rcu_dereference(help->helper); if (tmp && tmp->help != helper->help) { RCU_INIT_POINTER(help->helper, NULL); return 0; } } rcu_assign_pointer(help->helper, helper); return 0; } EXPORT_SYMBOL_GPL(__nf_ct_try_assign_helper); /* appropriate ct lock protecting must be taken by caller */ static int unhelp(struct nf_conn *ct, void *me) { struct nf_conn_help *help = nfct_help(ct); if (help && rcu_dereference_raw(help->helper) == me) { nf_conntrack_event(IPCT_HELPER, ct); RCU_INIT_POINTER(help->helper, NULL); } /* We are not intended to delete this conntrack. */ return 0; } void nf_ct_helper_destroy(struct nf_conn *ct) { struct nf_conn_help *help = nfct_help(ct); struct nf_conntrack_helper *helper; if (help) { rcu_read_lock(); helper = rcu_dereference(help->helper); if (helper && helper->destroy) helper->destroy(ct); rcu_read_unlock(); } } static LIST_HEAD(nf_ct_helper_expectfn_list); void nf_ct_helper_expectfn_register(struct nf_ct_helper_expectfn *n) { spin_lock_bh(&nf_conntrack_expect_lock); list_add_rcu(&n->head, &nf_ct_helper_expectfn_list); spin_unlock_bh(&nf_conntrack_expect_lock); } EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_register); void nf_ct_helper_expectfn_unregister(struct nf_ct_helper_expectfn *n) { spin_lock_bh(&nf_conntrack_expect_lock); list_del_rcu(&n->head); spin_unlock_bh(&nf_conntrack_expect_lock); } EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_unregister); /* Caller should hold the rcu lock */ struct nf_ct_helper_expectfn * nf_ct_helper_expectfn_find_by_name(const char *name) { struct nf_ct_helper_expectfn *cur; bool found = false; list_for_each_entry_rcu(cur, &nf_ct_helper_expectfn_list, head) { if (!strcmp(cur->name, name)) { found = true; break; } } return found ? cur : NULL; } EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_find_by_name); /* Caller should hold the rcu lock */ struct nf_ct_helper_expectfn * nf_ct_helper_expectfn_find_by_symbol(const void *symbol) { struct nf_ct_helper_expectfn *cur; bool found = false; list_for_each_entry_rcu(cur, &nf_ct_helper_expectfn_list, head) { if (cur->expectfn == symbol) { found = true; break; } } return found ? cur : NULL; } EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_find_by_symbol); __printf(3, 4) void nf_ct_helper_log(struct sk_buff *skb, const struct nf_conn *ct, const char *fmt, ...) 
{ const struct nf_conn_help *help; const struct nf_conntrack_helper *helper; struct va_format vaf; va_list args; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; /* Called from the helper function, this call never fails */ help = nfct_help(ct); /* rcu_read_lock()ed by nf_hook_thresh */ helper = rcu_dereference(help->helper); nf_log_packet(nf_ct_net(ct), nf_ct_l3num(ct), 0, skb, NULL, NULL, NULL, "nf_ct_%s: dropping packet: %pV ", helper->name, &vaf); va_end(args); } EXPORT_SYMBOL_GPL(nf_ct_helper_log); int nf_conntrack_helper_register(struct nf_conntrack_helper *me) { struct nf_conntrack_tuple_mask mask = { .src.u.all = htons(0xFFFF) }; unsigned int h = helper_hash(&me->tuple); struct nf_conntrack_helper *cur; int ret = 0, i; BUG_ON(me->expect_policy == NULL); BUG_ON(me->expect_class_max >= NF_CT_MAX_EXPECT_CLASSES); BUG_ON(strlen(me->name) > NF_CT_HELPER_NAME_LEN - 1); if (!nf_ct_helper_hash) return -ENOENT; if (me->expect_policy->max_expected > NF_CT_EXPECT_MAX_CNT) return -EINVAL; mutex_lock(&nf_ct_helper_mutex); for (i = 0; i < nf_ct_helper_hsize; i++) { hlist_for_each_entry(cur, &nf_ct_helper_hash[i], hnode) { if (!strcmp(cur->name, me->name) && (cur->tuple.src.l3num == NFPROTO_UNSPEC || cur->tuple.src.l3num == me->tuple.src.l3num) && cur->tuple.dst.protonum == me->tuple.dst.protonum) { ret = -EEXIST; goto out; } } } /* avoid unpredictable behaviour for auto_assign_helper */ if (!(me->flags & NF_CT_HELPER_F_USERSPACE)) { hlist_for_each_entry(cur, &nf_ct_helper_hash[h], hnode) { if (nf_ct_tuple_src_mask_cmp(&cur->tuple, &me->tuple, &mask)) { ret = -EEXIST; goto out; } } } refcount_set(&me->refcnt, 1); hlist_add_head_rcu(&me->hnode, &nf_ct_helper_hash[h]); nf_ct_helper_count++; out: mutex_unlock(&nf_ct_helper_mutex); return ret; } EXPORT_SYMBOL_GPL(nf_conntrack_helper_register); static bool expect_iter_me(struct nf_conntrack_expect *exp, void *data) { struct nf_conn_help *help = nfct_help(exp->master); const struct nf_conntrack_helper *me = data; const struct nf_conntrack_helper *this; if (exp->helper == me) return true; this = rcu_dereference_protected(help->helper, lockdep_is_held(&nf_conntrack_expect_lock)); return this == me; } void nf_conntrack_helper_unregister(struct nf_conntrack_helper *me) { mutex_lock(&nf_ct_helper_mutex); hlist_del_rcu(&me->hnode); nf_ct_helper_count--; mutex_unlock(&nf_ct_helper_mutex); /* Make sure every nothing is still using the helper unless its a * connection in the hash. 
*/ synchronize_rcu(); nf_ct_expect_iterate_destroy(expect_iter_me, NULL); nf_ct_iterate_destroy(unhelp, me); } EXPORT_SYMBOL_GPL(nf_conntrack_helper_unregister); void nf_ct_helper_init(struct nf_conntrack_helper *helper, u16 l3num, u16 protonum, const char *name, u16 default_port, u16 spec_port, u32 id, const struct nf_conntrack_expect_policy *exp_pol, u32 expect_class_max, int (*help)(struct sk_buff *skb, unsigned int protoff, struct nf_conn *ct, enum ip_conntrack_info ctinfo), int (*from_nlattr)(struct nlattr *attr, struct nf_conn *ct), struct module *module) { helper->tuple.src.l3num = l3num; helper->tuple.dst.protonum = protonum; helper->tuple.src.u.all = htons(spec_port); helper->expect_policy = exp_pol; helper->expect_class_max = expect_class_max; helper->help = help; helper->from_nlattr = from_nlattr; helper->me = module; snprintf(helper->nat_mod_name, sizeof(helper->nat_mod_name), NF_NAT_HELPER_PREFIX "%s", name); if (spec_port == default_port) snprintf(helper->name, sizeof(helper->name), "%s", name); else snprintf(helper->name, sizeof(helper->name), "%s-%u", name, id); } EXPORT_SYMBOL_GPL(nf_ct_helper_init); int nf_conntrack_helpers_register(struct nf_conntrack_helper *helper, unsigned int n) { unsigned int i; int err = 0; for (i = 0; i < n; i++) { err = nf_conntrack_helper_register(&helper[i]); if (err < 0) goto err; } return err; err: if (i > 0) nf_conntrack_helpers_unregister(helper, i); return err; } EXPORT_SYMBOL_GPL(nf_conntrack_helpers_register); void nf_conntrack_helpers_unregister(struct nf_conntrack_helper *helper, unsigned int n) { while (n-- > 0) nf_conntrack_helper_unregister(&helper[n]); } EXPORT_SYMBOL_GPL(nf_conntrack_helpers_unregister); void nf_nat_helper_register(struct nf_conntrack_nat_helper *nat) { mutex_lock(&nf_ct_nat_helpers_mutex); list_add_rcu(&nat->list, &nf_ct_nat_helpers); mutex_unlock(&nf_ct_nat_helpers_mutex); } EXPORT_SYMBOL_GPL(nf_nat_helper_register); void nf_nat_helper_unregister(struct nf_conntrack_nat_helper *nat) { mutex_lock(&nf_ct_nat_helpers_mutex); list_del_rcu(&nat->list); mutex_unlock(&nf_ct_nat_helpers_mutex); } EXPORT_SYMBOL_GPL(nf_nat_helper_unregister); int nf_conntrack_helper_init(void) { nf_ct_helper_hsize = 1; /* gets rounded up to use one page */ nf_ct_helper_hash = nf_ct_alloc_hashtable(&nf_ct_helper_hsize, 0); if (!nf_ct_helper_hash) return -ENOMEM; INIT_LIST_HEAD(&nf_ct_nat_helpers); return 0; } void nf_conntrack_helper_fini(void) { kvfree(nf_ct_helper_hash); nf_ct_helper_hash = NULL; } |
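helper_hash() above is small enough to reason about directly: it folds the L3 family, the L4 protocol number and the (network-order) source port into one value, then reduces it modulo the table size. The sketch below reproduces that arithmetic in user space with an illustrative table size and the classic IPv4/TCP/port-21 ("ftp") tuple; the hsize value and the standalone main() are assumptions for the demo, not kernel behaviour.

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

/* same mixing as the kernel's helper_hash(): family, protocol, be16 port */
static unsigned int helper_hash(uint16_t l3num, uint8_t protonum,
                                uint16_t port_be, unsigned int hsize)
{
        return (((l3num << 8) | protonum) ^ port_be) % hsize;
}

int main(void)
{
        unsigned int hsize = 64;        /* illustrative table size */
        uint16_t port = htons(21);      /* FTP control port, network byte order */

        /* 2 = AF_INET, 6 = IPPROTO_TCP */
        printf("bucket = %u\n", helper_hash(2, 6, port, hsize));
        return 0;
}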
// SPDX-License-Identifier: GPL-2.0+ /* * A wrapper for multiple PHYs which passes all phy_* function calls to * multiple (actual) PHY devices. This comes in handy when initializing * all PHYs on a HCD and to keep them all in the same state. * * Copyright (C) 2018 Martin Blumenstingl <martin.blumenstingl@googlemail.com> */ #include <linux/device.h> #include <linux/list.h> #include <linux/phy/phy.h> #include <linux/of.h> #include "phy.h" struct usb_phy_roothub { struct phy *phy; struct list_head list; }; /* Allocate a roothub_entry for the PHY with the given name */ static int usb_phy_roothub_add_phy_by_name(struct device *dev, const char *name, struct list_head *list) { struct usb_phy_roothub *roothub_entry; struct phy *phy; phy = devm_of_phy_get(dev, dev->of_node, name); if (IS_ERR(phy)) return PTR_ERR(phy); roothub_entry = devm_kzalloc(dev, sizeof(*roothub_entry), GFP_KERNEL); if (!roothub_entry) return -ENOMEM; INIT_LIST_HEAD(&roothub_entry->list); roothub_entry->phy = phy; list_add_tail(&roothub_entry->list, list); return 0; } static int usb_phy_roothub_add_phy(struct device *dev, int index, struct list_head *list) { struct usb_phy_roothub *roothub_entry; struct phy *phy; phy = devm_of_phy_get_by_index(dev, dev->of_node, index); if (IS_ERR(phy)) { if (PTR_ERR(phy) == -ENODEV) return 0; else return PTR_ERR(phy); } roothub_entry = devm_kzalloc(dev, sizeof(*roothub_entry), GFP_KERNEL); if (!roothub_entry) return -ENOMEM; INIT_LIST_HEAD(&roothub_entry->list); roothub_entry->phy = phy; list_add_tail(&roothub_entry->list, list); return 0; } struct usb_phy_roothub *usb_phy_roothub_alloc(struct device *dev) { struct usb_phy_roothub *phy_roothub; int i, num_phys, err; if (!IS_ENABLED(CONFIG_GENERIC_PHY)) return NULL; num_phys = of_count_phandle_with_args(dev->of_node, "phys", "#phy-cells"); if (num_phys <= 0) return NULL; phy_roothub = devm_kzalloc(dev, sizeof(*phy_roothub), GFP_KERNEL); if (!phy_roothub) return ERR_PTR(-ENOMEM); INIT_LIST_HEAD(&phy_roothub->list); if (!usb_phy_roothub_add_phy_by_name(dev, "usb2-phy", &phy_roothub->list)) return phy_roothub; for (i = 0; i < num_phys; i++) { err = usb_phy_roothub_add_phy(dev, i,
&phy_roothub->list); if (err) return ERR_PTR(err); } return phy_roothub; } EXPORT_SYMBOL_GPL(usb_phy_roothub_alloc); /** * usb_phy_roothub_alloc_usb3_phy - alloc the roothub * @dev: the device of the host controller * * Allocate the usb phy roothub if the host use a generic usb3-phy. * * Return: On success, a pointer to the usb_phy_roothub. Otherwise, * %NULL if no use usb3 phy or %-ENOMEM if out of memory. */ struct usb_phy_roothub *usb_phy_roothub_alloc_usb3_phy(struct device *dev) { struct usb_phy_roothub *phy_roothub; int num_phys; if (!IS_ENABLED(CONFIG_GENERIC_PHY)) return NULL; num_phys = of_count_phandle_with_args(dev->of_node, "phys", "#phy-cells"); if (num_phys <= 0) return NULL; phy_roothub = devm_kzalloc(dev, sizeof(*phy_roothub), GFP_KERNEL); if (!phy_roothub) return ERR_PTR(-ENOMEM); INIT_LIST_HEAD(&phy_roothub->list); if (!usb_phy_roothub_add_phy_by_name(dev, "usb3-phy", &phy_roothub->list)) return phy_roothub; return NULL; } EXPORT_SYMBOL_GPL(usb_phy_roothub_alloc_usb3_phy); int usb_phy_roothub_init(struct usb_phy_roothub *phy_roothub) { struct usb_phy_roothub *roothub_entry; struct list_head *head; int err; if (!phy_roothub) return 0; head = &phy_roothub->list; list_for_each_entry(roothub_entry, head, list) { err = phy_init(roothub_entry->phy); if (err) goto err_exit_phys; } return 0; err_exit_phys: list_for_each_entry_continue_reverse(roothub_entry, head, list) phy_exit(roothub_entry->phy); return err; } EXPORT_SYMBOL_GPL(usb_phy_roothub_init); int usb_phy_roothub_exit(struct usb_phy_roothub *phy_roothub) { struct usb_phy_roothub *roothub_entry; struct list_head *head; int err, ret = 0; if (!phy_roothub) return 0; head = &phy_roothub->list; list_for_each_entry(roothub_entry, head, list) { err = phy_exit(roothub_entry->phy); if (err) ret = err; } return ret; } EXPORT_SYMBOL_GPL(usb_phy_roothub_exit); int usb_phy_roothub_set_mode(struct usb_phy_roothub *phy_roothub, enum phy_mode mode) { struct usb_phy_roothub *roothub_entry; struct list_head *head; int err; if (!phy_roothub) return 0; head = &phy_roothub->list; list_for_each_entry(roothub_entry, head, list) { err = phy_set_mode(roothub_entry->phy, mode); if (err) goto err_out; } return 0; err_out: list_for_each_entry_continue_reverse(roothub_entry, head, list) phy_power_off(roothub_entry->phy); return err; } EXPORT_SYMBOL_GPL(usb_phy_roothub_set_mode); int usb_phy_roothub_calibrate(struct usb_phy_roothub *phy_roothub) { struct usb_phy_roothub *roothub_entry; struct list_head *head; int err; if (!phy_roothub) return 0; head = &phy_roothub->list; list_for_each_entry(roothub_entry, head, list) { err = phy_calibrate(roothub_entry->phy); if (err) return err; } return 0; } EXPORT_SYMBOL_GPL(usb_phy_roothub_calibrate); /** * usb_phy_roothub_notify_connect() - connect notification * @phy_roothub: the phy of roothub, if the host use a generic phy. * @port: the port index for connect * * If the phy needs to get connection status, the callback can be used. 
* Returns: %0 if successful, a negative error code otherwise */ int usb_phy_roothub_notify_connect(struct usb_phy_roothub *phy_roothub, int port) { struct usb_phy_roothub *roothub_entry; struct list_head *head; int err; if (!phy_roothub) return 0; head = &phy_roothub->list; list_for_each_entry(roothub_entry, head, list) { err = phy_notify_connect(roothub_entry->phy, port); if (err) return err; } return 0; } EXPORT_SYMBOL_GPL(usb_phy_roothub_notify_connect); /** * usb_phy_roothub_notify_disconnect() - disconnect notification * @phy_roothub: the phy of roothub, if the host use a generic phy. * @port: the port index for disconnect * * If the phy needs to get connection status, the callback can be used. * Returns: %0 if successful, a negative error code otherwise */ int usb_phy_roothub_notify_disconnect(struct usb_phy_roothub *phy_roothub, int port) { struct usb_phy_roothub *roothub_entry; struct list_head *head; int err; if (!phy_roothub) return 0; head = &phy_roothub->list; list_for_each_entry(roothub_entry, head, list) { err = phy_notify_disconnect(roothub_entry->phy, port); if (err) return err; } return 0; } EXPORT_SYMBOL_GPL(usb_phy_roothub_notify_disconnect); int usb_phy_roothub_power_on(struct usb_phy_roothub *phy_roothub) { struct usb_phy_roothub *roothub_entry; struct list_head *head; int err; if (!phy_roothub) return 0; head = &phy_roothub->list; list_for_each_entry(roothub_entry, head, list) { err = phy_power_on(roothub_entry->phy); if (err) goto err_out; } return 0; err_out: list_for_each_entry_continue_reverse(roothub_entry, head, list) phy_power_off(roothub_entry->phy); return err; } EXPORT_SYMBOL_GPL(usb_phy_roothub_power_on); void usb_phy_roothub_power_off(struct usb_phy_roothub *phy_roothub) { struct usb_phy_roothub *roothub_entry; if (!phy_roothub) return; list_for_each_entry_reverse(roothub_entry, &phy_roothub->list, list) phy_power_off(roothub_entry->phy); } EXPORT_SYMBOL_GPL(usb_phy_roothub_power_off); int usb_phy_roothub_suspend(struct device *controller_dev, struct usb_phy_roothub *phy_roothub) { usb_phy_roothub_power_off(phy_roothub); /* keep the PHYs initialized so the device can wake up the system */ if (device_may_wakeup(controller_dev)) return 0; return usb_phy_roothub_exit(phy_roothub); } EXPORT_SYMBOL_GPL(usb_phy_roothub_suspend); int usb_phy_roothub_resume(struct device *controller_dev, struct usb_phy_roothub *phy_roothub) { int err; /* if the device can't wake up the system _exit was called */ if (!device_may_wakeup(controller_dev)) { err = usb_phy_roothub_init(phy_roothub); if (err) return err; } err = usb_phy_roothub_power_on(phy_roothub); /* undo _init if _power_on failed */ if (err && !device_may_wakeup(controller_dev)) usb_phy_roothub_exit(phy_roothub); return err; } EXPORT_SYMBOL_GPL(usb_phy_roothub_resume); |
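Every operation in this file follows the same shape: walk the list of PHYs, and on the first failure undo the entries that already succeeded by walking back (what list_for_each_entry_continue_reverse() does on the kernel list). Below is a self-contained user-space sketch of that init/rollback pattern over a plain array; phy_init()/phy_exit() here are dummy stand-ins, not the kernel's generic PHY API.

#include <stdio.h>

#define NUM_PHYS 3

/* fake PHY ops: the last one fails to demonstrate the unwind path */
static int phy_init(int i)  { printf("init phy %d\n", i); return i == 2 ? -1 : 0; }
static void phy_exit(int i) { printf("exit phy %d\n", i); }

static int roothub_init_all(void)
{
        int i, err;

        for (i = 0; i < NUM_PHYS; i++) {
                err = phy_init(i);
                if (err)
                        goto err_exit_phys;
        }
        return 0;

err_exit_phys:
        /* unwind only the PHYs that were successfully initialized, in reverse */
        while (--i >= 0)
                phy_exit(i);
        return err;
}

int main(void)
{
        return roothub_init_all() ? 1 : 0;
}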
// SPDX-License-Identifier: GPL-2.0-only /* * Landlock - Credential hooks * * Copyright © 2017-2020 Mickaël Salaün <mic@digikod.net> * Copyright © 2018-2020 ANSSI * Copyright © 2024-2025 Microsoft Corporation */ #include <linux/binfmts.h> #include <linux/cred.h> #include <linux/lsm_hooks.h> #include "common.h" #include "cred.h" #include "ruleset.h" #include "setup.h" static void hook_cred_transfer(struct cred *const new, const struct cred *const old) { const struct landlock_cred_security *const old_llcred = landlock_cred(old); if (old_llcred->domain) { landlock_get_ruleset(old_llcred->domain); *landlock_cred(new) = *old_llcred; } } static int hook_cred_prepare(struct cred *const new, const struct cred *const old, const gfp_t gfp) { hook_cred_transfer(new, old); return 0; } static void hook_cred_free(struct cred *const cred) { struct landlock_ruleset *const dom = landlock_cred(cred)->domain; if (dom) landlock_put_ruleset_deferred(dom); } #ifdef CONFIG_AUDIT static int hook_bprm_creds_for_exec(struct linux_binprm *const bprm) { /* Resets for each execution. */ landlock_cred(bprm->cred)->domain_exec = 0; return 0; } #endif /* CONFIG_AUDIT */ static struct security_hook_list landlock_hooks[] __ro_after_init = { LSM_HOOK_INIT(cred_prepare, hook_cred_prepare), LSM_HOOK_INIT(cred_transfer, hook_cred_transfer), LSM_HOOK_INIT(cred_free, hook_cred_free), #ifdef CONFIG_AUDIT LSM_HOOK_INIT(bprm_creds_for_exec, hook_bprm_creds_for_exec), #endif /* CONFIG_AUDIT */ }; __init void landlock_add_cred_hooks(void) { security_add_hooks(landlock_hooks, ARRAY_SIZE(landlock_hooks), &landlock_lsmid); }
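The Landlock credential hooks above reduce to a reference-count handshake: duplicating a credential takes one reference on the shared domain (hook_cred_prepare()/hook_cred_transfer()), and freeing a credential drops one (hook_cred_free()). The following user-space sketch mirrors that lifecycle with a toy refcounted struct; it illustrates the ownership rule only and is not the kernel's landlock_get_ruleset()/landlock_put_ruleset_deferred() implementation.

#include <stdio.h>
#include <stdlib.h>

struct domain {
        int refcount;
};

struct cred {
        struct domain *domain;
};

static void domain_get(struct domain *d) { d->refcount++; }

static void domain_put(struct domain *d)
{
        if (--d->refcount == 0) {
                printf("domain released\n");
                free(d);
        }
}

/* analogous to hook_cred_transfer(): share the domain, take a reference */
static void cred_transfer(struct cred *new, const struct cred *old)
{
        if (old->domain) {
                domain_get(old->domain);
                *new = *old;
        }
}

/* analogous to hook_cred_free(): drop this credential's reference */
static void cred_free(struct cred *c)
{
        if (c->domain)
                domain_put(c->domain);
}

int main(void)
{
        struct domain *d = calloc(1, sizeof(*d));
        struct cred parent = { 0 }, child = { 0 };

        d->refcount = 1;
        parent.domain = d;

        cred_transfer(&child, &parent); /* fork-like duplication */
        cred_free(&parent);
        cred_free(&child);              /* last put frees the domain */
        return 0;
}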
// SPDX-License-Identifier: GPL-2.0+ /* * Transport & Protocol Driver for In-System Design, Inc.
ISD200 ASIC * * Current development and maintenance: * (C) 2001-2002 Björn Stenberg (bjorn@haxx.se) * * Developed with the assistance of: * (C) 2002 Alan Stern <stern@rowland.org> * * Initial work: * (C) 2000 In-System Design, Inc. (support@in-system.com) * * The ISD200 ASIC does not natively support ATA devices. The chip * does implement an interface, the ATA Command Block (ATACB) which provides * a means of passing ATA commands and ATA register accesses to a device. * * History: * * 2002-10-19: Removed the specialized transfer routines. * (Alan Stern <stern@rowland.harvard.edu>) * 2001-02-24: Removed lots of duplicate code and simplified the structure. * (bjorn@haxx.se) * 2002-01-16: Fixed endianness bug so it works on the ppc arch. * (Luc Saillard <luc@saillard.org>) * 2002-01-17: All bitfields removed. * (bjorn@haxx.se) */ /* Include files */ #include <linux/jiffies.h> #include <linux/errno.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/ata.h> #include <linux/hdreg.h> #include <linux/scatterlist.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include "usb.h" #include "transport.h" #include "protocol.h" #include "debug.h" #include "scsiglue.h" #define DRV_NAME "ums-isd200" MODULE_DESCRIPTION("Driver for In-System Design, Inc. ISD200 ASIC"); MODULE_AUTHOR("Björn Stenberg <bjorn@haxx.se>"); MODULE_LICENSE("GPL"); MODULE_IMPORT_NS("USB_STORAGE"); static int isd200_Initialization(struct us_data *us); /* * The table of devices */ #define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \ vendorName, productName, useProtocol, useTransport, \ initFunction, flags) \ { USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \ .driver_info = (flags) } static const struct usb_device_id isd200_usb_ids[] = { # include "unusual_isd200.h" { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, isd200_usb_ids); #undef UNUSUAL_DEV /* * The flags table */ #define UNUSUAL_DEV(idVendor, idProduct, bcdDeviceMin, bcdDeviceMax, \ vendor_name, product_name, use_protocol, use_transport, \ init_function, Flags) \ { \ .vendorName = vendor_name, \ .productName = product_name, \ .useProtocol = use_protocol, \ .useTransport = use_transport, \ .initFunction = init_function, \ } static const struct us_unusual_dev isd200_unusual_dev_list[] = { # include "unusual_isd200.h" { } /* Terminating entry */ }; #undef UNUSUAL_DEV /* Timeout defines (in Seconds) */ #define ISD200_ENUM_BSY_TIMEOUT 35 #define ISD200_ENUM_DETECT_TIMEOUT 30 #define ISD200_DEFAULT_TIMEOUT 30 /* device flags */ #define DF_ATA_DEVICE 0x0001 #define DF_MEDIA_STATUS_ENABLED 0x0002 #define DF_REMOVABLE_MEDIA 0x0004 /* capability bit definitions */ #define CAPABILITY_DMA 0x01 #define CAPABILITY_LBA 0x02 /* command_setX bit definitions */ #define COMMANDSET_REMOVABLE 0x02 #define COMMANDSET_MEDIA_STATUS 0x10 /* ATA Vendor Specific defines */ #define ATA_ADDRESS_DEVHEAD_STD 0xa0 #define ATA_ADDRESS_DEVHEAD_LBA_MODE 0x40 #define ATA_ADDRESS_DEVHEAD_SLAVE 0x10 /* Action Select bits */ #define ACTION_SELECT_0 0x01 #define ACTION_SELECT_1 0x02 #define ACTION_SELECT_2 0x04 #define ACTION_SELECT_3 0x08 #define ACTION_SELECT_4 0x10 #define ACTION_SELECT_5 0x20 #define ACTION_SELECT_6 0x40 #define ACTION_SELECT_7 0x80 /* Register Select bits */ #define REG_ALTERNATE_STATUS 0x01 #define REG_DEVICE_CONTROL 0x01 #define REG_ERROR 0x02 #define REG_FEATURES 0x02 #define REG_SECTOR_COUNT 0x04 #define REG_SECTOR_NUMBER 0x08 #define REG_CYLINDER_LOW 0x10 #define REG_CYLINDER_HIGH 0x20 
#define REG_DEVICE_HEAD 0x40 #define REG_STATUS 0x80 #define REG_COMMAND 0x80 /* ATA registers offset definitions */ #define ATA_REG_ERROR_OFFSET 1 #define ATA_REG_LCYL_OFFSET 4 #define ATA_REG_HCYL_OFFSET 5 #define ATA_REG_STATUS_OFFSET 7 /* ATA error definitions not in <linux/hdreg.h> */ #define ATA_ERROR_MEDIA_CHANGE 0x20 /* ATA command definitions not in <linux/hdreg.h> */ #define ATA_COMMAND_GET_MEDIA_STATUS 0xDA #define ATA_COMMAND_MEDIA_EJECT 0xED /* ATA drive control definitions */ #define ATA_DC_DISABLE_INTERRUPTS 0x02 #define ATA_DC_RESET_CONTROLLER 0x04 #define ATA_DC_REENABLE_CONTROLLER 0x00 /* * General purpose return codes */ #define ISD200_ERROR -1 #define ISD200_GOOD 0 /* * Transport return codes */ #define ISD200_TRANSPORT_GOOD 0 /* Transport good, command good */ #define ISD200_TRANSPORT_FAILED 1 /* Transport good, command failed */ #define ISD200_TRANSPORT_ERROR 2 /* Transport bad (i.e. device dead) */ /* driver action codes */ #define ACTION_READ_STATUS 0 #define ACTION_RESET 1 #define ACTION_REENABLE 2 #define ACTION_SOFT_RESET 3 #define ACTION_ENUM 4 #define ACTION_IDENTIFY 5 /* * ata_cdb struct */ union ata_cdb { struct { unsigned char SignatureByte0; unsigned char SignatureByte1; unsigned char ActionSelect; unsigned char RegisterSelect; unsigned char TransferBlockSize; unsigned char WriteData3F6; unsigned char WriteData1F1; unsigned char WriteData1F2; unsigned char WriteData1F3; unsigned char WriteData1F4; unsigned char WriteData1F5; unsigned char WriteData1F6; unsigned char WriteData1F7; unsigned char Reserved[3]; } generic; struct { unsigned char SignatureByte0; unsigned char SignatureByte1; unsigned char ActionSelect; unsigned char RegisterSelect; unsigned char TransferBlockSize; unsigned char AlternateStatusByte; unsigned char ErrorByte; unsigned char SectorCountByte; unsigned char SectorNumberByte; unsigned char CylinderLowByte; unsigned char CylinderHighByte; unsigned char DeviceHeadByte; unsigned char StatusByte; unsigned char Reserved[3]; } read; struct { unsigned char SignatureByte0; unsigned char SignatureByte1; unsigned char ActionSelect; unsigned char RegisterSelect; unsigned char TransferBlockSize; unsigned char DeviceControlByte; unsigned char FeaturesByte; unsigned char SectorCountByte; unsigned char SectorNumberByte; unsigned char CylinderLowByte; unsigned char CylinderHighByte; unsigned char DeviceHeadByte; unsigned char CommandByte; unsigned char Reserved[3]; } write; }; /* * Inquiry data structure. This is the data returned from the target * after it receives an inquiry. * * This structure may be extended by the number of bytes specified * in the field AdditionalLength. The defined size constant only * includes fields through ProductRevisionLevel. 
*/ /* * DeviceType field */ #define DIRECT_ACCESS_DEVICE 0x00 /* disks */ #define DEVICE_REMOVABLE 0x80 struct inquiry_data { unsigned char DeviceType; unsigned char DeviceTypeModifier; unsigned char Versions; unsigned char Format; unsigned char AdditionalLength; unsigned char Reserved[2]; unsigned char Capability; unsigned char VendorId[8]; unsigned char ProductId[16]; unsigned char ProductRevisionLevel[4]; unsigned char VendorSpecific[20]; unsigned char Reserved3[40]; } __attribute__ ((packed)); /* * INQUIRY data buffer size */ #define INQUIRYDATABUFFERSIZE 36 /* * ISD200 CONFIG data struct */ #define ATACFG_TIMING 0x0f #define ATACFG_ATAPI_RESET 0x10 #define ATACFG_MASTER 0x20 #define ATACFG_BLOCKSIZE 0xa0 #define ATACFGE_LAST_LUN 0x07 #define ATACFGE_DESC_OVERRIDE 0x08 #define ATACFGE_STATE_SUSPEND 0x10 #define ATACFGE_SKIP_BOOT 0x20 #define ATACFGE_CONF_DESC2 0x40 #define ATACFGE_INIT_STATUS 0x80 #define CFG_CAPABILITY_SRST 0x01 struct isd200_config { unsigned char EventNotification; unsigned char ExternalClock; unsigned char ATAInitTimeout; unsigned char ATAConfig; unsigned char ATAMajorCommand; unsigned char ATAMinorCommand; unsigned char ATAExtraConfig; unsigned char Capability; }__attribute__ ((packed)); /* * ISD200 driver information struct */ struct isd200_info { struct inquiry_data InquiryData; u16 *id; struct isd200_config ConfigData; unsigned char *RegsBuf; unsigned char ATARegs[8]; unsigned char DeviceHead; unsigned char DeviceFlags; /* maximum number of LUNs supported */ unsigned char MaxLUNs; unsigned char cmnd[MAX_COMMAND_SIZE]; struct scsi_cmnd srb; struct scatterlist sg; }; /* * Read Capacity Data - returned in Big Endian format */ struct read_capacity_data { __be32 LogicalBlockAddress; __be32 BytesPerBlock; }; /* * Read Block Limits Data - returned in Big Endian format * This structure returns the maximum and minimum block * size for a TAPE device. */ struct read_block_limits { unsigned char Reserved; unsigned char BlockMaximumSize[3]; unsigned char BlockMinimumSize[2]; }; /* * Sense Data Format */ #define SENSE_ERRCODE 0x7f #define SENSE_ERRCODE_VALID 0x80 #define SENSE_FLAG_SENSE_KEY 0x0f #define SENSE_FLAG_BAD_LENGTH 0x20 #define SENSE_FLAG_END_OF_MEDIA 0x40 #define SENSE_FLAG_FILE_MARK 0x80 struct sense_data { unsigned char ErrorCode; unsigned char SegmentNumber; unsigned char Flags; unsigned char Information[4]; unsigned char AdditionalSenseLength; unsigned char CommandSpecificInformation[4]; unsigned char AdditionalSenseCode; unsigned char AdditionalSenseCodeQualifier; unsigned char FieldReplaceableUnitCode; unsigned char SenseKeySpecific[3]; } __attribute__ ((packed)); /* * Default request sense buffer size */ #define SENSE_BUFFER_SIZE 18 /*********************************************************************** * Helper routines ***********************************************************************/ /************************************************************************** * isd200_build_sense * * Builds an artificial sense buffer to report the results of a * failed command. 
* * RETURNS: * void */ static void isd200_build_sense(struct us_data *us, struct scsi_cmnd *srb) { struct isd200_info *info = (struct isd200_info *)us->extra; struct sense_data *buf = (struct sense_data *) &srb->sense_buffer[0]; unsigned char error = info->ATARegs[ATA_REG_ERROR_OFFSET]; if(error & ATA_ERROR_MEDIA_CHANGE) { buf->ErrorCode = 0x70 | SENSE_ERRCODE_VALID; buf->AdditionalSenseLength = 0xb; buf->Flags = UNIT_ATTENTION; buf->AdditionalSenseCode = 0; buf->AdditionalSenseCodeQualifier = 0; } else if (error & ATA_MCR) { buf->ErrorCode = 0x70 | SENSE_ERRCODE_VALID; buf->AdditionalSenseLength = 0xb; buf->Flags = UNIT_ATTENTION; buf->AdditionalSenseCode = 0; buf->AdditionalSenseCodeQualifier = 0; } else if (error & ATA_TRK0NF) { buf->ErrorCode = 0x70 | SENSE_ERRCODE_VALID; buf->AdditionalSenseLength = 0xb; buf->Flags = NOT_READY; buf->AdditionalSenseCode = 0; buf->AdditionalSenseCodeQualifier = 0; } else if (error & ATA_UNC) { buf->ErrorCode = 0x70 | SENSE_ERRCODE_VALID; buf->AdditionalSenseLength = 0xb; buf->Flags = DATA_PROTECT; buf->AdditionalSenseCode = 0; buf->AdditionalSenseCodeQualifier = 0; } else { buf->ErrorCode = 0; buf->AdditionalSenseLength = 0; buf->Flags = 0; buf->AdditionalSenseCode = 0; buf->AdditionalSenseCodeQualifier = 0; } } /*********************************************************************** * Transport routines ***********************************************************************/ /************************************************************************** * isd200_set_srb(), isd200_srb_set_bufflen() * * Two helpers to facilitate in initialization of scsi_cmnd structure * Will need to change when struct scsi_cmnd changes */ static void isd200_set_srb(struct isd200_info *info, enum dma_data_direction dir, void* buff, unsigned bufflen) { struct scsi_cmnd *srb = &info->srb; if (buff) sg_init_one(&info->sg, buff, bufflen); srb->sc_data_direction = dir; srb->sdb.table.sgl = buff ? &info->sg : NULL; srb->sdb.length = bufflen; srb->sdb.table.nents = buff ? 
1 : 0; } static void isd200_srb_set_bufflen(struct scsi_cmnd *srb, unsigned bufflen) { srb->sdb.length = bufflen; } /************************************************************************** * isd200_action * * Routine for sending commands to the isd200 * * RETURNS: * ISD status code */ static int isd200_action( struct us_data *us, int action, void* pointer, int value ) { union ata_cdb ata; /* static to prevent this large struct being placed on the valuable stack */ static struct scsi_device srb_dev; struct isd200_info *info = (struct isd200_info *)us->extra; struct scsi_cmnd *srb = &info->srb; int status; memset(&ata, 0, sizeof(ata)); memcpy(srb->cmnd, info->cmnd, MAX_COMMAND_SIZE); srb->device = &srb_dev; ata.generic.SignatureByte0 = info->ConfigData.ATAMajorCommand; ata.generic.SignatureByte1 = info->ConfigData.ATAMinorCommand; ata.generic.TransferBlockSize = 1; switch ( action ) { case ACTION_READ_STATUS: usb_stor_dbg(us, " isd200_action(READ_STATUS)\n"); ata.generic.ActionSelect = ACTION_SELECT_0|ACTION_SELECT_2; ata.generic.RegisterSelect = REG_CYLINDER_LOW | REG_CYLINDER_HIGH | REG_STATUS | REG_ERROR; isd200_set_srb(info, DMA_FROM_DEVICE, pointer, value); break; case ACTION_ENUM: usb_stor_dbg(us, " isd200_action(ENUM,0x%02x)\n", value); ata.generic.ActionSelect = ACTION_SELECT_1|ACTION_SELECT_2| ACTION_SELECT_3|ACTION_SELECT_4| ACTION_SELECT_5; ata.generic.RegisterSelect = REG_DEVICE_HEAD; ata.write.DeviceHeadByte = value; isd200_set_srb(info, DMA_NONE, NULL, 0); break; case ACTION_RESET: usb_stor_dbg(us, " isd200_action(RESET)\n"); ata.generic.ActionSelect = ACTION_SELECT_1|ACTION_SELECT_2| ACTION_SELECT_3|ACTION_SELECT_4; ata.generic.RegisterSelect = REG_DEVICE_CONTROL; ata.write.DeviceControlByte = ATA_DC_RESET_CONTROLLER; isd200_set_srb(info, DMA_NONE, NULL, 0); break; case ACTION_REENABLE: usb_stor_dbg(us, " isd200_action(REENABLE)\n"); ata.generic.ActionSelect = ACTION_SELECT_1|ACTION_SELECT_2| ACTION_SELECT_3|ACTION_SELECT_4; ata.generic.RegisterSelect = REG_DEVICE_CONTROL; ata.write.DeviceControlByte = ATA_DC_REENABLE_CONTROLLER; isd200_set_srb(info, DMA_NONE, NULL, 0); break; case ACTION_SOFT_RESET: usb_stor_dbg(us, " isd200_action(SOFT_RESET)\n"); ata.generic.ActionSelect = ACTION_SELECT_1|ACTION_SELECT_5; ata.generic.RegisterSelect = REG_DEVICE_HEAD | REG_COMMAND; ata.write.DeviceHeadByte = info->DeviceHead; ata.write.CommandByte = ATA_CMD_DEV_RESET; isd200_set_srb(info, DMA_NONE, NULL, 0); break; case ACTION_IDENTIFY: usb_stor_dbg(us, " isd200_action(IDENTIFY)\n"); ata.generic.RegisterSelect = REG_COMMAND; ata.write.CommandByte = ATA_CMD_ID_ATA; isd200_set_srb(info, DMA_FROM_DEVICE, info->id, ATA_ID_WORDS * 2); break; default: usb_stor_dbg(us, "Error: Undefined action %d\n", action); return ISD200_ERROR; } memcpy(srb->cmnd, &ata, sizeof(ata.generic)); srb->cmd_len = sizeof(ata.generic); status = usb_stor_Bulk_transport(srb, us); if (status == USB_STOR_TRANSPORT_GOOD) status = ISD200_GOOD; else { usb_stor_dbg(us, " isd200_action(0x%02x) error: %d\n", action, status); status = ISD200_ERROR; /* need to reset device here */ } return status; } /************************************************************************** * isd200_read_regs * * Read ATA Registers * * RETURNS: * ISD status code */ static int isd200_read_regs( struct us_data *us ) { struct isd200_info *info = (struct isd200_info *)us->extra; int retStatus = ISD200_GOOD; int transferStatus; usb_stor_dbg(us, "Entering isd200_IssueATAReadRegs\n"); transferStatus = isd200_action( us, ACTION_READ_STATUS, info->RegsBuf, 
sizeof(info->ATARegs) ); if (transferStatus != ISD200_TRANSPORT_GOOD) { usb_stor_dbg(us, " Error reading ATA registers\n"); retStatus = ISD200_ERROR; } else { memcpy(info->ATARegs, info->RegsBuf, sizeof(info->ATARegs)); usb_stor_dbg(us, " Got ATA Register[ATA_REG_ERROR_OFFSET] = 0x%x\n", info->ATARegs[ATA_REG_ERROR_OFFSET]); } return retStatus; } /************************************************************************** * Invoke the transport and basic error-handling/recovery methods * * This is used by the protocol layers to actually send the message to * the device and receive the response. */ static void isd200_invoke_transport( struct us_data *us, struct scsi_cmnd *srb, union ata_cdb *ataCdb ) { int need_auto_sense = 0; int transferStatus; int result; /* send the command to the transport layer */ memcpy(srb->cmnd, ataCdb, sizeof(ataCdb->generic)); srb->cmd_len = sizeof(ataCdb->generic); transferStatus = usb_stor_Bulk_transport(srb, us); /* * if the command gets aborted by the higher layers, we need to * short-circuit all other processing */ if (test_bit(US_FLIDX_TIMED_OUT, &us->dflags)) { usb_stor_dbg(us, "-- command was aborted\n"); goto Handle_Abort; } switch (transferStatus) { case USB_STOR_TRANSPORT_GOOD: /* Indicate a good result */ srb->result = SAM_STAT_GOOD; break; case USB_STOR_TRANSPORT_NO_SENSE: usb_stor_dbg(us, "-- transport indicates protocol failure\n"); srb->result = SAM_STAT_CHECK_CONDITION; return; case USB_STOR_TRANSPORT_FAILED: usb_stor_dbg(us, "-- transport indicates command failure\n"); need_auto_sense = 1; break; case USB_STOR_TRANSPORT_ERROR: usb_stor_dbg(us, "-- transport indicates transport error\n"); srb->result = DID_ERROR << 16; /* Need reset here */ return; default: usb_stor_dbg(us, "-- transport indicates unknown error\n"); srb->result = DID_ERROR << 16; /* Need reset here */ return; } if ((scsi_get_resid(srb) > 0) && !((srb->cmnd[0] == REQUEST_SENSE) || (srb->cmnd[0] == INQUIRY) || (srb->cmnd[0] == MODE_SENSE) || (srb->cmnd[0] == LOG_SENSE) || (srb->cmnd[0] == MODE_SENSE_10))) { usb_stor_dbg(us, "-- unexpectedly short transfer\n"); need_auto_sense = 1; } if (need_auto_sense) { result = isd200_read_regs(us); if (test_bit(US_FLIDX_TIMED_OUT, &us->dflags)) { usb_stor_dbg(us, "-- auto-sense aborted\n"); goto Handle_Abort; } if (result == ISD200_GOOD) { isd200_build_sense(us, srb); srb->result = SAM_STAT_CHECK_CONDITION; /* If things are really okay, then let's show that */ if ((srb->sense_buffer[2] & 0xf) == 0x0) srb->result = SAM_STAT_GOOD; } else { srb->result = DID_ERROR << 16; /* Need reset here */ } } /* * Regardless of auto-sense, if we _know_ we have an error * condition, show that in the result code */ if (transferStatus == USB_STOR_TRANSPORT_FAILED) srb->result = SAM_STAT_CHECK_CONDITION; return; /* * abort processing: the bulk-only transport requires a reset * following an abort */ Handle_Abort: srb->result = DID_ABORT << 16; /* permit the reset transfer to take place */ clear_bit(US_FLIDX_ABORTING, &us->dflags); /* Need reset here */ } #ifdef CONFIG_USB_STORAGE_DEBUG static void isd200_log_config(struct us_data *us, struct isd200_info *info) { usb_stor_dbg(us, " Event Notification: 0x%x\n", info->ConfigData.EventNotification); usb_stor_dbg(us, " External Clock: 0x%x\n", info->ConfigData.ExternalClock); usb_stor_dbg(us, " ATA Init Timeout: 0x%x\n", info->ConfigData.ATAInitTimeout); usb_stor_dbg(us, " ATAPI Command Block Size: 0x%x\n", (info->ConfigData.ATAConfig & ATACFG_BLOCKSIZE) >> 6); usb_stor_dbg(us, " Master/Slave Selection: 0x%x\n", 
info->ConfigData.ATAConfig & ATACFG_MASTER); usb_stor_dbg(us, " ATAPI Reset: 0x%x\n", info->ConfigData.ATAConfig & ATACFG_ATAPI_RESET); usb_stor_dbg(us, " ATA Timing: 0x%x\n", info->ConfigData.ATAConfig & ATACFG_TIMING); usb_stor_dbg(us, " ATA Major Command: 0x%x\n", info->ConfigData.ATAMajorCommand); usb_stor_dbg(us, " ATA Minor Command: 0x%x\n", info->ConfigData.ATAMinorCommand); usb_stor_dbg(us, " Init Status: 0x%x\n", info->ConfigData.ATAExtraConfig & ATACFGE_INIT_STATUS); usb_stor_dbg(us, " Config Descriptor 2: 0x%x\n", info->ConfigData.ATAExtraConfig & ATACFGE_CONF_DESC2); usb_stor_dbg(us, " Skip Device Boot: 0x%x\n", info->ConfigData.ATAExtraConfig & ATACFGE_SKIP_BOOT); usb_stor_dbg(us, " ATA 3 State Suspend: 0x%x\n", info->ConfigData.ATAExtraConfig & ATACFGE_STATE_SUSPEND); usb_stor_dbg(us, " Descriptor Override: 0x%x\n", info->ConfigData.ATAExtraConfig & ATACFGE_DESC_OVERRIDE); usb_stor_dbg(us, " Last LUN Identifier: 0x%x\n", info->ConfigData.ATAExtraConfig & ATACFGE_LAST_LUN); usb_stor_dbg(us, " SRST Enable: 0x%x\n", info->ConfigData.ATAExtraConfig & CFG_CAPABILITY_SRST); } #endif /************************************************************************** * isd200_write_config * * Write the ISD200 Configuration data * * RETURNS: * ISD status code */ static int isd200_write_config( struct us_data *us ) { struct isd200_info *info = (struct isd200_info *)us->extra; int retStatus = ISD200_GOOD; int result; #ifdef CONFIG_USB_STORAGE_DEBUG usb_stor_dbg(us, "Entering isd200_write_config\n"); usb_stor_dbg(us, " Writing the following ISD200 Config Data:\n"); isd200_log_config(us, info); #endif /* let's send the command via the control pipe */ result = usb_stor_ctrl_transfer( us, us->send_ctrl_pipe, 0x01, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT, 0x0000, 0x0002, (void *) &info->ConfigData, sizeof(info->ConfigData)); if (result >= 0) { usb_stor_dbg(us, " ISD200 Config Data was written successfully\n"); } else { usb_stor_dbg(us, " Request to write ISD200 Config Data failed!\n"); retStatus = ISD200_ERROR; } usb_stor_dbg(us, "Leaving isd200_write_config %08X\n", retStatus); return retStatus; } /************************************************************************** * isd200_read_config * * Reads the ISD200 Configuration data * * RETURNS: * ISD status code */ static int isd200_read_config( struct us_data *us ) { struct isd200_info *info = (struct isd200_info *)us->extra; int retStatus = ISD200_GOOD; int result; usb_stor_dbg(us, "Entering isd200_read_config\n"); /* read the configuration information from ISD200. Use this to */ /* determine what the special ATA CDB bytes are. 
*/ result = usb_stor_ctrl_transfer( us, us->recv_ctrl_pipe, 0x02, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, 0x0000, 0x0002, (void *) &info->ConfigData, sizeof(info->ConfigData)); if (result >= 0) { usb_stor_dbg(us, " Retrieved the following ISD200 Config Data:\n"); #ifdef CONFIG_USB_STORAGE_DEBUG isd200_log_config(us, info); #endif } else { usb_stor_dbg(us, " Request to get ISD200 Config Data failed!\n"); retStatus = ISD200_ERROR; } usb_stor_dbg(us, "Leaving isd200_read_config %08X\n", retStatus); return retStatus; } /************************************************************************** * isd200_atapi_soft_reset * * Perform an Atapi Soft Reset on the device * * RETURNS: * NT status code */ static int isd200_atapi_soft_reset( struct us_data *us ) { int retStatus = ISD200_GOOD; int transferStatus; usb_stor_dbg(us, "Entering isd200_atapi_soft_reset\n"); transferStatus = isd200_action( us, ACTION_SOFT_RESET, NULL, 0 ); if (transferStatus != ISD200_TRANSPORT_GOOD) { usb_stor_dbg(us, " Error issuing Atapi Soft Reset\n"); retStatus = ISD200_ERROR; } usb_stor_dbg(us, "Leaving isd200_atapi_soft_reset %08X\n", retStatus); return retStatus; } /************************************************************************** * isd200_srst * * Perform an SRST on the device * * RETURNS: * ISD status code */ static int isd200_srst( struct us_data *us ) { int retStatus = ISD200_GOOD; int transferStatus; usb_stor_dbg(us, "Entering isd200_SRST\n"); transferStatus = isd200_action( us, ACTION_RESET, NULL, 0 ); /* check to see if this request failed */ if (transferStatus != ISD200_TRANSPORT_GOOD) { usb_stor_dbg(us, " Error issuing SRST\n"); retStatus = ISD200_ERROR; } else { /* delay 10ms to give the drive a chance to see it */ msleep(10); transferStatus = isd200_action( us, ACTION_REENABLE, NULL, 0 ); if (transferStatus != ISD200_TRANSPORT_GOOD) { usb_stor_dbg(us, " Error taking drive out of reset\n"); retStatus = ISD200_ERROR; } else { /* delay 50ms to give the drive a chance to recover after SRST */ msleep(50); } } usb_stor_dbg(us, "Leaving isd200_srst %08X\n", retStatus); return retStatus; } /************************************************************************** * isd200_try_enum * * Helper function for isd200_manual_enum(). Does ENUM and READ_STATUS * and tries to analyze the status registers * * RETURNS: * ISD status code */ static int isd200_try_enum(struct us_data *us, unsigned char master_slave, int detect ) { int status = ISD200_GOOD; unsigned long endTime; struct isd200_info *info = (struct isd200_info *)us->extra; unsigned char *regs = info->RegsBuf; int recheckAsMaster = 0; if ( detect ) endTime = jiffies + ISD200_ENUM_DETECT_TIMEOUT * HZ; else endTime = jiffies + ISD200_ENUM_BSY_TIMEOUT * HZ; /* loop until we detect !BSY or timeout */ while(1) { status = isd200_action( us, ACTION_ENUM, NULL, master_slave ); if ( status != ISD200_GOOD ) break; status = isd200_action( us, ACTION_READ_STATUS, regs, 8 ); if ( status != ISD200_GOOD ) break; if (!detect) { if (regs[ATA_REG_STATUS_OFFSET] & ATA_BUSY) { usb_stor_dbg(us, " %s status is still BSY, try again...\n", master_slave == ATA_ADDRESS_DEVHEAD_STD ? "Master" : "Slave"); } else { usb_stor_dbg(us, " %s status !BSY, continue with next operation\n", master_slave == ATA_ADDRESS_DEVHEAD_STD ? 
"Master" : "Slave"); break; } } /* check for ATA_BUSY and */ /* ATA_DF (workaround ATA Zip drive) and */ /* ATA_ERR (workaround for Archos CD-ROM) */ else if (regs[ATA_REG_STATUS_OFFSET] & (ATA_BUSY | ATA_DF | ATA_ERR)) { usb_stor_dbg(us, " Status indicates it is not ready, try again...\n"); } /* check for DRDY, ATA devices set DRDY after SRST */ else if (regs[ATA_REG_STATUS_OFFSET] & ATA_DRDY) { usb_stor_dbg(us, " Identified ATA device\n"); info->DeviceFlags |= DF_ATA_DEVICE; info->DeviceHead = master_slave; break; } /* * check Cylinder High/Low to * determine if it is an ATAPI device */ else if (regs[ATA_REG_HCYL_OFFSET] == 0xEB && regs[ATA_REG_LCYL_OFFSET] == 0x14) { /* * It seems that the RICOH * MP6200A CD/RW drive will * report itself okay as a * slave when it is really a * master. So this check again * as a master device just to * make sure it doesn't report * itself okay as a master also */ if ((master_slave & ATA_ADDRESS_DEVHEAD_SLAVE) && !recheckAsMaster) { usb_stor_dbg(us, " Identified ATAPI device as slave. Rechecking again as master\n"); recheckAsMaster = 1; master_slave = ATA_ADDRESS_DEVHEAD_STD; } else { usb_stor_dbg(us, " Identified ATAPI device\n"); info->DeviceHead = master_slave; status = isd200_atapi_soft_reset(us); break; } } else { usb_stor_dbg(us, " Not ATA, not ATAPI - Weird\n"); break; } /* check for timeout on this request */ if (time_after_eq(jiffies, endTime)) { if (!detect) usb_stor_dbg(us, " BSY check timeout, just continue with next operation...\n"); else usb_stor_dbg(us, " Device detect timeout!\n"); break; } } return status; } /************************************************************************** * isd200_manual_enum * * Determines if the drive attached is an ATA or ATAPI and if it is a * master or slave. * * RETURNS: * ISD status code */ static int isd200_manual_enum(struct us_data *us) { struct isd200_info *info = (struct isd200_info *)us->extra; int retStatus = ISD200_GOOD; usb_stor_dbg(us, "Entering isd200_manual_enum\n"); retStatus = isd200_read_config(us); if (retStatus == ISD200_GOOD) { int isslave; /* master or slave? */ retStatus = isd200_try_enum( us, ATA_ADDRESS_DEVHEAD_STD, 0); if (retStatus == ISD200_GOOD) retStatus = isd200_try_enum( us, ATA_ADDRESS_DEVHEAD_SLAVE, 0); if (retStatus == ISD200_GOOD) { retStatus = isd200_srst(us); if (retStatus == ISD200_GOOD) /* ata or atapi? */ retStatus = isd200_try_enum( us, ATA_ADDRESS_DEVHEAD_STD, 1); } isslave = (info->DeviceHead & ATA_ADDRESS_DEVHEAD_SLAVE) ? 
1 : 0; if (!(info->ConfigData.ATAConfig & ATACFG_MASTER)) { usb_stor_dbg(us, " Setting Master/Slave selection to %d\n", isslave); info->ConfigData.ATAConfig &= 0x3f; info->ConfigData.ATAConfig |= (isslave<<6); retStatus = isd200_write_config(us); } } usb_stor_dbg(us, "Leaving isd200_manual_enum %08X\n", retStatus); return(retStatus); } static void isd200_fix_driveid(u16 *id) { #ifndef __LITTLE_ENDIAN # ifdef __BIG_ENDIAN int i; for (i = 0; i < ATA_ID_WORDS; i++) id[i] = __le16_to_cpu(id[i]); # else # error "Please fix <asm/byteorder.h>" # endif #endif } static void isd200_dump_driveid(struct us_data *us, u16 *id) { usb_stor_dbg(us, " Identify Data Structure:\n"); usb_stor_dbg(us, " config = 0x%x\n", id[ATA_ID_CONFIG]); usb_stor_dbg(us, " cyls = 0x%x\n", id[ATA_ID_CYLS]); usb_stor_dbg(us, " heads = 0x%x\n", id[ATA_ID_HEADS]); usb_stor_dbg(us, " track_bytes = 0x%x\n", id[4]); usb_stor_dbg(us, " sector_bytes = 0x%x\n", id[5]); usb_stor_dbg(us, " sectors = 0x%x\n", id[ATA_ID_SECTORS]); usb_stor_dbg(us, " serial_no[0] = 0x%x\n", *(char *)&id[ATA_ID_SERNO]); usb_stor_dbg(us, " buf_type = 0x%x\n", id[20]); usb_stor_dbg(us, " buf_size = 0x%x\n", id[ATA_ID_BUF_SIZE]); usb_stor_dbg(us, " ecc_bytes = 0x%x\n", id[22]); usb_stor_dbg(us, " fw_rev[0] = 0x%x\n", *(char *)&id[ATA_ID_FW_REV]); usb_stor_dbg(us, " model[0] = 0x%x\n", *(char *)&id[ATA_ID_PROD]); usb_stor_dbg(us, " max_multsect = 0x%x\n", id[ATA_ID_MAX_MULTSECT] & 0xff); usb_stor_dbg(us, " dword_io = 0x%x\n", id[ATA_ID_DWORD_IO]); usb_stor_dbg(us, " capability = 0x%x\n", id[ATA_ID_CAPABILITY] >> 8); usb_stor_dbg(us, " tPIO = 0x%x\n", id[ATA_ID_OLD_PIO_MODES] >> 8); usb_stor_dbg(us, " tDMA = 0x%x\n", id[ATA_ID_OLD_DMA_MODES] >> 8); usb_stor_dbg(us, " field_valid = 0x%x\n", id[ATA_ID_FIELD_VALID]); usb_stor_dbg(us, " cur_cyls = 0x%x\n", id[ATA_ID_CUR_CYLS]); usb_stor_dbg(us, " cur_heads = 0x%x\n", id[ATA_ID_CUR_HEADS]); usb_stor_dbg(us, " cur_sectors = 0x%x\n", id[ATA_ID_CUR_SECTORS]); usb_stor_dbg(us, " cur_capacity = 0x%x\n", ata_id_u32(id, 57)); usb_stor_dbg(us, " multsect = 0x%x\n", id[ATA_ID_MULTSECT] & 0xff); usb_stor_dbg(us, " lba_capacity = 0x%x\n", ata_id_u32(id, ATA_ID_LBA_CAPACITY)); usb_stor_dbg(us, " command_set_1 = 0x%x\n", id[ATA_ID_COMMAND_SET_1]); usb_stor_dbg(us, " command_set_2 = 0x%x\n", id[ATA_ID_COMMAND_SET_2]); } /************************************************************************** * isd200_get_inquiry_data * * Get inquiry data * * RETURNS: * ISD status code */ static int isd200_get_inquiry_data( struct us_data *us ) { struct isd200_info *info = (struct isd200_info *)us->extra; int retStatus; u16 *id = info->id; usb_stor_dbg(us, "Entering isd200_get_inquiry_data\n"); /* set default to Master */ info->DeviceHead = ATA_ADDRESS_DEVHEAD_STD; /* attempt to manually enumerate this device */ retStatus = isd200_manual_enum(us); if (retStatus == ISD200_GOOD) { int transferStatus; /* check for an ATA device */ if (info->DeviceFlags & DF_ATA_DEVICE) { /* this must be an ATA device */ /* perform an ATA Command Identify */ transferStatus = isd200_action( us, ACTION_IDENTIFY, id, ATA_ID_WORDS * 2); if (transferStatus != ISD200_TRANSPORT_GOOD) { /* Error issuing ATA Command Identify */ usb_stor_dbg(us, " Error issuing ATA Command Identify\n"); retStatus = ISD200_ERROR; } else { /* ATA Command Identify successful */ int i; __be16 *src; __u16 *dest; isd200_fix_driveid(id); isd200_dump_driveid(us, id); /* Prevent division by 0 in isd200_scsi_to_ata() */ if (id[ATA_ID_HEADS] == 0 || id[ATA_ID_SECTORS] == 0) { usb_stor_dbg(us, " Invalid 
ATA Identify data\n"); retStatus = ISD200_ERROR; goto Done; } memset(&info->InquiryData, 0, sizeof(info->InquiryData)); /* Standard IDE interface only supports disks */ info->InquiryData.DeviceType = DIRECT_ACCESS_DEVICE; /* The length must be at least 36 (5 + 31) */ info->InquiryData.AdditionalLength = 0x1F; if (id[ATA_ID_COMMAND_SET_1] & COMMANDSET_MEDIA_STATUS) { /* set the removable bit */ info->InquiryData.DeviceTypeModifier = DEVICE_REMOVABLE; info->DeviceFlags |= DF_REMOVABLE_MEDIA; } /* Fill in vendor identification fields */ src = (__be16 *)&id[ATA_ID_PROD]; dest = (__u16*)info->InquiryData.VendorId; for (i = 0; i < 4; i++) dest[i] = be16_to_cpu(src[i]); src = (__be16 *)&id[ATA_ID_PROD + 8/2]; dest = (__u16*)info->InquiryData.ProductId; for (i=0;i<8;i++) dest[i] = be16_to_cpu(src[i]); src = (__be16 *)&id[ATA_ID_FW_REV]; dest = (__u16*)info->InquiryData.ProductRevisionLevel; for (i=0;i<2;i++) dest[i] = be16_to_cpu(src[i]); /* determine if it supports Media Status Notification */ if (id[ATA_ID_COMMAND_SET_2] & COMMANDSET_MEDIA_STATUS) { usb_stor_dbg(us, " Device supports Media Status Notification\n"); /* * Indicate that it is enabled, even * though it is not. * This allows the lock/unlock of the * media to work correctly. */ info->DeviceFlags |= DF_MEDIA_STATUS_ENABLED; } else info->DeviceFlags &= ~DF_MEDIA_STATUS_ENABLED; } } else { /* * this must be an ATAPI device * use an ATAPI protocol (Transparent SCSI) */ us->protocol_name = "Transparent SCSI"; us->proto_handler = usb_stor_transparent_scsi_command; usb_stor_dbg(us, "Protocol changed to: %s\n", us->protocol_name); /* Free driver structure */ us->extra_destructor(info); kfree(info); us->extra = NULL; us->extra_destructor = NULL; } } Done: usb_stor_dbg(us, "Leaving isd200_get_inquiry_data %08X\n", retStatus); return(retStatus); } /************************************************************************** * isd200_scsi_to_ata * * Translate SCSI commands to ATA commands. 
* * RETURNS: * 1 if the command needs to be sent to the transport layer * 0 otherwise */ static int isd200_scsi_to_ata(struct scsi_cmnd *srb, struct us_data *us, union ata_cdb * ataCdb) { struct isd200_info *info = (struct isd200_info *)us->extra; u16 *id = info->id; int sendToTransport = 1; unsigned char sectnum, head; unsigned short cylinder; unsigned long lba; unsigned long blockCount; unsigned char senseData[8] = { 0, 0, 0, 0, 0, 0, 0, 0 }; memset(ataCdb, 0, sizeof(union ata_cdb)); /* SCSI Command */ switch (srb->cmnd[0]) { case INQUIRY: usb_stor_dbg(us, " ATA OUT - INQUIRY\n"); /* copy InquiryData */ usb_stor_set_xfer_buf((unsigned char *) &info->InquiryData, sizeof(info->InquiryData), srb); srb->result = SAM_STAT_GOOD; sendToTransport = 0; break; case MODE_SENSE: usb_stor_dbg(us, " ATA OUT - SCSIOP_MODE_SENSE\n"); /* Initialize the return buffer */ usb_stor_set_xfer_buf(senseData, sizeof(senseData), srb); if (info->DeviceFlags & DF_MEDIA_STATUS_ENABLED) { ataCdb->generic.SignatureByte0 = info->ConfigData.ATAMajorCommand; ataCdb->generic.SignatureByte1 = info->ConfigData.ATAMinorCommand; ataCdb->generic.TransferBlockSize = 1; ataCdb->generic.RegisterSelect = REG_COMMAND; ataCdb->write.CommandByte = ATA_COMMAND_GET_MEDIA_STATUS; isd200_srb_set_bufflen(srb, 0); } else { usb_stor_dbg(us, " Media Status not supported, just report okay\n"); srb->result = SAM_STAT_GOOD; sendToTransport = 0; } break; case TEST_UNIT_READY: usb_stor_dbg(us, " ATA OUT - SCSIOP_TEST_UNIT_READY\n"); if (info->DeviceFlags & DF_MEDIA_STATUS_ENABLED) { ataCdb->generic.SignatureByte0 = info->ConfigData.ATAMajorCommand; ataCdb->generic.SignatureByte1 = info->ConfigData.ATAMinorCommand; ataCdb->generic.TransferBlockSize = 1; ataCdb->generic.RegisterSelect = REG_COMMAND; ataCdb->write.CommandByte = ATA_COMMAND_GET_MEDIA_STATUS; isd200_srb_set_bufflen(srb, 0); } else { usb_stor_dbg(us, " Media Status not supported, just report okay\n"); srb->result = SAM_STAT_GOOD; sendToTransport = 0; } break; case READ_CAPACITY: { unsigned long capacity; struct read_capacity_data readCapacityData; usb_stor_dbg(us, " ATA OUT - SCSIOP_READ_CAPACITY\n"); if (ata_id_has_lba(id)) capacity = ata_id_u32(id, ATA_ID_LBA_CAPACITY) - 1; else capacity = (id[ATA_ID_HEADS] * id[ATA_ID_CYLS] * id[ATA_ID_SECTORS]) - 1; readCapacityData.LogicalBlockAddress = cpu_to_be32(capacity); readCapacityData.BytesPerBlock = cpu_to_be32(0x200); usb_stor_set_xfer_buf((unsigned char *) &readCapacityData, sizeof(readCapacityData), srb); srb->result = SAM_STAT_GOOD; sendToTransport = 0; } break; case READ_10: usb_stor_dbg(us, " ATA OUT - SCSIOP_READ\n"); lba = be32_to_cpu(*(__be32 *)&srb->cmnd[2]); blockCount = (unsigned long)srb->cmnd[7]<<8 | (unsigned long)srb->cmnd[8]; if (ata_id_has_lba(id)) { sectnum = (unsigned char)(lba); cylinder = (unsigned short)(lba>>8); head = ATA_ADDRESS_DEVHEAD_LBA_MODE | (unsigned char)(lba>>24 & 0x0F); } else { sectnum = (u8)((lba % id[ATA_ID_SECTORS]) + 1); cylinder = (u16)(lba / (id[ATA_ID_SECTORS] * id[ATA_ID_HEADS])); head = (u8)((lba / id[ATA_ID_SECTORS]) % id[ATA_ID_HEADS]); } ataCdb->generic.SignatureByte0 = info->ConfigData.ATAMajorCommand; ataCdb->generic.SignatureByte1 = info->ConfigData.ATAMinorCommand; ataCdb->generic.TransferBlockSize = 1; ataCdb->generic.RegisterSelect = REG_SECTOR_COUNT | REG_SECTOR_NUMBER | REG_CYLINDER_LOW | REG_CYLINDER_HIGH | REG_DEVICE_HEAD | REG_COMMAND; ataCdb->write.SectorCountByte = (unsigned char)blockCount; ataCdb->write.SectorNumberByte = sectnum; ataCdb->write.CylinderHighByte = (unsigned 
char)(cylinder>>8); ataCdb->write.CylinderLowByte = (unsigned char)cylinder; ataCdb->write.DeviceHeadByte = (head | ATA_ADDRESS_DEVHEAD_STD); ataCdb->write.CommandByte = ATA_CMD_PIO_READ; break; case WRITE_10: usb_stor_dbg(us, " ATA OUT - SCSIOP_WRITE\n"); lba = be32_to_cpu(*(__be32 *)&srb->cmnd[2]); blockCount = (unsigned long)srb->cmnd[7]<<8 | (unsigned long)srb->cmnd[8]; if (ata_id_has_lba(id)) { sectnum = (unsigned char)(lba); cylinder = (unsigned short)(lba>>8); head = ATA_ADDRESS_DEVHEAD_LBA_MODE | (unsigned char)(lba>>24 & 0x0F); } else { sectnum = (u8)((lba % id[ATA_ID_SECTORS]) + 1); cylinder = (u16)(lba / (id[ATA_ID_SECTORS] * id[ATA_ID_HEADS])); head = (u8)((lba / id[ATA_ID_SECTORS]) % id[ATA_ID_HEADS]); } ataCdb->generic.SignatureByte0 = info->ConfigData.ATAMajorCommand; ataCdb->generic.SignatureByte1 = info->ConfigData.ATAMinorCommand; ataCdb->generic.TransferBlockSize = 1; ataCdb->generic.RegisterSelect = REG_SECTOR_COUNT | REG_SECTOR_NUMBER | REG_CYLINDER_LOW | REG_CYLINDER_HIGH | REG_DEVICE_HEAD | REG_COMMAND; ataCdb->write.SectorCountByte = (unsigned char)blockCount; ataCdb->write.SectorNumberByte = sectnum; ataCdb->write.CylinderHighByte = (unsigned char)(cylinder>>8); ataCdb->write.CylinderLowByte = (unsigned char)cylinder; ataCdb->write.DeviceHeadByte = (head | ATA_ADDRESS_DEVHEAD_STD); ataCdb->write.CommandByte = ATA_CMD_PIO_WRITE; break; case ALLOW_MEDIUM_REMOVAL: usb_stor_dbg(us, " ATA OUT - SCSIOP_MEDIUM_REMOVAL\n"); if (info->DeviceFlags & DF_REMOVABLE_MEDIA) { usb_stor_dbg(us, " srb->cmnd[4] = 0x%X\n", srb->cmnd[4]); ataCdb->generic.SignatureByte0 = info->ConfigData.ATAMajorCommand; ataCdb->generic.SignatureByte1 = info->ConfigData.ATAMinorCommand; ataCdb->generic.TransferBlockSize = 1; ataCdb->generic.RegisterSelect = REG_COMMAND; ataCdb->write.CommandByte = (srb->cmnd[4] & 0x1) ? ATA_CMD_MEDIA_LOCK : ATA_CMD_MEDIA_UNLOCK; isd200_srb_set_bufflen(srb, 0); } else { usb_stor_dbg(us, " Not removable media, just report okay\n"); srb->result = SAM_STAT_GOOD; sendToTransport = 0; } break; case START_STOP: usb_stor_dbg(us, " ATA OUT - SCSIOP_START_STOP_UNIT\n"); usb_stor_dbg(us, " srb->cmnd[4] = 0x%X\n", srb->cmnd[4]); if ((srb->cmnd[4] & 0x3) == 0x2) { usb_stor_dbg(us, " Media Eject\n"); ataCdb->generic.SignatureByte0 = info->ConfigData.ATAMajorCommand; ataCdb->generic.SignatureByte1 = info->ConfigData.ATAMinorCommand; ataCdb->generic.TransferBlockSize = 0; ataCdb->generic.RegisterSelect = REG_COMMAND; ataCdb->write.CommandByte = ATA_COMMAND_MEDIA_EJECT; } else if ((srb->cmnd[4] & 0x3) == 0x1) { usb_stor_dbg(us, " Get Media Status\n"); ataCdb->generic.SignatureByte0 = info->ConfigData.ATAMajorCommand; ataCdb->generic.SignatureByte1 = info->ConfigData.ATAMinorCommand; ataCdb->generic.TransferBlockSize = 1; ataCdb->generic.RegisterSelect = REG_COMMAND; ataCdb->write.CommandByte = ATA_COMMAND_GET_MEDIA_STATUS; isd200_srb_set_bufflen(srb, 0); } else { usb_stor_dbg(us, " Nothing to do, just report okay\n"); srb->result = SAM_STAT_GOOD; sendToTransport = 0; } break; default: usb_stor_dbg(us, "Unsupported SCSI command - 0x%X\n", srb->cmnd[0]); srb->result = DID_ERROR << 16; sendToTransport = 0; break; } return(sendToTransport); } /************************************************************************** * isd200_free_info * * Frees the driver structure. 
*/ static void isd200_free_info_ptrs(void *info_) { struct isd200_info *info = (struct isd200_info *) info_; if (info) { kfree(info->id); kfree(info->RegsBuf); kfree(info->srb.sense_buffer); } } /************************************************************************** * isd200_init_info * * Allocates (if necessary) and initializes the driver structure. * * RETURNS: * error status code */ static int isd200_init_info(struct us_data *us) { struct isd200_info *info; info = kzalloc(sizeof(struct isd200_info), GFP_KERNEL); if (!info) return -ENOMEM; info->id = kzalloc(ATA_ID_WORDS * 2, GFP_KERNEL); info->RegsBuf = kmalloc(sizeof(info->ATARegs), GFP_KERNEL); info->srb.sense_buffer = kmalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL); if (!info->id || !info->RegsBuf || !info->srb.sense_buffer) { isd200_free_info_ptrs(info); kfree(info); return -ENOMEM; } us->extra = info; us->extra_destructor = isd200_free_info_ptrs; return 0; } /************************************************************************** * Initialization for the ISD200 */ static int isd200_Initialization(struct us_data *us) { int rc = 0; usb_stor_dbg(us, "ISD200 Initialization...\n"); /* Initialize ISD200 info struct */ if (isd200_init_info(us) < 0) { usb_stor_dbg(us, "ERROR Initializing ISD200 Info struct\n"); rc = -ENOMEM; } else { /* Get device specific data */ if (isd200_get_inquiry_data(us) != ISD200_GOOD) { usb_stor_dbg(us, "ISD200 Initialization Failure\n"); rc = -EINVAL; } else { usb_stor_dbg(us, "ISD200 Initialization complete\n"); } } return rc; } /************************************************************************** * Protocol and Transport for the ISD200 ASIC * * This protocol and transport are for ATA devices connected to an ISD200 * ASIC. An ATAPI device that is connected as a slave device will be * detected in the driver initialization function and the protocol will * be changed to an ATAPI protocol (Transparent SCSI). * */ static void isd200_ata_command(struct scsi_cmnd *srb, struct us_data *us) { int sendToTransport, orig_bufflen; union ata_cdb ataCdb; /* Make sure driver was initialized */ if (us->extra == NULL) { usb_stor_dbg(us, "ERROR Driver not initialized\n"); srb->result = DID_ERROR << 16; return; } scsi_set_resid(srb, 0); /* scsi_bufflen might change in protocol translation to ata */ orig_bufflen = scsi_bufflen(srb); sendToTransport = isd200_scsi_to_ata(srb, us, &ataCdb); /* send the command to the transport layer */ if (sendToTransport) isd200_invoke_transport(us, srb, &ataCdb); isd200_srb_set_bufflen(srb, orig_bufflen); } static struct scsi_host_template isd200_host_template; static int isd200_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct us_data *us; int result; result = usb_stor_probe1(&us, intf, id, (id - isd200_usb_ids) + isd200_unusual_dev_list, &isd200_host_template); if (result) return result; us->protocol_name = "ISD200 ATA/ATAPI"; us->proto_handler = isd200_ata_command; result = usb_stor_probe2(us); return result; } static struct usb_driver isd200_driver = { .name = DRV_NAME, .probe = isd200_probe, .disconnect = usb_stor_disconnect, .suspend = usb_stor_suspend, .resume = usb_stor_resume, .reset_resume = usb_stor_reset_resume, .pre_reset = usb_stor_pre_reset, .post_reset = usb_stor_post_reset, .id_table = isd200_usb_ids, .soft_unbind = 1, .no_dynamic_id = 1, }; module_usb_stor_driver(isd200_driver, isd200_host_template, DRV_NAME); |
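/*
 * A minimal illustrative sketch (not part of the driver itself): how
 * isd200_scsi_to_ata() above splits a READ(10)/WRITE(10) logical block
 * address across the ATA taskfile registers when the drive supports LBA.
 * The struct and helper names below are hypothetical and exist only to make
 * the bit-slicing explicit; the driver writes the same values directly into
 * ataCdb->write.*, additionally OR-ing ATA_ADDRESS_DEVHEAD_STD into the
 * Device/Head byte.
 */
struct isd200_example_lba_regs {
	unsigned char sectnum;		/* LBA bits  0..7  -> Sector Number register */
	unsigned short cylinder;	/* LBA bits  8..23 -> Cylinder Low/High pair  */
	unsigned char head;		/* LBA bits 24..27 -> Device/Head, LBA mode set */
};

static inline struct isd200_example_lba_regs isd200_example_split_lba(unsigned long lba)
{
	struct isd200_example_lba_regs r;

	r.sectnum  = (unsigned char)lba;
	r.cylinder = (unsigned short)(lba >> 8);
	r.head     = ATA_ADDRESS_DEVHEAD_LBA_MODE | ((unsigned char)(lba >> 24) & 0x0F);
	return r;
}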
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _VHOST_H #define _VHOST_H #include <linux/eventfd.h> #include <linux/vhost.h> #include <linux/mm.h> #include <linux/mutex.h> #include <linux/poll.h> #include <linux/file.h> #include <linux/uio.h> #include <linux/virtio_config.h> #include <linux/virtio_ring.h> #include <linux/atomic.h> #include <linux/vhost_iotlb.h> #include <linux/irqbypass.h> struct vhost_work; struct vhost_task; typedef void (*vhost_work_fn_t)(struct vhost_work *work); #define VHOST_WORK_QUEUED 1 struct vhost_work { struct llist_node node; vhost_work_fn_t fn; unsigned long flags; }; struct vhost_worker { struct vhost_task *vtsk; struct vhost_dev *dev; /* Used to serialize device wide flushing with worker swapping. */ struct mutex mutex; struct llist_head work_list; u64 kcov_handle; u32 id; int attachment_cnt; bool killed; }; /* Poll a file (eventfd or socket) */ /* Note: there's nothing vhost specific about this structure. */ struct vhost_poll { poll_table table; wait_queue_head_t *wqh; wait_queue_entry_t wait; struct vhost_work work; __poll_t mask; struct vhost_dev *dev; struct vhost_virtqueue *vq; }; void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn, __poll_t mask, struct vhost_dev *dev, struct vhost_virtqueue *vq); int vhost_poll_start(struct vhost_poll *poll, struct file *file); void vhost_poll_stop(struct vhost_poll *poll); void vhost_poll_queue(struct vhost_poll *poll); void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn); void vhost_dev_flush(struct vhost_dev *dev); struct vhost_log { u64 addr; u64 len; }; enum vhost_uaddr_type { VHOST_ADDR_DESC = 0, VHOST_ADDR_AVAIL = 1, VHOST_ADDR_USED = 2, VHOST_NUM_ADDRS = 3, }; struct vhost_vring_call { struct eventfd_ctx *ctx; struct irq_bypass_producer producer; }; /* The virtqueue structure describes a queue attached to a device. */ struct vhost_virtqueue { struct vhost_dev *dev; struct vhost_worker __rcu *worker; /* The actual ring of buffers.
*/ struct mutex mutex; unsigned int num; vring_desc_t __user *desc; vring_avail_t __user *avail; vring_used_t __user *used; const struct vhost_iotlb_map *meta_iotlb[VHOST_NUM_ADDRS]; struct file *kick; struct vhost_vring_call call_ctx; struct eventfd_ctx *error_ctx; struct eventfd_ctx *log_ctx; struct vhost_poll poll; /* The routine to call when the Guest pings us, or timeout. */ vhost_work_fn_t handle_kick; /* Last available index we saw. * Values are limited to 0x7fff, and the high bit is used as * a wrap counter when using VIRTIO_F_RING_PACKED. */ u16 last_avail_idx; /* Caches available index value from user. */ u16 avail_idx; /* Last index we used. * Values are limited to 0x7fff, and the high bit is used as * a wrap counter when using VIRTIO_F_RING_PACKED. */ u16 last_used_idx; /* Used flags */ u16 used_flags; /* Last used index value we have signalled on */ u16 signalled_used; /* Last used index value we have signalled on */ bool signalled_used_valid; /* Log writes to used structure. */ bool log_used; u64 log_addr; struct iovec iov[UIO_MAXIOV]; struct iovec iotlb_iov[64]; struct iovec *indirect; struct vring_used_elem *heads; /* Protected by virtqueue mutex. */ struct vhost_iotlb *umem; struct vhost_iotlb *iotlb; void *private_data; u64 acked_features; u64 acked_backend_features; /* Log write descriptors */ void __user *log_base; struct vhost_log *log; struct iovec log_iov[64]; /* Ring endianness. Defaults to legacy native endianness. * Set to true when starting a modern virtio device. */ bool is_le; #ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY /* Ring endianness requested by userspace for cross-endian support. */ bool user_be; #endif u32 busyloop_timeout; }; struct vhost_msg_node { union { struct vhost_msg msg; struct vhost_msg_v2 msg_v2; }; struct vhost_virtqueue *vq; struct list_head node; }; struct vhost_dev { struct mm_struct *mm; struct mutex mutex; struct vhost_virtqueue **vqs; int nvqs; struct eventfd_ctx *log_ctx; struct vhost_iotlb *umem; struct vhost_iotlb *iotlb; spinlock_t iotlb_lock; struct list_head read_list; struct list_head pending_list; wait_queue_head_t wait; int iov_limit; int weight; int byte_weight; struct xarray worker_xa; bool use_worker; int (*msg_handler)(struct vhost_dev *dev, u32 asid, struct vhost_iotlb_msg *msg); }; bool vhost_exceeds_weight(struct vhost_virtqueue *vq, int pkts, int total_len); void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs, int nvqs, int iov_limit, int weight, int byte_weight, bool use_worker, int (*msg_handler)(struct vhost_dev *dev, u32 asid, struct vhost_iotlb_msg *msg)); long vhost_dev_set_owner(struct vhost_dev *dev); bool vhost_dev_has_owner(struct vhost_dev *dev); long vhost_dev_check_owner(struct vhost_dev *); struct vhost_iotlb *vhost_dev_reset_owner_prepare(void); void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_iotlb *iotlb); void vhost_dev_cleanup(struct vhost_dev *); void vhost_dev_stop(struct vhost_dev *); long vhost_dev_ioctl(struct vhost_dev *, unsigned int ioctl, void __user *argp); long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp); long vhost_worker_ioctl(struct vhost_dev *dev, unsigned int ioctl, void __user *argp); bool vhost_vq_access_ok(struct vhost_virtqueue *vq); bool vhost_log_access_ok(struct vhost_dev *); void vhost_clear_msg(struct vhost_dev *dev); int vhost_get_vq_desc(struct vhost_virtqueue *, struct iovec iov[], unsigned int iov_size, unsigned int *out_num, unsigned int *in_num, struct vhost_log *log, unsigned int *log_num); void 
vhost_discard_vq_desc(struct vhost_virtqueue *, int n); bool vhost_vq_work_queue(struct vhost_virtqueue *vq, struct vhost_work *work); bool vhost_vq_has_work(struct vhost_virtqueue *vq); bool vhost_vq_is_setup(struct vhost_virtqueue *vq); int vhost_vq_init_access(struct vhost_virtqueue *); int vhost_add_used(struct vhost_virtqueue *, unsigned int head, int len); int vhost_add_used_n(struct vhost_virtqueue *, struct vring_used_elem *heads, unsigned count); void vhost_add_used_and_signal(struct vhost_dev *, struct vhost_virtqueue *, unsigned int id, int len); void vhost_add_used_and_signal_n(struct vhost_dev *, struct vhost_virtqueue *, struct vring_used_elem *heads, unsigned count); void vhost_signal(struct vhost_dev *, struct vhost_virtqueue *); void vhost_disable_notify(struct vhost_dev *, struct vhost_virtqueue *); bool vhost_vq_avail_empty(struct vhost_dev *, struct vhost_virtqueue *); bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *); int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log, unsigned int log_num, u64 len, struct iovec *iov, int count); int vq_meta_prefetch(struct vhost_virtqueue *vq); struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type); void vhost_enqueue_msg(struct vhost_dev *dev, struct list_head *head, struct vhost_msg_node *node); struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev, struct list_head *head); void vhost_set_backend_features(struct vhost_dev *dev, u64 features); __poll_t vhost_chr_poll(struct file *file, struct vhost_dev *dev, poll_table *wait); ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to, int noblock); ssize_t vhost_chr_write_iter(struct vhost_dev *dev, struct iov_iter *from); int vhost_init_device_iotlb(struct vhost_dev *d); void vhost_iotlb_map_free(struct vhost_iotlb *iotlb, struct vhost_iotlb_map *map); #define vq_err(vq, fmt, ...) do { \ pr_debug(pr_fmt(fmt), ##__VA_ARGS__); \ if ((vq)->error_ctx) \ eventfd_signal((vq)->error_ctx);\ } while (0) enum { VHOST_FEATURES = (1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) | (1ULL << VIRTIO_RING_F_INDIRECT_DESC) | (1ULL << VIRTIO_RING_F_EVENT_IDX) | (1ULL << VHOST_F_LOG_ALL) | (1ULL << VIRTIO_F_ANY_LAYOUT) | (1ULL << VIRTIO_F_VERSION_1) }; /** * vhost_vq_set_backend - Set backend. * * @vq Virtqueue. * @private_data The private data. * * Context: Need to call with vq->mutex acquired. */ static inline void vhost_vq_set_backend(struct vhost_virtqueue *vq, void *private_data) { vq->private_data = private_data; } /** * vhost_vq_get_backend - Get backend. * * @vq Virtqueue. * * Context: Need to call with vq->mutex acquired. * Return: Private data previously set with vhost_vq_set_backend. 
*/ static inline void *vhost_vq_get_backend(struct vhost_virtqueue *vq) { return vq->private_data; } static inline bool vhost_has_feature(struct vhost_virtqueue *vq, int bit) { return vq->acked_features & (1ULL << bit); } static inline bool vhost_backend_has_feature(struct vhost_virtqueue *vq, int bit) { return vq->acked_backend_features & (1ULL << bit); } #ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY static inline bool vhost_is_little_endian(struct vhost_virtqueue *vq) { return vq->is_le; } #else static inline bool vhost_is_little_endian(struct vhost_virtqueue *vq) { return virtio_legacy_is_little_endian() || vq->is_le; } #endif /* Memory accessors */ static inline u16 vhost16_to_cpu(struct vhost_virtqueue *vq, __virtio16 val) { return __virtio16_to_cpu(vhost_is_little_endian(vq), val); } static inline __virtio16 cpu_to_vhost16(struct vhost_virtqueue *vq, u16 val) { return __cpu_to_virtio16(vhost_is_little_endian(vq), val); } static inline u32 vhost32_to_cpu(struct vhost_virtqueue *vq, __virtio32 val) { return __virtio32_to_cpu(vhost_is_little_endian(vq), val); } static inline __virtio32 cpu_to_vhost32(struct vhost_virtqueue *vq, u32 val) { return __cpu_to_virtio32(vhost_is_little_endian(vq), val); } static inline u64 vhost64_to_cpu(struct vhost_virtqueue *vq, __virtio64 val) { return __virtio64_to_cpu(vhost_is_little_endian(vq), val); } static inline __virtio64 cpu_to_vhost64(struct vhost_virtqueue *vq, u64 val) { return __cpu_to_virtio64(vhost_is_little_endian(vq), val); } #endif |
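/*
 * A minimal illustrative sketch (not part of this header): typical use of the
 * backend accessors and endianness helpers declared above by a vhost backend.
 * The function and "priv" type names are hypothetical; real callers are
 * drivers such as vhost-net.  The points illustrated: the backend pointer is
 * published under vq->mutex, and raw ring values are converted with the
 * vhost16/32/64 helpers so legacy cross-endian guests keep working.
 */
struct vhost_example_priv {
	int placeholder;
};

static inline void vhost_example_start_vq(struct vhost_virtqueue *vq,
					  struct vhost_example_priv *priv)
{
	mutex_lock(&vq->mutex);
	vhost_vq_set_backend(vq, priv);	/* publish the backend under the lock */
	mutex_unlock(&vq->mutex);
}

static inline u16 vhost_example_idx_to_cpu(struct vhost_virtqueue *vq,
					   __virtio16 raw_idx)
{
	/* convert a value read from the guest-visible ring to CPU endianness */
	return vhost16_to_cpu(vq, raw_idx);
}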
// SPDX-License-Identifier: GPL-2.0 #include <linux/kmod.h> #include <linux/netdevice.h> #include <linux/inetdevice.h> #include <linux/etherdevice.h> #include <linux/rtnetlink.h> #include <linux/net_tstamp.h> #include <linux/phylib_stubs.h> #include <linux/ptp_clock_kernel.h> #include <linux/wireless.h> #include <linux/if_bridge.h> #include <net/dsa_stubs.h> #include <net/netdev_lock.h> #include <net/wext.h> #include "dev.h" /* * Map an interface index to its name (SIOCGIFNAME) */ /* * We need this ioctl for efficient implementation of the * if_indextoname() function required by the IPv6 API. Without * it, we would have to search all the interfaces to find a * match. --pb */ static int dev_ifname(struct net *net, struct ifreq *ifr) { ifr->ifr_name[IFNAMSIZ-1] = 0; return netdev_get_name(net, ifr->ifr_name, ifr->ifr_ifindex); } /* * Perform a SIOCGIFCONF call. This structure will change * size eventually, and there is nothing I can do about it. * Thus we will need a 'compatibility mode'. */ int dev_ifconf(struct net *net, struct ifconf __user *uifc) { struct net_device *dev; void __user *pos; size_t size; int len, total = 0, done; /* both the ifconf and the ifreq structures are slightly different */ if (in_compat_syscall()) { struct compat_ifconf ifc32; if (copy_from_user(&ifc32, uifc, sizeof(struct compat_ifconf))) return -EFAULT; pos = compat_ptr(ifc32.ifcbuf); len = ifc32.ifc_len; size = sizeof(struct compat_ifreq); } else { struct ifconf ifc; if (copy_from_user(&ifc, uifc, sizeof(struct ifconf))) return -EFAULT; pos = ifc.ifc_buf; len = ifc.ifc_len; size = sizeof(struct ifreq); } /* Loop over the interfaces, and write an info block for each.
*/ rtnl_net_lock(net); for_each_netdev(net, dev) { if (!pos) done = inet_gifconf(dev, NULL, 0, size); else done = inet_gifconf(dev, pos + total, len - total, size); if (done < 0) { rtnl_net_unlock(net); return -EFAULT; } total += done; } rtnl_net_unlock(net); return put_user(total, &uifc->ifc_len); } static int dev_getifmap(struct net_device *dev, struct ifreq *ifr) { struct ifmap *ifmap = &ifr->ifr_map; if (in_compat_syscall()) { struct compat_ifmap *cifmap = (struct compat_ifmap *)ifmap; cifmap->mem_start = dev->mem_start; cifmap->mem_end = dev->mem_end; cifmap->base_addr = dev->base_addr; cifmap->irq = dev->irq; cifmap->dma = dev->dma; cifmap->port = dev->if_port; return 0; } ifmap->mem_start = dev->mem_start; ifmap->mem_end = dev->mem_end; ifmap->base_addr = dev->base_addr; ifmap->irq = dev->irq; ifmap->dma = dev->dma; ifmap->port = dev->if_port; return 0; } static int netif_setifmap(struct net_device *dev, struct ifreq *ifr) { struct compat_ifmap *cifmap = (struct compat_ifmap *)&ifr->ifr_map; if (!dev->netdev_ops->ndo_set_config) return -EOPNOTSUPP; if (in_compat_syscall()) { struct ifmap ifmap = { .mem_start = cifmap->mem_start, .mem_end = cifmap->mem_end, .base_addr = cifmap->base_addr, .irq = cifmap->irq, .dma = cifmap->dma, .port = cifmap->port, }; return dev->netdev_ops->ndo_set_config(dev, &ifmap); } return dev->netdev_ops->ndo_set_config(dev, &ifr->ifr_map); } /* * Perform the SIOCxIFxxx calls, inside rcu_read_lock() */ static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd) { int err; struct net_device *dev = dev_get_by_name_rcu(net, ifr->ifr_name); if (!dev) return -ENODEV; switch (cmd) { case SIOCGIFFLAGS: /* Get interface flags */ ifr->ifr_flags = (short) dev_get_flags(dev); return 0; case SIOCGIFMETRIC: /* Get the metric on the interface (currently unused) */ ifr->ifr_metric = 0; return 0; case SIOCGIFMTU: /* Get the MTU of a device */ ifr->ifr_mtu = dev->mtu; return 0; case SIOCGIFSLAVE: err = -EINVAL; break; case SIOCGIFMAP: return dev_getifmap(dev, ifr); case SIOCGIFINDEX: ifr->ifr_ifindex = dev->ifindex; return 0; case SIOCGIFTXQLEN: ifr->ifr_qlen = dev->tx_queue_len; return 0; default: /* dev_ioctl() should ensure this case * is never reached */ WARN_ON(1); err = -ENOTTY; break; } return err; } int net_hwtstamp_validate(const struct kernel_hwtstamp_config *cfg) { enum hwtstamp_tx_types tx_type; enum hwtstamp_rx_filters rx_filter; int tx_type_valid = 0; int rx_filter_valid = 0; if (cfg->flags & ~HWTSTAMP_FLAG_MASK) return -EINVAL; tx_type = cfg->tx_type; rx_filter = cfg->rx_filter; switch (tx_type) { case HWTSTAMP_TX_OFF: case HWTSTAMP_TX_ON: case HWTSTAMP_TX_ONESTEP_SYNC: case HWTSTAMP_TX_ONESTEP_P2P: tx_type_valid = 1; break; case __HWTSTAMP_TX_CNT: /* not a real value */ break; } switch (rx_filter) { case HWTSTAMP_FILTER_NONE: case HWTSTAMP_FILTER_ALL: case HWTSTAMP_FILTER_SOME: case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: case HWTSTAMP_FILTER_PTP_V2_EVENT: case HWTSTAMP_FILTER_PTP_V2_SYNC: case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: case HWTSTAMP_FILTER_NTP_ALL: rx_filter_valid = 1; break; case __HWTSTAMP_FILTER_CNT: /* not a real value */ break; } if (!tx_type_valid || !rx_filter_valid) return -ERANGE; return 0; } /** * 
dev_get_hwtstamp_phylib() - Get hardware timestamping settings of NIC * or of attached phylib PHY * @dev: Network device * @cfg: Timestamping configuration structure * * Helper for calling the default hardware provider timestamping. * * Note: phy_mii_ioctl() only handles SIOCSHWTSTAMP (not SIOCGHWTSTAMP), and * there only exists a phydev->mii_ts->hwtstamp() method. So this will return * -EOPNOTSUPP for phylib for now, which is still more accurate than letting * the netdev handle the GET request. */ int dev_get_hwtstamp_phylib(struct net_device *dev, struct kernel_hwtstamp_config *cfg) { struct hwtstamp_provider *hwprov; hwprov = rtnl_dereference(dev->hwprov); if (hwprov) { cfg->qualifier = hwprov->desc.qualifier; if (hwprov->source == HWTSTAMP_SOURCE_PHYLIB && hwprov->phydev) return phy_hwtstamp_get(hwprov->phydev, cfg); if (hwprov->source == HWTSTAMP_SOURCE_NETDEV) return dev->netdev_ops->ndo_hwtstamp_get(dev, cfg); return -EOPNOTSUPP; } if (phy_is_default_hwtstamp(dev->phydev)) return phy_hwtstamp_get(dev->phydev, cfg); return dev->netdev_ops->ndo_hwtstamp_get(dev, cfg); } static int dev_get_hwtstamp(struct net_device *dev, struct ifreq *ifr) { const struct net_device_ops *ops = dev->netdev_ops; struct kernel_hwtstamp_config kernel_cfg = {}; struct hwtstamp_config cfg; int err; if (!ops->ndo_hwtstamp_get) return dev_eth_ioctl(dev, ifr, SIOCGHWTSTAMP); /* legacy */ if (!netif_device_present(dev)) return -ENODEV; kernel_cfg.ifr = ifr; netdev_lock_ops(dev); err = dev_get_hwtstamp_phylib(dev, &kernel_cfg); netdev_unlock_ops(dev); if (err) return err; /* If the request was resolved through an unconverted driver, omit * the copy_to_user(), since the implementation has already done that */ if (!kernel_cfg.copied_to_user) { hwtstamp_config_from_kernel(&cfg, &kernel_cfg); if (copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg))) return -EFAULT; } return 0; } /** * dev_set_hwtstamp_phylib() - Change hardware timestamping of NIC * or of attached phylib PHY * @dev: Network device * @cfg: Timestamping configuration structure * @extack: Netlink extended ack message structure, for error reporting * * Helper for enforcing a common policy that phylib timestamping, if available, * should take precedence in front of hardware timestamping provided by the * netdev. If the netdev driver needs to perform specific actions even for PHY * timestamping to work properly (a switch port must trap the timestamped * frames and not forward them), it must set dev->see_all_hwtstamp_requests. */ int dev_set_hwtstamp_phylib(struct net_device *dev, struct kernel_hwtstamp_config *cfg, struct netlink_ext_ack *extack) { const struct net_device_ops *ops = dev->netdev_ops; struct kernel_hwtstamp_config old_cfg = {}; struct hwtstamp_provider *hwprov; struct phy_device *phydev; bool changed = false; bool phy_ts; int err; hwprov = rtnl_dereference(dev->hwprov); if (hwprov) { if (hwprov->source == HWTSTAMP_SOURCE_PHYLIB && hwprov->phydev) { phy_ts = true; phydev = hwprov->phydev; } else if (hwprov->source == HWTSTAMP_SOURCE_NETDEV) { phy_ts = false; } else { return -EOPNOTSUPP; } cfg->qualifier = hwprov->desc.qualifier; } else { phy_ts = phy_is_default_hwtstamp(dev->phydev); if (phy_ts) phydev = dev->phydev; } cfg->source = phy_ts ? 
HWTSTAMP_SOURCE_PHYLIB : HWTSTAMP_SOURCE_NETDEV; if (phy_ts && dev->see_all_hwtstamp_requests) { err = ops->ndo_hwtstamp_get(dev, &old_cfg); if (err) return err; } if (!phy_ts || dev->see_all_hwtstamp_requests) { err = ops->ndo_hwtstamp_set(dev, cfg, extack); if (err) { if (extack->_msg) netdev_err(dev, "%s\n", extack->_msg); return err; } } if (phy_ts && dev->see_all_hwtstamp_requests) changed = kernel_hwtstamp_config_changed(&old_cfg, cfg); if (phy_ts) { err = phy_hwtstamp_set(phydev, cfg, extack); if (err) { if (changed) ops->ndo_hwtstamp_set(dev, &old_cfg, NULL); return err; } } return 0; } static int dev_set_hwtstamp(struct net_device *dev, struct ifreq *ifr) { const struct net_device_ops *ops = dev->netdev_ops; struct kernel_hwtstamp_config kernel_cfg = {}; struct netlink_ext_ack extack = {}; struct hwtstamp_config cfg; int err; if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) return -EFAULT; hwtstamp_config_to_kernel(&kernel_cfg, &cfg); kernel_cfg.ifr = ifr; err = net_hwtstamp_validate(&kernel_cfg); if (err) return err; err = dsa_conduit_hwtstamp_validate(dev, &kernel_cfg, &extack); if (err) { if (extack._msg) netdev_err(dev, "%s\n", extack._msg); return err; } if (!ops->ndo_hwtstamp_set) return dev_eth_ioctl(dev, ifr, SIOCSHWTSTAMP); /* legacy */ if (!netif_device_present(dev)) return -ENODEV; netdev_lock_ops(dev); err = dev_set_hwtstamp_phylib(dev, &kernel_cfg, &extack); netdev_unlock_ops(dev); if (err) return err; /* The driver may have modified the configuration, so copy the * updated version of it back to user space */ if (!kernel_cfg.copied_to_user) { hwtstamp_config_from_kernel(&cfg, &kernel_cfg); if (copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg))) return -EFAULT; } return 0; } static int generic_hwtstamp_ioctl_lower(struct net_device *dev, int cmd, struct kernel_hwtstamp_config *kernel_cfg) { struct ifreq ifrr; int err; strscpy_pad(ifrr.ifr_name, dev->name, IFNAMSIZ); ifrr.ifr_ifru = kernel_cfg->ifr->ifr_ifru; err = dev_eth_ioctl(dev, &ifrr, cmd); if (err) return err; kernel_cfg->ifr->ifr_ifru = ifrr.ifr_ifru; kernel_cfg->copied_to_user = true; return 0; } int generic_hwtstamp_get_lower(struct net_device *dev, struct kernel_hwtstamp_config *kernel_cfg) { const struct net_device_ops *ops = dev->netdev_ops; if (!netif_device_present(dev)) return -ENODEV; if (ops->ndo_hwtstamp_get) return dev_get_hwtstamp_phylib(dev, kernel_cfg); /* Legacy path: unconverted lower driver */ return generic_hwtstamp_ioctl_lower(dev, SIOCGHWTSTAMP, kernel_cfg); } EXPORT_SYMBOL(generic_hwtstamp_get_lower); int generic_hwtstamp_set_lower(struct net_device *dev, struct kernel_hwtstamp_config *kernel_cfg, struct netlink_ext_ack *extack) { const struct net_device_ops *ops = dev->netdev_ops; if (!netif_device_present(dev)) return -ENODEV; if (ops->ndo_hwtstamp_set) return dev_set_hwtstamp_phylib(dev, kernel_cfg, extack); /* Legacy path: unconverted lower driver */ return generic_hwtstamp_ioctl_lower(dev, SIOCSHWTSTAMP, kernel_cfg); } EXPORT_SYMBOL(generic_hwtstamp_set_lower); static int dev_siocbond(struct net_device *dev, struct ifreq *ifr, unsigned int cmd) { const struct net_device_ops *ops = dev->netdev_ops; if (ops->ndo_siocbond) { int ret = -ENODEV; netdev_lock_ops(dev); if (netif_device_present(dev)) ret = ops->ndo_siocbond(dev, ifr, cmd); netdev_unlock_ops(dev); return ret; } return -EOPNOTSUPP; } static int dev_siocdevprivate(struct net_device *dev, struct ifreq *ifr, void __user *data, unsigned int cmd) { const struct net_device_ops *ops = dev->netdev_ops; if (ops->ndo_siocdevprivate) { 
int ret = -ENODEV; netdev_lock_ops(dev); if (netif_device_present(dev)) ret = ops->ndo_siocdevprivate(dev, ifr, data, cmd); netdev_unlock_ops(dev); return ret; } return -EOPNOTSUPP; } static int dev_siocwandev(struct net_device *dev, struct if_settings *ifs) { const struct net_device_ops *ops = dev->netdev_ops; if (ops->ndo_siocwandev) { int ret = -ENODEV; netdev_lock_ops(dev); if (netif_device_present(dev)) ret = ops->ndo_siocwandev(dev, ifs); netdev_unlock_ops(dev); return ret; } return -EOPNOTSUPP; } /* * Perform the SIOCxIFxxx calls, inside rtnl_net_lock() */ static int dev_ifsioc(struct net *net, struct ifreq *ifr, void __user *data, unsigned int cmd) { int err; struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name); const struct net_device_ops *ops; if (!dev) return -ENODEV; ops = dev->netdev_ops; switch (cmd) { case SIOCSIFFLAGS: /* Set interface flags */ return dev_change_flags(dev, ifr->ifr_flags, NULL); case SIOCSIFMETRIC: /* Set the metric on the interface (currently unused) */ return -EOPNOTSUPP; case SIOCSIFMTU: /* Set the MTU of a device */ return dev_set_mtu(dev, ifr->ifr_mtu); case SIOCSIFHWADDR: if (dev->addr_len > sizeof(ifr->ifr_hwaddr)) return -EINVAL; return dev_set_mac_address_user(dev, (struct sockaddr_storage *)&ifr->ifr_hwaddr, NULL); case SIOCSIFHWBROADCAST: if (ifr->ifr_hwaddr.sa_family != dev->type) return -EINVAL; memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data, min(sizeof(ifr->ifr_hwaddr.sa_data_min), (size_t)dev->addr_len)); netdev_lock_ops(dev); call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); netdev_unlock_ops(dev); return 0; case SIOCSIFMAP: netdev_lock_ops(dev); err = netif_setifmap(dev, ifr); netdev_unlock_ops(dev); return err; case SIOCADDMULTI: if (!ops->ndo_set_rx_mode || ifr->ifr_hwaddr.sa_family != AF_UNSPEC) return -EINVAL; if (!netif_device_present(dev)) return -ENODEV; netdev_lock_ops(dev); err = dev_mc_add_global(dev, ifr->ifr_hwaddr.sa_data); netdev_unlock_ops(dev); return err; case SIOCDELMULTI: if (!ops->ndo_set_rx_mode || ifr->ifr_hwaddr.sa_family != AF_UNSPEC) return -EINVAL; if (!netif_device_present(dev)) return -ENODEV; netdev_lock_ops(dev); err = dev_mc_del_global(dev, ifr->ifr_hwaddr.sa_data); netdev_unlock_ops(dev); return err; case SIOCSIFTXQLEN: if (ifr->ifr_qlen < 0) return -EINVAL; return dev_change_tx_queue_len(dev, ifr->ifr_qlen); case SIOCSIFNAME: ifr->ifr_newname[IFNAMSIZ-1] = '\0'; return dev_change_name(dev, ifr->ifr_newname); case SIOCWANDEV: return dev_siocwandev(dev, &ifr->ifr_settings); case SIOCDEVPRIVATE ... SIOCDEVPRIVATE + 15: return dev_siocdevprivate(dev, ifr, data, cmd); case SIOCSHWTSTAMP: return dev_set_hwtstamp(dev, ifr); case SIOCGHWTSTAMP: return dev_get_hwtstamp(dev, ifr); case SIOCGMIIPHY: case SIOCGMIIREG: case SIOCSMIIREG: return dev_eth_ioctl(dev, ifr, cmd); case SIOCBONDENSLAVE: case SIOCBONDRELEASE: case SIOCBONDSETHWADDR: case SIOCBONDSLAVEINFOQUERY: case SIOCBONDINFOQUERY: case SIOCBONDCHANGEACTIVE: return dev_siocbond(dev, ifr, cmd); /* Unknown ioctl */ default: err = -EINVAL; } return err; } /** * dev_load - load a network module * @net: the applicable net namespace * @name: name of interface * * If a network interface is not present and the process has suitable * privileges this function loads the module. If module loading is not * available in this kernel then it becomes a nop. 
*/ void dev_load(struct net *net, const char *name) { struct net_device *dev; int no_module; rcu_read_lock(); dev = dev_get_by_name_rcu(net, name); rcu_read_unlock(); no_module = !dev; if (no_module && capable(CAP_NET_ADMIN)) no_module = request_module("netdev-%s", name); if (no_module && capable(CAP_SYS_MODULE)) request_module("%s", name); } EXPORT_SYMBOL(dev_load); /* * This function handles all "interface"-type I/O control requests. The actual * 'doing' part of this is dev_ifsioc above. */ /** * dev_ioctl - network device ioctl * @net: the applicable net namespace * @cmd: command to issue * @ifr: pointer to a struct ifreq in user space * @data: data exchanged with userspace * @need_copyout: whether or not copy_to_user() should be called * * Issue ioctl functions to devices. This is normally called by the * user space syscall interfaces but can sometimes be useful for * other purposes. The return value is the return from the syscall if * positive or a negative errno code on error. */ int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, void __user *data, bool *need_copyout) { int ret; char *colon; if (need_copyout) *need_copyout = true; if (cmd == SIOCGIFNAME) return dev_ifname(net, ifr); ifr->ifr_name[IFNAMSIZ-1] = 0; colon = strchr(ifr->ifr_name, ':'); if (colon) *colon = 0; /* * See which interface the caller is talking about. */ switch (cmd) { case SIOCGIFHWADDR: dev_load(net, ifr->ifr_name); ret = dev_get_mac_address(&ifr->ifr_hwaddr, net, ifr->ifr_name); if (colon) *colon = ':'; return ret; /* * These ioctl calls: * - can be done by all. * - atomic and do not require locking. * - return a value */ case SIOCGIFFLAGS: case SIOCGIFMETRIC: case SIOCGIFMTU: case SIOCGIFSLAVE: case SIOCGIFMAP: case SIOCGIFINDEX: case SIOCGIFTXQLEN: dev_load(net, ifr->ifr_name); rcu_read_lock(); ret = dev_ifsioc_locked(net, ifr, cmd); rcu_read_unlock(); if (colon) *colon = ':'; return ret; case SIOCETHTOOL: dev_load(net, ifr->ifr_name); ret = dev_ethtool(net, ifr, data); if (colon) *colon = ':'; return ret; /* * These ioctl calls: * - require superuser power. * - require strict serialization. * - return a value */ case SIOCGMIIPHY: case SIOCGMIIREG: case SIOCSIFNAME: dev_load(net, ifr->ifr_name); if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) return -EPERM; rtnl_net_lock(net); ret = dev_ifsioc(net, ifr, data, cmd); rtnl_net_unlock(net); if (colon) *colon = ':'; return ret; /* * These ioctl calls: * - require superuser power. * - require strict serialization. * - do not return a value */ case SIOCSIFMAP: case SIOCSIFTXQLEN: if (!capable(CAP_NET_ADMIN)) return -EPERM; fallthrough; /* * These ioctl calls: * - require local superuser power. * - require strict serialization. * - do not return a value */ case SIOCSIFFLAGS: case SIOCSIFMETRIC: case SIOCSIFMTU: case SIOCSIFHWADDR: case SIOCSIFSLAVE: case SIOCADDMULTI: case SIOCDELMULTI: case SIOCSIFHWBROADCAST: case SIOCSMIIREG: case SIOCBONDENSLAVE: case SIOCBONDRELEASE: case SIOCBONDSETHWADDR: case SIOCBONDCHANGEACTIVE: case SIOCSHWTSTAMP: if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) return -EPERM; fallthrough; case SIOCBONDSLAVEINFOQUERY: case SIOCBONDINFOQUERY: dev_load(net, ifr->ifr_name); rtnl_net_lock(net); ret = dev_ifsioc(net, ifr, data, cmd); rtnl_net_unlock(net); if (need_copyout) *need_copyout = false; return ret; case SIOCGIFMEM: /* Get the per device memory space. We can add this but * currently do not support it */ case SIOCSIFMEM: /* Set the per device memory buffer space. 
* Not applicable in our case */ case SIOCSIFLINK: return -ENOTTY; /* * Unknown or private ioctl. */ default: if (cmd == SIOCWANDEV || cmd == SIOCGHWTSTAMP || (cmd >= SIOCDEVPRIVATE && cmd <= SIOCDEVPRIVATE + 15)) { dev_load(net, ifr->ifr_name); rtnl_net_lock(net); ret = dev_ifsioc(net, ifr, data, cmd); rtnl_net_unlock(net); return ret; } return -ENOTTY; } }
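/*
 * Illustrative sketch only (not part of the kernel sources above): how user
 * space typically drives the SIOCSHWTSTAMP path handled by dev_set_hwtstamp()
 * and validated by net_hwtstamp_validate(). The interface name and the use of
 * an AF_INET datagram socket as the ioctl handle are assumptions; any socket
 * works for interface ioctls.
 */
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

static int enable_hw_timestamping(const char *ifname)
{
	struct hwtstamp_config cfg = {
		.tx_type = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_ALL,
	};
	struct ifreq ifr;
	int fd, ret;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return -1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	/* On success the kernel copies back the (possibly adjusted) config. */
	ret = ioctl(fd, SIOCSHWTSTAMP, &ifr);
	close(fd);
	return ret;
}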
// SPDX-License-Identifier: ISC /* * Copyright (c) 2007-2011 Atheros Communications Inc. * Copyright (c) 2011-2012,2017 Qualcomm Atheros, Inc. * Copyright (c) 2016-2017 Erik Stromdahl <erik.stromdahl@gmail.com> */ #include <linux/module.h> #include <linux/usb.h> #include "debug.h" #include "core.h" #include "bmi.h" #include "hif.h" #include "htc.h" #include "usb.h" static void ath10k_usb_post_recv_transfers(struct ath10k *ar, struct ath10k_usb_pipe *recv_pipe); /* inlined helper functions */ static inline enum ath10k_htc_ep_id eid_from_htc_hdr(struct ath10k_htc_hdr *htc_hdr) { return (enum ath10k_htc_ep_id)htc_hdr->eid; } static inline bool is_trailer_only_msg(struct ath10k_htc_hdr *htc_hdr) { return __le16_to_cpu(htc_hdr->len) == htc_hdr->trailer_len; } /* pipe/urb operations */ static struct ath10k_urb_context * ath10k_usb_alloc_urb_from_pipe(struct ath10k_usb_pipe *pipe) { struct ath10k_urb_context *urb_context = NULL; unsigned long flags; /* bail if this pipe is not initialized */ if (!pipe->ar_usb) return NULL; spin_lock_irqsave(&pipe->ar_usb->cs_lock, flags); if (!list_empty(&pipe->urb_list_head)) { urb_context = list_first_entry(&pipe->urb_list_head, struct ath10k_urb_context, link); list_del(&urb_context->link); pipe->urb_cnt--; } spin_unlock_irqrestore(&pipe->ar_usb->cs_lock, flags); return urb_context; } static void ath10k_usb_free_urb_to_pipe(struct ath10k_usb_pipe *pipe, struct ath10k_urb_context *urb_context) { unsigned long flags; /* bail if this pipe is not initialized */ if (!pipe->ar_usb) return; spin_lock_irqsave(&pipe->ar_usb->cs_lock, flags); pipe->urb_cnt++; list_add(&urb_context->link, &pipe->urb_list_head); spin_unlock_irqrestore(&pipe->ar_usb->cs_lock, flags); } static void ath10k_usb_cleanup_recv_urb(struct ath10k_urb_context *urb_context) { dev_kfree_skb(urb_context->skb); urb_context->skb = NULL; ath10k_usb_free_urb_to_pipe(urb_context->pipe, urb_context); } static void ath10k_usb_free_pipe_resources(struct ath10k *ar, struct ath10k_usb_pipe *pipe) { struct ath10k_urb_context *urb_context; if (!pipe->ar_usb) { /* nothing allocated for this pipe */ return; } ath10k_dbg(ar, ATH10K_DBG_USB, "usb free resources lpipe %d hpipe 0x%x urbs %d avail %d\n", pipe->logical_pipe_num, pipe->usb_pipe_handle, pipe->urb_alloc, pipe->urb_cnt); if (pipe->urb_alloc != pipe->urb_cnt) { ath10k_dbg(ar, ATH10K_DBG_USB, "usb urb leak lpipe %d hpipe 0x%x urbs %d avail %d\n", pipe->logical_pipe_num,
pipe->usb_pipe_handle, pipe->urb_alloc, pipe->urb_cnt); } for (;;) { urb_context = ath10k_usb_alloc_urb_from_pipe(pipe); if (!urb_context) break; kfree(urb_context); } } static void ath10k_usb_cleanup_pipe_resources(struct ath10k *ar) { struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); int i; for (i = 0; i < ATH10K_USB_PIPE_MAX; i++) ath10k_usb_free_pipe_resources(ar, &ar_usb->pipes[i]); } /* hif usb rx/tx completion functions */ static void ath10k_usb_recv_complete(struct urb *urb) { struct ath10k_urb_context *urb_context = urb->context; struct ath10k_usb_pipe *pipe = urb_context->pipe; struct ath10k *ar = pipe->ar_usb->ar; struct sk_buff *skb; int status = 0; ath10k_dbg(ar, ATH10K_DBG_USB_BULK, "usb recv pipe %d stat %d len %d urb 0x%p\n", pipe->logical_pipe_num, urb->status, urb->actual_length, urb); if (urb->status != 0) { status = -EIO; switch (urb->status) { case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: /* no need to spew these errors when device * removed or urb killed due to driver shutdown */ status = -ECANCELED; break; default: ath10k_dbg(ar, ATH10K_DBG_USB_BULK, "usb recv pipe %d ep 0x%2.2x failed: %d\n", pipe->logical_pipe_num, pipe->ep_address, urb->status); break; } goto cleanup_recv_urb; } if (urb->actual_length == 0) goto cleanup_recv_urb; skb = urb_context->skb; /* we are going to pass it up */ urb_context->skb = NULL; skb_put(skb, urb->actual_length); /* note: queue implements a lock */ skb_queue_tail(&pipe->io_comp_queue, skb); schedule_work(&pipe->io_complete_work); cleanup_recv_urb: ath10k_usb_cleanup_recv_urb(urb_context); if (status == 0 && pipe->urb_cnt >= pipe->urb_cnt_thresh) { /* our free urbs are piling up, post more transfers */ ath10k_usb_post_recv_transfers(ar, pipe); } } static void ath10k_usb_transmit_complete(struct urb *urb) { struct ath10k_urb_context *urb_context = urb->context; struct ath10k_usb_pipe *pipe = urb_context->pipe; struct ath10k *ar = pipe->ar_usb->ar; struct sk_buff *skb; if (urb->status != 0) { ath10k_dbg(ar, ATH10K_DBG_USB_BULK, "pipe: %d, failed:%d\n", pipe->logical_pipe_num, urb->status); } skb = urb_context->skb; urb_context->skb = NULL; ath10k_usb_free_urb_to_pipe(urb_context->pipe, urb_context); /* note: queue implements a lock */ skb_queue_tail(&pipe->io_comp_queue, skb); schedule_work(&pipe->io_complete_work); } /* pipe operations */ static void ath10k_usb_post_recv_transfers(struct ath10k *ar, struct ath10k_usb_pipe *recv_pipe) { struct ath10k_urb_context *urb_context; struct urb *urb; int usb_status; for (;;) { urb_context = ath10k_usb_alloc_urb_from_pipe(recv_pipe); if (!urb_context) break; urb_context->skb = dev_alloc_skb(ATH10K_USB_RX_BUFFER_SIZE); if (!urb_context->skb) goto err; urb = usb_alloc_urb(0, GFP_ATOMIC); if (!urb) goto err; usb_fill_bulk_urb(urb, recv_pipe->ar_usb->udev, recv_pipe->usb_pipe_handle, urb_context->skb->data, ATH10K_USB_RX_BUFFER_SIZE, ath10k_usb_recv_complete, urb_context); ath10k_dbg(ar, ATH10K_DBG_USB_BULK, "usb bulk recv submit %d 0x%x ep 0x%2.2x len %d buf 0x%p\n", recv_pipe->logical_pipe_num, recv_pipe->usb_pipe_handle, recv_pipe->ep_address, ATH10K_USB_RX_BUFFER_SIZE, urb_context->skb); usb_anchor_urb(urb, &recv_pipe->urb_submitted); usb_status = usb_submit_urb(urb, GFP_ATOMIC); if (usb_status) { ath10k_dbg(ar, ATH10K_DBG_USB_BULK, "usb bulk recv failed: %d\n", usb_status); usb_unanchor_urb(urb); usb_free_urb(urb); goto err; } usb_free_urb(urb); } return; err: ath10k_usb_cleanup_recv_urb(urb_context); } static void ath10k_usb_flush_all(struct ath10k *ar) { struct ath10k_usb *ar_usb = 
ath10k_usb_priv(ar); int i; for (i = 0; i < ATH10K_USB_PIPE_MAX; i++) { if (ar_usb->pipes[i].ar_usb) { usb_kill_anchored_urbs(&ar_usb->pipes[i].urb_submitted); cancel_work_sync(&ar_usb->pipes[i].io_complete_work); } } } static void ath10k_usb_start_recv_pipes(struct ath10k *ar) { struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); ar_usb->pipes[ATH10K_USB_PIPE_RX_DATA].urb_cnt_thresh = 1; ath10k_usb_post_recv_transfers(ar, &ar_usb->pipes[ATH10K_USB_PIPE_RX_DATA]); } static void ath10k_usb_tx_complete(struct ath10k *ar, struct sk_buff *skb) { struct ath10k_htc_hdr *htc_hdr; struct ath10k_htc_ep *ep; htc_hdr = (struct ath10k_htc_hdr *)skb->data; ep = &ar->htc.endpoint[htc_hdr->eid]; ath10k_htc_notify_tx_completion(ep, skb); /* The TX complete handler now owns the skb... */ } static void ath10k_usb_rx_complete(struct ath10k *ar, struct sk_buff *skb) { struct ath10k_htc *htc = &ar->htc; struct ath10k_htc_hdr *htc_hdr; enum ath10k_htc_ep_id eid; struct ath10k_htc_ep *ep; u16 payload_len; u8 *trailer; int ret; htc_hdr = (struct ath10k_htc_hdr *)skb->data; eid = eid_from_htc_hdr(htc_hdr); ep = &ar->htc.endpoint[eid]; if (ep->service_id == 0) { ath10k_warn(ar, "ep %d is not connected\n", eid); goto out_free_skb; } payload_len = le16_to_cpu(htc_hdr->len); if (!payload_len) { ath10k_warn(ar, "zero length frame received, firmware crashed?\n"); goto out_free_skb; } if (payload_len < htc_hdr->trailer_len) { ath10k_warn(ar, "malformed frame received, firmware crashed?\n"); goto out_free_skb; } if (htc_hdr->flags & ATH10K_HTC_FLAG_TRAILER_PRESENT) { trailer = skb->data + sizeof(*htc_hdr) + payload_len - htc_hdr->trailer_len; ret = ath10k_htc_process_trailer(htc, trailer, htc_hdr->trailer_len, eid, NULL, NULL); if (ret) goto out_free_skb; if (is_trailer_only_msg(htc_hdr)) goto out_free_skb; /* strip off the trailer from the skb since it should not * be passed on to upper layers */ skb_trim(skb, skb->len - htc_hdr->trailer_len); } skb_pull(skb, sizeof(*htc_hdr)); ep->ep_ops.ep_rx_complete(ar, skb); /* The RX complete handler now owns the skb... 
*/ if (test_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags)) { local_bh_disable(); napi_schedule(&ar->napi); local_bh_enable(); } return; out_free_skb: dev_kfree_skb(skb); } static void ath10k_usb_io_comp_work(struct work_struct *work) { struct ath10k_usb_pipe *pipe = container_of(work, struct ath10k_usb_pipe, io_complete_work); struct ath10k *ar = pipe->ar_usb->ar; struct sk_buff *skb; while ((skb = skb_dequeue(&pipe->io_comp_queue))) { if (pipe->flags & ATH10K_USB_PIPE_FLAG_TX) ath10k_usb_tx_complete(ar, skb); else ath10k_usb_rx_complete(ar, skb); } } #define ATH10K_USB_MAX_DIAG_CMD (sizeof(struct ath10k_usb_ctrl_diag_cmd_write)) #define ATH10K_USB_MAX_DIAG_RESP (sizeof(struct ath10k_usb_ctrl_diag_resp_read)) static void ath10k_usb_destroy(struct ath10k *ar) { struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); ath10k_usb_flush_all(ar); ath10k_usb_cleanup_pipe_resources(ar); usb_set_intfdata(ar_usb->interface, NULL); kfree(ar_usb->diag_cmd_buffer); kfree(ar_usb->diag_resp_buffer); } static int ath10k_usb_hif_start(struct ath10k *ar) { int i; struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); ath10k_core_napi_enable(ar); ath10k_usb_start_recv_pipes(ar); /* set the TX resource avail threshold for each TX pipe */ for (i = ATH10K_USB_PIPE_TX_CTRL; i <= ATH10K_USB_PIPE_TX_DATA_HP; i++) { ar_usb->pipes[i].urb_cnt_thresh = ar_usb->pipes[i].urb_alloc / 2; } return 0; } static int ath10k_usb_hif_tx_sg(struct ath10k *ar, u8 pipe_id, struct ath10k_hif_sg_item *items, int n_items) { struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); struct ath10k_usb_pipe *pipe = &ar_usb->pipes[pipe_id]; struct ath10k_urb_context *urb_context; struct sk_buff *skb; struct urb *urb; int ret, i; for (i = 0; i < n_items; i++) { urb_context = ath10k_usb_alloc_urb_from_pipe(pipe); if (!urb_context) { ret = -ENOMEM; goto err; } skb = items[i].transfer_context; urb_context->skb = skb; urb = usb_alloc_urb(0, GFP_ATOMIC); if (!urb) { ret = -ENOMEM; goto err_free_urb_to_pipe; } usb_fill_bulk_urb(urb, ar_usb->udev, pipe->usb_pipe_handle, skb->data, skb->len, ath10k_usb_transmit_complete, urb_context); if (!(skb->len % pipe->max_packet_size)) { /* hit a max packet boundary on this pipe */ urb->transfer_flags |= URB_ZERO_PACKET; } usb_anchor_urb(urb, &pipe->urb_submitted); ret = usb_submit_urb(urb, GFP_ATOMIC); if (ret) { ath10k_dbg(ar, ATH10K_DBG_USB_BULK, "usb bulk transmit failed: %d\n", ret); usb_unanchor_urb(urb); usb_free_urb(urb); ret = -EINVAL; goto err_free_urb_to_pipe; } usb_free_urb(urb); } return 0; err_free_urb_to_pipe: ath10k_usb_free_urb_to_pipe(urb_context->pipe, urb_context); err: return ret; } static void ath10k_usb_hif_stop(struct ath10k *ar) { ath10k_usb_flush_all(ar); ath10k_core_napi_sync_disable(ar); } static u16 ath10k_usb_hif_get_free_queue_number(struct ath10k *ar, u8 pipe_id) { struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); return ar_usb->pipes[pipe_id].urb_cnt; } static int ath10k_usb_submit_ctrl_out(struct ath10k *ar, u8 req, u16 value, u16 index, void *data, u32 size) { struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); u8 *buf = NULL; int ret; if (size > 0) { buf = kmemdup(data, size, GFP_KERNEL); if (!buf) return -ENOMEM; } /* note: if successful returns number of bytes transferred */ ret = usb_control_msg(ar_usb->udev, usb_sndctrlpipe(ar_usb->udev, 0), req, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, value, index, buf, size, 1000); if (ret < 0) { ath10k_warn(ar, "Failed to submit usb control message: %d\n", ret); kfree(buf); return ret; } kfree(buf); return 0; } static int 
ath10k_usb_submit_ctrl_in(struct ath10k *ar, u8 req, u16 value, u16 index, void *data, u32 size) { struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); u8 *buf = NULL; int ret; if (size > 0) { buf = kmalloc(size, GFP_KERNEL); if (!buf) return -ENOMEM; } /* note: if successful returns number of bytes transferred */ ret = usb_control_msg(ar_usb->udev, usb_rcvctrlpipe(ar_usb->udev, 0), req, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, value, index, buf, size, 2000); if (ret < 0) { ath10k_warn(ar, "Failed to read usb control message: %d\n", ret); kfree(buf); return ret; } memcpy((u8 *)data, buf, size); kfree(buf); return 0; } static int ath10k_usb_ctrl_msg_exchange(struct ath10k *ar, u8 req_val, u8 *req_buf, u32 req_len, u8 resp_val, u8 *resp_buf, u32 *resp_len) { int ret; /* send command */ ret = ath10k_usb_submit_ctrl_out(ar, req_val, 0, 0, req_buf, req_len); if (ret) goto err; /* get response */ if (resp_buf) { ret = ath10k_usb_submit_ctrl_in(ar, resp_val, 0, 0, resp_buf, *resp_len); if (ret) goto err; } return 0; err: return ret; } static int ath10k_usb_hif_diag_read(struct ath10k *ar, u32 address, void *buf, size_t buf_len) { struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); struct ath10k_usb_ctrl_diag_cmd_read *cmd; u32 resp_len; int ret; if (buf_len < sizeof(struct ath10k_usb_ctrl_diag_resp_read)) return -EINVAL; cmd = (struct ath10k_usb_ctrl_diag_cmd_read *)ar_usb->diag_cmd_buffer; memset(cmd, 0, sizeof(*cmd)); cmd->cmd = ATH10K_USB_CTRL_DIAG_CC_READ; cmd->address = cpu_to_le32(address); resp_len = sizeof(struct ath10k_usb_ctrl_diag_resp_read); ret = ath10k_usb_ctrl_msg_exchange(ar, ATH10K_USB_CONTROL_REQ_DIAG_CMD, (u8 *)cmd, sizeof(*cmd), ATH10K_USB_CONTROL_REQ_DIAG_RESP, ar_usb->diag_resp_buffer, &resp_len); if (ret) return ret; if (resp_len != sizeof(struct ath10k_usb_ctrl_diag_resp_read)) return -EMSGSIZE; memcpy(buf, ar_usb->diag_resp_buffer, sizeof(struct ath10k_usb_ctrl_diag_resp_read)); return 0; } static int ath10k_usb_hif_diag_write(struct ath10k *ar, u32 address, const void *data, int nbytes) { struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); struct ath10k_usb_ctrl_diag_cmd_write *cmd; int ret; if (nbytes != sizeof(cmd->value)) return -EINVAL; cmd = (struct ath10k_usb_ctrl_diag_cmd_write *)ar_usb->diag_cmd_buffer; memset(cmd, 0, sizeof(*cmd)); cmd->cmd = cpu_to_le32(ATH10K_USB_CTRL_DIAG_CC_WRITE); cmd->address = cpu_to_le32(address); memcpy(&cmd->value, data, nbytes); ret = ath10k_usb_ctrl_msg_exchange(ar, ATH10K_USB_CONTROL_REQ_DIAG_CMD, (u8 *)cmd, sizeof(*cmd), 0, NULL, NULL); if (ret) return ret; return 0; } static int ath10k_usb_bmi_exchange_msg(struct ath10k *ar, void *req, u32 req_len, void *resp, u32 *resp_len) { int ret; if (req) { ret = ath10k_usb_submit_ctrl_out(ar, ATH10K_USB_CONTROL_REQ_SEND_BMI_CMD, 0, 0, req, req_len); if (ret) { ath10k_warn(ar, "unable to send the bmi data to the device: %d\n", ret); return ret; } } if (resp) { ret = ath10k_usb_submit_ctrl_in(ar, ATH10K_USB_CONTROL_REQ_RECV_BMI_RESP, 0, 0, resp, *resp_len); if (ret) { ath10k_warn(ar, "Unable to read the bmi data from the device: %d\n", ret); return ret; } } return 0; } static void ath10k_usb_hif_get_default_pipe(struct ath10k *ar, u8 *ul_pipe, u8 *dl_pipe) { *ul_pipe = ATH10K_USB_PIPE_TX_CTRL; *dl_pipe = ATH10K_USB_PIPE_RX_CTRL; } static int ath10k_usb_hif_map_service_to_pipe(struct ath10k *ar, u16 svc_id, u8 *ul_pipe, u8 *dl_pipe) { switch (svc_id) { case ATH10K_HTC_SVC_ID_RSVD_CTRL: case ATH10K_HTC_SVC_ID_WMI_CONTROL: *ul_pipe = ATH10K_USB_PIPE_TX_CTRL; /* due to large control packets, shift 
to data pipe */ *dl_pipe = ATH10K_USB_PIPE_RX_DATA; break; case ATH10K_HTC_SVC_ID_HTT_DATA_MSG: *ul_pipe = ATH10K_USB_PIPE_TX_DATA_LP; /* Disable rxdata2 directly, it will be enabled * if FW enable rxdata2 */ *dl_pipe = ATH10K_USB_PIPE_RX_DATA; break; default: return -EPERM; } return 0; } static int ath10k_usb_hif_power_up(struct ath10k *ar, enum ath10k_firmware_mode fw_mode) { return 0; } static void ath10k_usb_hif_power_down(struct ath10k *ar) { ath10k_usb_flush_all(ar); } #ifdef CONFIG_PM static int ath10k_usb_hif_suspend(struct ath10k *ar) { return -EOPNOTSUPP; } static int ath10k_usb_hif_resume(struct ath10k *ar) { return -EOPNOTSUPP; } #endif static const struct ath10k_hif_ops ath10k_usb_hif_ops = { .tx_sg = ath10k_usb_hif_tx_sg, .diag_read = ath10k_usb_hif_diag_read, .diag_write = ath10k_usb_hif_diag_write, .exchange_bmi_msg = ath10k_usb_bmi_exchange_msg, .start = ath10k_usb_hif_start, .stop = ath10k_usb_hif_stop, .map_service_to_pipe = ath10k_usb_hif_map_service_to_pipe, .get_default_pipe = ath10k_usb_hif_get_default_pipe, .get_free_queue_number = ath10k_usb_hif_get_free_queue_number, .power_up = ath10k_usb_hif_power_up, .power_down = ath10k_usb_hif_power_down, #ifdef CONFIG_PM .suspend = ath10k_usb_hif_suspend, .resume = ath10k_usb_hif_resume, #endif }; static u8 ath10k_usb_get_logical_pipe_num(u8 ep_address, int *urb_count) { u8 pipe_num = ATH10K_USB_PIPE_INVALID; switch (ep_address) { case ATH10K_USB_EP_ADDR_APP_CTRL_IN: pipe_num = ATH10K_USB_PIPE_RX_CTRL; *urb_count = RX_URB_COUNT; break; case ATH10K_USB_EP_ADDR_APP_DATA_IN: pipe_num = ATH10K_USB_PIPE_RX_DATA; *urb_count = RX_URB_COUNT; break; case ATH10K_USB_EP_ADDR_APP_INT_IN: pipe_num = ATH10K_USB_PIPE_RX_INT; *urb_count = RX_URB_COUNT; break; case ATH10K_USB_EP_ADDR_APP_DATA2_IN: pipe_num = ATH10K_USB_PIPE_RX_DATA2; *urb_count = RX_URB_COUNT; break; case ATH10K_USB_EP_ADDR_APP_CTRL_OUT: pipe_num = ATH10K_USB_PIPE_TX_CTRL; *urb_count = TX_URB_COUNT; break; case ATH10K_USB_EP_ADDR_APP_DATA_LP_OUT: pipe_num = ATH10K_USB_PIPE_TX_DATA_LP; *urb_count = TX_URB_COUNT; break; case ATH10K_USB_EP_ADDR_APP_DATA_MP_OUT: pipe_num = ATH10K_USB_PIPE_TX_DATA_MP; *urb_count = TX_URB_COUNT; break; case ATH10K_USB_EP_ADDR_APP_DATA_HP_OUT: pipe_num = ATH10K_USB_PIPE_TX_DATA_HP; *urb_count = TX_URB_COUNT; break; default: /* note: there may be endpoints not currently used */ break; } return pipe_num; } static int ath10k_usb_alloc_pipe_resources(struct ath10k *ar, struct ath10k_usb_pipe *pipe, int urb_cnt) { struct ath10k_urb_context *urb_context; int i; INIT_LIST_HEAD(&pipe->urb_list_head); init_usb_anchor(&pipe->urb_submitted); for (i = 0; i < urb_cnt; i++) { urb_context = kzalloc(sizeof(*urb_context), GFP_KERNEL); if (!urb_context) return -ENOMEM; urb_context->pipe = pipe; /* we are only allocate the urb contexts here, the actual URB * is allocated from the kernel as needed to do a transaction */ pipe->urb_alloc++; ath10k_usb_free_urb_to_pipe(pipe, urb_context); } ath10k_dbg(ar, ATH10K_DBG_USB, "usb alloc resources lpipe %d hpipe 0x%x urbs %d\n", pipe->logical_pipe_num, pipe->usb_pipe_handle, pipe->urb_alloc); return 0; } static int ath10k_usb_setup_pipe_resources(struct ath10k *ar, struct usb_interface *interface) { struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); struct usb_host_interface *iface_desc = interface->cur_altsetting; struct usb_endpoint_descriptor *endpoint; struct ath10k_usb_pipe *pipe; int ret, i, urbcount; u8 pipe_num; ath10k_dbg(ar, ATH10K_DBG_USB, "usb setting up pipes using interface\n"); /* walk descriptors and setup pipes 
*/ for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { endpoint = &iface_desc->endpoint[i].desc; if (ATH10K_USB_IS_BULK_EP(endpoint->bmAttributes)) { ath10k_dbg(ar, ATH10K_DBG_USB, "usb %s bulk ep 0x%2.2x maxpktsz %d\n", ATH10K_USB_IS_DIR_IN (endpoint->bEndpointAddress) ? "rx" : "tx", endpoint->bEndpointAddress, le16_to_cpu(endpoint->wMaxPacketSize)); } else if (ATH10K_USB_IS_INT_EP(endpoint->bmAttributes)) { ath10k_dbg(ar, ATH10K_DBG_USB, "usb %s int ep 0x%2.2x maxpktsz %d interval %d\n", ATH10K_USB_IS_DIR_IN (endpoint->bEndpointAddress) ? "rx" : "tx", endpoint->bEndpointAddress, le16_to_cpu(endpoint->wMaxPacketSize), endpoint->bInterval); } else if (ATH10K_USB_IS_ISOC_EP(endpoint->bmAttributes)) { /* TODO for ISO */ ath10k_dbg(ar, ATH10K_DBG_USB, "usb %s isoc ep 0x%2.2x maxpktsz %d interval %d\n", ATH10K_USB_IS_DIR_IN (endpoint->bEndpointAddress) ? "rx" : "tx", endpoint->bEndpointAddress, le16_to_cpu(endpoint->wMaxPacketSize), endpoint->bInterval); } /* Ignore broken descriptors. */ if (usb_endpoint_maxp(endpoint) == 0) continue; urbcount = 0; pipe_num = ath10k_usb_get_logical_pipe_num(endpoint->bEndpointAddress, &urbcount); if (pipe_num == ATH10K_USB_PIPE_INVALID) continue; pipe = &ar_usb->pipes[pipe_num]; if (pipe->ar_usb) /* hmmm..pipe was already setup */ continue; pipe->ar_usb = ar_usb; pipe->logical_pipe_num = pipe_num; pipe->ep_address = endpoint->bEndpointAddress; pipe->max_packet_size = le16_to_cpu(endpoint->wMaxPacketSize); if (ATH10K_USB_IS_BULK_EP(endpoint->bmAttributes)) { if (ATH10K_USB_IS_DIR_IN(pipe->ep_address)) { pipe->usb_pipe_handle = usb_rcvbulkpipe(ar_usb->udev, pipe->ep_address); } else { pipe->usb_pipe_handle = usb_sndbulkpipe(ar_usb->udev, pipe->ep_address); } } else if (ATH10K_USB_IS_INT_EP(endpoint->bmAttributes)) { if (ATH10K_USB_IS_DIR_IN(pipe->ep_address)) { pipe->usb_pipe_handle = usb_rcvintpipe(ar_usb->udev, pipe->ep_address); } else { pipe->usb_pipe_handle = usb_sndintpipe(ar_usb->udev, pipe->ep_address); } } else if (ATH10K_USB_IS_ISOC_EP(endpoint->bmAttributes)) { /* TODO for ISO */ if (ATH10K_USB_IS_DIR_IN(pipe->ep_address)) { pipe->usb_pipe_handle = usb_rcvisocpipe(ar_usb->udev, pipe->ep_address); } else { pipe->usb_pipe_handle = usb_sndisocpipe(ar_usb->udev, pipe->ep_address); } } pipe->ep_desc = endpoint; if (!ATH10K_USB_IS_DIR_IN(pipe->ep_address)) pipe->flags |= ATH10K_USB_PIPE_FLAG_TX; ret = ath10k_usb_alloc_pipe_resources(ar, pipe, urbcount); if (ret) return ret; } return 0; } static int ath10k_usb_create(struct ath10k *ar, struct usb_interface *interface) { struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); struct usb_device *dev = interface_to_usbdev(interface); struct ath10k_usb_pipe *pipe; int ret, i; usb_set_intfdata(interface, ar_usb); spin_lock_init(&ar_usb->cs_lock); ar_usb->udev = dev; ar_usb->interface = interface; for (i = 0; i < ATH10K_USB_PIPE_MAX; i++) { pipe = &ar_usb->pipes[i]; INIT_WORK(&pipe->io_complete_work, ath10k_usb_io_comp_work); skb_queue_head_init(&pipe->io_comp_queue); } ar_usb->diag_cmd_buffer = kzalloc(ATH10K_USB_MAX_DIAG_CMD, GFP_KERNEL); if (!ar_usb->diag_cmd_buffer) { ret = -ENOMEM; goto err; } ar_usb->diag_resp_buffer = kzalloc(ATH10K_USB_MAX_DIAG_RESP, GFP_KERNEL); if (!ar_usb->diag_resp_buffer) { ret = -ENOMEM; goto err; } ret = ath10k_usb_setup_pipe_resources(ar, interface); if (ret) goto err; return 0; err: ath10k_usb_destroy(ar); return ret; } static int ath10k_usb_napi_poll(struct napi_struct *ctx, int budget) { struct ath10k *ar = container_of(ctx, struct ath10k, napi); int done; done = 
ath10k_htt_rx_hl_indication(ar, budget); ath10k_dbg(ar, ATH10K_DBG_USB, "napi poll: done: %d, budget:%d\n", done, budget); if (done < budget) napi_complete_done(ctx, done); return done; } /* ath10k usb driver registered functions */ static int ath10k_usb_probe(struct usb_interface *interface, const struct usb_device_id *id) { struct ath10k *ar; struct ath10k_usb *ar_usb; struct usb_device *dev = interface_to_usbdev(interface); int ret, vendor_id, product_id; enum ath10k_hw_rev hw_rev; struct ath10k_bus_params bus_params = {}; /* Assumption: All USB based chipsets (so far) are QCA9377 based. * If there will be newer chipsets that does not use the hw reg * setup as defined in qca6174_regs and qca6174_values, this * assumption is no longer valid and hw_rev must be setup differently * depending on chipset. */ hw_rev = ATH10K_HW_QCA9377; ar = ath10k_core_create(sizeof(*ar_usb), &dev->dev, ATH10K_BUS_USB, hw_rev, &ath10k_usb_hif_ops); if (!ar) { dev_err(&dev->dev, "failed to allocate core\n"); return -ENOMEM; } netif_napi_add(ar->napi_dev, &ar->napi, ath10k_usb_napi_poll); usb_get_dev(dev); vendor_id = le16_to_cpu(dev->descriptor.idVendor); product_id = le16_to_cpu(dev->descriptor.idProduct); ath10k_dbg(ar, ATH10K_DBG_BOOT, "usb new func vendor 0x%04x product 0x%04x\n", vendor_id, product_id); ar_usb = ath10k_usb_priv(ar); ret = ath10k_usb_create(ar, interface); if (ret) goto err; ar_usb->ar = ar; ar->dev_id = product_id; ar->id.vendor = vendor_id; ar->id.device = product_id; bus_params.dev_type = ATH10K_DEV_TYPE_HL; /* TODO: don't know yet how to get chip_id with USB */ bus_params.chip_id = 0; bus_params.hl_msdu_ids = true; ret = ath10k_core_register(ar, &bus_params); if (ret) { ath10k_warn(ar, "failed to register driver core: %d\n", ret); goto err_usb_destroy; } /* TODO: remove this once USB support is fully implemented */ ath10k_warn(ar, "Warning: ath10k USB support is incomplete, don't expect anything to work!\n"); return 0; err_usb_destroy: ath10k_usb_destroy(ar); err: ath10k_core_destroy(ar); usb_put_dev(dev); return ret; } static void ath10k_usb_remove(struct usb_interface *interface) { struct ath10k_usb *ar_usb; ar_usb = usb_get_intfdata(interface); if (!ar_usb) return; ath10k_core_unregister(ar_usb->ar); netif_napi_del(&ar_usb->ar->napi); ath10k_usb_destroy(ar_usb->ar); usb_put_dev(interface_to_usbdev(interface)); ath10k_core_destroy(ar_usb->ar); } #ifdef CONFIG_PM static int ath10k_usb_pm_suspend(struct usb_interface *interface, pm_message_t message) { struct ath10k_usb *ar_usb = usb_get_intfdata(interface); ath10k_usb_flush_all(ar_usb->ar); return 0; } static int ath10k_usb_pm_resume(struct usb_interface *interface) { struct ath10k_usb *ar_usb = usb_get_intfdata(interface); struct ath10k *ar = ar_usb->ar; ath10k_usb_post_recv_transfers(ar, &ar_usb->pipes[ATH10K_USB_PIPE_RX_DATA]); return 0; } #else #define ath10k_usb_pm_suspend NULL #define ath10k_usb_pm_resume NULL #endif /* table of devices that work with this driver */ static struct usb_device_id ath10k_usb_ids[] = { {USB_DEVICE(0x13b1, 0x0042)}, /* Linksys WUSB6100M */ { /* Terminating entry */ }, }; MODULE_DEVICE_TABLE(usb, ath10k_usb_ids); static struct usb_driver ath10k_usb_driver = { .name = "ath10k_usb", .probe = ath10k_usb_probe, .suspend = ath10k_usb_pm_suspend, .resume = ath10k_usb_pm_resume, .disconnect = ath10k_usb_remove, .id_table = ath10k_usb_ids, .supports_autosuspend = true, .disable_hub_initiated_lpm = 1, }; module_usb_driver(ath10k_usb_driver); MODULE_AUTHOR("Atheros Communications, Inc."); 
MODULE_DESCRIPTION("Driver support for Qualcomm Atheros USB 802.11ac WLAN devices"); MODULE_LICENSE("Dual BSD/GPL"); |
// SPDX-License-Identifier: GPL-2.0-or-later /* * kinect sensor device camera, gspca driver * * Copyright (C) 2011 Antonio Ospite <ospite@studenti.unina.it> * * Based on the OpenKinect project and libfreenect * http://openkinect.org/wiki/Init_Analysis * * Special thanks to Steven Toth and kernellabs.com for sponsoring a Kinect * sensor device which I tested the driver on. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define MODULE_NAME "kinect" #include "gspca.h" #define CTRL_TIMEOUT 500 MODULE_AUTHOR("Antonio Ospite <ospite@studenti.unina.it>"); MODULE_DESCRIPTION("GSPCA/Kinect Sensor Device USB Camera Driver"); MODULE_LICENSE("GPL"); static bool depth_mode; struct pkt_hdr { uint8_t magic[2]; uint8_t pad; uint8_t flag; uint8_t unk1; uint8_t seq; uint8_t unk2; uint8_t unk3; uint32_t timestamp; }; struct cam_hdr { uint8_t magic[2]; __le16 len; __le16 cmd; __le16 tag; }; /* specific webcam descriptor */ struct sd { struct gspca_dev gspca_dev; /* !!
must be the first item */ uint16_t cam_tag; /* a sequence number for packets */ uint8_t stream_flag; /* to identify different stream types */ uint8_t obuf[0x400]; /* output buffer for control commands */ uint8_t ibuf[0x200]; /* input buffer for control commands */ }; #define MODE_640x480 0x0001 #define MODE_640x488 0x0002 #define MODE_1280x1024 0x0004 #define FORMAT_BAYER 0x0010 #define FORMAT_UYVY 0x0020 #define FORMAT_Y10B 0x0040 #define FPS_HIGH 0x0100 static const struct v4l2_pix_format depth_camera_mode[] = { {640, 480, V4L2_PIX_FMT_Y10BPACK, V4L2_FIELD_NONE, .bytesperline = 640 * 10 / 8, .sizeimage = 640 * 480 * 10 / 8, .colorspace = V4L2_COLORSPACE_SRGB, .priv = MODE_640x488 | FORMAT_Y10B}, }; static const struct v4l2_pix_format video_camera_mode[] = { {640, 480, V4L2_PIX_FMT_SGRBG8, V4L2_FIELD_NONE, .bytesperline = 640, .sizeimage = 640 * 480, .colorspace = V4L2_COLORSPACE_SRGB, .priv = MODE_640x480 | FORMAT_BAYER | FPS_HIGH}, {640, 480, V4L2_PIX_FMT_UYVY, V4L2_FIELD_NONE, .bytesperline = 640 * 2, .sizeimage = 640 * 480 * 2, .colorspace = V4L2_COLORSPACE_SRGB, .priv = MODE_640x480 | FORMAT_UYVY}, {1280, 1024, V4L2_PIX_FMT_SGRBG8, V4L2_FIELD_NONE, .bytesperline = 1280, .sizeimage = 1280 * 1024, .colorspace = V4L2_COLORSPACE_SRGB, .priv = MODE_1280x1024 | FORMAT_BAYER}, {640, 488, V4L2_PIX_FMT_Y10BPACK, V4L2_FIELD_NONE, .bytesperline = 640 * 10 / 8, .sizeimage = 640 * 488 * 10 / 8, .colorspace = V4L2_COLORSPACE_SRGB, .priv = MODE_640x488 | FORMAT_Y10B | FPS_HIGH}, {1280, 1024, V4L2_PIX_FMT_Y10BPACK, V4L2_FIELD_NONE, .bytesperline = 1280 * 10 / 8, .sizeimage = 1280 * 1024 * 10 / 8, .colorspace = V4L2_COLORSPACE_SRGB, .priv = MODE_1280x1024 | FORMAT_Y10B}, }; static int kinect_write(struct usb_device *udev, uint8_t *data, uint16_t wLength) { return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x00, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, 0, data, wLength, CTRL_TIMEOUT); } static int kinect_read(struct usb_device *udev, uint8_t *data, uint16_t wLength) { return usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0x00, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, 0, data, wLength, CTRL_TIMEOUT); } static int send_cmd(struct gspca_dev *gspca_dev, uint16_t cmd, void *cmdbuf, unsigned int cmd_len, void *replybuf, unsigned int reply_len) { struct sd *sd = (struct sd *) gspca_dev; struct usb_device *udev = gspca_dev->dev; int res, actual_len; uint8_t *obuf = sd->obuf; uint8_t *ibuf = sd->ibuf; struct cam_hdr *chdr = (void *)obuf; struct cam_hdr *rhdr = (void *)ibuf; if (cmd_len & 1 || cmd_len > (0x400 - sizeof(*chdr))) { pr_err("send_cmd: Invalid command length (0x%x)\n", cmd_len); return -1; } chdr->magic[0] = 0x47; chdr->magic[1] = 0x4d; chdr->cmd = cpu_to_le16(cmd); chdr->tag = cpu_to_le16(sd->cam_tag); chdr->len = cpu_to_le16(cmd_len / 2); memcpy(obuf+sizeof(*chdr), cmdbuf, cmd_len); res = kinect_write(udev, obuf, cmd_len + sizeof(*chdr)); gspca_dbg(gspca_dev, D_USBO, "Control cmd=%04x tag=%04x len=%04x: %d\n", cmd, sd->cam_tag, cmd_len, res); if (res < 0) { pr_err("send_cmd: Output control transfer failed (%d)\n", res); return res; } do { actual_len = kinect_read(udev, ibuf, 0x200); } while (actual_len == 0); gspca_dbg(gspca_dev, D_USBO, "Control reply: %d\n", actual_len); if (actual_len < (int)sizeof(*rhdr)) { pr_err("send_cmd: Input control transfer failed (%d)\n", actual_len); return actual_len < 0 ? 
actual_len : -EREMOTEIO; } actual_len -= sizeof(*rhdr); if (rhdr->magic[0] != 0x52 || rhdr->magic[1] != 0x42) { pr_err("send_cmd: Bad magic %02x %02x\n", rhdr->magic[0], rhdr->magic[1]); return -1; } if (rhdr->cmd != chdr->cmd) { pr_err("send_cmd: Bad cmd %02x != %02x\n", rhdr->cmd, chdr->cmd); return -1; } if (rhdr->tag != chdr->tag) { pr_err("send_cmd: Bad tag %04x != %04x\n", rhdr->tag, chdr->tag); return -1; } if (le16_to_cpu(rhdr->len) != (actual_len/2)) { pr_err("send_cmd: Bad len %04x != %04x\n", le16_to_cpu(rhdr->len), (int)(actual_len/2)); return -1; } if (actual_len > reply_len) { pr_warn("send_cmd: Data buffer is %d bytes long, but got %d bytes\n", reply_len, actual_len); memcpy(replybuf, ibuf+sizeof(*rhdr), reply_len); } else { memcpy(replybuf, ibuf+sizeof(*rhdr), actual_len); } sd->cam_tag++; return actual_len; } static int write_register(struct gspca_dev *gspca_dev, uint16_t reg, uint16_t data) { uint16_t reply[2]; __le16 cmd[2]; int res; cmd[0] = cpu_to_le16(reg); cmd[1] = cpu_to_le16(data); gspca_dbg(gspca_dev, D_USBO, "Write Reg 0x%04x <= 0x%02x\n", reg, data); res = send_cmd(gspca_dev, 0x03, cmd, 4, reply, 4); if (res < 0) return res; if (res != 2) { pr_warn("send_cmd returned %d [%04x %04x], 0000 expected\n", res, reply[0], reply[1]); } return 0; } /* this function is called at probe time */ static int sd_config_video(struct gspca_dev *gspca_dev, const struct usb_device_id *id) { struct sd *sd = (struct sd *) gspca_dev; struct cam *cam; sd->cam_tag = 0; sd->stream_flag = 0x80; cam = &gspca_dev->cam; cam->cam_mode = video_camera_mode; cam->nmodes = ARRAY_SIZE(video_camera_mode); gspca_dev->xfer_ep = 0x81; #if 0 /* Setting those values is not needed for video stream */ cam->npkt = 15; gspca_dev->pkt_size = 960 * 2; #endif return 0; } static int sd_config_depth(struct gspca_dev *gspca_dev, const struct usb_device_id *id) { struct sd *sd = (struct sd *) gspca_dev; struct cam *cam; sd->cam_tag = 0; sd->stream_flag = 0x70; cam = &gspca_dev->cam; cam->cam_mode = depth_camera_mode; cam->nmodes = ARRAY_SIZE(depth_camera_mode); gspca_dev->xfer_ep = 0x82; return 0; } /* this function is called at probe and resume time */ static int sd_init(struct gspca_dev *gspca_dev) { gspca_dbg(gspca_dev, D_PROBE, "Kinect Camera device.\n"); return 0; } static int sd_start_video(struct gspca_dev *gspca_dev) { int mode; uint8_t fmt_reg, fmt_val; uint8_t res_reg, res_val; uint8_t fps_reg, fps_val; uint8_t mode_val; mode = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv; if (mode & FORMAT_Y10B) { fmt_reg = 0x19; res_reg = 0x1a; fps_reg = 0x1b; mode_val = 0x03; } else { fmt_reg = 0x0c; res_reg = 0x0d; fps_reg = 0x0e; mode_val = 0x01; } /* format */ if (mode & FORMAT_UYVY) fmt_val = 0x05; else fmt_val = 0x00; if (mode & MODE_1280x1024) res_val = 0x02; else res_val = 0x01; if (mode & FPS_HIGH) fps_val = 0x1e; else fps_val = 0x0f; /* turn off IR-reset function */ write_register(gspca_dev, 0x105, 0x00); /* Reset video stream */ write_register(gspca_dev, 0x05, 0x00); /* Due to some ridiculous condition in the firmware, we have to start * and stop the depth stream before the camera will hand us 1280x1024 * IR. This is a stupid workaround, but we've yet to find a better * solution. * * Thanks to Drew Fisher for figuring this out. 
*/ if (mode & (FORMAT_Y10B | MODE_1280x1024)) { write_register(gspca_dev, 0x13, 0x01); write_register(gspca_dev, 0x14, 0x1e); write_register(gspca_dev, 0x06, 0x02); write_register(gspca_dev, 0x06, 0x00); } write_register(gspca_dev, fmt_reg, fmt_val); write_register(gspca_dev, res_reg, res_val); write_register(gspca_dev, fps_reg, fps_val); /* Start video stream */ write_register(gspca_dev, 0x05, mode_val); /* disable Hflip */ write_register(gspca_dev, 0x47, 0x00); return 0; } static int sd_start_depth(struct gspca_dev *gspca_dev) { /* turn off IR-reset function */ write_register(gspca_dev, 0x105, 0x00); /* reset depth stream */ write_register(gspca_dev, 0x06, 0x00); /* Depth Stream Format 0x03: 11 bit stream | 0x02: 10 bit */ write_register(gspca_dev, 0x12, 0x02); /* Depth Stream Resolution 1: standard (640x480) */ write_register(gspca_dev, 0x13, 0x01); /* Depth Framerate / 0x1e (30): 30 fps */ write_register(gspca_dev, 0x14, 0x1e); /* Depth Stream Control / 2: Open Depth Stream */ write_register(gspca_dev, 0x06, 0x02); /* disable depth hflip / LSB = 0: Smoothing Disabled */ write_register(gspca_dev, 0x17, 0x00); return 0; } static void sd_stopN_video(struct gspca_dev *gspca_dev) { /* reset video stream */ write_register(gspca_dev, 0x05, 0x00); } static void sd_stopN_depth(struct gspca_dev *gspca_dev) { /* reset depth stream */ write_register(gspca_dev, 0x06, 0x00); } static void sd_pkt_scan(struct gspca_dev *gspca_dev, u8 *__data, int len) { struct sd *sd = (struct sd *) gspca_dev; struct pkt_hdr *hdr = (void *)__data; uint8_t *data = __data + sizeof(*hdr); int datalen = len - sizeof(*hdr); uint8_t sof = sd->stream_flag | 1; uint8_t mof = sd->stream_flag | 2; uint8_t eof = sd->stream_flag | 5; if (len < 12) return; if (hdr->magic[0] != 'R' || hdr->magic[1] != 'B') { pr_warn("[Stream %02x] Invalid magic %02x%02x\n", sd->stream_flag, hdr->magic[0], hdr->magic[1]); return; } if (hdr->flag == sof) gspca_frame_add(gspca_dev, FIRST_PACKET, data, datalen); else if (hdr->flag == mof) gspca_frame_add(gspca_dev, INTER_PACKET, data, datalen); else if (hdr->flag == eof) gspca_frame_add(gspca_dev, LAST_PACKET, data, datalen); else pr_warn("Packet type not recognized...\n"); } /* sub-driver description */ static const struct sd_desc sd_desc_video = { .name = MODULE_NAME, .config = sd_config_video, .init = sd_init, .start = sd_start_video, .stopN = sd_stopN_video, .pkt_scan = sd_pkt_scan, /* .get_streamparm = sd_get_streamparm, .set_streamparm = sd_set_streamparm, */ }; static const struct sd_desc sd_desc_depth = { .name = MODULE_NAME, .config = sd_config_depth, .init = sd_init, .start = sd_start_depth, .stopN = sd_stopN_depth, .pkt_scan = sd_pkt_scan, /* .get_streamparm = sd_get_streamparm, .set_streamparm = sd_set_streamparm, */ }; /* -- module initialisation -- */ static const struct usb_device_id device_table[] = { {USB_DEVICE(0x045e, 0x02ae)}, {USB_DEVICE(0x045e, 0x02bf)}, {} }; MODULE_DEVICE_TABLE(usb, device_table); /* -- device connect -- */ static int sd_probe(struct usb_interface *intf, const struct usb_device_id *id) { if (depth_mode) return gspca_dev_probe(intf, id, &sd_desc_depth, sizeof(struct sd), THIS_MODULE); else return gspca_dev_probe(intf, id, &sd_desc_video, sizeof(struct sd), THIS_MODULE); } static struct usb_driver sd_driver = { .name = MODULE_NAME, .id_table = device_table, .probe = sd_probe, .disconnect = gspca_disconnect, #ifdef CONFIG_PM .suspend = gspca_suspend, .resume = gspca_resume, .reset_resume = gspca_resume, #endif }; module_usb_driver(sd_driver); 
module_param(depth_mode, bool, 0644); MODULE_PARM_DESC(depth_mode, "0=video 1=depth");
3 3 3 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 | // SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2014 Samsung Electronics Co., Ltd. * Sylwester Nawrocki <s.nawrocki@samsung.com> */ #include <linux/clk.h> #include <linux/clk-provider.h> #include <linux/clk/clk-conf.h> #include <linux/device.h> #include <linux/of.h> #include <linux/printk.h> #include <linux/slab.h> static int __set_clk_parents(struct device_node *node, bool clk_supplier) { struct of_phandle_args clkspec; int index, rc, num_parents; struct clk *clk, *pclk; num_parents = of_count_phandle_with_args(node, "assigned-clock-parents", "#clock-cells"); if (num_parents == -EINVAL) pr_err("clk: invalid value of clock-parents property at %pOF\n", node); for (index = 0; index < num_parents; index++) { rc = of_parse_phandle_with_args(node, "assigned-clock-parents", "#clock-cells", index, &clkspec); if (rc < 0) { /* skip empty (null) phandles */ if (rc == -ENOENT) continue; else return rc; } if (clkspec.np == node && !clk_supplier) { of_node_put(clkspec.np); return 0; } pclk = of_clk_get_from_provider(&clkspec); of_node_put(clkspec.np); if (IS_ERR(pclk)) { if (PTR_ERR(pclk) != -EPROBE_DEFER) pr_warn("clk: couldn't get parent clock %d for %pOF\n", index, node); return PTR_ERR(pclk); } rc = of_parse_phandle_with_args(node, "assigned-clocks", "#clock-cells", index, &clkspec); if (rc < 0) goto err; if (clkspec.np == node && !clk_supplier) { of_node_put(clkspec.np); rc = 0; goto err; } clk = of_clk_get_from_provider(&clkspec); of_node_put(clkspec.np); if (IS_ERR(clk)) { if (PTR_ERR(clk) != -EPROBE_DEFER) pr_warn("clk: couldn't get assigned clock %d for %pOF\n", index, node); rc = PTR_ERR(clk); goto err; } rc = clk_set_parent(clk, pclk); if (rc < 0) pr_err("clk: failed to reparent %s to %s: %d\n", __clk_get_name(clk), __clk_get_name(pclk), rc); clk_put(clk); clk_put(pclk); } return 0; err: clk_put(pclk); return rc; } static int __set_clk_rates(struct device_node *node, bool clk_supplier) { struct of_phandle_args clkspec; int rc, count, count_64, index; struct clk *clk; u64 *rates_64 __free(kfree) = NULL; u32 *rates __free(kfree) = NULL; count = of_property_count_u32_elems(node, "assigned-clock-rates"); count_64 = of_property_count_u64_elems(node, "assigned-clock-rates-u64"); if (count_64 > 0) { count = count_64; rates_64 = kcalloc(count, sizeof(*rates_64), GFP_KERNEL); if (!rates_64) return -ENOMEM; rc = of_property_read_u64_array(node, "assigned-clock-rates-u64", rates_64, count); } else if (count > 0) { rates = kcalloc(count, sizeof(*rates), GFP_KERNEL); if (!rates) return -ENOMEM; rc = of_property_read_u32_array(node, "assigned-clock-rates", rates, count); } else { return 0; } if (rc) return rc; for (index = 0; index < count; index++) { unsigned long rate; if (rates_64) rate = rates_64[index]; else rate = rates[index]; if (rate) { rc = of_parse_phandle_with_args(node, "assigned-clocks", "#clock-cells", index, &clkspec); if (rc < 0) { /* skip empty (null) 
phandles */ if (rc == -ENOENT) continue; else return rc; } if (clkspec.np == node && !clk_supplier) { of_node_put(clkspec.np); return 0; } clk = of_clk_get_from_provider(&clkspec); of_node_put(clkspec.np); if (IS_ERR(clk)) { if (PTR_ERR(clk) != -EPROBE_DEFER) pr_warn("clk: couldn't get clock %d for %pOF\n", index, node); return PTR_ERR(clk); } rc = clk_set_rate(clk, rate); if (rc < 0) pr_err("clk: couldn't set %s clk rate to %lu (%d), current rate: %lu\n", __clk_get_name(clk), rate, rc, clk_get_rate(clk)); clk_put(clk); } } return 0; } /** * of_clk_set_defaults() - parse and set assigned clocks configuration * @node: device node to apply clock settings for * @clk_supplier: true if clocks supplied by @node should also be considered * * This function parses 'assigned-{clocks/clock-parents/clock-rates}' properties * and sets any specified clock parents and rates. The @clk_supplier argument * should be set to true if @node may be also a clock supplier of any clock * listed in its 'assigned-clocks' or 'assigned-clock-parents' properties. * If @clk_supplier is false the function exits returning 0 as soon as it * determines the @node is also a supplier of any of the clocks. */ int of_clk_set_defaults(struct device_node *node, bool clk_supplier) { int rc; if (!node) return 0; rc = __set_clk_parents(node, clk_supplier); if (rc < 0) return rc; return __set_clk_rates(node, clk_supplier); } EXPORT_SYMBOL_GPL(of_clk_set_defaults); |
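/*
 * Usage sketch (illustrative, not part of clk-conf.c): of_clk_set_defaults()
 * is normally invoked by the driver core and by clock providers; the bus hook
 * below, example_bus_probe_prepare(), is a hypothetical caller added only to
 * show the calling convention. Passing clk_supplier = false means the device
 * is treated purely as a clock consumer here.
 */
#include <linux/clk/clk-conf.h>
#include <linux/device.h>
#include <linux/of.h>

static int example_bus_probe_prepare(struct device *dev)
{
	/* apply any "assigned-clock-*" defaults from the device's DT node */
	int ret = of_clk_set_defaults(dev->of_node, false);

	if (ret < 0)
		dev_err(dev, "failed to apply assigned clock defaults: %d\n", ret);
	return ret;
}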
14 14 14 14 14 14 14 14 14 14 14 14 14 14 14 14 14 14 14 14 14 14 14 14 14 14 14 14 14 13 14 14 14 13 13 14 14 14 14 14 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 | // SPDX-License-Identifier: GPL-2.0 /* * Interface between ext4 and JBD */ #include "ext4_jbd2.h" #include <trace/events/ext4.h> int ext4_inode_journal_mode(struct inode *inode) { if (EXT4_JOURNAL(inode) == NULL) return EXT4_INODE_WRITEBACK_DATA_MODE; /* writeback */ /* We do not support data journalling with delayed allocation */ if (!S_ISREG(inode->i_mode) || ext4_test_inode_flag(inode, EXT4_INODE_EA_INODE) || test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA || (ext4_test_inode_flag(inode, EXT4_INODE_JOURNAL_DATA) && !test_opt(inode->i_sb, DELALLOC) && !mapping_large_folio_support(inode->i_mapping))) { /* We do not support data journalling for encrypted data */ if (S_ISREG(inode->i_mode) && IS_ENCRYPTED(inode)) return EXT4_INODE_ORDERED_DATA_MODE; /* ordered */ return EXT4_INODE_JOURNAL_DATA_MODE; /* journal data */ } if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA) return EXT4_INODE_ORDERED_DATA_MODE; /* ordered */ if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA) return EXT4_INODE_WRITEBACK_DATA_MODE; /* writeback */ BUG(); } /* Just increment the non-pointer handle value */ static handle_t *ext4_get_nojournal(void) { handle_t *handle = current->journal_info; unsigned long ref_cnt = (unsigned long)handle; BUG_ON(ref_cnt >= EXT4_NOJOURNAL_MAX_REF_COUNT); ref_cnt++; handle = (handle_t *)ref_cnt; current->journal_info = handle; return handle; } /* Decrement the non-pointer handle value */ static void ext4_put_nojournal(handle_t *handle) { unsigned long ref_cnt = (unsigned long)handle; BUG_ON(ref_cnt == 0); ref_cnt--; handle = (handle_t *)ref_cnt; current->journal_info = handle; } /* * Wrappers for jbd2_journal_start/end. 
*/ static int ext4_journal_check_start(struct super_block *sb) { int ret; journal_t *journal; might_sleep(); ret = ext4_emergency_state(sb); if (unlikely(ret)) return ret; if (WARN_ON_ONCE(sb_rdonly(sb))) return -EROFS; WARN_ON(sb->s_writers.frozen == SB_FREEZE_COMPLETE); journal = EXT4_SB(sb)->s_journal; /* * Special case here: if the journal has aborted behind our * backs (eg. EIO in the commit thread), then we still need to * take the FS itself readonly cleanly. */ if (journal && is_journal_aborted(journal)) { ext4_abort(sb, -journal->j_errno, "Detected aborted journal"); return -EROFS; } return 0; } handle_t *__ext4_journal_start_sb(struct inode *inode, struct super_block *sb, unsigned int line, int type, int blocks, int rsv_blocks, int revoke_creds) { journal_t *journal; int err; if (inode) trace_ext4_journal_start_inode(inode, blocks, rsv_blocks, revoke_creds, type, _RET_IP_); else trace_ext4_journal_start_sb(sb, blocks, rsv_blocks, revoke_creds, type, _RET_IP_); err = ext4_journal_check_start(sb); if (err < 0) return ERR_PTR(err); journal = EXT4_SB(sb)->s_journal; if (!journal || (EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY)) return ext4_get_nojournal(); return jbd2__journal_start(journal, blocks, rsv_blocks, revoke_creds, GFP_NOFS, type, line); } int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle) { struct super_block *sb; int err; int rc; if (!ext4_handle_valid(handle)) { ext4_put_nojournal(handle); return 0; } err = handle->h_err; if (!handle->h_transaction) { rc = jbd2_journal_stop(handle); return err ? err : rc; } sb = handle->h_transaction->t_journal->j_private; rc = jbd2_journal_stop(handle); if (!err) err = rc; if (err) __ext4_std_error(sb, where, line, err); return err; } handle_t *__ext4_journal_start_reserved(handle_t *handle, unsigned int line, int type) { struct super_block *sb; int err; if (!ext4_handle_valid(handle)) return ext4_get_nojournal(); sb = handle->h_journal->j_private; trace_ext4_journal_start_reserved(sb, jbd2_handle_buffer_credits(handle), _RET_IP_); err = ext4_journal_check_start(sb); if (err < 0) { jbd2_journal_free_reserved(handle); return ERR_PTR(err); } err = jbd2_journal_start_reserved(handle, type, line); if (err < 0) return ERR_PTR(err); return handle; } int __ext4_journal_ensure_credits(handle_t *handle, int check_cred, int extend_cred, int revoke_cred) { if (!ext4_handle_valid(handle)) return 0; if (is_handle_aborted(handle)) return -EROFS; if (jbd2_handle_buffer_credits(handle) >= check_cred && handle->h_revoke_credits >= revoke_cred) return 0; extend_cred = max(0, extend_cred - jbd2_handle_buffer_credits(handle)); revoke_cred = max(0, revoke_cred - handle->h_revoke_credits); return ext4_journal_extend(handle, extend_cred, revoke_cred); } static void ext4_journal_abort_handle(const char *caller, unsigned int line, const char *err_fn, struct buffer_head *bh, handle_t *handle, int err) { char nbuf[16]; const char *errstr = ext4_decode_error(NULL, err, nbuf); BUG_ON(!ext4_handle_valid(handle)); if (bh) BUFFER_TRACE(bh, "abort"); if (!handle->h_err) handle->h_err = err; if (is_handle_aborted(handle)) return; printk(KERN_ERR "EXT4-fs: %s:%d: aborting transaction: %s in %s\n", caller, line, errstr, err_fn); jbd2_journal_abort_handle(handle); } static void ext4_check_bdev_write_error(struct super_block *sb) { struct address_space *mapping = sb->s_bdev->bd_mapping; struct ext4_sb_info *sbi = EXT4_SB(sb); int err; /* * If the block device has write error flag, it may have failed to * async write out metadata buffers in the 
background. In this case, * we could read old data from disk and write it out again, which * may lead to on-disk filesystem inconsistency. */ if (errseq_check(&mapping->wb_err, READ_ONCE(sbi->s_bdev_wb_err))) { spin_lock(&sbi->s_bdev_wb_lock); err = errseq_check_and_advance(&mapping->wb_err, &sbi->s_bdev_wb_err); spin_unlock(&sbi->s_bdev_wb_lock); if (err) ext4_error_err(sb, -err, "Error while async write back metadata"); } } int __ext4_journal_get_write_access(const char *where, unsigned int line, handle_t *handle, struct super_block *sb, struct buffer_head *bh, enum ext4_journal_trigger_type trigger_type) { int err; might_sleep(); if (ext4_handle_valid(handle)) { err = jbd2_journal_get_write_access(handle, bh); if (err) { ext4_journal_abort_handle(where, line, __func__, bh, handle, err); return err; } } else ext4_check_bdev_write_error(sb); if (trigger_type == EXT4_JTR_NONE || !ext4_has_feature_metadata_csum(sb)) return 0; BUG_ON(trigger_type >= EXT4_JOURNAL_TRIGGER_COUNT); jbd2_journal_set_triggers(bh, &EXT4_SB(sb)->s_journal_triggers[trigger_type].tr_triggers); return 0; } /* * The ext4 forget function must perform a revoke if we are freeing data * which has been journaled. Metadata (eg. indirect blocks) must be * revoked in all cases. * * "bh" may be NULL: a metadata block may have been freed from memory * but there may still be a record of it in the journal, and that record * still needs to be revoked. */ int __ext4_forget(const char *where, unsigned int line, handle_t *handle, int is_metadata, struct inode *inode, struct buffer_head *bh, ext4_fsblk_t blocknr) { int err; might_sleep(); trace_ext4_forget(inode, is_metadata, blocknr); BUFFER_TRACE(bh, "enter"); ext4_debug("forgetting bh %p: is_metadata=%d, mode %o, data mode %x\n", bh, is_metadata, inode->i_mode, test_opt(inode->i_sb, DATA_FLAGS)); /* In the no journal case, we can just do a bforget and return */ if (!ext4_handle_valid(handle)) { bforget(bh); return 0; } /* Never use the revoke function if we are doing full data * journaling: there is no need to, and a V1 superblock won't * support it. Otherwise, only skip the revoke on un-journaled * data blocks. 
*/ if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA || (!is_metadata && !ext4_should_journal_data(inode))) { if (bh) { BUFFER_TRACE(bh, "call jbd2_journal_forget"); err = jbd2_journal_forget(handle, bh); if (err) ext4_journal_abort_handle(where, line, __func__, bh, handle, err); return err; } return 0; } /* * data!=journal && (is_metadata || should_journal_data(inode)) */ BUFFER_TRACE(bh, "call jbd2_journal_revoke"); err = jbd2_journal_revoke(handle, blocknr, bh); if (err) { ext4_journal_abort_handle(where, line, __func__, bh, handle, err); __ext4_error(inode->i_sb, where, line, true, -err, 0, "error %d when attempting revoke", err); } BUFFER_TRACE(bh, "exit"); return err; } int __ext4_journal_get_create_access(const char *where, unsigned int line, handle_t *handle, struct super_block *sb, struct buffer_head *bh, enum ext4_journal_trigger_type trigger_type) { int err; if (!ext4_handle_valid(handle)) return 0; err = jbd2_journal_get_create_access(handle, bh); if (err) { ext4_journal_abort_handle(where, line, __func__, bh, handle, err); return err; } if (trigger_type == EXT4_JTR_NONE || !ext4_has_feature_metadata_csum(sb)) return 0; BUG_ON(trigger_type >= EXT4_JOURNAL_TRIGGER_COUNT); jbd2_journal_set_triggers(bh, &EXT4_SB(sb)->s_journal_triggers[trigger_type].tr_triggers); return 0; } int __ext4_handle_dirty_metadata(const char *where, unsigned int line, handle_t *handle, struct inode *inode, struct buffer_head *bh) { int err = 0; might_sleep(); set_buffer_meta(bh); set_buffer_prio(bh); set_buffer_uptodate(bh); if (ext4_handle_valid(handle)) { err = jbd2_journal_dirty_metadata(handle, bh); /* Errors can only happen due to aborted journal or a nasty bug */ if (!is_handle_aborted(handle) && WARN_ON_ONCE(err)) { ext4_journal_abort_handle(where, line, __func__, bh, handle, err); if (inode == NULL) { pr_err("EXT4: jbd2_journal_dirty_metadata " "failed: handle type %u started at " "line %u, credits %u/%u, errcode %d", handle->h_type, handle->h_line_no, handle->h_requested_credits, jbd2_handle_buffer_credits(handle), err); return err; } ext4_error_inode(inode, where, line, bh->b_blocknr, "journal_dirty_metadata failed: " "handle type %u started at line %u, " "credits %u/%u, errcode %d", handle->h_type, handle->h_line_no, handle->h_requested_credits, jbd2_handle_buffer_credits(handle), err); } } else { if (inode) mark_buffer_dirty_inode(bh, inode); else mark_buffer_dirty(bh); if (inode && inode_needs_sync(inode)) { sync_dirty_buffer(bh); if (buffer_req(bh) && !buffer_uptodate(bh)) { ext4_error_inode_err(inode, where, line, bh->b_blocknr, EIO, "IO error syncing itable block"); err = -EIO; } } } return err; } |
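/*
 * Usage sketch (illustrative, not part of ext4_jbd2.c): the usual
 * start / get_write_access / dirty_metadata / stop sequence around one
 * metadata buffer. In ext4 these __-prefixed helpers are reached through
 * the ext4_journal_*() macros in ext4_jbd2.h, which supply __func__ and
 * __LINE__ automatically; the explicit arguments and the EXT4_HT_MISC
 * handle type below are assumptions made to keep the example
 * self-contained (the function itself is hypothetical).
 */
static int example_touch_metadata(struct inode *inode, struct buffer_head *bh)
{
	struct super_block *sb = inode->i_sb;
	handle_t *handle;
	int err, err2;

	/* one buffer credit: we intend to dirty a single metadata block */
	handle = __ext4_journal_start_sb(inode, sb, __LINE__, EXT4_HT_MISC,
					 1, 0, 0);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	err = __ext4_journal_get_write_access(__func__, __LINE__, handle, sb,
					      bh, EXT4_JTR_NONE);
	if (!err) {
		/* ... modify bh->b_data while the handle is held ... */
		err = __ext4_handle_dirty_metadata(__func__, __LINE__, handle,
						   inode, bh);
	}

	err2 = __ext4_journal_stop(__func__, __LINE__, handle);
	return err ? err : err2;
}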
122 18 93 39 6 14 20 234 1562 22213 2166 28 29 18 84 1423 118 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 | /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_NODEMASK_H #define __LINUX_NODEMASK_H /* * Nodemasks provide a bitmap suitable for representing the * set of Node's in a system, one bit position per Node number. * * See detailed comments in the file linux/bitmap.h describing the * data type on which these nodemasks are based. * * For details of nodemask_parse_user(), see bitmap_parse_user() in * lib/bitmap.c. For details of nodelist_parse(), see bitmap_parselist(), * also in bitmap.c. For details of node_remap(), see bitmap_bitremap in * lib/bitmap.c. For details of nodes_remap(), see bitmap_remap in * lib/bitmap.c. For details of nodes_onto(), see bitmap_onto in * lib/bitmap.c. For details of nodes_fold(), see bitmap_fold in * lib/bitmap.c. 
* * The available nodemask operations are: * * void node_set(node, mask) turn on bit 'node' in mask * void node_clear(node, mask) turn off bit 'node' in mask * void nodes_setall(mask) set all bits * void nodes_clear(mask) clear all bits * int node_isset(node, mask) true iff bit 'node' set in mask * int node_test_and_set(node, mask) test and set bit 'node' in mask * * void nodes_and(dst, src1, src2) dst = src1 & src2 [intersection] * void nodes_or(dst, src1, src2) dst = src1 | src2 [union] * void nodes_xor(dst, src1, src2) dst = src1 ^ src2 * void nodes_andnot(dst, src1, src2) dst = src1 & ~src2 * void nodes_complement(dst, src) dst = ~src * * int nodes_equal(mask1, mask2) Does mask1 == mask2? * int nodes_intersects(mask1, mask2) Do mask1 and mask2 intersect? * int nodes_subset(mask1, mask2) Is mask1 a subset of mask2? * int nodes_empty(mask) Is mask empty (no bits sets)? * int nodes_full(mask) Is mask full (all bits sets)? * int nodes_weight(mask) Hamming weight - number of set bits * * unsigned int first_node(mask) Number lowest set bit, or MAX_NUMNODES * unsigend int next_node(node, mask) Next node past 'node', or MAX_NUMNODES * unsigned int next_node_in(node, mask) Next node past 'node', or wrap to first, * or MAX_NUMNODES * unsigned int first_unset_node(mask) First node not set in mask, or * MAX_NUMNODES * * nodemask_t nodemask_of_node(node) Return nodemask with bit 'node' set * NODE_MASK_ALL Initializer - all bits set * NODE_MASK_NONE Initializer - no bits set * unsigned long *nodes_addr(mask) Array of unsigned long's in mask * * int nodemask_parse_user(ubuf, ulen, mask) Parse ascii string as nodemask * int nodelist_parse(buf, map) Parse ascii string as nodelist * int node_remap(oldbit, old, new) newbit = map(old, new)(oldbit) * void nodes_remap(dst, src, old, new) *dst = map(old, new)(src) * void nodes_onto(dst, orig, relmap) *dst = orig relative to relmap * void nodes_fold(dst, orig, sz) dst bits = orig bits mod sz * * for_each_node_mask(node, mask) for-loop node over mask * * int num_online_nodes() Number of online Nodes * int num_possible_nodes() Number of all possible Nodes * * int node_random(mask) Random node with set bit in mask * * int node_online(node) Is some node online? * int node_possible(node) Is some node possible? * * node_set_online(node) set bit 'node' in node_online_map * node_set_offline(node) clear bit 'node' in node_online_map * * for_each_node(node) for-loop node over node_possible_map * for_each_online_node(node) for-loop node over node_online_map * * Subtlety: * 1) The 'type-checked' form of node_isset() causes gcc (3.3.2, anyway) * to generate slightly worse code. So use a simple one-line #define * for node_isset(), instead of wrapping an inline inside a macro, the * way we do the other calls. * * NODEMASK_SCRATCH * When doing above logical AND, OR, XOR, Remap operations the callers tend to * need temporary nodemask_t's on the stack. But if NODES_SHIFT is large, * nodemask_t's consume too much stack space. NODEMASK_SCRATCH is a helper * for such situations. See below and CPUMASK_ALLOC also. */ #include <linux/threads.h> #include <linux/bitmap.h> #include <linux/minmax.h> #include <linux/nodemask_types.h> #include <linux/random.h> extern nodemask_t _unused_nodemask_arg_; /** * nodemask_pr_args - printf args to output a nodemask * @maskp: nodemask to be printed * * Can be used to provide arguments for '%*pb[l]' when printing a nodemask. 
*/ #define nodemask_pr_args(maskp) __nodemask_pr_numnodes(maskp), \ __nodemask_pr_bits(maskp) static __always_inline unsigned int __nodemask_pr_numnodes(const nodemask_t *m) { return m ? MAX_NUMNODES : 0; } static __always_inline const unsigned long *__nodemask_pr_bits(const nodemask_t *m) { return m ? m->bits : NULL; } /* * The inline keyword gives the compiler room to decide to inline, or * not inline a function as it sees best. However, as these functions * are called in both __init and non-__init functions, if they are not * inlined we will end up with a section mismatch error (of the type of * freeable items not being freed). So we must use __always_inline here * to fix the problem. If other functions in the future also end up in * this situation they will also need to be annotated as __always_inline */ #define node_set(node, dst) __node_set((node), &(dst)) static __always_inline void __node_set(int node, volatile nodemask_t *dstp) { set_bit(node, dstp->bits); } #define node_clear(node, dst) __node_clear((node), &(dst)) static __always_inline void __node_clear(int node, volatile nodemask_t *dstp) { clear_bit(node, dstp->bits); } #define nodes_setall(dst) __nodes_setall(&(dst), MAX_NUMNODES) static __always_inline void __nodes_setall(nodemask_t *dstp, unsigned int nbits) { bitmap_fill(dstp->bits, nbits); } #define nodes_clear(dst) __nodes_clear(&(dst), MAX_NUMNODES) static __always_inline void __nodes_clear(nodemask_t *dstp, unsigned int nbits) { bitmap_zero(dstp->bits, nbits); } /* No static inline type checking - see Subtlety (1) above. */ #define node_isset(node, nodemask) test_bit((node), (nodemask).bits) #define node_test_and_set(node, nodemask) \ __node_test_and_set((node), &(nodemask)) static __always_inline bool __node_test_and_set(int node, nodemask_t *addr) { return test_and_set_bit(node, addr->bits); } #define nodes_and(dst, src1, src2) \ __nodes_and(&(dst), &(src1), &(src2), MAX_NUMNODES) static __always_inline void __nodes_and(nodemask_t *dstp, const nodemask_t *src1p, const nodemask_t *src2p, unsigned int nbits) { bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits); } #define nodes_or(dst, src1, src2) \ __nodes_or(&(dst), &(src1), &(src2), MAX_NUMNODES) static __always_inline void __nodes_or(nodemask_t *dstp, const nodemask_t *src1p, const nodemask_t *src2p, unsigned int nbits) { bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits); } #define nodes_xor(dst, src1, src2) \ __nodes_xor(&(dst), &(src1), &(src2), MAX_NUMNODES) static __always_inline void __nodes_xor(nodemask_t *dstp, const nodemask_t *src1p, const nodemask_t *src2p, unsigned int nbits) { bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits); } #define nodes_andnot(dst, src1, src2) \ __nodes_andnot(&(dst), &(src1), &(src2), MAX_NUMNODES) static __always_inline void __nodes_andnot(nodemask_t *dstp, const nodemask_t *src1p, const nodemask_t *src2p, unsigned int nbits) { bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits); } #define nodes_copy(dst, src) __nodes_copy(&(dst), &(src), MAX_NUMNODES) static __always_inline void __nodes_copy(nodemask_t *dstp, const nodemask_t *srcp, unsigned int nbits) { bitmap_copy(dstp->bits, srcp->bits, nbits); } #define nodes_complement(dst, src) \ __nodes_complement(&(dst), &(src), MAX_NUMNODES) static __always_inline void __nodes_complement(nodemask_t *dstp, const nodemask_t *srcp, unsigned int nbits) { bitmap_complement(dstp->bits, srcp->bits, nbits); } #define nodes_equal(src1, src2) \ __nodes_equal(&(src1), &(src2), MAX_NUMNODES) static __always_inline bool 
__nodes_equal(const nodemask_t *src1p, const nodemask_t *src2p, unsigned int nbits) { return bitmap_equal(src1p->bits, src2p->bits, nbits); } #define nodes_intersects(src1, src2) \ __nodes_intersects(&(src1), &(src2), MAX_NUMNODES) static __always_inline bool __nodes_intersects(const nodemask_t *src1p, const nodemask_t *src2p, unsigned int nbits) { return bitmap_intersects(src1p->bits, src2p->bits, nbits); } #define nodes_subset(src1, src2) \ __nodes_subset(&(src1), &(src2), MAX_NUMNODES) static __always_inline bool __nodes_subset(const nodemask_t *src1p, const nodemask_t *src2p, unsigned int nbits) { return bitmap_subset(src1p->bits, src2p->bits, nbits); } #define nodes_empty(src) __nodes_empty(&(src), MAX_NUMNODES) static __always_inline bool __nodes_empty(const nodemask_t *srcp, unsigned int nbits) { return bitmap_empty(srcp->bits, nbits); } #define nodes_full(nodemask) __nodes_full(&(nodemask), MAX_NUMNODES) static __always_inline bool __nodes_full(const nodemask_t *srcp, unsigned int nbits) { return bitmap_full(srcp->bits, nbits); } #define nodes_weight(nodemask) __nodes_weight(&(nodemask), MAX_NUMNODES) static __always_inline int __nodes_weight(const nodemask_t *srcp, unsigned int nbits) { return bitmap_weight(srcp->bits, nbits); } /* FIXME: better would be to fix all architectures to never return > MAX_NUMNODES, then the silly min_ts could be dropped. */ #define first_node(src) __first_node(&(src)) static __always_inline unsigned int __first_node(const nodemask_t *srcp) { return min_t(unsigned int, MAX_NUMNODES, find_first_bit(srcp->bits, MAX_NUMNODES)); } #define next_node(n, src) __next_node((n), &(src)) static __always_inline unsigned int __next_node(int n, const nodemask_t *srcp) { return min_t(unsigned int, MAX_NUMNODES, find_next_bit(srcp->bits, MAX_NUMNODES, n+1)); } /* * Find the next present node in src, starting after node n, wrapping around to * the first node in src if needed. Returns MAX_NUMNODES if src is empty. */ #define next_node_in(n, src) __next_node_in((n), &(src)) static __always_inline unsigned int __next_node_in(int node, const nodemask_t *srcp) { unsigned int ret = __next_node(node, srcp); if (ret == MAX_NUMNODES) ret = __first_node(srcp); return ret; } static __always_inline void init_nodemask_of_node(nodemask_t *mask, int node) { nodes_clear(*mask); node_set(node, *mask); } #define nodemask_of_node(node) \ ({ \ typeof(_unused_nodemask_arg_) m; \ if (sizeof(m) == sizeof(unsigned long)) { \ m.bits[0] = 1UL << (node); \ } else { \ init_nodemask_of_node(&m, (node)); \ } \ m; \ }) #define first_unset_node(mask) __first_unset_node(&(mask)) static __always_inline unsigned int __first_unset_node(const nodemask_t *maskp) { return min_t(unsigned int, MAX_NUMNODES, find_first_zero_bit(maskp->bits, MAX_NUMNODES)); } #define NODE_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(MAX_NUMNODES) #if MAX_NUMNODES <= BITS_PER_LONG #define NODE_MASK_ALL \ ((nodemask_t) { { \ [BITS_TO_LONGS(MAX_NUMNODES)-1] = NODE_MASK_LAST_WORD \ } }) #else #define NODE_MASK_ALL \ ((nodemask_t) { { \ [0 ... BITS_TO_LONGS(MAX_NUMNODES)-2] = ~0UL, \ [BITS_TO_LONGS(MAX_NUMNODES)-1] = NODE_MASK_LAST_WORD \ } }) #endif #define NODE_MASK_NONE \ ((nodemask_t) { { \ [0 ... 
BITS_TO_LONGS(MAX_NUMNODES)-1] = 0UL \ } }) #define nodes_addr(src) ((src).bits) #define nodemask_parse_user(ubuf, ulen, dst) \ __nodemask_parse_user((ubuf), (ulen), &(dst), MAX_NUMNODES) static __always_inline int __nodemask_parse_user(const char __user *buf, int len, nodemask_t *dstp, int nbits) { return bitmap_parse_user(buf, len, dstp->bits, nbits); } #define nodelist_parse(buf, dst) __nodelist_parse((buf), &(dst), MAX_NUMNODES) static __always_inline int __nodelist_parse(const char *buf, nodemask_t *dstp, int nbits) { return bitmap_parselist(buf, dstp->bits, nbits); } #define node_remap(oldbit, old, new) \ __node_remap((oldbit), &(old), &(new), MAX_NUMNODES) static __always_inline int __node_remap(int oldbit, const nodemask_t *oldp, const nodemask_t *newp, int nbits) { return bitmap_bitremap(oldbit, oldp->bits, newp->bits, nbits); } #define nodes_remap(dst, src, old, new) \ __nodes_remap(&(dst), &(src), &(old), &(new), MAX_NUMNODES) static __always_inline void __nodes_remap(nodemask_t *dstp, const nodemask_t *srcp, const nodemask_t *oldp, const nodemask_t *newp, int nbits) { bitmap_remap(dstp->bits, srcp->bits, oldp->bits, newp->bits, nbits); } #define nodes_onto(dst, orig, relmap) \ __nodes_onto(&(dst), &(orig), &(relmap), MAX_NUMNODES) static __always_inline void __nodes_onto(nodemask_t *dstp, const nodemask_t *origp, const nodemask_t *relmapp, int nbits) { bitmap_onto(dstp->bits, origp->bits, relmapp->bits, nbits); } #define nodes_fold(dst, orig, sz) \ __nodes_fold(&(dst), &(orig), sz, MAX_NUMNODES) static __always_inline void __nodes_fold(nodemask_t *dstp, const nodemask_t *origp, int sz, int nbits) { bitmap_fold(dstp->bits, origp->bits, sz, nbits); } #if MAX_NUMNODES > 1 #define for_each_node_mask(node, mask) \ for ((node) = first_node(mask); \ (node) < MAX_NUMNODES; \ (node) = next_node((node), (mask))) #else /* MAX_NUMNODES == 1 */ #define for_each_node_mask(node, mask) \ for ((node) = 0; (node) < 1 && !nodes_empty(mask); (node)++) #endif /* MAX_NUMNODES */ /* * Bitmasks that are kept for all the nodes. */ enum node_states { N_POSSIBLE, /* The node could become online at some point */ N_ONLINE, /* The node is online */ N_NORMAL_MEMORY, /* The node has regular memory */ #ifdef CONFIG_HIGHMEM N_HIGH_MEMORY, /* The node has regular or high memory */ #else N_HIGH_MEMORY = N_NORMAL_MEMORY, #endif N_MEMORY, /* The node has memory(regular, high, movable) */ N_CPU, /* The node has one or more cpus */ N_GENERIC_INITIATOR, /* The node has one or more Generic Initiators */ NR_NODE_STATES }; /* * The following particular system nodemasks and operations * on them manage all possible and online nodes. 
*/ extern nodemask_t node_states[NR_NODE_STATES]; #if MAX_NUMNODES > 1 static __always_inline int node_state(int node, enum node_states state) { return node_isset(node, node_states[state]); } static __always_inline void node_set_state(int node, enum node_states state) { __node_set(node, &node_states[state]); } static __always_inline void node_clear_state(int node, enum node_states state) { __node_clear(node, &node_states[state]); } static __always_inline int num_node_state(enum node_states state) { return nodes_weight(node_states[state]); } #define for_each_node_state(__node, __state) \ for_each_node_mask((__node), node_states[__state]) #define first_online_node first_node(node_states[N_ONLINE]) #define first_memory_node first_node(node_states[N_MEMORY]) static __always_inline unsigned int next_online_node(int nid) { return next_node(nid, node_states[N_ONLINE]); } static __always_inline unsigned int next_memory_node(int nid) { return next_node(nid, node_states[N_MEMORY]); } extern unsigned int nr_node_ids; extern unsigned int nr_online_nodes; static __always_inline void node_set_online(int nid) { node_set_state(nid, N_ONLINE); nr_online_nodes = num_node_state(N_ONLINE); } static __always_inline void node_set_offline(int nid) { node_clear_state(nid, N_ONLINE); nr_online_nodes = num_node_state(N_ONLINE); } #else static __always_inline int node_state(int node, enum node_states state) { return node == 0; } static __always_inline void node_set_state(int node, enum node_states state) { } static __always_inline void node_clear_state(int node, enum node_states state) { } static __always_inline int num_node_state(enum node_states state) { return 1; } #define for_each_node_state(node, __state) \ for ( (node) = 0; (node) == 0; (node) = 1) #define first_online_node 0 #define first_memory_node 0 #define next_online_node(nid) (MAX_NUMNODES) #define next_memory_node(nid) (MAX_NUMNODES) #define nr_node_ids 1U #define nr_online_nodes 1U #define node_set_online(node) node_set_state((node), N_ONLINE) #define node_set_offline(node) node_clear_state((node), N_ONLINE) #endif static __always_inline int node_random(const nodemask_t *maskp) { #if defined(CONFIG_NUMA) && (MAX_NUMNODES > 1) int w, bit; w = nodes_weight(*maskp); switch (w) { case 0: bit = NUMA_NO_NODE; break; case 1: bit = first_node(*maskp); break; default: bit = find_nth_bit(maskp->bits, MAX_NUMNODES, get_random_u32_below(w)); break; } return bit; #else return 0; #endif } #define node_online_map node_states[N_ONLINE] #define node_possible_map node_states[N_POSSIBLE] #define num_online_nodes() num_node_state(N_ONLINE) #define num_possible_nodes() num_node_state(N_POSSIBLE) #define node_online(node) node_state((node), N_ONLINE) #define node_possible(node) node_state((node), N_POSSIBLE) #define for_each_node(node) for_each_node_state(node, N_POSSIBLE) #define for_each_online_node(node) for_each_node_state(node, N_ONLINE) #define for_each_node_with_cpus(node) for_each_node_state(node, N_CPU) /* * For nodemask scratch area. * NODEMASK_ALLOC(type, name) allocates an object with a specified type and * name. */ #if NODES_SHIFT > 8 /* nodemask_t > 32 bytes */ #define NODEMASK_ALLOC(type, name, gfp_flags) \ type *name = kmalloc(sizeof(*name), gfp_flags) #define NODEMASK_FREE(m) kfree(m) #else #define NODEMASK_ALLOC(type, name, gfp_flags) type _##name, *name = &_##name #define NODEMASK_FREE(m) do {} while (0) #endif /* Example structure for using NODEMASK_ALLOC, used in mempolicy. 
*/ struct nodemask_scratch { nodemask_t mask1; nodemask_t mask2; }; #define NODEMASK_SCRATCH(x) \ NODEMASK_ALLOC(struct nodemask_scratch, x, \ GFP_KERNEL | __GFP_NORETRY) #define NODEMASK_SCRATCH_FREE(x) NODEMASK_FREE(x) #endif /* __LINUX_NODEMASK_H */ |
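/*
 * Usage sketch (illustrative, not part of nodemask.h): build a mask,
 * intersect it with the online map and walk the result. Every operation
 * used here is defined above; the function itself is a hypothetical
 * example.
 */
static inline unsigned int example_count_selected_online(void)
{
	nodemask_t wanted, selected;
	unsigned int node, count = 0;

	nodes_clear(wanted);
	node_set(0, wanted);
	if (MAX_NUMNODES > 1)
		node_set(1, wanted);

	/* keep only the requested nodes that are currently online */
	nodes_and(selected, wanted, node_online_map);

	for_each_node_mask(node, selected)
		count++;

	return count;	/* same value as nodes_weight(selected) */
}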
9 82 79 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 | /* SPDX-License-Identifier: GPL-2.0-only */ #ifndef _INPUT_MT_H #define _INPUT_MT_H /* * Input Multitouch Library * * Copyright (c) 2010 Henrik Rydberg */ #include <linux/input.h> #define TRKID_MAX 0xffff #define INPUT_MT_POINTER 0x0001 /* pointer device, e.g. trackpad */ #define INPUT_MT_DIRECT 0x0002 /* direct device, e.g. touchscreen */ #define INPUT_MT_DROP_UNUSED 0x0004 /* drop contacts not seen in frame */ #define INPUT_MT_TRACK 0x0008 /* use in-kernel tracking */ #define INPUT_MT_SEMI_MT 0x0010 /* semi-mt device, finger count handled manually */ /** * struct input_mt_slot - represents the state of an input MT slot * @abs: holds current values of ABS_MT axes for this slot * @frame: last frame at which input_mt_report_slot_state() was called * @key: optional driver designation of this slot */ struct input_mt_slot { int abs[ABS_MT_LAST - ABS_MT_FIRST + 1]; unsigned int frame; unsigned int key; }; /** * struct input_mt - state of tracked contacts * @trkid: stores MT tracking ID for the next contact * @num_slots: number of MT slots the device uses * @slot: MT slot currently being transmitted * @flags: input_mt operation flags * @frame: increases every time input_mt_sync_frame() is called * @red: reduced cost matrix for in-kernel tracking * @slots: array of slots holding current values of tracked contacts */ struct input_mt { int trkid; int num_slots; int slot; unsigned int flags; unsigned int frame; int *red; struct input_mt_slot slots[] __counted_by(num_slots); }; static inline void input_mt_set_value(struct input_mt_slot *slot, unsigned code, int value) { slot->abs[code - ABS_MT_FIRST] = value; } static inline int input_mt_get_value(const struct input_mt_slot *slot, unsigned code) { return slot->abs[code - ABS_MT_FIRST]; } static inline bool input_mt_is_active(const struct input_mt_slot *slot) { return input_mt_get_value(slot, ABS_MT_TRACKING_ID) >= 0; } static inline bool input_mt_is_used(const struct input_mt *mt, const struct input_mt_slot *slot) { return slot->frame == mt->frame; } int input_mt_init_slots(struct input_dev *dev, unsigned int num_slots, unsigned int flags); void input_mt_destroy_slots(struct input_dev *dev); static inline int input_mt_new_trkid(struct input_mt *mt) { return mt->trkid++ & TRKID_MAX; } static inline void input_mt_slot(struct input_dev *dev, int slot) { input_event(dev, EV_ABS, ABS_MT_SLOT, slot); } static inline bool input_is_mt_value(int axis) { return axis >= ABS_MT_FIRST && axis <= ABS_MT_LAST; } static inline bool input_is_mt_axis(int axis) { return axis == ABS_MT_SLOT || input_is_mt_value(axis); } bool input_mt_report_slot_state(struct input_dev *dev, unsigned int tool_type, bool active); static inline void input_mt_report_slot_inactive(struct input_dev *dev) { input_mt_report_slot_state(dev, 0, false); } void input_mt_report_finger_count(struct input_dev *dev, int count); void input_mt_report_pointer_emulation(struct input_dev *dev, bool use_count); void input_mt_drop_unused(struct input_dev *dev); void input_mt_sync_frame(struct input_dev *dev); /** * struct input_mt_pos - contact position * @x: horizontal coordinate * @y: vertical 
coordinate */ struct input_mt_pos { s16 x, y; }; int input_mt_assign_slots(struct input_dev *dev, int *slots, const struct input_mt_pos *pos, int num_pos, int dmax); int input_mt_get_slot_by_key(struct input_dev *dev, int key); #endif
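/*
 * Usage sketch (illustrative, not part of input/mt.h): the slot-based
 * reporting sequence a multitouch driver typically follows. The axis
 * ranges, slot count and the example_* functions are assumptions made
 * for the example; the input-mt and input core calls are as declared
 * above and in <linux/input.h>.
 */
#include <linux/input.h>
#include <linux/input/mt.h>

static int example_setup(struct input_dev *dev)
{
	input_set_abs_params(dev, ABS_MT_POSITION_X, 0, 1023, 0, 0);
	input_set_abs_params(dev, ABS_MT_POSITION_Y, 0, 767, 0, 0);

	/* two tracked contacts on a direct (touchscreen-like) device */
	return input_mt_init_slots(dev, 2, INPUT_MT_DIRECT);
}

static void example_report_contact(struct input_dev *dev, int slot,
				   bool active, int x, int y)
{
	input_mt_slot(dev, slot);
	input_mt_report_slot_state(dev, MT_TOOL_FINGER, active);
	if (active) {
		input_report_abs(dev, ABS_MT_POSITION_X, x);
		input_report_abs(dev, ABS_MT_POSITION_Y, y);
	}
	input_mt_sync_frame(dev);
	input_sync(dev);
}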
423 17 406 417 421 406 17 17 423 423 420 2181 422 423 17 418 423 423 39 39 422 70 422 423 69 422 392 393 420 423 58 33 2178 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 | // SPDX-License-Identifier: GPL-2.0 /* * A fast, small, non-recursive O(n log n) sort for the Linux kernel * * This performs n*log2(n) + 0.37*n + o(n) comparisons on average, * and 1.5*n*log2(n) + O(n) in the (very contrived) worst case. * * Quicksort manages n*log2(n) - 1.26*n for random inputs (1.63*n * better) at the expense of stack usage and much larger code to avoid * quicksort's O(n^2) worst case. */ #include <linux/types.h> #include <linux/export.h> #include <linux/sort.h> /** * is_aligned - is this pointer & size okay for word-wide copying? * @base: pointer to data * @size: size of each element * @align: required alignment (typically 4 or 8) * * Returns true if elements can be copied using word loads and stores. * The size must be a multiple of the alignment, and the base address must * be if we do not have CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS. * * For some reason, gcc doesn't know to optimize "if (a & mask || b & mask)" * to "if ((a | b) & mask)", so we do that by hand. */ __attribute_const__ __always_inline static bool is_aligned(const void *base, size_t size, unsigned char align) { unsigned char lsbits = (unsigned char)size; (void)base; #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS lsbits |= (unsigned char)(uintptr_t)base; #endif return (lsbits & (align - 1)) == 0; } /** * swap_words_32 - swap two elements in 32-bit chunks * @a: pointer to the first element to swap * @b: pointer to the second element to swap * @n: element size (must be a multiple of 4) * * Exchange the two objects in memory. This exploits base+index addressing, * which basically all CPUs have, to minimize loop overhead computations. * * For some reason, on x86 gcc 7.3.0 adds a redundant test of n at the * bottom of the loop, even though the zero flag is still valid from the * subtract (since the intervening mov instructions don't alter the flags). * Gcc 8.1.0 doesn't have that problem. 
*/ static void swap_words_32(void *a, void *b, size_t n) { do { u32 t = *(u32 *)(a + (n -= 4)); *(u32 *)(a + n) = *(u32 *)(b + n); *(u32 *)(b + n) = t; } while (n); } /** * swap_words_64 - swap two elements in 64-bit chunks * @a: pointer to the first element to swap * @b: pointer to the second element to swap * @n: element size (must be a multiple of 8) * * Exchange the two objects in memory. This exploits base+index * addressing, which basically all CPUs have, to minimize loop overhead * computations. * * We'd like to use 64-bit loads if possible. If they're not, emulating * one requires base+index+4 addressing which x86 has but most other * processors do not. If CONFIG_64BIT, we definitely have 64-bit loads, * but it's possible to have 64-bit loads without 64-bit pointers (e.g. * x32 ABI). Are there any cases the kernel needs to worry about? */ static void swap_words_64(void *a, void *b, size_t n) { do { #ifdef CONFIG_64BIT u64 t = *(u64 *)(a + (n -= 8)); *(u64 *)(a + n) = *(u64 *)(b + n); *(u64 *)(b + n) = t; #else /* Use two 32-bit transfers to avoid base+index+4 addressing */ u32 t = *(u32 *)(a + (n -= 4)); *(u32 *)(a + n) = *(u32 *)(b + n); *(u32 *)(b + n) = t; t = *(u32 *)(a + (n -= 4)); *(u32 *)(a + n) = *(u32 *)(b + n); *(u32 *)(b + n) = t; #endif } while (n); } /** * swap_bytes - swap two elements a byte at a time * @a: pointer to the first element to swap * @b: pointer to the second element to swap * @n: element size * * This is the fallback if alignment doesn't allow using larger chunks. */ static void swap_bytes(void *a, void *b, size_t n) { do { char t = ((char *)a)[--n]; ((char *)a)[n] = ((char *)b)[n]; ((char *)b)[n] = t; } while (n); } /* * The values are arbitrary as long as they can't be confused with * a pointer, but small integers make for the smallest compare * instructions. */ #define SWAP_WORDS_64 (swap_r_func_t)0 #define SWAP_WORDS_32 (swap_r_func_t)1 #define SWAP_BYTES (swap_r_func_t)2 #define SWAP_WRAPPER (swap_r_func_t)3 struct wrapper { cmp_func_t cmp; swap_func_t swap; }; /* * The function pointer is last to make tail calls most efficient if the * compiler decides not to inline this function. */ static void do_swap(void *a, void *b, size_t size, swap_r_func_t swap_func, const void *priv) { if (swap_func == SWAP_WRAPPER) { ((const struct wrapper *)priv)->swap(a, b, (int)size); return; } if (swap_func == SWAP_WORDS_64) swap_words_64(a, b, size); else if (swap_func == SWAP_WORDS_32) swap_words_32(a, b, size); else if (swap_func == SWAP_BYTES) swap_bytes(a, b, size); else swap_func(a, b, (int)size, priv); } #define _CMP_WRAPPER ((cmp_r_func_t)0L) static int do_cmp(const void *a, const void *b, cmp_r_func_t cmp, const void *priv) { if (cmp == _CMP_WRAPPER) return ((const struct wrapper *)priv)->cmp(a, b); return cmp(a, b, priv); } /** * parent - given the offset of the child, find the offset of the parent. * @i: the offset of the heap element whose parent is sought. Non-zero. * @lsbit: a precomputed 1-bit mask, equal to "size & -size" * @size: size of each element * * In terms of array indexes, the parent of element j = @i/@size is simply * (j-1)/2. But when working in byte offsets, we can't use implicit * truncation of integer divides. * * Fortunately, we only need one bit of the quotient, not the full divide. * @size has a least significant bit. That bit will be clear if @i is * an even multiple of @size, and set if it's an odd multiple. 
* * Logically, we're doing "if (i & lsbit) i -= size;", but since the * branch is unpredictable, it's done with a bit of clever branch-free * code instead. */ __attribute_const__ __always_inline static size_t parent(size_t i, unsigned int lsbit, size_t size) { i -= size; i -= size & -(i & lsbit); return i / 2; } #include <linux/sched.h> static void __sort_r(void *base, size_t num, size_t size, cmp_r_func_t cmp_func, swap_r_func_t swap_func, const void *priv, bool may_schedule) { /* pre-scale counters for performance */ size_t n = num * size, a = (num/2) * size; const unsigned int lsbit = size & -size; /* Used to find parent */ size_t shift = 0; if (!a) /* num < 2 || size == 0 */ return; /* called from 'sort' without swap function, let's pick the default */ if (swap_func == SWAP_WRAPPER && !((struct wrapper *)priv)->swap) swap_func = NULL; if (!swap_func) { if (is_aligned(base, size, 8)) swap_func = SWAP_WORDS_64; else if (is_aligned(base, size, 4)) swap_func = SWAP_WORDS_32; else swap_func = SWAP_BYTES; } /* * Loop invariants: * 1. elements [a,n) satisfy the heap property (compare greater than * all of their children), * 2. elements [n,num*size) are sorted, and * 3. a <= b <= c <= d <= n (whenever they are valid). */ for (;;) { size_t b, c, d; if (a) /* Building heap: sift down a */ a -= size << shift; else if (n > 3 * size) { /* Sorting: Extract two largest elements */ n -= size; do_swap(base, base + n, size, swap_func, priv); shift = do_cmp(base + size, base + 2 * size, cmp_func, priv) <= 0; a = size << shift; n -= size; do_swap(base + a, base + n, size, swap_func, priv); } else { /* Sort complete */ break; } /* * Sift element at "a" down into heap. This is the * "bottom-up" variant, which significantly reduces * calls to cmp_func(): we find the sift-down path all * the way to the leaves (one compare per level), then * backtrack to find where to insert the target element. * * Because elements tend to sift down close to the leaves, * this uses fewer compares than doing two per level * on the way down. (A bit more than half as many on * average, 3/4 worst-case.) */ for (b = a; c = 2*b + size, (d = c + size) < n;) b = do_cmp(base + c, base + d, cmp_func, priv) > 0 ? c : d; if (d == n) /* Special case last leaf with no sibling */ b = c; /* Now backtrack from "b" to the correct location for "a" */ while (b != a && do_cmp(base + a, base + b, cmp_func, priv) >= 0) b = parent(b, lsbit, size); c = b; /* Where "a" belongs */ while (b != a) { /* Shift it into place */ b = parent(b, lsbit, size); do_swap(base + b, base + c, size, swap_func, priv); } if (may_schedule) cond_resched(); } n -= size; do_swap(base, base + n, size, swap_func, priv); if (n == size * 2 && do_cmp(base, base + size, cmp_func, priv) > 0) do_swap(base, base + size, size, swap_func, priv); } /** * sort_r - sort an array of elements * @base: pointer to data to sort * @num: number of elements * @size: size of each element * @cmp_func: pointer to comparison function * @swap_func: pointer to swap function or NULL * @priv: third argument passed to comparison function * * This function does a heapsort on the given array. You may provide * a swap_func function if you need to do something more than a memory * copy (e.g. fix up pointers or auxiliary data), but the built-in swap * avoids a slow retpoline and so is significantly faster. * * The comparison function must adhere to specific mathematical * properties to ensure correct and stable sorting: * - Antisymmetry: cmp_func(a, b) must return the opposite sign of * cmp_func(b, a). 
* - Transitivity: if cmp_func(a, b) <= 0 and cmp_func(b, c) <= 0, then * cmp_func(a, c) <= 0. * * Sorting time is O(n log n) both on average and worst-case. While * quicksort is slightly faster on average, it suffers from exploitable * O(n*n) worst-case behavior and extra memory requirements that make * it less suitable for kernel use. */ void sort_r(void *base, size_t num, size_t size, cmp_r_func_t cmp_func, swap_r_func_t swap_func, const void *priv) { __sort_r(base, num, size, cmp_func, swap_func, priv, false); } EXPORT_SYMBOL(sort_r); /** * sort_r_nonatomic - sort an array of elements, with cond_resched * @base: pointer to data to sort * @num: number of elements * @size: size of each element * @cmp_func: pointer to comparison function * @swap_func: pointer to swap function or NULL * @priv: third argument passed to comparison function * * Same as sort_r, but preferred for larger arrays as it does a periodic * cond_resched(). */ void sort_r_nonatomic(void *base, size_t num, size_t size, cmp_r_func_t cmp_func, swap_r_func_t swap_func, const void *priv) { __sort_r(base, num, size, cmp_func, swap_func, priv, true); } EXPORT_SYMBOL(sort_r_nonatomic); void sort(void *base, size_t num, size_t size, cmp_func_t cmp_func, swap_func_t swap_func) { struct wrapper w = { .cmp = cmp_func, .swap = swap_func, }; return __sort_r(base, num, size, _CMP_WRAPPER, SWAP_WRAPPER, &w, false); } EXPORT_SYMBOL(sort); void sort_nonatomic(void *base, size_t num, size_t size, cmp_func_t cmp_func, swap_func_t swap_func) { struct wrapper w = { .cmp = cmp_func, .swap = swap_func, }; return __sort_r(base, num, size, _CMP_WRAPPER, SWAP_WRAPPER, &w, true); } EXPORT_SYMBOL(sort_nonatomic); |
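/*
 * Usage sketch (illustrative, not part of lib/sort.c): sorting an array of
 * ints with the plain sort() wrapper. The comparison function respects the
 * antisymmetry/transitivity rules documented above; passing NULL for
 * swap_func selects the built-in word-sized swap. The example_* names are
 * hypothetical.
 */
#include <linux/sort.h>

static int example_cmp_int(const void *a, const void *b)
{
	int x = *(const int *)a, y = *(const int *)b;

	return (x > y) - (x < y);	/* avoids overflow of x - y */
}

static void example_sort_ints(int *vals, size_t n)
{
	sort(vals, n, sizeof(*vals), example_cmp_int, NULL);
}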
823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 | /* BlueZ - Bluetooth protocol stack for Linux Copyright (C) 2000-2001 Qualcomm Incorporated Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation; THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS SOFTWARE IS DISCLAIMED. */ /* Bluetooth address family and sockets. */ #include <linux/module.h> #include <linux/debugfs.h> #include <linux/stringify.h> #include <linux/sched/signal.h> #include <asm/ioctls.h> #include <net/bluetooth/bluetooth.h> #include <linux/proc_fs.h> #include <linux/ethtool.h> #include <linux/sockios.h> #include "leds.h" #include "selftest.h" /* Bluetooth sockets */ #define BT_MAX_PROTO (BTPROTO_LAST + 1) static const struct net_proto_family *bt_proto[BT_MAX_PROTO]; static DEFINE_RWLOCK(bt_proto_lock); static struct lock_class_key bt_lock_key[BT_MAX_PROTO]; static const char *const bt_key_strings[BT_MAX_PROTO] = { "sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP", "sk_lock-AF_BLUETOOTH-BTPROTO_HCI", "sk_lock-AF_BLUETOOTH-BTPROTO_SCO", "sk_lock-AF_BLUETOOTH-BTPROTO_RFCOMM", "sk_lock-AF_BLUETOOTH-BTPROTO_BNEP", "sk_lock-AF_BLUETOOTH-BTPROTO_CMTP", "sk_lock-AF_BLUETOOTH-BTPROTO_HIDP", "sk_lock-AF_BLUETOOTH-BTPROTO_AVDTP", "sk_lock-AF_BLUETOOTH-BTPROTO_ISO", }; static struct lock_class_key bt_slock_key[BT_MAX_PROTO]; static const char *const bt_slock_key_strings[BT_MAX_PROTO] = { "slock-AF_BLUETOOTH-BTPROTO_L2CAP", "slock-AF_BLUETOOTH-BTPROTO_HCI", "slock-AF_BLUETOOTH-BTPROTO_SCO", "slock-AF_BLUETOOTH-BTPROTO_RFCOMM", "slock-AF_BLUETOOTH-BTPROTO_BNEP", "slock-AF_BLUETOOTH-BTPROTO_CMTP", "slock-AF_BLUETOOTH-BTPROTO_HIDP", "slock-AF_BLUETOOTH-BTPROTO_AVDTP", "slock-AF_BLUETOOTH-BTPROTO_ISO", }; void bt_sock_reclassify_lock(struct sock *sk, int proto) { BUG_ON(!sk); BUG_ON(!sock_allow_reclassification(sk)); sock_lock_init_class_and_name(sk, bt_slock_key_strings[proto], &bt_slock_key[proto], bt_key_strings[proto], &bt_lock_key[proto]); } EXPORT_SYMBOL(bt_sock_reclassify_lock); int bt_sock_register(int proto, const struct net_proto_family *ops) { int err = 0; if (proto < 0 || proto >= BT_MAX_PROTO) return -EINVAL; write_lock(&bt_proto_lock); if (bt_proto[proto]) err = -EEXIST; else 
bt_proto[proto] = ops; write_unlock(&bt_proto_lock); return err; } EXPORT_SYMBOL(bt_sock_register); void bt_sock_unregister(int proto) { if (proto < 0 || proto >= BT_MAX_PROTO) return; write_lock(&bt_proto_lock); bt_proto[proto] = NULL; write_unlock(&bt_proto_lock); } EXPORT_SYMBOL(bt_sock_unregister); static int bt_sock_create(struct net *net, struct socket *sock, int proto, int kern) { int err; if (net != &init_net) return -EAFNOSUPPORT; if (proto < 0 || proto >= BT_MAX_PROTO) return -EINVAL; if (!bt_proto[proto]) request_module("bt-proto-%d", proto); err = -EPROTONOSUPPORT; read_lock(&bt_proto_lock); if (bt_proto[proto] && try_module_get(bt_proto[proto]->owner)) { err = bt_proto[proto]->create(net, sock, proto, kern); if (!err) bt_sock_reclassify_lock(sock->sk, proto); module_put(bt_proto[proto]->owner); } read_unlock(&bt_proto_lock); return err; } struct sock *bt_sock_alloc(struct net *net, struct socket *sock, struct proto *prot, int proto, gfp_t prio, int kern) { struct sock *sk; sk = sk_alloc(net, PF_BLUETOOTH, prio, prot, kern); if (!sk) return NULL; sock_init_data(sock, sk); INIT_LIST_HEAD(&bt_sk(sk)->accept_q); sock_reset_flag(sk, SOCK_ZAPPED); sk->sk_protocol = proto; sk->sk_state = BT_OPEN; /* Init peer information so it can be properly monitored */ if (!kern) { spin_lock(&sk->sk_peer_lock); sk->sk_peer_pid = get_pid(task_tgid(current)); sk->sk_peer_cred = get_current_cred(); spin_unlock(&sk->sk_peer_lock); } return sk; } EXPORT_SYMBOL(bt_sock_alloc); void bt_sock_link(struct bt_sock_list *l, struct sock *sk) { write_lock(&l->lock); sk_add_node(sk, &l->head); write_unlock(&l->lock); } EXPORT_SYMBOL(bt_sock_link); void bt_sock_unlink(struct bt_sock_list *l, struct sock *sk) { write_lock(&l->lock); sk_del_node_init(sk); write_unlock(&l->lock); } EXPORT_SYMBOL(bt_sock_unlink); bool bt_sock_linked(struct bt_sock_list *l, struct sock *s) { struct sock *sk; if (!l || !s) return false; read_lock(&l->lock); sk_for_each(sk, &l->head) { if (s == sk) { read_unlock(&l->lock); return true; } } read_unlock(&l->lock); return false; } EXPORT_SYMBOL(bt_sock_linked); void bt_accept_enqueue(struct sock *parent, struct sock *sk, bool bh) { const struct cred *old_cred; struct pid *old_pid; BT_DBG("parent %p, sk %p", parent, sk); sock_hold(sk); if (bh) bh_lock_sock_nested(sk); else lock_sock_nested(sk, SINGLE_DEPTH_NESTING); list_add_tail(&bt_sk(sk)->accept_q, &bt_sk(parent)->accept_q); bt_sk(sk)->parent = parent; /* Copy credentials from parent since for incoming connections the * socket is allocated by the kernel. */ spin_lock(&sk->sk_peer_lock); old_pid = sk->sk_peer_pid; old_cred = sk->sk_peer_cred; sk->sk_peer_pid = get_pid(parent->sk_peer_pid); sk->sk_peer_cred = get_cred(parent->sk_peer_cred); spin_unlock(&sk->sk_peer_lock); put_pid(old_pid); put_cred(old_cred); if (bh) bh_unlock_sock(sk); else release_sock(sk); sk_acceptq_added(parent); } EXPORT_SYMBOL(bt_accept_enqueue); /* Calling function must hold the sk lock. * bt_sk(sk)->parent must be non-NULL meaning sk is in the parent list. 
*/ void bt_accept_unlink(struct sock *sk) { BT_DBG("sk %p state %d", sk, sk->sk_state); list_del_init(&bt_sk(sk)->accept_q); sk_acceptq_removed(bt_sk(sk)->parent); bt_sk(sk)->parent = NULL; sock_put(sk); } EXPORT_SYMBOL(bt_accept_unlink); struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock) { struct bt_sock *s, *n; struct sock *sk; BT_DBG("parent %p", parent); restart: list_for_each_entry_safe(s, n, &bt_sk(parent)->accept_q, accept_q) { sk = (struct sock *)s; /* Prevent early freeing of sk due to unlink and sock_kill */ sock_hold(sk); lock_sock(sk); /* Check sk has not already been unlinked via * bt_accept_unlink() due to serialisation caused by sk locking */ if (!bt_sk(sk)->parent) { BT_DBG("sk %p, already unlinked", sk); release_sock(sk); sock_put(sk); /* Restart the loop as sk is no longer in the list * and also avoid a potential infinite loop because * list_for_each_entry_safe() is not thread safe. */ goto restart; } /* sk is safely in the parent list so reduce reference count */ sock_put(sk); /* FIXME: Is this check still needed */ if (sk->sk_state == BT_CLOSED) { bt_accept_unlink(sk); release_sock(sk); continue; } if (sk->sk_state == BT_CONNECTED || !newsock || test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags)) { bt_accept_unlink(sk); if (newsock) sock_graft(sk, newsock); release_sock(sk); return sk; } release_sock(sk); } return NULL; } EXPORT_SYMBOL(bt_accept_dequeue); int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, int flags) { struct sock *sk = sock->sk; struct sk_buff *skb; size_t copied; size_t skblen; int err; BT_DBG("sock %p sk %p len %zu", sock, sk, len); if (flags & MSG_OOB) return -EOPNOTSUPP; skb = skb_recv_datagram(sk, flags, &err); if (!skb) { if (sk->sk_shutdown & RCV_SHUTDOWN) err = 0; return err; } skblen = skb->len; copied = skb->len; if (len < copied) { msg->msg_flags |= MSG_TRUNC; copied = len; } skb_reset_transport_header(skb); err = skb_copy_datagram_msg(skb, 0, msg, copied); if (err == 0) { sock_recv_cmsgs(msg, sk, skb); if (msg->msg_name && bt_sk(sk)->skb_msg_name) bt_sk(sk)->skb_msg_name(skb, msg->msg_name, &msg->msg_namelen); if (test_bit(BT_SK_PKT_STATUS, &bt_sk(sk)->flags)) { u8 pkt_status = hci_skb_pkt_status(skb); put_cmsg(msg, SOL_BLUETOOTH, BT_SCM_PKT_STATUS, sizeof(pkt_status), &pkt_status); } } skb_free_datagram(sk, skb); if (flags & MSG_TRUNC) copied = skblen; return err ? 
: copied; } EXPORT_SYMBOL(bt_sock_recvmsg); static long bt_sock_data_wait(struct sock *sk, long timeo) { DECLARE_WAITQUEUE(wait, current); add_wait_queue(sk_sleep(sk), &wait); for (;;) { set_current_state(TASK_INTERRUPTIBLE); if (!skb_queue_empty(&sk->sk_receive_queue)) break; if (sk->sk_err || (sk->sk_shutdown & RCV_SHUTDOWN)) break; if (signal_pending(current) || !timeo) break; sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); release_sock(sk); timeo = schedule_timeout(timeo); lock_sock(sk); sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); } __set_current_state(TASK_RUNNING); remove_wait_queue(sk_sleep(sk), &wait); return timeo; } int bt_sock_stream_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct sock *sk = sock->sk; int err = 0; size_t target, copied = 0; long timeo; if (flags & MSG_OOB) return -EOPNOTSUPP; BT_DBG("sk %p size %zu", sk, size); lock_sock(sk); target = sock_rcvlowat(sk, flags & MSG_WAITALL, size); timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); do { struct sk_buff *skb; int chunk; skb = skb_dequeue(&sk->sk_receive_queue); if (!skb) { if (copied >= target) break; err = sock_error(sk); if (err) break; if (sk->sk_shutdown & RCV_SHUTDOWN) break; err = -EAGAIN; if (!timeo) break; timeo = bt_sock_data_wait(sk, timeo); if (signal_pending(current)) { err = sock_intr_errno(timeo); goto out; } continue; } chunk = min_t(unsigned int, skb->len, size); if (skb_copy_datagram_msg(skb, 0, msg, chunk)) { skb_queue_head(&sk->sk_receive_queue, skb); if (!copied) copied = -EFAULT; break; } copied += chunk; size -= chunk; sock_recv_cmsgs(msg, sk, skb); if (!(flags & MSG_PEEK)) { int skb_len = skb_headlen(skb); if (chunk <= skb_len) { __skb_pull(skb, chunk); } else { struct sk_buff *frag; __skb_pull(skb, skb_len); chunk -= skb_len; skb_walk_frags(skb, frag) { if (chunk <= frag->len) { /* Pulling partial data */ skb->len -= chunk; skb->data_len -= chunk; __skb_pull(frag, chunk); break; } else if (frag->len) { /* Pulling all frag data */ chunk -= frag->len; skb->len -= frag->len; skb->data_len -= frag->len; __skb_pull(frag, frag->len); } } } if (skb->len) { skb_queue_head(&sk->sk_receive_queue, skb); break; } kfree_skb(skb); } else { /* put message back and return */ skb_queue_head(&sk->sk_receive_queue, skb); break; } } while (size); out: release_sock(sk); return copied ? : err; } EXPORT_SYMBOL(bt_sock_stream_recvmsg); static inline __poll_t bt_accept_poll(struct sock *parent) { struct bt_sock *s, *n; struct sock *sk; list_for_each_entry_safe(s, n, &bt_sk(parent)->accept_q, accept_q) { sk = (struct sock *)s; if (sk->sk_state == BT_CONNECTED || (test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags) && sk->sk_state == BT_CONNECT2)) return EPOLLIN | EPOLLRDNORM; } return 0; } __poll_t bt_sock_poll(struct file *file, struct socket *sock, poll_table *wait) { struct sock *sk = sock->sk; __poll_t mask = 0; poll_wait(file, sk_sleep(sk), wait); if (sk->sk_state == BT_LISTEN) return bt_accept_poll(sk); if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue)) mask |= EPOLLERR | (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? 
EPOLLPRI : 0); if (sk->sk_shutdown & RCV_SHUTDOWN) mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM; if (sk->sk_shutdown == SHUTDOWN_MASK) mask |= EPOLLHUP; if (!skb_queue_empty_lockless(&sk->sk_receive_queue)) mask |= EPOLLIN | EPOLLRDNORM; if (sk->sk_state == BT_CLOSED) mask |= EPOLLHUP; if (sk->sk_state == BT_CONNECT || sk->sk_state == BT_CONNECT2 || sk->sk_state == BT_CONFIG) return mask; if (!test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags) && sock_writeable(sk)) mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND; else sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); return mask; } EXPORT_SYMBOL(bt_sock_poll); static int bt_ethtool_get_ts_info(struct sock *sk, unsigned int index, void __user *useraddr) { struct ethtool_ts_info info; struct kernel_ethtool_ts_info ts_info = {}; int ret; ret = hci_ethtool_ts_info(index, sk->sk_protocol, &ts_info); if (ret == -ENODEV) return ret; else if (ret < 0) return -EIO; memset(&info, 0, sizeof(info)); info.cmd = ETHTOOL_GET_TS_INFO; info.so_timestamping = ts_info.so_timestamping; info.phc_index = ts_info.phc_index; info.tx_types = ts_info.tx_types; info.rx_filters = ts_info.rx_filters; if (copy_to_user(useraddr, &info, sizeof(info))) return -EFAULT; return 0; } static int bt_ethtool(struct sock *sk, const struct ifreq *ifr, void __user *useraddr) { unsigned int index; u32 ethcmd; int n; if (copy_from_user(ðcmd, useraddr, sizeof(ethcmd))) return -EFAULT; if (sscanf(ifr->ifr_name, "hci%u%n", &index, &n) != 1 || n != strlen(ifr->ifr_name)) return -ENODEV; switch (ethcmd) { case ETHTOOL_GET_TS_INFO: return bt_ethtool_get_ts_info(sk, index, useraddr); } return -EOPNOTSUPP; } static int bt_dev_ioctl(struct socket *sock, unsigned int cmd, void __user *arg) { struct sock *sk = sock->sk; struct ifreq ifr = {}; void __user *data; char *colon; int ret = -ENOIOCTLCMD; if (get_user_ifreq(&ifr, &data, arg)) return -EFAULT; ifr.ifr_name[IFNAMSIZ - 1] = 0; colon = strchr(ifr.ifr_name, ':'); if (colon) *colon = 0; switch (cmd) { case SIOCETHTOOL: ret = bt_ethtool(sk, &ifr, data); break; } if (colon) *colon = ':'; if (put_user_ifreq(&ifr, arg)) return -EFAULT; return ret; } int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { struct sock *sk = sock->sk; struct sk_buff *skb; long amount; int err; BT_DBG("sk %p cmd %x arg %lx", sk, cmd, arg); switch (cmd) { case TIOCOUTQ: if (sk->sk_state == BT_LISTEN) return -EINVAL; amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk); if (amount < 0) amount = 0; err = put_user(amount, (int __user *)arg); break; case TIOCINQ: if (sk->sk_state == BT_LISTEN) return -EINVAL; spin_lock(&sk->sk_receive_queue.lock); skb = skb_peek(&sk->sk_receive_queue); amount = skb ? 
skb->len : 0; spin_unlock(&sk->sk_receive_queue.lock); err = put_user(amount, (int __user *)arg); break; case SIOCETHTOOL: err = bt_dev_ioctl(sock, cmd, (void __user *)arg); break; default: err = -ENOIOCTLCMD; break; } return err; } EXPORT_SYMBOL(bt_sock_ioctl); /* This function expects the sk lock to be held when called */ int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo) { DECLARE_WAITQUEUE(wait, current); int err = 0; BT_DBG("sk %p", sk); add_wait_queue(sk_sleep(sk), &wait); set_current_state(TASK_INTERRUPTIBLE); while (sk->sk_state != state) { if (!timeo) { err = -EINPROGRESS; break; } if (signal_pending(current)) { err = sock_intr_errno(timeo); break; } release_sock(sk); timeo = schedule_timeout(timeo); lock_sock(sk); set_current_state(TASK_INTERRUPTIBLE); err = sock_error(sk); if (err) break; } __set_current_state(TASK_RUNNING); remove_wait_queue(sk_sleep(sk), &wait); return err; } EXPORT_SYMBOL(bt_sock_wait_state); /* This function expects the sk lock to be held when called */ int bt_sock_wait_ready(struct sock *sk, unsigned int msg_flags) { DECLARE_WAITQUEUE(wait, current); unsigned long timeo; int err = 0; BT_DBG("sk %p", sk); timeo = sock_sndtimeo(sk, !!(msg_flags & MSG_DONTWAIT)); add_wait_queue(sk_sleep(sk), &wait); set_current_state(TASK_INTERRUPTIBLE); while (test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags)) { if (!timeo) { err = -EAGAIN; break; } if (signal_pending(current)) { err = sock_intr_errno(timeo); break; } release_sock(sk); timeo = schedule_timeout(timeo); lock_sock(sk); set_current_state(TASK_INTERRUPTIBLE); err = sock_error(sk); if (err) break; } __set_current_state(TASK_RUNNING); remove_wait_queue(sk_sleep(sk), &wait); return err; } EXPORT_SYMBOL(bt_sock_wait_ready); #ifdef CONFIG_PROC_FS static void *bt_seq_start(struct seq_file *seq, loff_t *pos) __acquires(seq->private->l->lock) { struct bt_sock_list *l = pde_data(file_inode(seq->file)); read_lock(&l->lock); return seq_hlist_start_head(&l->head, *pos); } static void *bt_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct bt_sock_list *l = pde_data(file_inode(seq->file)); return seq_hlist_next(v, &l->head, pos); } static void bt_seq_stop(struct seq_file *seq, void *v) __releases(seq->private->l->lock) { struct bt_sock_list *l = pde_data(file_inode(seq->file)); read_unlock(&l->lock); } static int bt_seq_show(struct seq_file *seq, void *v) { struct bt_sock_list *l = pde_data(file_inode(seq->file)); if (v == SEQ_START_TOKEN) { seq_puts(seq, "sk RefCnt Rmem Wmem User Inode Parent"); if (l->custom_seq_show) { seq_putc(seq, ' '); l->custom_seq_show(seq, v); } seq_putc(seq, '\n'); } else { struct sock *sk = sk_entry(v); struct bt_sock *bt = bt_sk(sk); seq_printf(seq, "%pK %-6d %-6u %-6u %-6u %-6lu %-6lu", sk, refcount_read(&sk->sk_refcnt), sk_rmem_alloc_get(sk), sk_wmem_alloc_get(sk), from_kuid(seq_user_ns(seq), sock_i_uid(sk)), sock_i_ino(sk), bt->parent ? 
sock_i_ino(bt->parent) : 0LU); if (l->custom_seq_show) { seq_putc(seq, ' '); l->custom_seq_show(seq, v); } seq_putc(seq, '\n'); } return 0; } static const struct seq_operations bt_seq_ops = { .start = bt_seq_start, .next = bt_seq_next, .stop = bt_seq_stop, .show = bt_seq_show, }; int bt_procfs_init(struct net *net, const char *name, struct bt_sock_list *sk_list, int (*seq_show)(struct seq_file *, void *)) { sk_list->custom_seq_show = seq_show; if (!proc_create_seq_data(name, 0, net->proc_net, &bt_seq_ops, sk_list)) return -ENOMEM; return 0; } void bt_procfs_cleanup(struct net *net, const char *name) { remove_proc_entry(name, net->proc_net); } #else int bt_procfs_init(struct net *net, const char *name, struct bt_sock_list *sk_list, int (*seq_show)(struct seq_file *, void *)) { return 0; } void bt_procfs_cleanup(struct net *net, const char *name) { } #endif EXPORT_SYMBOL(bt_procfs_init); EXPORT_SYMBOL(bt_procfs_cleanup); static const struct net_proto_family bt_sock_family_ops = { .owner = THIS_MODULE, .family = PF_BLUETOOTH, .create = bt_sock_create, }; struct dentry *bt_debugfs; EXPORT_SYMBOL_GPL(bt_debugfs); #define VERSION __stringify(BT_SUBSYS_VERSION) "." \ __stringify(BT_SUBSYS_REVISION) static int __init bt_init(void) { int err; sock_skb_cb_check_size(sizeof(struct bt_skb_cb)); BT_INFO("Core ver %s", VERSION); err = bt_selftest(); if (err < 0) return err; bt_debugfs = debugfs_create_dir("bluetooth", NULL); bt_leds_init(); err = bt_sysfs_init(); if (err < 0) goto cleanup_led; err = sock_register(&bt_sock_family_ops); if (err) goto cleanup_sysfs; BT_INFO("HCI device and connection manager initialized"); err = hci_sock_init(); if (err) goto unregister_socket; err = l2cap_init(); if (err) goto cleanup_socket; err = sco_init(); if (err) goto cleanup_cap; err = mgmt_init(); if (err) goto cleanup_sco; return 0; cleanup_sco: sco_exit(); cleanup_cap: l2cap_exit(); cleanup_socket: hci_sock_cleanup(); unregister_socket: sock_unregister(PF_BLUETOOTH); cleanup_sysfs: bt_sysfs_cleanup(); cleanup_led: bt_leds_cleanup(); debugfs_remove_recursive(bt_debugfs); return err; } static void __exit bt_exit(void) { iso_exit(); mgmt_exit(); sco_exit(); l2cap_exit(); hci_sock_cleanup(); sock_unregister(PF_BLUETOOTH); bt_sysfs_cleanup(); bt_leds_cleanup(); debugfs_remove_recursive(bt_debugfs); } subsys_initcall(bt_init); module_exit(bt_exit); MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>"); MODULE_DESCRIPTION("Bluetooth Core ver " VERSION); MODULE_VERSION(VERSION); MODULE_LICENSE("GPL"); MODULE_ALIAS_NETPROTO(PF_BLUETOOTH); |
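The helpers collected above (bt_sock_register(), bt_sock_alloc(), bt_sock_link(), and the shared recvmsg/poll/ioctl plumbing) form the common layer that the individual Bluetooth protocol modules build on. The sketch below is illustrative only and uses hypothetical my_* names that do not appear in the file above; it shows roughly how a protocol module would plug into that layer by registering a struct proto and handing a net_proto_family with a create callback to bt_sock_register() so that bt_sock_create() can dispatch to it.

/* Illustrative sketch (not part of the file above): how a Bluetooth
 * protocol module could hook into the socket core.  All my_* names
 * are hypothetical.
 */
static struct proto my_bt_proto = {
	.name		= "MYPROTO",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct bt_sock),
};

static int my_bt_sock_create(struct net *net, struct socket *sock,
			     int protocol, int kern)
{
	struct sock *sk;

	/* bt_sock_alloc() initialises the accept queue and peer
	 * credentials and leaves the socket in BT_OPEN state.
	 */
	sk = bt_sock_alloc(net, sock, &my_bt_proto, protocol, GFP_ATOMIC, kern);
	if (!sk)
		return -ENOMEM;

	/* A real module would set sock->ops to its proto_ops here. */
	return 0;
}

static const struct net_proto_family my_bt_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= my_bt_sock_create,
};

static int __init my_bt_proto_init(void)
{
	int err;

	err = proto_register(&my_bt_proto, 0);
	if (err)
		return err;

	/* Register under a BTPROTO_* number so that bt_sock_create()
	 * above can find and dispatch to this family.
	 */
	err = bt_sock_register(BTPROTO_AVDTP, &my_bt_family_ops);
	if (err)
		proto_unregister(&my_bt_proto);
	return err;
}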
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __NET_FRAG_H__ #define __NET_FRAG_H__ #include <linux/rhashtable-types.h> #include <linux/completion.h> #include <linux/in6.h> #include <linux/rbtree_types.h> #include <linux/refcount.h> #include <net/dropreason-core.h> /* Per netns frag queues directory */ struct fqdir { /* sysctls */ long high_thresh; long low_thresh; int timeout; int max_dist; struct inet_frags *f; struct net *net; bool dead; struct rhashtable rhashtable ____cacheline_aligned_in_smp; /* Keep atomic mem on separate cachelines in structs that include it */ atomic_long_t mem ____cacheline_aligned_in_smp; struct work_struct destroy_work; struct llist_node free_list; }; /** * enum: fragment queue flags * * @INET_FRAG_FIRST_IN: first fragment has arrived * @INET_FRAG_LAST_IN: final fragment has arrived * @INET_FRAG_COMPLETE: frag queue has been processed and is due for destruction * @INET_FRAG_HASH_DEAD: inet_frag_kill() has not removed fq from rhashtable * @INET_FRAG_DROP: if skbs must be dropped (instead of being consumed) */ enum { INET_FRAG_FIRST_IN = BIT(0), INET_FRAG_LAST_IN = BIT(1), INET_FRAG_COMPLETE = BIT(2), INET_FRAG_HASH_DEAD = BIT(3), INET_FRAG_DROP = BIT(4), }; struct frag_v4_compare_key { __be32 saddr; __be32 daddr; u32 user; u32 vif; __be16 id; u16 protocol; }; struct frag_v6_compare_key { struct in6_addr saddr; struct in6_addr daddr; u32 user; __be32 id; u32 iif; }; /** * struct inet_frag_queue - fragment queue * * @node: rhash node * @key: keys identifying this frag. * @timer: queue expiration timer * @lock: spinlock protecting this frag * @refcnt: reference count of the queue * @rb_fragments: received fragments rb-tree root * @fragments_tail: received fragments tail * @last_run_head: the head of the last "run".
see ip_fragment.c * @stamp: timestamp of the last received fragment * @len: total length of the original datagram * @meat: length of received fragments so far * @tstamp_type: stamp has a mono delivery time (EDT) * @flags: fragment queue flags * @max_size: maximum received fragment size * @fqdir: pointer to struct fqdir * @rcu: rcu head for freeing deferall */ struct inet_frag_queue { struct rhash_head node; union { struct frag_v4_compare_key v4; struct frag_v6_compare_key v6; } key; struct timer_list timer; spinlock_t lock; refcount_t refcnt; struct rb_root rb_fragments; struct sk_buff *fragments_tail; struct sk_buff *last_run_head; ktime_t stamp; int len; int meat; u8 tstamp_type; __u8 flags; u16 max_size; struct fqdir *fqdir; struct rcu_head rcu; }; struct inet_frags { unsigned int qsize; void (*constructor)(struct inet_frag_queue *q, const void *arg); void (*destructor)(struct inet_frag_queue *); void (*frag_expire)(struct timer_list *t); struct kmem_cache *frags_cachep; const char *frags_cache_name; struct rhashtable_params rhash_params; refcount_t refcnt; struct completion completion; }; int inet_frags_init(struct inet_frags *); void inet_frags_fini(struct inet_frags *); int fqdir_init(struct fqdir **fqdirp, struct inet_frags *f, struct net *net); static inline void fqdir_pre_exit(struct fqdir *fqdir) { /* Prevent creation of new frags. * Pairs with READ_ONCE() in inet_frag_find(). */ WRITE_ONCE(fqdir->high_thresh, 0); /* Pairs with READ_ONCE() in inet_frag_kill(), ip_expire() * and ip6frag_expire_frag_queue(). */ WRITE_ONCE(fqdir->dead, true); } void fqdir_exit(struct fqdir *fqdir); void inet_frag_kill(struct inet_frag_queue *q, int *refs); void inet_frag_destroy(struct inet_frag_queue *q); struct inet_frag_queue *inet_frag_find(struct fqdir *fqdir, void *key); /* Free all skbs in the queue; return the sum of their truesizes. */ unsigned int inet_frag_rbtree_purge(struct rb_root *root, enum skb_drop_reason reason); static inline void inet_frag_putn(struct inet_frag_queue *q, int refs) { if (refs && refcount_sub_and_test(refs, &q->refcnt)) inet_frag_destroy(q); } /* Memory Tracking Functions. */ static inline long frag_mem_limit(const struct fqdir *fqdir) { return atomic_long_read(&fqdir->mem); } static inline void sub_frag_mem_limit(struct fqdir *fqdir, long val) { atomic_long_sub(val, &fqdir->mem); } static inline void add_frag_mem_limit(struct fqdir *fqdir, long val) { atomic_long_add(val, &fqdir->mem); } /* RFC 3168 support : * We want to check ECN values of all fragments, do detect invalid combinations. * In ipq->ecn, we store the OR value of each ip4_frag_ecn() fragment value. */ #define IPFRAG_ECN_NOT_ECT 0x01 /* one frag had ECN_NOT_ECT */ #define IPFRAG_ECN_ECT_1 0x02 /* one frag had ECN_ECT_1 */ #define IPFRAG_ECN_ECT_0 0x04 /* one frag had ECN_ECT_0 */ #define IPFRAG_ECN_CE 0x08 /* one frag had ECN_CE */ extern const u8 ip_frag_ecn_table[16]; /* Return values of inet_frag_queue_insert() */ #define IPFRAG_OK 0 #define IPFRAG_DUP 1 #define IPFRAG_OVERLAP 2 int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb, int offset, int end); void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb, struct sk_buff *parent); void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head, void *reasm_data, bool try_coalesce); struct sk_buff *inet_frag_pull_head(struct inet_frag_queue *q); #endif |
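To put the declarations above in context, the sketch below shows how a reassembly implementation typically ties these pieces together: a protocol-wide struct inet_frags registered with inet_frags_init(), a per-namespace fqdir set up with fqdir_init() plus threshold/timeout defaults, and the frag_mem_limit()/add_frag_mem_limit() helpers charging every queued fragment against the per-netns limit. All my_* identifiers are hypothetical placeholders for a real protocol (e.g. IPv4 or IPv6 reassembly); the rhash_params setup and the expire handling are deliberately elided.

/* Illustrative sketch (not declared in the header above); my_* names
 * are hypothetical placeholders for a real reassembly protocol.
 */
static struct inet_frags my_frags;

static void my_frag_expire(struct timer_list *t)
{
	/* A real handler would recover the queue from the timer and
	 * call inet_frag_kill() / inet_frag_putn() on it.
	 */
}

static int __init my_frag_module_init(void)
{
	my_frags.constructor	  = NULL;	/* optional per-queue setup */
	my_frags.destructor	  = NULL;
	my_frags.frag_expire	  = my_frag_expire;
	my_frags.frags_cache_name = "my_frag_queues";
	my_frags.qsize		  = sizeof(struct inet_frag_queue);
	/* my_frags.rhash_params must describe the compare key,
	 * e.g. struct frag_v4_compare_key, before this call.
	 */
	return inet_frags_init(&my_frags);
}

static int my_frag_ns_init(struct net *net, struct fqdir **fqdirp)
{
	int err;

	err = fqdir_init(fqdirp, &my_frags, net);
	if (err)
		return err;

	/* per-netns sysctl defaults (values are illustrative) */
	(*fqdirp)->high_thresh = 4 * 1024 * 1024;	/* start dropping here */
	(*fqdirp)->low_thresh  = 3 * 1024 * 1024;
	(*fqdirp)->timeout     = 30 * HZ;
	return 0;
}

static void my_frag_account(struct inet_frag_queue *q, struct sk_buff *skb)
{
	/* every queued fragment is charged against the per-netns limit */
	add_frag_mem_limit(q->fqdir, skb->truesize);
	if (frag_mem_limit(q->fqdir) > READ_ONCE(q->fqdir->high_thresh)) {
		/* over the limit: a real implementation would evict or drop */
	}
}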
// SPDX-License-Identifier: GPL-2.0 /* * drivers/base/core.c - core driver model code (device registration, etc) * * Copyright (c) 2002-3 Patrick Mochel * Copyright (c) 2002-3 Open Source Development Labs * Copyright (c) 2006 Greg Kroah-Hartman <gregkh@suse.de> * Copyright (c) 2006 Novell, Inc. */ #include <linux/acpi.h> #include <linux/blkdev.h> #include <linux/cleanup.h> #include <linux/cpufreq.h> #include <linux/device.h> #include <linux/dma-map-ops.h> /* for dma_default_coherent */ #include <linux/err.h> #include <linux/fwnode.h> #include <linux/init.h> #include <linux/kdev_t.h> #include <linux/kstrtox.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/netdevice.h> #include <linux/notifier.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/pm_runtime.h> #include <linux/sched/mm.h> #include <linux/sched/signal.h> #include <linux/slab.h> #include <linux/string_helpers.h> #include <linux/swiotlb.h> #include <linux/sysfs.h> #include "base.h" #include "physical_location.h" #include "power/power.h" /* Device links support.
*/ static LIST_HEAD(deferred_sync); static unsigned int defer_sync_state_count = 1; static DEFINE_MUTEX(fwnode_link_lock); static bool fw_devlink_is_permissive(void); static void __fw_devlink_link_to_consumers(struct device *dev); static bool fw_devlink_drv_reg_done; static bool fw_devlink_best_effort; static struct workqueue_struct *device_link_wq; /** * __fwnode_link_add - Create a link between two fwnode_handles. * @con: Consumer end of the link. * @sup: Supplier end of the link. * @flags: Link flags. * * Create a fwnode link between fwnode handles @con and @sup. The fwnode link * represents the detail that the firmware lists @sup fwnode as supplying a * resource to @con. * * The driver core will use the fwnode link to create a device link between the * two device objects corresponding to @con and @sup when they are created. The * driver core will automatically delete the fwnode link between @con and @sup * after doing that. * * Attempts to create duplicate links between the same pair of fwnode handles * are ignored and there is no reference counting. */ static int __fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup, u8 flags) { struct fwnode_link *link; list_for_each_entry(link, &sup->consumers, s_hook) if (link->consumer == con) { link->flags |= flags; return 0; } link = kzalloc(sizeof(*link), GFP_KERNEL); if (!link) return -ENOMEM; link->supplier = sup; INIT_LIST_HEAD(&link->s_hook); link->consumer = con; INIT_LIST_HEAD(&link->c_hook); link->flags = flags; list_add(&link->s_hook, &sup->consumers); list_add(&link->c_hook, &con->suppliers); pr_debug("%pfwf Linked as a fwnode consumer to %pfwf\n", con, sup); return 0; } int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup, u8 flags) { guard(mutex)(&fwnode_link_lock); return __fwnode_link_add(con, sup, flags); } /** * __fwnode_link_del - Delete a link between two fwnode_handles. * @link: the fwnode_link to be deleted * * The fwnode_link_lock needs to be held when this function is called. */ static void __fwnode_link_del(struct fwnode_link *link) { pr_debug("%pfwf Dropping the fwnode link to %pfwf\n", link->consumer, link->supplier); list_del(&link->s_hook); list_del(&link->c_hook); kfree(link); } /** * __fwnode_link_cycle - Mark a fwnode link as being part of a cycle. * @link: the fwnode_link to be marked * * The fwnode_link_lock needs to be held when this function is called. */ static void __fwnode_link_cycle(struct fwnode_link *link) { pr_debug("%pfwf: cycle: depends on %pfwf\n", link->consumer, link->supplier); link->flags |= FWLINK_FLAG_CYCLE; } /** * fwnode_links_purge_suppliers - Delete all supplier links of fwnode_handle. * @fwnode: fwnode whose supplier links need to be deleted * * Deletes all supplier links connecting directly to @fwnode. */ static void fwnode_links_purge_suppliers(struct fwnode_handle *fwnode) { struct fwnode_link *link, *tmp; guard(mutex)(&fwnode_link_lock); list_for_each_entry_safe(link, tmp, &fwnode->suppliers, c_hook) __fwnode_link_del(link); } /** * fwnode_links_purge_consumers - Delete all consumer links of fwnode_handle. * @fwnode: fwnode whose consumer links need to be deleted * * Deletes all consumer links connecting directly to @fwnode. */ static void fwnode_links_purge_consumers(struct fwnode_handle *fwnode) { struct fwnode_link *link, *tmp; guard(mutex)(&fwnode_link_lock); list_for_each_entry_safe(link, tmp, &fwnode->consumers, s_hook) __fwnode_link_del(link); } /** * fwnode_links_purge - Delete all links connected to a fwnode_handle. 
* @fwnode: fwnode whose links needs to be deleted * * Deletes all links connecting directly to a fwnode. */ void fwnode_links_purge(struct fwnode_handle *fwnode) { fwnode_links_purge_suppliers(fwnode); fwnode_links_purge_consumers(fwnode); } void fw_devlink_purge_absent_suppliers(struct fwnode_handle *fwnode) { struct fwnode_handle *child; /* Don't purge consumer links of an added child */ if (fwnode->dev) return; fwnode->flags |= FWNODE_FLAG_NOT_DEVICE; fwnode_links_purge_consumers(fwnode); fwnode_for_each_available_child_node(fwnode, child) fw_devlink_purge_absent_suppliers(child); } EXPORT_SYMBOL_GPL(fw_devlink_purge_absent_suppliers); /** * __fwnode_links_move_consumers - Move consumer from @from to @to fwnode_handle * @from: move consumers away from this fwnode * @to: move consumers to this fwnode * * Move all consumer links from @from fwnode to @to fwnode. */ static void __fwnode_links_move_consumers(struct fwnode_handle *from, struct fwnode_handle *to) { struct fwnode_link *link, *tmp; list_for_each_entry_safe(link, tmp, &from->consumers, s_hook) { __fwnode_link_add(link->consumer, to, link->flags); __fwnode_link_del(link); } } /** * __fw_devlink_pickup_dangling_consumers - Pick up dangling consumers * @fwnode: fwnode from which to pick up dangling consumers * @new_sup: fwnode of new supplier * * If the @fwnode has a corresponding struct device and the device supports * probing (that is, added to a bus), then we want to let fw_devlink create * MANAGED device links to this device, so leave @fwnode and its descendant's * fwnode links alone. * * Otherwise, move its consumers to the new supplier @new_sup. */ static void __fw_devlink_pickup_dangling_consumers(struct fwnode_handle *fwnode, struct fwnode_handle *new_sup) { struct fwnode_handle *child; if (fwnode->dev && fwnode->dev->bus) return; fwnode->flags |= FWNODE_FLAG_NOT_DEVICE; __fwnode_links_move_consumers(fwnode, new_sup); fwnode_for_each_available_child_node(fwnode, child) __fw_devlink_pickup_dangling_consumers(child, new_sup); } static DEFINE_MUTEX(device_links_lock); DEFINE_STATIC_SRCU(device_links_srcu); static inline void device_links_write_lock(void) { mutex_lock(&device_links_lock); } static inline void device_links_write_unlock(void) { mutex_unlock(&device_links_lock); } int device_links_read_lock(void) __acquires(&device_links_srcu) { return srcu_read_lock(&device_links_srcu); } void device_links_read_unlock(int idx) __releases(&device_links_srcu) { srcu_read_unlock(&device_links_srcu, idx); } int device_links_read_lock_held(void) { return srcu_read_lock_held(&device_links_srcu); } static void device_link_synchronize_removal(void) { synchronize_srcu(&device_links_srcu); } static void device_link_remove_from_lists(struct device_link *link) { list_del_rcu(&link->s_node); list_del_rcu(&link->c_node); } static bool device_is_ancestor(struct device *dev, struct device *target) { while (target->parent) { target = target->parent; if (dev == target) return true; } return false; } #define DL_MARKER_FLAGS (DL_FLAG_INFERRED | \ DL_FLAG_CYCLE | \ DL_FLAG_MANAGED) static inline bool device_link_flag_is_sync_state_only(u32 flags) { return (flags & ~DL_MARKER_FLAGS) == DL_FLAG_SYNC_STATE_ONLY; } /** * device_is_dependent - Check if one device depends on another one * @dev: Device to check dependencies for. * @target: Device to check against. * * Check if @target depends on @dev or any device dependent on it (its child or * its consumer etc). Return 1 if that is the case or 0 otherwise. 
*/ static int device_is_dependent(struct device *dev, void *target) { struct device_link *link; int ret; /* * The "ancestors" check is needed to catch the case when the target * device has not been completely initialized yet and it is still * missing from the list of children of its parent device. */ if (dev == target || device_is_ancestor(dev, target)) return 1; ret = device_for_each_child(dev, target, device_is_dependent); if (ret) return ret; list_for_each_entry(link, &dev->links.consumers, s_node) { if (device_link_flag_is_sync_state_only(link->flags)) continue; if (link->consumer == target) return 1; ret = device_is_dependent(link->consumer, target); if (ret) break; } return ret; } static void device_link_init_status(struct device_link *link, struct device *consumer, struct device *supplier) { switch (supplier->links.status) { case DL_DEV_PROBING: switch (consumer->links.status) { case DL_DEV_PROBING: /* * A consumer driver can create a link to a supplier * that has not completed its probing yet as long as it * knows that the supplier is already functional (for * example, it has just acquired some resources from the * supplier). */ link->status = DL_STATE_CONSUMER_PROBE; break; default: link->status = DL_STATE_DORMANT; break; } break; case DL_DEV_DRIVER_BOUND: switch (consumer->links.status) { case DL_DEV_PROBING: link->status = DL_STATE_CONSUMER_PROBE; break; case DL_DEV_DRIVER_BOUND: link->status = DL_STATE_ACTIVE; break; default: link->status = DL_STATE_AVAILABLE; break; } break; case DL_DEV_UNBINDING: link->status = DL_STATE_SUPPLIER_UNBIND; break; default: link->status = DL_STATE_DORMANT; break; } } static int device_reorder_to_tail(struct device *dev, void *not_used) { struct device_link *link; /* * Devices that have not been registered yet will be put to the ends * of the lists during the registration, so skip them here. */ if (device_is_registered(dev)) devices_kset_move_last(dev); if (device_pm_initialized(dev)) device_pm_move_last(dev); device_for_each_child(dev, NULL, device_reorder_to_tail); list_for_each_entry(link, &dev->links.consumers, s_node) { if (device_link_flag_is_sync_state_only(link->flags)) continue; device_reorder_to_tail(link->consumer, NULL); } return 0; } /** * device_pm_move_to_tail - Move set of devices to the end of device lists * @dev: Device to move * * This is a device_reorder_to_tail() wrapper taking the requisite locks. * * It moves the @dev along with all of its children and all of its consumers * to the ends of the device_kset and dpm_list, recursively. 
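 *
 * Hedged usage sketch (the caller and the "consumer" name are hypothetical):
 * code that learns about a new dependency on @dev outside of
 * device_link_add() could call
 *
 *	device_pm_move_to_tail(consumer);
 *
 * so that the consumer is suspended before, and resumed after, its supplier.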
*/ void device_pm_move_to_tail(struct device *dev) { int idx; idx = device_links_read_lock(); device_pm_lock(); device_reorder_to_tail(dev, NULL); device_pm_unlock(); device_links_read_unlock(idx); } #define to_devlink(dev) container_of((dev), struct device_link, link_dev) static ssize_t status_show(struct device *dev, struct device_attribute *attr, char *buf) { const char *output; switch (to_devlink(dev)->status) { case DL_STATE_NONE: output = "not tracked"; break; case DL_STATE_DORMANT: output = "dormant"; break; case DL_STATE_AVAILABLE: output = "available"; break; case DL_STATE_CONSUMER_PROBE: output = "consumer probing"; break; case DL_STATE_ACTIVE: output = "active"; break; case DL_STATE_SUPPLIER_UNBIND: output = "supplier unbinding"; break; default: output = "unknown"; break; } return sysfs_emit(buf, "%s\n", output); } static DEVICE_ATTR_RO(status); static ssize_t auto_remove_on_show(struct device *dev, struct device_attribute *attr, char *buf) { struct device_link *link = to_devlink(dev); const char *output; if (link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER) output = "supplier unbind"; else if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) output = "consumer unbind"; else output = "never"; return sysfs_emit(buf, "%s\n", output); } static DEVICE_ATTR_RO(auto_remove_on); static ssize_t runtime_pm_show(struct device *dev, struct device_attribute *attr, char *buf) { struct device_link *link = to_devlink(dev); return sysfs_emit(buf, "%d\n", !!(link->flags & DL_FLAG_PM_RUNTIME)); } static DEVICE_ATTR_RO(runtime_pm); static ssize_t sync_state_only_show(struct device *dev, struct device_attribute *attr, char *buf) { struct device_link *link = to_devlink(dev); return sysfs_emit(buf, "%d\n", !!(link->flags & DL_FLAG_SYNC_STATE_ONLY)); } static DEVICE_ATTR_RO(sync_state_only); static struct attribute *devlink_attrs[] = { &dev_attr_status.attr, &dev_attr_auto_remove_on.attr, &dev_attr_runtime_pm.attr, &dev_attr_sync_state_only.attr, NULL, }; ATTRIBUTE_GROUPS(devlink); static void device_link_release_fn(struct work_struct *work) { struct device_link *link = container_of(work, struct device_link, rm_work); /* Ensure that all references to the link object have been dropped. */ device_link_synchronize_removal(); pm_runtime_release_supplier(link); /* * If supplier_preactivated is set, the link has been dropped between * the pm_runtime_get_suppliers() and pm_runtime_put_suppliers() calls * in __driver_probe_device(). In that case, drop the supplier's * PM-runtime usage counter to remove the reference taken by * pm_runtime_get_suppliers(). */ if (link->supplier_preactivated) pm_runtime_put_noidle(link->supplier); pm_request_idle(link->supplier); put_device(link->consumer); put_device(link->supplier); kfree(link); } static void devlink_dev_release(struct device *dev) { struct device_link *link = to_devlink(dev); INIT_WORK(&link->rm_work, device_link_release_fn); /* * It may take a while to complete this work because of the SRCU * synchronization in device_link_release_fn() and if the consumer or * supplier devices get deleted when it runs, so put it into the * dedicated workqueue. */ queue_work(device_link_wq, &link->rm_work); } /** * device_link_wait_removal - Wait for ongoing devlink removal jobs to terminate */ void device_link_wait_removal(void) { /* * devlink removal jobs are queued in the dedicated work queue. * To be sure that all removal jobs are terminated, ensure that any * scheduled work has run to completion. 
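 *
 * Hedged usage sketch (the free_my_fwnode_data() helper below is
 * hypothetical): a caller that intends to free fwnode-backed data after
 * deleting devices would do
 *
 *	device_del(dev);
 *	device_link_wait_removal();
 *	free_my_fwnode_data();
 *
 * so that no queued link-release work still references that data.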
*/ flush_workqueue(device_link_wq); } EXPORT_SYMBOL_GPL(device_link_wait_removal); static const struct class devlink_class = { .name = "devlink", .dev_groups = devlink_groups, .dev_release = devlink_dev_release, }; static int devlink_add_symlinks(struct device *dev) { char *buf_con __free(kfree) = NULL, *buf_sup __free(kfree) = NULL; int ret; struct device_link *link = to_devlink(dev); struct device *sup = link->supplier; struct device *con = link->consumer; ret = sysfs_create_link(&link->link_dev.kobj, &sup->kobj, "supplier"); if (ret) goto out; ret = sysfs_create_link(&link->link_dev.kobj, &con->kobj, "consumer"); if (ret) goto err_con; buf_con = kasprintf(GFP_KERNEL, "consumer:%s:%s", dev_bus_name(con), dev_name(con)); if (!buf_con) { ret = -ENOMEM; goto err_con_dev; } ret = sysfs_create_link(&sup->kobj, &link->link_dev.kobj, buf_con); if (ret) goto err_con_dev; buf_sup = kasprintf(GFP_KERNEL, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup)); if (!buf_sup) { ret = -ENOMEM; goto err_sup_dev; } ret = sysfs_create_link(&con->kobj, &link->link_dev.kobj, buf_sup); if (ret) goto err_sup_dev; goto out; err_sup_dev: sysfs_remove_link(&sup->kobj, buf_con); err_con_dev: sysfs_remove_link(&link->link_dev.kobj, "consumer"); err_con: sysfs_remove_link(&link->link_dev.kobj, "supplier"); out: return ret; } static void devlink_remove_symlinks(struct device *dev) { char *buf_con __free(kfree) = NULL, *buf_sup __free(kfree) = NULL; struct device_link *link = to_devlink(dev); struct device *sup = link->supplier; struct device *con = link->consumer; sysfs_remove_link(&link->link_dev.kobj, "consumer"); sysfs_remove_link(&link->link_dev.kobj, "supplier"); if (device_is_registered(con)) { buf_sup = kasprintf(GFP_KERNEL, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup)); if (!buf_sup) goto out; sysfs_remove_link(&con->kobj, buf_sup); } buf_con = kasprintf(GFP_KERNEL, "consumer:%s:%s", dev_bus_name(con), dev_name(con)); if (!buf_con) goto out; sysfs_remove_link(&sup->kobj, buf_con); return; out: WARN(1, "Unable to properly free device link symlinks!\n"); } static struct class_interface devlink_class_intf = { .class = &devlink_class, .add_dev = devlink_add_symlinks, .remove_dev = devlink_remove_symlinks, }; static int __init devlink_class_init(void) { int ret; ret = class_register(&devlink_class); if (ret) return ret; ret = class_interface_register(&devlink_class_intf); if (ret) class_unregister(&devlink_class); return ret; } postcore_initcall(devlink_class_init); #define DL_MANAGED_LINK_FLAGS (DL_FLAG_AUTOREMOVE_CONSUMER | \ DL_FLAG_AUTOREMOVE_SUPPLIER | \ DL_FLAG_AUTOPROBE_CONSUMER | \ DL_FLAG_SYNC_STATE_ONLY | \ DL_FLAG_INFERRED | \ DL_FLAG_CYCLE) #define DL_ADD_VALID_FLAGS (DL_MANAGED_LINK_FLAGS | DL_FLAG_STATELESS | \ DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE) /** * device_link_add - Create a link between two devices. * @consumer: Consumer end of the link. * @supplier: Supplier end of the link. * @flags: Link flags. * * Return: On success, a device_link struct will be returned. * On error or invalid flag settings, NULL will be returned. * * The caller is responsible for the proper synchronization of the link creation * with runtime PM. First, setting the DL_FLAG_PM_RUNTIME flag will cause the * runtime PM framework to take the link into account. Second, if the * DL_FLAG_RPM_ACTIVE flag is set in addition to it, the supplier devices will * be forced into the active meta state and reference-counted upon the creation * of the link. If DL_FLAG_PM_RUNTIME is not set, DL_FLAG_RPM_ACTIVE will be * ignored. 
* * If DL_FLAG_STATELESS is set in @flags, the caller of this function is * expected to release the link returned by it directly with the help of either * device_link_del() or device_link_remove(). * * If that flag is not set, however, the caller of this function is handing the * management of the link over to the driver core entirely and its return value * can only be used to check whether or not the link is present. In that case, * the DL_FLAG_AUTOREMOVE_CONSUMER and DL_FLAG_AUTOREMOVE_SUPPLIER device link * flags can be used to indicate to the driver core when the link can be safely * deleted. Namely, setting one of them in @flags indicates to the driver core * that the link is not going to be used (by the given caller of this function) * after unbinding the consumer or supplier driver, respectively, from its * device, so the link can be deleted at that point. If none of them is set, * the link will be maintained until one of the devices pointed to by it (either * the consumer or the supplier) is unregistered. * * Also, if DL_FLAG_STATELESS, DL_FLAG_AUTOREMOVE_CONSUMER and * DL_FLAG_AUTOREMOVE_SUPPLIER are not set in @flags (that is, a persistent * managed device link is being added), the DL_FLAG_AUTOPROBE_CONSUMER flag can * be used to request the driver core to automatically probe for a consumer * driver after successfully binding a driver to the supplier device. * * The combination of DL_FLAG_STATELESS and one of DL_FLAG_AUTOREMOVE_CONSUMER, * DL_FLAG_AUTOREMOVE_SUPPLIER, or DL_FLAG_AUTOPROBE_CONSUMER set in @flags at * the same time is invalid and will cause NULL to be returned upfront. * However, if a device link between the given @consumer and @supplier pair * exists already when this function is called for them, the existing link will * be returned regardless of its current type and status (the link's flags may * be modified then). The caller of this function is then expected to treat * the link as though it has just been created, so (in particular) if * DL_FLAG_STATELESS was passed in @flags, the link needs to be released * explicitly when not needed any more (as stated above). * * A side effect of the link creation is re-ordering of dpm_list and the * devices_kset list by moving the consumer device and all devices depending * on it to the ends of these lists (that does not happen to devices that have * not been registered when this function is called). * * The supplier device is required to be registered when this function is called * and NULL will be returned if that is not the case. The consumer device need * not be registered, however. */ struct device_link *device_link_add(struct device *consumer, struct device *supplier, u32 flags) { struct device_link *link; if (!consumer || !supplier || consumer == supplier || flags & ~DL_ADD_VALID_FLAGS || (flags & DL_FLAG_STATELESS && flags & DL_MANAGED_LINK_FLAGS) || (flags & DL_FLAG_AUTOPROBE_CONSUMER && flags & (DL_FLAG_AUTOREMOVE_CONSUMER | DL_FLAG_AUTOREMOVE_SUPPLIER))) return NULL; if (flags & DL_FLAG_PM_RUNTIME && flags & DL_FLAG_RPM_ACTIVE) { if (pm_runtime_get_sync(supplier) < 0) { pm_runtime_put_noidle(supplier); return NULL; } } if (!(flags & DL_FLAG_STATELESS)) flags |= DL_FLAG_MANAGED; if (flags & DL_FLAG_SYNC_STATE_ONLY && !device_link_flag_is_sync_state_only(flags)) return NULL; device_links_write_lock(); device_pm_lock(); /* * If the supplier has not been fully registered yet or there is a * reverse (non-SYNC_STATE_ONLY) dependency between the consumer and * the supplier already in the graph, return NULL. 
If the link is a * SYNC_STATE_ONLY link, we don't check for reverse dependencies * because it only affects sync_state() callbacks. */ if (!device_pm_initialized(supplier) || (!(flags & DL_FLAG_SYNC_STATE_ONLY) && device_is_dependent(consumer, supplier))) { link = NULL; goto out; } /* * SYNC_STATE_ONLY links are useless once a consumer device has probed. * So, only create it if the consumer hasn't probed yet. */ if (flags & DL_FLAG_SYNC_STATE_ONLY && consumer->links.status != DL_DEV_NO_DRIVER && consumer->links.status != DL_DEV_PROBING) { link = NULL; goto out; } /* * DL_FLAG_AUTOREMOVE_SUPPLIER indicates that the link will be needed * longer than for DL_FLAG_AUTOREMOVE_CONSUMER and setting them both * together doesn't make sense, so prefer DL_FLAG_AUTOREMOVE_SUPPLIER. */ if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER) flags &= ~DL_FLAG_AUTOREMOVE_CONSUMER; list_for_each_entry(link, &supplier->links.consumers, s_node) { if (link->consumer != consumer) continue; if (link->flags & DL_FLAG_INFERRED && !(flags & DL_FLAG_INFERRED)) link->flags &= ~DL_FLAG_INFERRED; if (flags & DL_FLAG_PM_RUNTIME) { if (!(link->flags & DL_FLAG_PM_RUNTIME)) { pm_runtime_new_link(consumer); link->flags |= DL_FLAG_PM_RUNTIME; } if (flags & DL_FLAG_RPM_ACTIVE) refcount_inc(&link->rpm_active); } if (flags & DL_FLAG_STATELESS) { kref_get(&link->kref); if (link->flags & DL_FLAG_SYNC_STATE_ONLY && !(link->flags & DL_FLAG_STATELESS)) { link->flags |= DL_FLAG_STATELESS; goto reorder; } else { link->flags |= DL_FLAG_STATELESS; goto out; } } /* * If the life time of the link following from the new flags is * longer than indicated by the flags of the existing link, * update the existing link to stay around longer. */ if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER) { if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) { link->flags &= ~DL_FLAG_AUTOREMOVE_CONSUMER; link->flags |= DL_FLAG_AUTOREMOVE_SUPPLIER; } } else if (!(flags & DL_FLAG_AUTOREMOVE_CONSUMER)) { link->flags &= ~(DL_FLAG_AUTOREMOVE_CONSUMER | DL_FLAG_AUTOREMOVE_SUPPLIER); } if (!(link->flags & DL_FLAG_MANAGED)) { kref_get(&link->kref); link->flags |= DL_FLAG_MANAGED; device_link_init_status(link, consumer, supplier); } if (link->flags & DL_FLAG_SYNC_STATE_ONLY && !(flags & DL_FLAG_SYNC_STATE_ONLY)) { link->flags &= ~DL_FLAG_SYNC_STATE_ONLY; goto reorder; } goto out; } link = kzalloc(sizeof(*link), GFP_KERNEL); if (!link) goto out; refcount_set(&link->rpm_active, 1); get_device(supplier); link->supplier = supplier; INIT_LIST_HEAD(&link->s_node); get_device(consumer); link->consumer = consumer; INIT_LIST_HEAD(&link->c_node); link->flags = flags; kref_init(&link->kref); link->link_dev.class = &devlink_class; device_set_pm_not_required(&link->link_dev); dev_set_name(&link->link_dev, "%s:%s--%s:%s", dev_bus_name(supplier), dev_name(supplier), dev_bus_name(consumer), dev_name(consumer)); if (device_register(&link->link_dev)) { put_device(&link->link_dev); link = NULL; goto out; } if (flags & DL_FLAG_PM_RUNTIME) { if (flags & DL_FLAG_RPM_ACTIVE) refcount_inc(&link->rpm_active); pm_runtime_new_link(consumer); } /* Determine the initial link state. */ if (flags & DL_FLAG_STATELESS) link->status = DL_STATE_NONE; else device_link_init_status(link, consumer, supplier); /* * Some callers expect the link creation during consumer driver probe to * resume the supplier even without DL_FLAG_RPM_ACTIVE. 
*/ if (link->status == DL_STATE_CONSUMER_PROBE && flags & DL_FLAG_PM_RUNTIME) pm_runtime_resume(supplier); list_add_tail_rcu(&link->s_node, &supplier->links.consumers); list_add_tail_rcu(&link->c_node, &consumer->links.suppliers); if (flags & DL_FLAG_SYNC_STATE_ONLY) { dev_dbg(consumer, "Linked as a sync state only consumer to %s\n", dev_name(supplier)); goto out; } reorder: /* * Move the consumer and all of the devices depending on it to the end * of dpm_list and the devices_kset list. * * It is necessary to hold dpm_list locked throughout all that or else * we may end up suspending with a wrong ordering of it. */ device_reorder_to_tail(consumer, NULL); dev_dbg(consumer, "Linked as a consumer to %s\n", dev_name(supplier)); out: device_pm_unlock(); device_links_write_unlock(); if ((flags & DL_FLAG_PM_RUNTIME && flags & DL_FLAG_RPM_ACTIVE) && !link) pm_runtime_put(supplier); return link; } EXPORT_SYMBOL_GPL(device_link_add); static void __device_link_del(struct kref *kref) { struct device_link *link = container_of(kref, struct device_link, kref); dev_dbg(link->consumer, "Dropping the link to %s\n", dev_name(link->supplier)); pm_runtime_drop_link(link); device_link_remove_from_lists(link); device_unregister(&link->link_dev); } static void device_link_put_kref(struct device_link *link) { if (link->flags & DL_FLAG_STATELESS) kref_put(&link->kref, __device_link_del); else if (!device_is_registered(link->consumer)) __device_link_del(&link->kref); else WARN(1, "Unable to drop a managed device link reference\n"); } /** * device_link_del - Delete a stateless link between two devices. * @link: Device link to delete. * * The caller must ensure proper synchronization of this function with runtime * PM. If the link was added multiple times, it needs to be deleted as often. * Care is required for hotplugged devices: Their links are purged on removal * and calling device_link_del() is then no longer allowed. */ void device_link_del(struct device_link *link) { device_links_write_lock(); device_link_put_kref(link); device_links_write_unlock(); } EXPORT_SYMBOL_GPL(device_link_del); /** * device_link_remove - Delete a stateless link between two devices. * @consumer: Consumer end of the link. * @supplier: Supplier end of the link. * * The caller must ensure proper synchronization of this function with runtime * PM. 
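 *
 * Minimal sketch of the stateless pairing this is meant for (device pointers
 * and error handling are illustrative only):
 *
 *	if (!device_link_add(consumer, supplier, DL_FLAG_STATELESS))
 *		return -ENODEV;
 *	...
 *	device_link_remove(consumer, supplier);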
*/ void device_link_remove(void *consumer, struct device *supplier) { struct device_link *link; if (WARN_ON(consumer == supplier)) return; device_links_write_lock(); list_for_each_entry(link, &supplier->links.consumers, s_node) { if (link->consumer == consumer) { device_link_put_kref(link); break; } } device_links_write_unlock(); } EXPORT_SYMBOL_GPL(device_link_remove); static void device_links_missing_supplier(struct device *dev) { struct device_link *link; list_for_each_entry(link, &dev->links.suppliers, c_node) { if (link->status != DL_STATE_CONSUMER_PROBE) continue; if (link->supplier->links.status == DL_DEV_DRIVER_BOUND) { WRITE_ONCE(link->status, DL_STATE_AVAILABLE); } else { WARN_ON(!(link->flags & DL_FLAG_SYNC_STATE_ONLY)); WRITE_ONCE(link->status, DL_STATE_DORMANT); } } } static bool dev_is_best_effort(struct device *dev) { return (fw_devlink_best_effort && dev->can_match) || (dev->fwnode && (dev->fwnode->flags & FWNODE_FLAG_BEST_EFFORT)); } static struct fwnode_handle *fwnode_links_check_suppliers( struct fwnode_handle *fwnode) { struct fwnode_link *link; if (!fwnode || fw_devlink_is_permissive()) return NULL; list_for_each_entry(link, &fwnode->suppliers, c_hook) if (!(link->flags & (FWLINK_FLAG_CYCLE | FWLINK_FLAG_IGNORE))) return link->supplier; return NULL; } /** * device_links_check_suppliers - Check presence of supplier drivers. * @dev: Consumer device. * * Check links from this device to any suppliers. Walk the list of the device's * links to suppliers and see if all of them are available. If not, simply * return -EPROBE_DEFER. * * We need to guarantee that the supplier will not go away after the check has * been positive here. It only can go away in __device_release_driver() and * that function checks the device's links to consumers. This means we need to * mark the link as "consumer probe in progress" to make the supplier removal * wait for us to complete (or bad things may happen). * * Links without the DL_FLAG_MANAGED flag set are ignored. */ int device_links_check_suppliers(struct device *dev) { struct device_link *link; int ret = 0, fwnode_ret = 0; struct fwnode_handle *sup_fw; /* * Device waiting for supplier to become available is not allowed to * probe. */ scoped_guard(mutex, &fwnode_link_lock) { sup_fw = fwnode_links_check_suppliers(dev->fwnode); if (sup_fw) { if (dev_is_best_effort(dev)) fwnode_ret = -EAGAIN; else return dev_err_probe(dev, -EPROBE_DEFER, "wait for supplier %pfwf\n", sup_fw); } } device_links_write_lock(); list_for_each_entry(link, &dev->links.suppliers, c_node) { if (!(link->flags & DL_FLAG_MANAGED)) continue; if (link->status != DL_STATE_AVAILABLE && !(link->flags & DL_FLAG_SYNC_STATE_ONLY)) { if (dev_is_best_effort(dev) && link->flags & DL_FLAG_INFERRED && !link->supplier->can_match) { ret = -EAGAIN; continue; } device_links_missing_supplier(dev); ret = dev_err_probe(dev, -EPROBE_DEFER, "supplier %s not ready\n", dev_name(link->supplier)); break; } WRITE_ONCE(link->status, DL_STATE_CONSUMER_PROBE); } dev->links.status = DL_DEV_PROBING; device_links_write_unlock(); return ret ? ret : fwnode_ret; } /** * __device_links_queue_sync_state - Queue a device for sync_state() callback * @dev: Device to call sync_state() on * @list: List head to queue the @dev on * * Queues a device for a sync_state() callback when the device links write lock * isn't held. This allows the sync_state() execution flow to use device links * APIs. The caller must ensure this function is called with * device_links_write_lock() held. 
* * This function does a get_device() to make sure the device is not freed while * on this list. * * So the caller must also ensure that device_links_flush_sync_list() is called * as soon as the caller releases device_links_write_lock(). This is necessary * to make sure the sync_state() is called in a timely fashion and the * put_device() is called on this device. */ static void __device_links_queue_sync_state(struct device *dev, struct list_head *list) { struct device_link *link; if (!dev_has_sync_state(dev)) return; if (dev->state_synced) return; list_for_each_entry(link, &dev->links.consumers, s_node) { if (!(link->flags & DL_FLAG_MANAGED)) continue; if (link->status != DL_STATE_ACTIVE) return; } /* * Set the flag here to avoid adding the same device to a list more * than once. This can happen if new consumers get added to the device * and probed before the list is flushed. */ dev->state_synced = true; if (WARN_ON(!list_empty(&dev->links.defer_sync))) return; get_device(dev); list_add_tail(&dev->links.defer_sync, list); } /** * device_links_flush_sync_list - Call sync_state() on a list of devices * @list: List of devices to call sync_state() on * @dont_lock_dev: Device for which lock is already held by the caller * * Calls sync_state() on all the devices that have been queued for it. This * function is used in conjunction with __device_links_queue_sync_state(). The * @dont_lock_dev parameter is useful when this function is called from a * context where a device lock is already held. */ static void device_links_flush_sync_list(struct list_head *list, struct device *dont_lock_dev) { struct device *dev, *tmp; list_for_each_entry_safe(dev, tmp, list, links.defer_sync) { list_del_init(&dev->links.defer_sync); if (dev != dont_lock_dev) device_lock(dev); dev_sync_state(dev); if (dev != dont_lock_dev) device_unlock(dev); put_device(dev); } } void device_links_supplier_sync_state_pause(void) { device_links_write_lock(); defer_sync_state_count++; device_links_write_unlock(); } void device_links_supplier_sync_state_resume(void) { struct device *dev, *tmp; LIST_HEAD(sync_list); device_links_write_lock(); if (!defer_sync_state_count) { WARN(true, "Unmatched sync_state pause/resume!"); goto out; } defer_sync_state_count--; if (defer_sync_state_count) goto out; list_for_each_entry_safe(dev, tmp, &deferred_sync, links.defer_sync) { /* * Delete from deferred_sync list before queuing it to * sync_list because defer_sync is used for both lists. 
*/ list_del_init(&dev->links.defer_sync); __device_links_queue_sync_state(dev, &sync_list); } out: device_links_write_unlock(); device_links_flush_sync_list(&sync_list, NULL); } static int sync_state_resume_initcall(void) { device_links_supplier_sync_state_resume(); return 0; } late_initcall(sync_state_resume_initcall); static void __device_links_supplier_defer_sync(struct device *sup) { if (list_empty(&sup->links.defer_sync) && dev_has_sync_state(sup)) list_add_tail(&sup->links.defer_sync, &deferred_sync); } static void device_link_drop_managed(struct device_link *link) { link->flags &= ~DL_FLAG_MANAGED; WRITE_ONCE(link->status, DL_STATE_NONE); kref_put(&link->kref, __device_link_del); } static ssize_t waiting_for_supplier_show(struct device *dev, struct device_attribute *attr, char *buf) { bool val; device_lock(dev); scoped_guard(mutex, &fwnode_link_lock) val = !!fwnode_links_check_suppliers(dev->fwnode); device_unlock(dev); return sysfs_emit(buf, "%u\n", val); } static DEVICE_ATTR_RO(waiting_for_supplier); /** * device_links_force_bind - Prepares device to be force bound * @dev: Consumer device. * * device_bind_driver() force binds a device to a driver without calling any * driver probe functions. So the consumer really isn't going to wait for any * supplier before it's bound to the driver. We still want the device link * states to be sensible when this happens. * * In preparation for device_bind_driver(), this function goes through each * supplier device links and checks if the supplier is bound. If it is, then * the device link status is set to CONSUMER_PROBE. Otherwise, the device link * is dropped. Links without the DL_FLAG_MANAGED flag set are ignored. */ void device_links_force_bind(struct device *dev) { struct device_link *link, *ln; device_links_write_lock(); list_for_each_entry_safe(link, ln, &dev->links.suppliers, c_node) { if (!(link->flags & DL_FLAG_MANAGED)) continue; if (link->status != DL_STATE_AVAILABLE) { device_link_drop_managed(link); continue; } WRITE_ONCE(link->status, DL_STATE_CONSUMER_PROBE); } dev->links.status = DL_DEV_PROBING; device_links_write_unlock(); } /** * device_links_driver_bound - Update device links after probing its driver. * @dev: Device to update the links for. * * The probe has been successful, so update links from this device to any * consumers by changing their status to "available". * * Also change the status of @dev's links to suppliers to "active". * * Links without the DL_FLAG_MANAGED flag set are ignored. */ void device_links_driver_bound(struct device *dev) { struct device_link *link, *ln; LIST_HEAD(sync_list); /* * If a device binds successfully, it's expected to have created all * the device links it needs to or make new device links as it needs * them. So, fw_devlink no longer needs to create device links to any * of the device's suppliers. * * Also, if a child firmware node of this bound device is not added as a * device by now, assume it is never going to be added. Make this bound * device the fallback supplier to the dangling consumers of the child * firmware node because this bound device is probably implementing the * child firmware node functionality and we don't want the dangling * consumers to defer probe indefinitely waiting for a device for the * child firmware node. 
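	 *
	 * (Think of a hypothetical multi-function controller: its child
	 * firmware nodes describe sub-blocks that never get a struct device
	 * of their own, yet other devices list those child nodes as
	 * suppliers.)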
*/ if (dev->fwnode && dev->fwnode->dev == dev) { struct fwnode_handle *child; fwnode_links_purge_suppliers(dev->fwnode); guard(mutex)(&fwnode_link_lock); fwnode_for_each_available_child_node(dev->fwnode, child) __fw_devlink_pickup_dangling_consumers(child, dev->fwnode); __fw_devlink_link_to_consumers(dev); } device_remove_file(dev, &dev_attr_waiting_for_supplier); device_links_write_lock(); list_for_each_entry(link, &dev->links.consumers, s_node) { if (!(link->flags & DL_FLAG_MANAGED)) continue; /* * Links created during consumer probe may be in the "consumer * probe" state to start with if the supplier is still probing * when they are created and they may become "active" if the * consumer probe returns first. Skip them here. */ if (link->status == DL_STATE_CONSUMER_PROBE || link->status == DL_STATE_ACTIVE) continue; WARN_ON(link->status != DL_STATE_DORMANT); WRITE_ONCE(link->status, DL_STATE_AVAILABLE); if (link->flags & DL_FLAG_AUTOPROBE_CONSUMER) driver_deferred_probe_add(link->consumer); } if (defer_sync_state_count) __device_links_supplier_defer_sync(dev); else __device_links_queue_sync_state(dev, &sync_list); list_for_each_entry_safe(link, ln, &dev->links.suppliers, c_node) { struct device *supplier; if (!(link->flags & DL_FLAG_MANAGED)) continue; supplier = link->supplier; if (link->flags & DL_FLAG_SYNC_STATE_ONLY) { /* * When DL_FLAG_SYNC_STATE_ONLY is set, it means no * other DL_MANAGED_LINK_FLAGS have been set. So, it's * save to drop the managed link completely. */ device_link_drop_managed(link); } else if (dev_is_best_effort(dev) && link->flags & DL_FLAG_INFERRED && link->status != DL_STATE_CONSUMER_PROBE && !link->supplier->can_match) { /* * When dev_is_best_effort() is true, we ignore device * links to suppliers that don't have a driver. If the * consumer device still managed to probe, there's no * point in maintaining a device link in a weird state * (consumer probed before supplier). So delete it. */ device_link_drop_managed(link); } else { WARN_ON(link->status != DL_STATE_CONSUMER_PROBE); WRITE_ONCE(link->status, DL_STATE_ACTIVE); } /* * This needs to be done even for the deleted * DL_FLAG_SYNC_STATE_ONLY device link in case it was the last * device link that was preventing the supplier from getting a * sync_state() call. */ if (defer_sync_state_count) __device_links_supplier_defer_sync(supplier); else __device_links_queue_sync_state(supplier, &sync_list); } dev->links.status = DL_DEV_DRIVER_BOUND; device_links_write_unlock(); device_links_flush_sync_list(&sync_list, dev); } /** * __device_links_no_driver - Update links of a device without a driver. * @dev: Device without a drvier. * * Delete all non-persistent links from this device to any suppliers. * * Persistent links stay around, but their status is changed to "available", * unless they already are in the "supplier unbind in progress" state in which * case they need not be updated. * * Links without the DL_FLAG_MANAGED flag set are ignored. 
*/ static void __device_links_no_driver(struct device *dev) { struct device_link *link, *ln; list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) { if (!(link->flags & DL_FLAG_MANAGED)) continue; if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) { device_link_drop_managed(link); continue; } if (link->status != DL_STATE_CONSUMER_PROBE && link->status != DL_STATE_ACTIVE) continue; if (link->supplier->links.status == DL_DEV_DRIVER_BOUND) { WRITE_ONCE(link->status, DL_STATE_AVAILABLE); } else { WARN_ON(!(link->flags & DL_FLAG_SYNC_STATE_ONLY)); WRITE_ONCE(link->status, DL_STATE_DORMANT); } } dev->links.status = DL_DEV_NO_DRIVER; } /** * device_links_no_driver - Update links after failing driver probe. * @dev: Device whose driver has just failed to probe. * * Clean up leftover links to consumers for @dev and invoke * %__device_links_no_driver() to update links to suppliers for it as * appropriate. * * Links without the DL_FLAG_MANAGED flag set are ignored. */ void device_links_no_driver(struct device *dev) { struct device_link *link; device_links_write_lock(); list_for_each_entry(link, &dev->links.consumers, s_node) { if (!(link->flags & DL_FLAG_MANAGED)) continue; /* * The probe has failed, so if the status of the link is * "consumer probe" or "active", it must have been added by * a probing consumer while this device was still probing. * Change its state to "dormant", as it represents a valid * relationship, but it is not functionally meaningful. */ if (link->status == DL_STATE_CONSUMER_PROBE || link->status == DL_STATE_ACTIVE) WRITE_ONCE(link->status, DL_STATE_DORMANT); } __device_links_no_driver(dev); device_links_write_unlock(); } /** * device_links_driver_cleanup - Update links after driver removal. * @dev: Device whose driver has just gone away. * * Update links to consumers for @dev by changing their status to "dormant" and * invoke %__device_links_no_driver() to update links to suppliers for it as * appropriate. * * Links without the DL_FLAG_MANAGED flag set are ignored. */ void device_links_driver_cleanup(struct device *dev) { struct device_link *link, *ln; device_links_write_lock(); list_for_each_entry_safe(link, ln, &dev->links.consumers, s_node) { if (!(link->flags & DL_FLAG_MANAGED)) continue; WARN_ON(link->flags & DL_FLAG_AUTOREMOVE_CONSUMER); WARN_ON(link->status != DL_STATE_SUPPLIER_UNBIND); /* * autoremove the links between this @dev and its consumer * devices that are not active, i.e. where the link state * has moved to DL_STATE_SUPPLIER_UNBIND. */ if (link->status == DL_STATE_SUPPLIER_UNBIND && link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER) device_link_drop_managed(link); WRITE_ONCE(link->status, DL_STATE_DORMANT); } list_del_init(&dev->links.defer_sync); __device_links_no_driver(dev); device_links_write_unlock(); } /** * device_links_busy - Check if there are any busy links to consumers. * @dev: Device to check. * * Check each consumer of the device and return 'true' if its link's status * is one of "consumer probe" or "active" (meaning that the given consumer is * probing right now or its driver is present). Otherwise, change the link * state to "supplier unbind" to prevent the consumer from being probed * successfully going forward. * * Return 'false' if there are no probing or active consumers. * * Links without the DL_FLAG_MANAGED flag set are ignored. 
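 *
 * Rough sketch of the caller pattern, simplified from what the driver core
 * does when a supplier driver is being released (not verbatim):
 *
 *	if (device_links_busy(dev)) {
 *		device_unlock(dev);
 *		device_links_unbind_consumers(dev);
 *		device_lock(dev);
 *	}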
*/ bool device_links_busy(struct device *dev) { struct device_link *link; bool ret = false; device_links_write_lock(); list_for_each_entry(link, &dev->links.consumers, s_node) { if (!(link->flags & DL_FLAG_MANAGED)) continue; if (link->status == DL_STATE_CONSUMER_PROBE || link->status == DL_STATE_ACTIVE) { ret = true; break; } WRITE_ONCE(link->status, DL_STATE_SUPPLIER_UNBIND); } dev->links.status = DL_DEV_UNBINDING; device_links_write_unlock(); return ret; } /** * device_links_unbind_consumers - Force unbind consumers of the given device. * @dev: Device to unbind the consumers of. * * Walk the list of links to consumers for @dev and if any of them is in the * "consumer probe" state, wait for all device probes in progress to complete * and start over. * * If that's not the case, change the status of the link to "supplier unbind" * and check if the link was in the "active" state. If so, force the consumer * driver to unbind and start over (the consumer will not re-probe as we have * changed the state of the link already). * * Links without the DL_FLAG_MANAGED flag set are ignored. */ void device_links_unbind_consumers(struct device *dev) { struct device_link *link; start: device_links_write_lock(); list_for_each_entry(link, &dev->links.consumers, s_node) { enum device_link_state status; if (!(link->flags & DL_FLAG_MANAGED) || link->flags & DL_FLAG_SYNC_STATE_ONLY) continue; status = link->status; if (status == DL_STATE_CONSUMER_PROBE) { device_links_write_unlock(); wait_for_device_probe(); goto start; } WRITE_ONCE(link->status, DL_STATE_SUPPLIER_UNBIND); if (status == DL_STATE_ACTIVE) { struct device *consumer = link->consumer; get_device(consumer); device_links_write_unlock(); device_release_driver_internal(consumer, NULL, consumer->parent); put_device(consumer); goto start; } } device_links_write_unlock(); } /** * device_links_purge - Delete existing links to other devices. * @dev: Target device. */ static void device_links_purge(struct device *dev) { struct device_link *link, *ln; if (dev->class == &devlink_class) return; /* * Delete all of the remaining links from this device to any other * devices (either consumers or suppliers). 
*/ device_links_write_lock(); list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) { WARN_ON(link->status == DL_STATE_ACTIVE); __device_link_del(&link->kref); } list_for_each_entry_safe_reverse(link, ln, &dev->links.consumers, s_node) { WARN_ON(link->status != DL_STATE_DORMANT && link->status != DL_STATE_NONE); __device_link_del(&link->kref); } device_links_write_unlock(); } #define FW_DEVLINK_FLAGS_PERMISSIVE (DL_FLAG_INFERRED | \ DL_FLAG_SYNC_STATE_ONLY) #define FW_DEVLINK_FLAGS_ON (DL_FLAG_INFERRED | \ DL_FLAG_AUTOPROBE_CONSUMER) #define FW_DEVLINK_FLAGS_RPM (FW_DEVLINK_FLAGS_ON | \ DL_FLAG_PM_RUNTIME) static u32 fw_devlink_flags = FW_DEVLINK_FLAGS_RPM; static int __init fw_devlink_setup(char *arg) { if (!arg) return -EINVAL; if (strcmp(arg, "off") == 0) { fw_devlink_flags = 0; } else if (strcmp(arg, "permissive") == 0) { fw_devlink_flags = FW_DEVLINK_FLAGS_PERMISSIVE; } else if (strcmp(arg, "on") == 0) { fw_devlink_flags = FW_DEVLINK_FLAGS_ON; } else if (strcmp(arg, "rpm") == 0) { fw_devlink_flags = FW_DEVLINK_FLAGS_RPM; } return 0; } early_param("fw_devlink", fw_devlink_setup); static bool fw_devlink_strict; static int __init fw_devlink_strict_setup(char *arg) { return kstrtobool(arg, &fw_devlink_strict); } early_param("fw_devlink.strict", fw_devlink_strict_setup); #define FW_DEVLINK_SYNC_STATE_STRICT 0 #define FW_DEVLINK_SYNC_STATE_TIMEOUT 1 #ifndef CONFIG_FW_DEVLINK_SYNC_STATE_TIMEOUT static int fw_devlink_sync_state; #else static int fw_devlink_sync_state = FW_DEVLINK_SYNC_STATE_TIMEOUT; #endif static int __init fw_devlink_sync_state_setup(char *arg) { if (!arg) return -EINVAL; if (strcmp(arg, "strict") == 0) { fw_devlink_sync_state = FW_DEVLINK_SYNC_STATE_STRICT; return 0; } else if (strcmp(arg, "timeout") == 0) { fw_devlink_sync_state = FW_DEVLINK_SYNC_STATE_TIMEOUT; return 0; } return -EINVAL; } early_param("fw_devlink.sync_state", fw_devlink_sync_state_setup); static inline u32 fw_devlink_get_flags(u8 fwlink_flags) { if (fwlink_flags & FWLINK_FLAG_CYCLE) return FW_DEVLINK_FLAGS_PERMISSIVE | DL_FLAG_CYCLE; return fw_devlink_flags; } static bool fw_devlink_is_permissive(void) { return fw_devlink_flags == FW_DEVLINK_FLAGS_PERMISSIVE; } bool fw_devlink_is_strict(void) { return fw_devlink_strict && !fw_devlink_is_permissive(); } static void fw_devlink_parse_fwnode(struct fwnode_handle *fwnode) { if (fwnode->flags & FWNODE_FLAG_LINKS_ADDED) return; fwnode_call_int_op(fwnode, add_links); fwnode->flags |= FWNODE_FLAG_LINKS_ADDED; } static void fw_devlink_parse_fwtree(struct fwnode_handle *fwnode) { struct fwnode_handle *child = NULL; fw_devlink_parse_fwnode(fwnode); while ((child = fwnode_get_next_available_child_node(fwnode, child))) fw_devlink_parse_fwtree(child); } static void fw_devlink_relax_link(struct device_link *link) { if (!(link->flags & DL_FLAG_INFERRED)) return; if (device_link_flag_is_sync_state_only(link->flags)) return; pm_runtime_drop_link(link); link->flags = DL_FLAG_MANAGED | FW_DEVLINK_FLAGS_PERMISSIVE; dev_dbg(link->consumer, "Relaxing link with %s\n", dev_name(link->supplier)); } static int fw_devlink_no_driver(struct device *dev, void *data) { struct device_link *link = to_devlink(dev); if (!link->supplier->can_match) fw_devlink_relax_link(link); return 0; } void fw_devlink_drivers_done(void) { fw_devlink_drv_reg_done = true; device_links_write_lock(); class_for_each_device(&devlink_class, NULL, NULL, fw_devlink_no_driver); device_links_write_unlock(); } static int fw_devlink_dev_sync_state(struct device *dev, void *data) { struct device_link 
*link = to_devlink(dev); struct device *sup = link->supplier; if (!(link->flags & DL_FLAG_MANAGED) || link->status == DL_STATE_ACTIVE || sup->state_synced || !dev_has_sync_state(sup)) return 0; if (fw_devlink_sync_state == FW_DEVLINK_SYNC_STATE_STRICT) { dev_warn(sup, "sync_state() pending due to %s\n", dev_name(link->consumer)); return 0; } if (!list_empty(&sup->links.defer_sync)) return 0; dev_warn(sup, "Timed out. Forcing sync_state()\n"); sup->state_synced = true; get_device(sup); list_add_tail(&sup->links.defer_sync, data); return 0; } void fw_devlink_probing_done(void) { LIST_HEAD(sync_list); device_links_write_lock(); class_for_each_device(&devlink_class, NULL, &sync_list, fw_devlink_dev_sync_state); device_links_write_unlock(); device_links_flush_sync_list(&sync_list, NULL); } /** * wait_for_init_devices_probe - Try to probe any device needed for init * * Some devices might need to be probed and bound successfully before the kernel * boot sequence can finish and move on to init/userspace. For example, a * network interface might need to be bound to be able to mount a NFS rootfs. * * With fw_devlink=on by default, some of these devices might be blocked from * probing because they are waiting on a optional supplier that doesn't have a * driver. While fw_devlink will eventually identify such devices and unblock * the probing automatically, it might be too late by the time it unblocks the * probing of devices. For example, the IP4 autoconfig might timeout before * fw_devlink unblocks probing of the network interface. * * This function is available to temporarily try and probe all devices that have * a driver even if some of their suppliers haven't been added or don't have * drivers. * * The drivers can then decide which of the suppliers are optional vs mandatory * and probe the device if possible. By the time this function returns, all such * "best effort" probes are guaranteed to be completed. If a device successfully * probes in this mode, we delete all fw_devlink discovered dependencies of that * device where the supplier hasn't yet probed successfully because they have to * be optional dependencies. * * Any devices that didn't successfully probe go back to being treated as if * this function was never called. * * This also means that some devices that aren't needed for init and could have * waited for their optional supplier to probe (when the supplier's module is * loaded later on) would end up probing prematurely with limited functionality. * So call this function only when boot would fail without it. */ void __init wait_for_init_devices_probe(void) { if (!fw_devlink_flags || fw_devlink_is_permissive()) return; /* * Wait for all ongoing probes to finish so that the "best effort" is * only applied to devices that can't probe otherwise. */ wait_for_device_probe(); pr_info("Trying to probe devices needed for running init ...\n"); fw_devlink_best_effort = true; driver_deferred_probe_trigger(); /* * Wait for all "best effort" probes to finish before going back to * normal enforcement. 
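	 *
	 * Hedged call-site sketch (the helper name is hypothetical): init
	 * code that would otherwise fail to find a boot-critical device can do
	 *
	 *	wait_for_init_devices_probe();
	 *	err = mount_my_network_root();
	 *
	 * immediately before the step that needs those devices.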
*/ wait_for_device_probe(); fw_devlink_best_effort = false; } static void fw_devlink_unblock_consumers(struct device *dev) { struct device_link *link; if (!fw_devlink_flags || fw_devlink_is_permissive()) return; device_links_write_lock(); list_for_each_entry(link, &dev->links.consumers, s_node) fw_devlink_relax_link(link); device_links_write_unlock(); } #define get_dev_from_fwnode(fwnode) get_device((fwnode)->dev) static bool fwnode_init_without_drv(struct fwnode_handle *fwnode) { struct device *dev; bool ret; if (!(fwnode->flags & FWNODE_FLAG_INITIALIZED)) return false; dev = get_dev_from_fwnode(fwnode); ret = !dev || dev->links.status == DL_DEV_NO_DRIVER; put_device(dev); return ret; } static bool fwnode_ancestor_init_without_drv(struct fwnode_handle *fwnode) { struct fwnode_handle *parent; fwnode_for_each_parent_node(fwnode, parent) { if (fwnode_init_without_drv(parent)) { fwnode_handle_put(parent); return true; } } return false; } /** * fwnode_is_ancestor_of - Test if @ancestor is ancestor of @child * @ancestor: Firmware which is tested for being an ancestor * @child: Firmware which is tested for being the child * * A node is considered an ancestor of itself too. * * Return: true if @ancestor is an ancestor of @child. Otherwise, returns false. */ static bool fwnode_is_ancestor_of(const struct fwnode_handle *ancestor, const struct fwnode_handle *child) { struct fwnode_handle *parent; if (IS_ERR_OR_NULL(ancestor)) return false; if (child == ancestor) return true; fwnode_for_each_parent_node(child, parent) { if (parent == ancestor) { fwnode_handle_put(parent); return true; } } return false; } /** * fwnode_get_next_parent_dev - Find device of closest ancestor fwnode * @fwnode: firmware node * * Given a firmware node (@fwnode), this function finds its closest ancestor * firmware node that has a corresponding struct device and returns that struct * device. * * The caller is responsible for calling put_device() on the returned device * pointer. * * Return: a pointer to the device of the @fwnode's closest ancestor. */ static struct device *fwnode_get_next_parent_dev(const struct fwnode_handle *fwnode) { struct fwnode_handle *parent; struct device *dev; fwnode_for_each_parent_node(fwnode, parent) { dev = get_dev_from_fwnode(parent); if (dev) { fwnode_handle_put(parent); return dev; } } return NULL; } /** * __fw_devlink_relax_cycles - Relax and mark dependency cycles. * @con_handle: Potential consumer device fwnode. * @sup_handle: Potential supplier's fwnode. * * Needs to be called with fwnode_lock and device link lock held. * * Check if @sup_handle or any of its ancestors or suppliers direct/indirectly * depend on @con. This function can detect multiple cyles between @sup_handle * and @con. When such dependency cycles are found, convert all device links * created solely by fw_devlink into SYNC_STATE_ONLY device links. Also, mark * all fwnode links in the cycle with FWLINK_FLAG_CYCLE so that when they are * converted into a device link in the future, they are created as * SYNC_STATE_ONLY device links. This is the equivalent of doing * fw_devlink=permissive just between the devices in the cycle. We need to do * this because, at this point, fw_devlink can't tell which of these * dependencies is not a real dependency. * * Return true if one or more cycles were found. Otherwise, return false. 
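 *
 * A typical illustrative cycle: an interrupt controller whose firmware node
 * consumes a clock from a clock controller, while the clock controller's
 * node names the interrupt controller as its interrupt parent. At least one
 * of those edges is not a real probe-ordering dependency, but at this point
 * fw_devlink cannot tell which one.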
*/ static bool __fw_devlink_relax_cycles(struct fwnode_handle *con_handle, struct fwnode_handle *sup_handle) { struct device *sup_dev = NULL, *par_dev = NULL, *con_dev = NULL; struct fwnode_link *link; struct device_link *dev_link; bool ret = false; if (!sup_handle) return false; /* * We aren't trying to find all cycles. Just a cycle between con and * sup_handle. */ if (sup_handle->flags & FWNODE_FLAG_VISITED) return false; sup_handle->flags |= FWNODE_FLAG_VISITED; /* Termination condition. */ if (sup_handle == con_handle) { pr_debug("----- cycle: start -----\n"); ret = true; goto out; } sup_dev = get_dev_from_fwnode(sup_handle); con_dev = get_dev_from_fwnode(con_handle); /* * If sup_dev is bound to a driver and @con hasn't started binding to a * driver, sup_dev can't be a consumer of @con. So, no need to check * further. */ if (sup_dev && sup_dev->links.status == DL_DEV_DRIVER_BOUND && con_dev && con_dev->links.status == DL_DEV_NO_DRIVER) { ret = false; goto out; } list_for_each_entry(link, &sup_handle->suppliers, c_hook) { if (link->flags & FWLINK_FLAG_IGNORE) continue; if (__fw_devlink_relax_cycles(con_handle, link->supplier)) { __fwnode_link_cycle(link); ret = true; } } /* * Give priority to device parent over fwnode parent to account for any * quirks in how fwnodes are converted to devices. */ if (sup_dev) par_dev = get_device(sup_dev->parent); else par_dev = fwnode_get_next_parent_dev(sup_handle); if (par_dev && __fw_devlink_relax_cycles(con_handle, par_dev->fwnode)) { pr_debug("%pfwf: cycle: child of %pfwf\n", sup_handle, par_dev->fwnode); ret = true; } if (!sup_dev) goto out; list_for_each_entry(dev_link, &sup_dev->links.suppliers, c_node) { /* * Ignore a SYNC_STATE_ONLY flag only if it wasn't marked as * such due to a cycle. */ if (device_link_flag_is_sync_state_only(dev_link->flags) && !(dev_link->flags & DL_FLAG_CYCLE)) continue; if (__fw_devlink_relax_cycles(con_handle, dev_link->supplier->fwnode)) { pr_debug("%pfwf: cycle: depends on %pfwf\n", sup_handle, dev_link->supplier->fwnode); fw_devlink_relax_link(dev_link); dev_link->flags |= DL_FLAG_CYCLE; ret = true; } } out: sup_handle->flags &= ~FWNODE_FLAG_VISITED; put_device(sup_dev); put_device(con_dev); put_device(par_dev); return ret; } /** * fw_devlink_create_devlink - Create a device link from a consumer to fwnode * @con: consumer device for the device link * @sup_handle: fwnode handle of supplier * @link: fwnode link that's being converted to a device link * * This function will try to create a device link between the consumer device * @con and the supplier device represented by @sup_handle. * * The supplier has to be provided as a fwnode because incorrect cycles in * fwnode links can sometimes cause the supplier device to never be created. * This function detects such cases and returns an error if it cannot create a * device link from the consumer to a missing supplier. * * Returns, * 0 on successfully creating a device link * -EINVAL if the device link cannot be created as expected * -EAGAIN if the device link cannot be created right now, but it may be * possible to do that in the future */ static int fw_devlink_create_devlink(struct device *con, struct fwnode_handle *sup_handle, struct fwnode_link *link) { struct device *sup_dev; int ret = 0; u32 flags; if (link->flags & FWLINK_FLAG_IGNORE) return 0; /* * In some cases, a device P might also be a supplier to its child node * C. However, this would defer the probe of C until the probe of P * completes successfully. This is perfectly fine in the device driver * model. 
device_add() doesn't guarantee probe completion of the device * by the time it returns. * * However, there are a few drivers that assume C will finish probing * as soon as it's added and before P finishes probing. So, we provide * a flag to let fw_devlink know not to delay the probe of C until the * probe of P completes successfully. * * When such a flag is set, we can't create device links where P is the * supplier of C as that would delay the probe of C. */ if (sup_handle->flags & FWNODE_FLAG_NEEDS_CHILD_BOUND_ON_ADD && fwnode_is_ancestor_of(sup_handle, con->fwnode)) return -EINVAL; /* * Don't try to optimize by not calling the cycle detection logic under * certain conditions. There's always some corner case that won't get * detected. */ device_links_write_lock(); if (__fw_devlink_relax_cycles(link->consumer, sup_handle)) { __fwnode_link_cycle(link); pr_debug("----- cycle: end -----\n"); pr_info("%pfwf: Fixed dependency cycle(s) with %pfwf\n", link->consumer, sup_handle); } device_links_write_unlock(); if (con->fwnode == link->consumer) flags = fw_devlink_get_flags(link->flags); else flags = FW_DEVLINK_FLAGS_PERMISSIVE; if (sup_handle->flags & FWNODE_FLAG_NOT_DEVICE) sup_dev = fwnode_get_next_parent_dev(sup_handle); else sup_dev = get_dev_from_fwnode(sup_handle); if (sup_dev) { /* * If it's one of those drivers that don't actually bind to * their device using driver core, then don't wait on this * supplier device indefinitely. */ if (sup_dev->links.status == DL_DEV_NO_DRIVER && sup_handle->flags & FWNODE_FLAG_INITIALIZED) { dev_dbg(con, "Not linking %pfwf - dev might never probe\n", sup_handle); ret = -EINVAL; goto out; } if (con != sup_dev && !device_link_add(con, sup_dev, flags)) { dev_err(con, "Failed to create device link (0x%x) with supplier %s for %pfwf\n", flags, dev_name(sup_dev), link->consumer); ret = -EINVAL; } goto out; } /* * Supplier or supplier's ancestor already initialized without a struct * device or being probed by a driver. */ if (fwnode_init_without_drv(sup_handle) || fwnode_ancestor_init_without_drv(sup_handle)) { dev_dbg(con, "Not linking %pfwf - might never become dev\n", sup_handle); return -EINVAL; } ret = -EAGAIN; out: put_device(sup_dev); return ret; } /** * __fw_devlink_link_to_consumers - Create device links to consumers of a device * @dev: Device that needs to be linked to its consumers * * This function looks at all the consumer fwnodes of @dev and creates device * links between the consumer device and @dev (supplier). * * If the consumer device has not been added yet, then this function creates a * SYNC_STATE_ONLY link between @dev (supplier) and the closest ancestor device * of the consumer fwnode. This is necessary to make sure @dev doesn't get a * sync_state() callback before the real consumer device gets to be added and * then probed. * * Once device links are created from the real consumer to @dev (supplier), the * fwnode links are deleted. */ static void __fw_devlink_link_to_consumers(struct device *dev) { struct fwnode_handle *fwnode = dev->fwnode; struct fwnode_link *link, *tmp; list_for_each_entry_safe(link, tmp, &fwnode->consumers, s_hook) { struct device *con_dev; bool own_link = true; int ret; con_dev = get_dev_from_fwnode(link->consumer); /* * If consumer device is not available yet, make a "proxy" * SYNC_STATE_ONLY link from the consumer's parent device to * the supplier device. This is necessary to make sure the * supplier doesn't get a sync_state() callback before the real * consumer can create a device link to the supplier. 
* * This proxy link step is needed to handle the case where the * consumer's parent device is added before the supplier. */ if (!con_dev) { con_dev = fwnode_get_next_parent_dev(link->consumer); /* * However, if the consumer's parent device is also the * parent of the supplier, don't create a * consumer-supplier link from the parent to its child * device. Such a dependency is impossible. */ if (con_dev && fwnode_is_ancestor_of(con_dev->fwnode, fwnode)) { put_device(con_dev); con_dev = NULL; } else { own_link = false; } } if (!con_dev) continue; ret = fw_devlink_create_devlink(con_dev, fwnode, link); put_device(con_dev); if (!own_link || ret == -EAGAIN) continue; __fwnode_link_del(link); } } /** * __fw_devlink_link_to_suppliers - Create device links to suppliers of a device * @dev: The consumer device that needs to be linked to its suppliers * @fwnode: Root of the fwnode tree that is used to create device links * * This function looks at all the supplier fwnodes of fwnode tree rooted at * @fwnode and creates device links between @dev (consumer) and all the * supplier devices of the entire fwnode tree at @fwnode. * * The function creates normal (non-SYNC_STATE_ONLY) device links between @dev * and the real suppliers of @dev. Once these device links are created, the * fwnode links are deleted. * * In addition, it also looks at all the suppliers of the entire fwnode tree * because some of the child devices of @dev that have not been added yet * (because @dev hasn't probed) might already have their suppliers added to * driver core. So, this function creates SYNC_STATE_ONLY device links between * @dev (consumer) and these suppliers to make sure they don't execute their * sync_state() callbacks before these child devices have a chance to create * their device links. The fwnode links that correspond to the child devices * aren't delete because they are needed later to create the device links * between the real consumer and supplier devices. */ static void __fw_devlink_link_to_suppliers(struct device *dev, struct fwnode_handle *fwnode) { bool own_link = (dev->fwnode == fwnode); struct fwnode_link *link, *tmp; struct fwnode_handle *child = NULL; list_for_each_entry_safe(link, tmp, &fwnode->suppliers, c_hook) { int ret; struct fwnode_handle *sup = link->supplier; ret = fw_devlink_create_devlink(dev, sup, link); if (!own_link || ret == -EAGAIN) continue; __fwnode_link_del(link); } /* * Make "proxy" SYNC_STATE_ONLY device links to represent the needs of * all the descendants. This proxy link step is needed to handle the * case where the supplier is added before the consumer's parent device * (@dev). */ while ((child = fwnode_get_next_available_child_node(fwnode, child))) __fw_devlink_link_to_suppliers(dev, child); } static void fw_devlink_link_device(struct device *dev) { struct fwnode_handle *fwnode = dev->fwnode; if (!fw_devlink_flags) return; fw_devlink_parse_fwtree(fwnode); guard(mutex)(&fwnode_link_lock); __fw_devlink_link_to_consumers(dev); __fw_devlink_link_to_suppliers(dev, fwnode); } /* Device links support end. */ static struct kobject *dev_kobj; /* /sys/dev/char */ static struct kobject *sysfs_dev_char_kobj; /* /sys/dev/block */ static struct kobject *sysfs_dev_block_kobj; static DEFINE_MUTEX(device_hotplug_lock); void lock_device_hotplug(void) { mutex_lock(&device_hotplug_lock); } void unlock_device_hotplug(void) { mutex_unlock(&device_hotplug_lock); } int lock_device_hotplug_sysfs(void) { if (mutex_trylock(&device_hotplug_lock)) return 0; /* Avoid busy looping (5 ms of sleep should do). 
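	 *
	 * Returning restart_syscall() below makes the sysfs store operation
	 * that called us retry transparently after the nap, rather than
	 * spinning on device_hotplug_lock.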
*/ msleep(5); return restart_syscall(); } #ifdef CONFIG_BLOCK static inline int device_is_not_partition(struct device *dev) { return !(dev->type == &part_type); } #else static inline int device_is_not_partition(struct device *dev) { return 1; } #endif static void device_platform_notify(struct device *dev) { acpi_device_notify(dev); software_node_notify(dev); } static void device_platform_notify_remove(struct device *dev) { software_node_notify_remove(dev); acpi_device_notify_remove(dev); } /** * dev_driver_string - Return a device's driver name, if at all possible * @dev: struct device to get the name of * * Will return the device's driver's name if it is bound to a device. If * the device is not bound to a driver, it will return the name of the bus * it is attached to. If it is not attached to a bus either, an empty * string will be returned. */ const char *dev_driver_string(const struct device *dev) { struct device_driver *drv; /* dev->driver can change to NULL underneath us because of unbinding, * so be careful about accessing it. dev->bus and dev->class should * never change once they are set, so they don't need special care. */ drv = READ_ONCE(dev->driver); return drv ? drv->name : dev_bus_name(dev); } EXPORT_SYMBOL(dev_driver_string); #define to_dev_attr(_attr) container_of(_attr, struct device_attribute, attr) static ssize_t dev_attr_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct device_attribute *dev_attr = to_dev_attr(attr); struct device *dev = kobj_to_dev(kobj); ssize_t ret = -EIO; if (dev_attr->show) ret = dev_attr->show(dev, dev_attr, buf); if (ret >= (ssize_t)PAGE_SIZE) { printk("dev_attr_show: %pS returned bad count\n", dev_attr->show); } return ret; } static ssize_t dev_attr_store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) { struct device_attribute *dev_attr = to_dev_attr(attr); struct device *dev = kobj_to_dev(kobj); ssize_t ret = -EIO; if (dev_attr->store) ret = dev_attr->store(dev, dev_attr, buf, count); return ret; } static const struct sysfs_ops dev_sysfs_ops = { .show = dev_attr_show, .store = dev_attr_store, }; #define to_ext_attr(x) container_of(x, struct dev_ext_attribute, attr) ssize_t device_store_ulong(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct dev_ext_attribute *ea = to_ext_attr(attr); int ret; unsigned long new; ret = kstrtoul(buf, 0, &new); if (ret) return ret; *(unsigned long *)(ea->var) = new; /* Always return full write size even if we didn't consume all */ return size; } EXPORT_SYMBOL_GPL(device_store_ulong); ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr, char *buf) { struct dev_ext_attribute *ea = to_ext_attr(attr); return sysfs_emit(buf, "%lx\n", *(unsigned long *)(ea->var)); } EXPORT_SYMBOL_GPL(device_show_ulong); ssize_t device_store_int(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct dev_ext_attribute *ea = to_ext_attr(attr); int ret; long new; ret = kstrtol(buf, 0, &new); if (ret) return ret; if (new > INT_MAX || new < INT_MIN) return -EINVAL; *(int *)(ea->var) = new; /* Always return full write size even if we didn't consume all */ return size; } EXPORT_SYMBOL_GPL(device_store_int); ssize_t device_show_int(struct device *dev, struct device_attribute *attr, char *buf) { struct dev_ext_attribute *ea = to_ext_attr(attr); return sysfs_emit(buf, "%d\n", *(int *)(ea->var)); } EXPORT_SYMBOL_GPL(device_show_int); ssize_t device_store_bool(struct device *dev, struct device_attribute 
*attr, const char *buf, size_t size) { struct dev_ext_attribute *ea = to_ext_attr(attr); if (kstrtobool(buf, ea->var) < 0) return -EINVAL; return size; } EXPORT_SYMBOL_GPL(device_store_bool); ssize_t device_show_bool(struct device *dev, struct device_attribute *attr, char *buf) { struct dev_ext_attribute *ea = to_ext_attr(attr); return sysfs_emit(buf, "%d\n", *(bool *)(ea->var)); } EXPORT_SYMBOL_GPL(device_show_bool); ssize_t device_show_string(struct device *dev, struct device_attribute *attr, char *buf) { struct dev_ext_attribute *ea = to_ext_attr(attr); return sysfs_emit(buf, "%s\n", (char *)ea->var); } EXPORT_SYMBOL_GPL(device_show_string); /** * device_release - free device structure. * @kobj: device's kobject. * * This is called once the reference count for the object * reaches 0. We forward the call to the device's release * method, which should handle actually freeing the structure. */ static void device_release(struct kobject *kobj) { struct device *dev = kobj_to_dev(kobj); struct device_private *p = dev->p; /* * Some platform devices are driven without driver attached * and managed resources may have been acquired. Make sure * all resources are released. * * Drivers still can add resources into device after device * is deleted but alive, so release devres here to avoid * possible memory leak. */ devres_release_all(dev); kfree(dev->dma_range_map); if (dev->release) dev->release(dev); else if (dev->type && dev->type->release) dev->type->release(dev); else if (dev->class && dev->class->dev_release) dev->class->dev_release(dev); else WARN(1, KERN_ERR "Device '%s' does not have a release() function, it is broken and must be fixed. See Documentation/core-api/kobject.rst.\n", dev_name(dev)); kfree(p); } static const void *device_namespace(const struct kobject *kobj) { const struct device *dev = kobj_to_dev(kobj); const void *ns = NULL; if (dev->class && dev->class->namespace) ns = dev->class->namespace(dev); return ns; } static void device_get_ownership(const struct kobject *kobj, kuid_t *uid, kgid_t *gid) { const struct device *dev = kobj_to_dev(kobj); if (dev->class && dev->class->get_ownership) dev->class->get_ownership(dev, uid, gid); } static const struct kobj_type device_ktype = { .release = device_release, .sysfs_ops = &dev_sysfs_ops, .namespace = device_namespace, .get_ownership = device_get_ownership, }; static int dev_uevent_filter(const struct kobject *kobj) { const struct kobj_type *ktype = get_ktype(kobj); if (ktype == &device_ktype) { const struct device *dev = kobj_to_dev(kobj); if (dev->bus) return 1; if (dev->class) return 1; } return 0; } static const char *dev_uevent_name(const struct kobject *kobj) { const struct device *dev = kobj_to_dev(kobj); if (dev->bus) return dev->bus->name; if (dev->class) return dev->class->name; return NULL; } /* * Try filling "DRIVER=<name>" uevent variable for a device. Because this * function may race with binding and unbinding the device from a driver, * we need to be careful. Binding is generally safe, at worst we miss the * fact that the device is already bound to a driver (but the driver * information that is delivered through uevents is best-effort, it may * become obsolete as soon as it is generated anyways). Unbinding is more * risky as driver pointer is transitioning to NULL, so READ_ONCE() should * be used to make sure we are dealing with the same pointer, and to * ensure that driver structure is not going to disappear from under us * we take bus' drivers klist lock. 
The assumption that only registered * driver can be bound to a device, and to unregister a driver bus code * will take the same lock. */ static void dev_driver_uevent(const struct device *dev, struct kobj_uevent_env *env) { struct subsys_private *sp = bus_to_subsys(dev->bus); if (sp) { scoped_guard(spinlock, &sp->klist_drivers.k_lock) { struct device_driver *drv = READ_ONCE(dev->driver); if (drv) add_uevent_var(env, "DRIVER=%s", drv->name); } subsys_put(sp); } } static int dev_uevent(const struct kobject *kobj, struct kobj_uevent_env *env) { const struct device *dev = kobj_to_dev(kobj); int retval = 0; /* add device node properties if present */ if (MAJOR(dev->devt)) { const char *tmp; const char *name; umode_t mode = 0; kuid_t uid = GLOBAL_ROOT_UID; kgid_t gid = GLOBAL_ROOT_GID; add_uevent_var(env, "MAJOR=%u", MAJOR(dev->devt)); add_uevent_var(env, "MINOR=%u", MINOR(dev->devt)); name = device_get_devnode(dev, &mode, &uid, &gid, &tmp); if (name) { add_uevent_var(env, "DEVNAME=%s", name); if (mode) add_uevent_var(env, "DEVMODE=%#o", mode & 0777); if (!uid_eq(uid, GLOBAL_ROOT_UID)) add_uevent_var(env, "DEVUID=%u", from_kuid(&init_user_ns, uid)); if (!gid_eq(gid, GLOBAL_ROOT_GID)) add_uevent_var(env, "DEVGID=%u", from_kgid(&init_user_ns, gid)); kfree(tmp); } } if (dev->type && dev->type->name) add_uevent_var(env, "DEVTYPE=%s", dev->type->name); /* Add "DRIVER=%s" variable if the device is bound to a driver */ dev_driver_uevent(dev, env); /* Add common DT information about the device */ of_device_uevent(dev, env); /* have the bus specific function add its stuff */ if (dev->bus && dev->bus->uevent) { retval = dev->bus->uevent(dev, env); if (retval) pr_debug("device: '%s': %s: bus uevent() returned %d\n", dev_name(dev), __func__, retval); } /* have the class specific function add its stuff */ if (dev->class && dev->class->dev_uevent) { retval = dev->class->dev_uevent(dev, env); if (retval) pr_debug("device: '%s': %s: class uevent() " "returned %d\n", dev_name(dev), __func__, retval); } /* have the device type specific function add its stuff */ if (dev->type && dev->type->uevent) { retval = dev->type->uevent(dev, env); if (retval) pr_debug("device: '%s': %s: dev_type uevent() " "returned %d\n", dev_name(dev), __func__, retval); } return retval; } static const struct kset_uevent_ops device_uevent_ops = { .filter = dev_uevent_filter, .name = dev_uevent_name, .uevent = dev_uevent, }; static ssize_t uevent_show(struct device *dev, struct device_attribute *attr, char *buf) { struct kobject *top_kobj; struct kset *kset; struct kobj_uevent_env *env = NULL; int i; int len = 0; int retval; /* search the kset, the device belongs to */ top_kobj = &dev->kobj; while (!top_kobj->kset && top_kobj->parent) top_kobj = top_kobj->parent; if (!top_kobj->kset) goto out; kset = top_kobj->kset; if (!kset->uevent_ops || !kset->uevent_ops->uevent) goto out; /* respect filter */ if (kset->uevent_ops && kset->uevent_ops->filter) if (!kset->uevent_ops->filter(&dev->kobj)) goto out; env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL); if (!env) return -ENOMEM; /* let the kset specific function add its keys */ retval = kset->uevent_ops->uevent(&dev->kobj, env); if (retval) goto out; /* copy keys to file */ for (i = 0; i < env->envp_idx; i++) len += sysfs_emit_at(buf, len, "%s\n", env->envp[i]); out: kfree(env); return len; } static ssize_t uevent_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int rc; rc = kobject_synth_uevent(&dev->kobj, buf, count); if (rc) { dev_err(dev, 
"uevent: failed to send synthetic uevent: %d\n", rc); return rc; } return count; } static DEVICE_ATTR_RW(uevent); static ssize_t online_show(struct device *dev, struct device_attribute *attr, char *buf) { bool val; device_lock(dev); val = !dev->offline; device_unlock(dev); return sysfs_emit(buf, "%u\n", val); } static ssize_t online_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { bool val; int ret; ret = kstrtobool(buf, &val); if (ret < 0) return ret; ret = lock_device_hotplug_sysfs(); if (ret) return ret; ret = val ? device_online(dev) : device_offline(dev); unlock_device_hotplug(); return ret < 0 ? ret : count; } static DEVICE_ATTR_RW(online); static ssize_t removable_show(struct device *dev, struct device_attribute *attr, char *buf) { const char *loc; switch (dev->removable) { case DEVICE_REMOVABLE: loc = "removable"; break; case DEVICE_FIXED: loc = "fixed"; break; default: loc = "unknown"; } return sysfs_emit(buf, "%s\n", loc); } static DEVICE_ATTR_RO(removable); int device_add_groups(struct device *dev, const struct attribute_group **groups) { return sysfs_create_groups(&dev->kobj, groups); } EXPORT_SYMBOL_GPL(device_add_groups); void device_remove_groups(struct device *dev, const struct attribute_group **groups) { sysfs_remove_groups(&dev->kobj, groups); } EXPORT_SYMBOL_GPL(device_remove_groups); union device_attr_group_devres { const struct attribute_group *group; const struct attribute_group **groups; }; static void devm_attr_group_remove(struct device *dev, void *res) { union device_attr_group_devres *devres = res; const struct attribute_group *group = devres->group; dev_dbg(dev, "%s: removing group %p\n", __func__, group); sysfs_remove_group(&dev->kobj, group); } /** * devm_device_add_group - given a device, create a managed attribute group * @dev: The device to create the group for * @grp: The attribute group to create * * This function creates a group for the first time. It will explicitly * warn and error if any of the attribute files being created already exist. * * Returns 0 on success or error code on failure. 
*/ int devm_device_add_group(struct device *dev, const struct attribute_group *grp) { union device_attr_group_devres *devres; int error; devres = devres_alloc(devm_attr_group_remove, sizeof(*devres), GFP_KERNEL); if (!devres) return -ENOMEM; error = sysfs_create_group(&dev->kobj, grp); if (error) { devres_free(devres); return error; } devres->group = grp; devres_add(dev, devres); return 0; } EXPORT_SYMBOL_GPL(devm_device_add_group); static int device_add_attrs(struct device *dev) { const struct class *class = dev->class; const struct device_type *type = dev->type; int error; if (class) { error = device_add_groups(dev, class->dev_groups); if (error) return error; } if (type) { error = device_add_groups(dev, type->groups); if (error) goto err_remove_class_groups; } error = device_add_groups(dev, dev->groups); if (error) goto err_remove_type_groups; if (device_supports_offline(dev) && !dev->offline_disabled) { error = device_create_file(dev, &dev_attr_online); if (error) goto err_remove_dev_groups; } if (fw_devlink_flags && !fw_devlink_is_permissive() && dev->fwnode) { error = device_create_file(dev, &dev_attr_waiting_for_supplier); if (error) goto err_remove_dev_online; } if (dev_removable_is_valid(dev)) { error = device_create_file(dev, &dev_attr_removable); if (error) goto err_remove_dev_waiting_for_supplier; } if (dev_add_physical_location(dev)) { error = device_add_group(dev, &dev_attr_physical_location_group); if (error) goto err_remove_dev_removable; } return 0; err_remove_dev_removable: device_remove_file(dev, &dev_attr_removable); err_remove_dev_waiting_for_supplier: device_remove_file(dev, &dev_attr_waiting_for_supplier); err_remove_dev_online: device_remove_file(dev, &dev_attr_online); err_remove_dev_groups: device_remove_groups(dev, dev->groups); err_remove_type_groups: if (type) device_remove_groups(dev, type->groups); err_remove_class_groups: if (class) device_remove_groups(dev, class->dev_groups); return error; } static void device_remove_attrs(struct device *dev) { const struct class *class = dev->class; const struct device_type *type = dev->type; if (dev->physical_location) { device_remove_group(dev, &dev_attr_physical_location_group); kfree(dev->physical_location); } device_remove_file(dev, &dev_attr_removable); device_remove_file(dev, &dev_attr_waiting_for_supplier); device_remove_file(dev, &dev_attr_online); device_remove_groups(dev, dev->groups); if (type) device_remove_groups(dev, type->groups); if (class) device_remove_groups(dev, class->dev_groups); } static ssize_t dev_show(struct device *dev, struct device_attribute *attr, char *buf) { return print_dev_t(buf, dev->devt); } static DEVICE_ATTR_RO(dev); /* /sys/devices/ */ struct kset *devices_kset; /** * devices_kset_move_before - Move device in the devices_kset's list. * @deva: Device to move. * @devb: Device @deva should come before. */ static void devices_kset_move_before(struct device *deva, struct device *devb) { if (!devices_kset) return; pr_debug("devices_kset: Moving %s before %s\n", dev_name(deva), dev_name(devb)); spin_lock(&devices_kset->list_lock); list_move_tail(&deva->kobj.entry, &devb->kobj.entry); spin_unlock(&devices_kset->list_lock); } /** * devices_kset_move_after - Move device in the devices_kset's list. * @deva: Device to move * @devb: Device @deva should come after. 
*/ static void devices_kset_move_after(struct device *deva, struct device *devb) { if (!devices_kset) return; pr_debug("devices_kset: Moving %s after %s\n", dev_name(deva), dev_name(devb)); spin_lock(&devices_kset->list_lock); list_move(&deva->kobj.entry, &devb->kobj.entry); spin_unlock(&devices_kset->list_lock); } /** * devices_kset_move_last - move the device to the end of devices_kset's list. * @dev: device to move */ void devices_kset_move_last(struct device *dev) { if (!devices_kset) return; pr_debug("devices_kset: Moving %s to end of list\n", dev_name(dev)); spin_lock(&devices_kset->list_lock); list_move_tail(&dev->kobj.entry, &devices_kset->list); spin_unlock(&devices_kset->list_lock); } /** * device_create_file - create sysfs attribute file for device. * @dev: device. * @attr: device attribute descriptor. */ int device_create_file(struct device *dev, const struct device_attribute *attr) { int error = 0; if (dev) { WARN(((attr->attr.mode & S_IWUGO) && !attr->store), "Attribute %s: write permission without 'store'\n", attr->attr.name); WARN(((attr->attr.mode & S_IRUGO) && !attr->show), "Attribute %s: read permission without 'show'\n", attr->attr.name); error = sysfs_create_file(&dev->kobj, &attr->attr); } return error; } EXPORT_SYMBOL_GPL(device_create_file); /** * device_remove_file - remove sysfs attribute file. * @dev: device. * @attr: device attribute descriptor. */ void device_remove_file(struct device *dev, const struct device_attribute *attr) { if (dev) sysfs_remove_file(&dev->kobj, &attr->attr); } EXPORT_SYMBOL_GPL(device_remove_file); /** * device_remove_file_self - remove sysfs attribute file from its own method. * @dev: device. * @attr: device attribute descriptor. * * See kernfs_remove_self() for details. */ bool device_remove_file_self(struct device *dev, const struct device_attribute *attr) { if (dev) return sysfs_remove_file_self(&dev->kobj, &attr->attr); else return false; } EXPORT_SYMBOL_GPL(device_remove_file_self); /** * device_create_bin_file - create sysfs binary attribute file for device. * @dev: device. * @attr: device binary attribute descriptor. */ int device_create_bin_file(struct device *dev, const struct bin_attribute *attr) { int error = -EINVAL; if (dev) error = sysfs_create_bin_file(&dev->kobj, attr); return error; } EXPORT_SYMBOL_GPL(device_create_bin_file); /** * device_remove_bin_file - remove sysfs binary attribute file * @dev: device. * @attr: device binary attribute descriptor. */ void device_remove_bin_file(struct device *dev, const struct bin_attribute *attr) { if (dev) sysfs_remove_bin_file(&dev->kobj, attr); } EXPORT_SYMBOL_GPL(device_remove_bin_file); static void klist_children_get(struct klist_node *n) { struct device_private *p = to_device_private_parent(n); struct device *dev = p->device; get_device(dev); } static void klist_children_put(struct klist_node *n) { struct device_private *p = to_device_private_parent(n); struct device *dev = p->device; put_device(dev); } /** * device_initialize - init device structure. * @dev: device. * * This prepares the device for use by other layers by initializing * its fields. * It is the first half of device_register(), if called by * that function, though it can also be called separately, so one * may use @dev's fields. In particular, get_device()/put_device() * may be used for reference counting of @dev after calling this * function. * * All fields in @dev must be initialized by the caller to 0, except * for those explicitly set to some other value. 
The simplest * approach is to use kzalloc() to allocate the structure containing * @dev. * * NOTE: Use put_device() to give up your reference instead of freeing * @dev directly once you have called this function. */ void device_initialize(struct device *dev) { dev->kobj.kset = devices_kset; kobject_init(&dev->kobj, &device_ktype); INIT_LIST_HEAD(&dev->dma_pools); mutex_init(&dev->mutex); lockdep_set_novalidate_class(&dev->mutex); spin_lock_init(&dev->devres_lock); INIT_LIST_HEAD(&dev->devres_head); device_pm_init(dev); set_dev_node(dev, NUMA_NO_NODE); INIT_LIST_HEAD(&dev->links.consumers); INIT_LIST_HEAD(&dev->links.suppliers); INIT_LIST_HEAD(&dev->links.defer_sync); dev->links.status = DL_DEV_NO_DRIVER; #if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \ defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \ defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) dev->dma_coherent = dma_default_coherent; #endif swiotlb_dev_init(dev); } EXPORT_SYMBOL_GPL(device_initialize); struct kobject *virtual_device_parent(void) { static struct kobject *virtual_dir = NULL; if (!virtual_dir) virtual_dir = kobject_create_and_add("virtual", &devices_kset->kobj); return virtual_dir; } struct class_dir { struct kobject kobj; const struct class *class; }; #define to_class_dir(obj) container_of(obj, struct class_dir, kobj) static void class_dir_release(struct kobject *kobj) { struct class_dir *dir = to_class_dir(kobj); kfree(dir); } static const struct kobj_ns_type_operations *class_dir_child_ns_type(const struct kobject *kobj) { const struct class_dir *dir = to_class_dir(kobj); return dir->class->ns_type; } static const struct kobj_type class_dir_ktype = { .release = class_dir_release, .sysfs_ops = &kobj_sysfs_ops, .child_ns_type = class_dir_child_ns_type }; static struct kobject *class_dir_create_and_add(struct subsys_private *sp, struct kobject *parent_kobj) { struct class_dir *dir; int retval; dir = kzalloc(sizeof(*dir), GFP_KERNEL); if (!dir) return ERR_PTR(-ENOMEM); dir->class = sp->class; kobject_init(&dir->kobj, &class_dir_ktype); dir->kobj.kset = &sp->glue_dirs; retval = kobject_add(&dir->kobj, parent_kobj, "%s", sp->class->name); if (retval < 0) { kobject_put(&dir->kobj); return ERR_PTR(retval); } return &dir->kobj; } static DEFINE_MUTEX(gdp_mutex); static struct kobject *get_device_parent(struct device *dev, struct device *parent) { struct subsys_private *sp = class_to_subsys(dev->class); struct kobject *kobj = NULL; if (sp) { struct kobject *parent_kobj; struct kobject *k; /* * If we have no parent, we live in "virtual". * Class-devices with a non class-device as parent, live * in a "glue" directory to prevent namespace collisions. 
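 *
 * For example, a "hwmon" class device whose parent is a PCI device
 * shows up in sysfs as .../<pci device>/hwmon/<hwmon device>; the
 * intermediate "hwmon" directory is such a glue directory.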
*/ if (parent == NULL) parent_kobj = virtual_device_parent(); else if (parent->class && !dev->class->ns_type) { subsys_put(sp); return &parent->kobj; } else { parent_kobj = &parent->kobj; } mutex_lock(&gdp_mutex); /* find our class-directory at the parent and reference it */ spin_lock(&sp->glue_dirs.list_lock); list_for_each_entry(k, &sp->glue_dirs.list, entry) if (k->parent == parent_kobj) { kobj = kobject_get(k); break; } spin_unlock(&sp->glue_dirs.list_lock); if (kobj) { mutex_unlock(&gdp_mutex); subsys_put(sp); return kobj; } /* or create a new class-directory at the parent device */ k = class_dir_create_and_add(sp, parent_kobj); /* do not emit an uevent for this simple "glue" directory */ mutex_unlock(&gdp_mutex); subsys_put(sp); return k; } /* subsystems can specify a default root directory for their devices */ if (!parent && dev->bus) { struct device *dev_root = bus_get_dev_root(dev->bus); if (dev_root) { kobj = &dev_root->kobj; put_device(dev_root); return kobj; } } if (parent) return &parent->kobj; return NULL; } static inline bool live_in_glue_dir(struct kobject *kobj, struct device *dev) { struct subsys_private *sp; bool retval; if (!kobj || !dev->class) return false; sp = class_to_subsys(dev->class); if (!sp) return false; if (kobj->kset == &sp->glue_dirs) retval = true; else retval = false; subsys_put(sp); return retval; } static inline struct kobject *get_glue_dir(struct device *dev) { return dev->kobj.parent; } /** * kobject_has_children - Returns whether a kobject has children. * @kobj: the object to test * * This will return whether a kobject has other kobjects as children. * * It does NOT account for the presence of attribute files, only sub * directories. It also assumes there is no concurrent addition or * removal of such children, and thus relies on external locking. */ static inline bool kobject_has_children(struct kobject *kobj) { WARN_ON_ONCE(kref_read(&kobj->kref) == 0); return kobj->sd && kobj->sd->dir.subdirs; } /* * Make sure cleaning up the glue dir is the last step; the kobject's * .release handler needs to run while the global lock is held. */ static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir) { unsigned int ref; /* see if we live in a "glue" directory */ if (!live_in_glue_dir(glue_dir, dev)) return; mutex_lock(&gdp_mutex); /** * There is a race condition between removing a glue directory * and adding a new device under it. * * CPU1: CPU2: * * device_add() * get_device_parent() * class_dir_create_and_add() * kobject_add_internal() * create_dir() // create glue_dir * * device_add() * get_device_parent() * kobject_get() // get glue_dir * * device_del() * cleanup_glue_dir() * kobject_del(glue_dir) * * kobject_add() * kobject_add_internal() * create_dir() // in glue_dir * sysfs_create_dir_ns() * kernfs_create_dir_ns(sd) * * sysfs_remove_dir() // glue_dir->sd=NULL * sysfs_put() // free glue_dir->sd * * // sd is freed * kernfs_new_node(sd) * kernfs_get(glue_dir) * kernfs_add_one() * kernfs_put() * * Before CPU1 removes the last child device under the glue dir, if * CPU2 adds a new device under the glue dir, the glue_dir kobject * reference count is increased to 2 in kobject_get(k) and CPU2 has * already called kernfs_create_dir_ns(). Meanwhile, CPU1 calls * sysfs_remove_dir() and sysfs_put(), which frees glue_dir->sd. * * CPU2 will then see a stale "empty" but still potentially used * glue dir in kernfs_new_node(). 
* * In order to avoid this happening, we also should make sure that * kernfs_node for glue_dir is released in CPU1 only when refcount * for glue_dir kobj is 1. */ ref = kref_read(&glue_dir->kref); if (!kobject_has_children(glue_dir) && !--ref) kobject_del(glue_dir); kobject_put(glue_dir); mutex_unlock(&gdp_mutex); } static int device_add_class_symlinks(struct device *dev) { struct device_node *of_node = dev_of_node(dev); struct subsys_private *sp; int error; if (of_node) { error = sysfs_create_link(&dev->kobj, of_node_kobj(of_node), "of_node"); if (error) dev_warn(dev, "Error %d creating of_node link\n",error); /* An error here doesn't warrant bringing down the device */ } sp = class_to_subsys(dev->class); if (!sp) return 0; error = sysfs_create_link(&dev->kobj, &sp->subsys.kobj, "subsystem"); if (error) goto out_devnode; if (dev->parent && device_is_not_partition(dev)) { error = sysfs_create_link(&dev->kobj, &dev->parent->kobj, "device"); if (error) goto out_subsys; } /* link in the class directory pointing to the device */ error = sysfs_create_link(&sp->subsys.kobj, &dev->kobj, dev_name(dev)); if (error) goto out_device; goto exit; out_device: sysfs_remove_link(&dev->kobj, "device"); out_subsys: sysfs_remove_link(&dev->kobj, "subsystem"); out_devnode: sysfs_remove_link(&dev->kobj, "of_node"); exit: subsys_put(sp); return error; } static void device_remove_class_symlinks(struct device *dev) { struct subsys_private *sp = class_to_subsys(dev->class); if (dev_of_node(dev)) sysfs_remove_link(&dev->kobj, "of_node"); if (!sp) return; if (dev->parent && device_is_not_partition(dev)) sysfs_remove_link(&dev->kobj, "device"); sysfs_remove_link(&dev->kobj, "subsystem"); sysfs_delete_link(&sp->subsys.kobj, &dev->kobj, dev_name(dev)); subsys_put(sp); } /** * dev_set_name - set a device name * @dev: device * @fmt: format string for the device's name */ int dev_set_name(struct device *dev, const char *fmt, ...) { va_list vargs; int err; va_start(vargs, fmt); err = kobject_set_name_vargs(&dev->kobj, fmt, vargs); va_end(vargs); return err; } EXPORT_SYMBOL_GPL(dev_set_name); /* select a /sys/dev/ directory for the device */ static struct kobject *device_to_dev_kobj(struct device *dev) { if (is_blockdev(dev)) return sysfs_dev_block_kobj; else return sysfs_dev_char_kobj; } static int device_create_sys_dev_entry(struct device *dev) { struct kobject *kobj = device_to_dev_kobj(dev); int error = 0; char devt_str[15]; if (kobj) { format_dev_t(devt_str, dev->devt); error = sysfs_create_link(kobj, &dev->kobj, devt_str); } return error; } static void device_remove_sys_dev_entry(struct device *dev) { struct kobject *kobj = device_to_dev_kobj(dev); char devt_str[15]; if (kobj) { format_dev_t(devt_str, dev->devt); sysfs_remove_link(kobj, devt_str); } } static int device_private_init(struct device *dev) { dev->p = kzalloc(sizeof(*dev->p), GFP_KERNEL); if (!dev->p) return -ENOMEM; dev->p->device = dev; klist_init(&dev->p->klist_children, klist_children_get, klist_children_put); INIT_LIST_HEAD(&dev->p->deferred_probe); return 0; } /** * device_add - add device to device hierarchy. * @dev: device. * * This is part 2 of device_register(), though may be called * separately _iff_ device_initialize() has been called separately. * * This adds @dev to the kobject hierarchy via kobject_add(), adds it * to the global and sibling lists for the device, then * adds it to the other relevant subsystems of the driver model. * * Do not call this routine or device_register() more than once for * any device structure. 
The driver model core is not designed to work * with devices that get unregistered and then spring back to life. * (Among other things, it's very hard to guarantee that all references * to the previous incarnation of @dev have been dropped.) Allocate * and register a fresh new struct device instead. * * NOTE: _Never_ directly free @dev after calling this function, even * if it returned an error! Always use put_device() to give up your * reference instead. * * Rule of thumb is: if device_add() succeeds, you should call * device_del() when you want to get rid of it. If device_add() has * *not* succeeded, use *only* put_device() to drop the reference * count. */ int device_add(struct device *dev) { struct subsys_private *sp; struct device *parent; struct kobject *kobj; struct class_interface *class_intf; int error = -EINVAL; struct kobject *glue_dir = NULL; dev = get_device(dev); if (!dev) goto done; if (!dev->p) { error = device_private_init(dev); if (error) goto done; } /* * for statically allocated devices, which should all be converted * some day, we need to initialize the name. We prevent reading back * the name, and force the use of dev_name() */ if (dev->init_name) { error = dev_set_name(dev, "%s", dev->init_name); dev->init_name = NULL; } if (dev_name(dev)) error = 0; /* subsystems can specify simple device enumeration */ else if (dev->bus && dev->bus->dev_name) error = dev_set_name(dev, "%s%u", dev->bus->dev_name, dev->id); else error = -EINVAL; if (error) goto name_error; pr_debug("device: '%s': %s\n", dev_name(dev), __func__); parent = get_device(dev->parent); kobj = get_device_parent(dev, parent); if (IS_ERR(kobj)) { error = PTR_ERR(kobj); goto parent_error; } if (kobj) dev->kobj.parent = kobj; /* use parent numa_node */ if (parent && (dev_to_node(dev) == NUMA_NO_NODE)) set_dev_node(dev, dev_to_node(parent)); /* first, register with generic layer. */ /* we require the name to be set before, and pass NULL */ error = kobject_add(&dev->kobj, dev->kobj.parent, NULL); if (error) { glue_dir = kobj; goto Error; } /* notify platform of device entry */ device_platform_notify(dev); error = device_create_file(dev, &dev_attr_uevent); if (error) goto attrError; error = device_add_class_symlinks(dev); if (error) goto SymlinkError; error = device_add_attrs(dev); if (error) goto AttrsError; error = bus_add_device(dev); if (error) goto BusError; error = dpm_sysfs_add(dev); if (error) goto DPMError; device_pm_add(dev); if (MAJOR(dev->devt)) { error = device_create_file(dev, &dev_attr_dev); if (error) goto DevAttrError; error = device_create_sys_dev_entry(dev); if (error) goto SysEntryError; devtmpfs_create_node(dev); } /* Notify clients of device addition. This call must come * after dpm_sysfs_add() and before kobject_uevent(). */ bus_notify(dev, BUS_NOTIFY_ADD_DEVICE); kobject_uevent(&dev->kobj, KOBJ_ADD); /* * Check if any of the other devices (consumers) have been waiting for * this device (supplier) to be added so that they can create a device * link to it. * * This needs to happen after device_pm_add() because device_link_add() * requires the supplier be registered before it's called. * * But this also needs to happen before bus_probe_device() to make sure * waiting consumers can link to it before the driver is bound to the * device and the driver sync_state callback is called for this device. 
*/ if (dev->fwnode && !dev->fwnode->dev) { dev->fwnode->dev = dev; fw_devlink_link_device(dev); } bus_probe_device(dev); /* * If all driver registration is done and a newly added device doesn't * match with any driver, don't block its consumers from probing in * case the consumer device is able to operate without this supplier. */ if (dev->fwnode && fw_devlink_drv_reg_done && !dev->can_match) fw_devlink_unblock_consumers(dev); if (parent) klist_add_tail(&dev->p->knode_parent, &parent->p->klist_children); sp = class_to_subsys(dev->class); if (sp) { mutex_lock(&sp->mutex); /* tie the class to the device */ klist_add_tail(&dev->p->knode_class, &sp->klist_devices); /* notify any interfaces that the device is here */ list_for_each_entry(class_intf, &sp->interfaces, node) if (class_intf->add_dev) class_intf->add_dev(dev); mutex_unlock(&sp->mutex); subsys_put(sp); } done: put_device(dev); return error; SysEntryError: if (MAJOR(dev->devt)) device_remove_file(dev, &dev_attr_dev); DevAttrError: device_pm_remove(dev); dpm_sysfs_remove(dev); DPMError: device_set_driver(dev, NULL); bus_remove_device(dev); BusError: device_remove_attrs(dev); AttrsError: device_remove_class_symlinks(dev); SymlinkError: device_remove_file(dev, &dev_attr_uevent); attrError: device_platform_notify_remove(dev); kobject_uevent(&dev->kobj, KOBJ_REMOVE); glue_dir = get_glue_dir(dev); kobject_del(&dev->kobj); Error: cleanup_glue_dir(dev, glue_dir); parent_error: put_device(parent); name_error: kfree(dev->p); dev->p = NULL; goto done; } EXPORT_SYMBOL_GPL(device_add); /** * device_register - register a device with the system. * @dev: pointer to the device structure * * This happens in two clean steps - initialize the device * and add it to the system. The two steps can be called * separately, but this is the easiest and most common. * I.e. you should only call the two helpers separately if * have a clearly defined need to use and refcount the device * before it is added to the hierarchy. * * For more information, see the kerneldoc for device_initialize() * and device_add(). * * NOTE: _Never_ directly free @dev after calling this function, even * if it returned an error! Always use put_device() to give up the * reference initialized in this function instead. */ int device_register(struct device *dev) { device_initialize(dev); return device_add(dev); } EXPORT_SYMBOL_GPL(device_register); /** * get_device - increment reference count for device. * @dev: device. * * This simply forwards the call to kobject_get(), though * we do take care to provide for the case that we get a NULL * pointer passed in. */ struct device *get_device(struct device *dev) { return dev ? kobj_to_dev(kobject_get(&dev->kobj)) : NULL; } EXPORT_SYMBOL_GPL(get_device); /** * put_device - decrement reference count. * @dev: device in question. */ void put_device(struct device *dev) { /* might_sleep(); */ if (dev) kobject_put(&dev->kobj); } EXPORT_SYMBOL_GPL(put_device); bool kill_device(struct device *dev) { /* * Require the device lock and set the "dead" flag to guarantee that * the update behavior is consistent with the other bitfields near * it and that we cannot have an asynchronous probe routine trying * to run while we are tearing out the bus/class/sysfs from * underneath the device. */ device_lock_assert(dev); if (dev->p->dead) return false; dev->p->dead = true; return true; } EXPORT_SYMBOL_GPL(kill_device); /** * device_del - delete device from system. * @dev: device. * * This is the first part of the device unregistration * sequence. 
This removes the device from the lists we control * from here, has it removed from the other driver model * subsystems it was added to in device_add(), and removes it * from the kobject hierarchy. * * NOTE: this should be called manually _iff_ device_add() was * also called manually. */ void device_del(struct device *dev) { struct subsys_private *sp; struct device *parent = dev->parent; struct kobject *glue_dir = NULL; struct class_interface *class_intf; unsigned int noio_flag; device_lock(dev); kill_device(dev); device_unlock(dev); if (dev->fwnode && dev->fwnode->dev == dev) dev->fwnode->dev = NULL; /* Notify clients of device removal. This call must come * before dpm_sysfs_remove(). */ noio_flag = memalloc_noio_save(); bus_notify(dev, BUS_NOTIFY_DEL_DEVICE); dpm_sysfs_remove(dev); if (parent) klist_del(&dev->p->knode_parent); if (MAJOR(dev->devt)) { devtmpfs_delete_node(dev); device_remove_sys_dev_entry(dev); device_remove_file(dev, &dev_attr_dev); } sp = class_to_subsys(dev->class); if (sp) { device_remove_class_symlinks(dev); mutex_lock(&sp->mutex); /* notify any interfaces that the device is now gone */ list_for_each_entry(class_intf, &sp->interfaces, node) if (class_intf->remove_dev) class_intf->remove_dev(dev); /* remove the device from the class list */ klist_del(&dev->p->knode_class); mutex_unlock(&sp->mutex); subsys_put(sp); } device_remove_file(dev, &dev_attr_uevent); device_remove_attrs(dev); bus_remove_device(dev); device_pm_remove(dev); driver_deferred_probe_del(dev); device_platform_notify_remove(dev); device_links_purge(dev); /* * If a device does not have a driver attached, we need to clean * up any managed resources. We do this in device_release(), but * it's never called (and we leak the device) if a managed * resource holds a reference to the device. So release all * managed resources here, like we do in driver_detach(). We * still need to do so again in device_release() in case someone * adds a new resource after this point, though. */ devres_release_all(dev); bus_notify(dev, BUS_NOTIFY_REMOVED_DEVICE); kobject_uevent(&dev->kobj, KOBJ_REMOVE); glue_dir = get_glue_dir(dev); kobject_del(&dev->kobj); cleanup_glue_dir(dev, glue_dir); memalloc_noio_restore(noio_flag); put_device(parent); } EXPORT_SYMBOL_GPL(device_del); /** * device_unregister - unregister device from system. * @dev: device going away. * * We do this in two parts, like we do device_register(). First, * we remove it from all the subsystems with device_del(), then * we decrement the reference count via put_device(). If that * is the final reference count, the device will be cleaned up * via device_release() above. Otherwise, the structure will * stick around until the final reference to the device is dropped. 
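 *
 * A sketch of the usual embedding pattern (the "foo" names are
 * illustrative only):
 *
 *	struct foo {
 *		struct device dev;
 *	};
 *
 *	static void foo_release(struct device *dev)
 *	{
 *		kfree(container_of(dev, struct foo, dev));
 *	}
 *
 *	foo = kzalloc(sizeof(*foo), GFP_KERNEL);
 *	foo->dev.release = foo_release;
 *	err = device_register(&foo->dev);
 *	if (err) {
 *		put_device(&foo->dev);
 *		return err;
 *	}
 *	...
 *	device_unregister(&foo->dev);
 *
 * Note that the error path uses put_device(), not kfree(), as described
 * in the device_register() documentation above.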
*/ void device_unregister(struct device *dev) { pr_debug("device: '%s': %s\n", dev_name(dev), __func__); device_del(dev); put_device(dev); } EXPORT_SYMBOL_GPL(device_unregister); static struct device *prev_device(struct klist_iter *i) { struct klist_node *n = klist_prev(i); struct device *dev = NULL; struct device_private *p; if (n) { p = to_device_private_parent(n); dev = p->device; } return dev; } static struct device *next_device(struct klist_iter *i) { struct klist_node *n = klist_next(i); struct device *dev = NULL; struct device_private *p; if (n) { p = to_device_private_parent(n); dev = p->device; } return dev; } /** * device_get_devnode - path of device node file * @dev: device * @mode: returned file access mode * @uid: returned file owner * @gid: returned file group * @tmp: possibly allocated string * * Return the relative path of a possible device node. * Non-default names may need to allocate a memory to compose * a name. This memory is returned in tmp and needs to be * freed by the caller. */ const char *device_get_devnode(const struct device *dev, umode_t *mode, kuid_t *uid, kgid_t *gid, const char **tmp) { char *s; *tmp = NULL; /* the device type may provide a specific name */ if (dev->type && dev->type->devnode) *tmp = dev->type->devnode(dev, mode, uid, gid); if (*tmp) return *tmp; /* the class may provide a specific name */ if (dev->class && dev->class->devnode) *tmp = dev->class->devnode(dev, mode); if (*tmp) return *tmp; /* return name without allocation, tmp == NULL */ if (strchr(dev_name(dev), '!') == NULL) return dev_name(dev); /* replace '!' in the name with '/' */ s = kstrdup_and_replace(dev_name(dev), '!', '/', GFP_KERNEL); if (!s) return NULL; return *tmp = s; } /** * device_for_each_child - device child iterator. * @parent: parent struct device. * @fn: function to be called for each device. * @data: data for the callback. * * Iterate over @parent's child devices, and call @fn for each, * passing it @data. * * We check the return of @fn each time. If it returns anything * other than 0, we break out and return that value. */ int device_for_each_child(struct device *parent, void *data, device_iter_t fn) { struct klist_iter i; struct device *child; int error = 0; if (!parent || !parent->p) return 0; klist_iter_init(&parent->p->klist_children, &i); while (!error && (child = next_device(&i))) error = fn(child, data); klist_iter_exit(&i); return error; } EXPORT_SYMBOL_GPL(device_for_each_child); /** * device_for_each_child_reverse - device child iterator in reversed order. * @parent: parent struct device. * @fn: function to be called for each device. * @data: data for the callback. * * Iterate over @parent's child devices, and call @fn for each, * passing it @data. * * We check the return of @fn each time. If it returns anything * other than 0, we break out and return that value. */ int device_for_each_child_reverse(struct device *parent, void *data, device_iter_t fn) { struct klist_iter i; struct device *child; int error = 0; if (!parent || !parent->p) return 0; klist_iter_init(&parent->p->klist_children, &i); while ((child = prev_device(&i)) && !error) error = fn(child, data); klist_iter_exit(&i); return error; } EXPORT_SYMBOL_GPL(device_for_each_child_reverse); /** * device_for_each_child_reverse_from - device child iterator in reversed order. * @parent: parent struct device. * @from: optional starting point in child list * @fn: function to be called for each device. * @data: data for the callback. 
* * Iterate over @parent's child devices, starting at @from, and call @fn * for each, passing it @data. This helper is identical to * device_for_each_child_reverse() when @from is NULL. * * @fn is checked each iteration. If it returns anything other than 0, * iteration stop and that value is returned to the caller of * device_for_each_child_reverse_from(); */ int device_for_each_child_reverse_from(struct device *parent, struct device *from, void *data, device_iter_t fn) { struct klist_iter i; struct device *child; int error = 0; if (!parent || !parent->p) return 0; klist_iter_init_node(&parent->p->klist_children, &i, (from ? &from->p->knode_parent : NULL)); while ((child = prev_device(&i)) && !error) error = fn(child, data); klist_iter_exit(&i); return error; } EXPORT_SYMBOL_GPL(device_for_each_child_reverse_from); /** * device_find_child - device iterator for locating a particular device. * @parent: parent struct device * @match: Callback function to check device * @data: Data to pass to match function * * This is similar to the device_for_each_child() function above, but it * returns a reference to a device that is 'found' for later use, as * determined by the @match callback. * * The callback should return 0 if the device doesn't match and non-zero * if it does. If the callback returns non-zero and a reference to the * current device can be obtained, this function will return to the caller * and not iterate over any more devices. * * NOTE: you will need to drop the reference with put_device() after use. */ struct device *device_find_child(struct device *parent, const void *data, device_match_t match) { struct klist_iter i; struct device *child; if (!parent || !parent->p) return NULL; klist_iter_init(&parent->p->klist_children, &i); while ((child = next_device(&i))) { if (match(child, data)) { get_device(child); break; } } klist_iter_exit(&i); return child; } EXPORT_SYMBOL_GPL(device_find_child); int __init devices_init(void) { devices_kset = kset_create_and_add("devices", &device_uevent_ops, NULL); if (!devices_kset) return -ENOMEM; dev_kobj = kobject_create_and_add("dev", NULL); if (!dev_kobj) goto dev_kobj_err; sysfs_dev_block_kobj = kobject_create_and_add("block", dev_kobj); if (!sysfs_dev_block_kobj) goto block_kobj_err; sysfs_dev_char_kobj = kobject_create_and_add("char", dev_kobj); if (!sysfs_dev_char_kobj) goto char_kobj_err; device_link_wq = alloc_workqueue("device_link_wq", 0, 0); if (!device_link_wq) goto wq_err; return 0; wq_err: kobject_put(sysfs_dev_char_kobj); char_kobj_err: kobject_put(sysfs_dev_block_kobj); block_kobj_err: kobject_put(dev_kobj); dev_kobj_err: kset_unregister(devices_kset); return -ENOMEM; } static int device_check_offline(struct device *dev, void *not_used) { int ret; ret = device_for_each_child(dev, NULL, device_check_offline); if (ret) return ret; return device_supports_offline(dev) && !dev->offline ? -EBUSY : 0; } /** * device_offline - Prepare the device for hot-removal. * @dev: Device to be put offline. * * Execute the device bus type's .offline() callback, if present, to prepare * the device for a subsequent hot-removal. If that succeeds, the device must * not be used until either it is removed or its bus type's .online() callback * is executed. * * Call under device_hotplug_lock. 
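 *
 * A sketch of a typical caller (a sysfs store method would use
 * lock_device_hotplug_sysfs() instead):
 *
 *	lock_device_hotplug();
 *	ret = device_offline(dev);
 *	unlock_device_hotplug();
 *
 * where a negative return is an error, 1 means the device was already
 * offline, and 0 means it has just been taken offline (or does not
 * support offlining).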
*/ int device_offline(struct device *dev) { int ret; if (dev->offline_disabled) return -EPERM; ret = device_for_each_child(dev, NULL, device_check_offline); if (ret) return ret; device_lock(dev); if (device_supports_offline(dev)) { if (dev->offline) { ret = 1; } else { ret = dev->bus->offline(dev); if (!ret) { kobject_uevent(&dev->kobj, KOBJ_OFFLINE); dev->offline = true; } } } device_unlock(dev); return ret; } /** * device_online - Put the device back online after successful device_offline(). * @dev: Device to be put back online. * * If device_offline() has been successfully executed for @dev, but the device * has not been removed subsequently, execute its bus type's .online() callback * to indicate that the device can be used again. * * Call under device_hotplug_lock. */ int device_online(struct device *dev) { int ret = 0; device_lock(dev); if (device_supports_offline(dev)) { if (dev->offline) { ret = dev->bus->online(dev); if (!ret) { kobject_uevent(&dev->kobj, KOBJ_ONLINE); dev->offline = false; } } else { ret = 1; } } device_unlock(dev); return ret; } struct root_device { struct device dev; struct module *owner; }; static inline struct root_device *to_root_device(struct device *d) { return container_of(d, struct root_device, dev); } static void root_device_release(struct device *dev) { kfree(to_root_device(dev)); } /** * __root_device_register - allocate and register a root device * @name: root device name * @owner: owner module of the root device, usually THIS_MODULE * * This function allocates a root device and registers it * using device_register(). In order to free the returned * device, use root_device_unregister(). * * Root devices are dummy devices which allow other devices * to be grouped under /sys/devices. Use this function to * allocate a root device and then use it as the parent of * any device which should appear under /sys/devices/{name} * * The /sys/devices/{name} directory will also contain a * 'module' symlink which points to the @owner directory * in sysfs. * * Returns &struct device pointer on success, or ERR_PTR() on error. * * Note: You probably want to use root_device_register(). */ struct device *__root_device_register(const char *name, struct module *owner) { struct root_device *root; int err = -ENOMEM; root = kzalloc(sizeof(struct root_device), GFP_KERNEL); if (!root) return ERR_PTR(err); err = dev_set_name(&root->dev, "%s", name); if (err) { kfree(root); return ERR_PTR(err); } root->dev.release = root_device_release; err = device_register(&root->dev); if (err) { put_device(&root->dev); return ERR_PTR(err); } #ifdef CONFIG_MODULES /* gotta find a "cleaner" way to do this */ if (owner) { struct module_kobject *mk = &owner->mkobj; err = sysfs_create_link(&root->dev.kobj, &mk->kobj, "module"); if (err) { device_unregister(&root->dev); return ERR_PTR(err); } root->owner = owner; } #endif return &root->dev; } EXPORT_SYMBOL_GPL(__root_device_register); /** * root_device_unregister - unregister and free a root device * @dev: device going away * * This function unregisters and cleans up a device that was created by * root_device_register(). 
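 *
 * Typical usage (illustrative; "foo" is a made-up name):
 *
 *	root = root_device_register("foo");
 *	if (IS_ERR(root))
 *		return PTR_ERR(root);
 *
 *	child->parent = root;
 *	err = device_register(child);
 *	...
 *	root_device_unregister(root);
 *
 * with @child then appearing under /sys/devices/foo/.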
*/ void root_device_unregister(struct device *dev) { struct root_device *root = to_root_device(dev); if (root->owner) sysfs_remove_link(&root->dev.kobj, "module"); device_unregister(dev); } EXPORT_SYMBOL_GPL(root_device_unregister); static void device_create_release(struct device *dev) { pr_debug("device: '%s': %s\n", dev_name(dev), __func__); kfree(dev); } static __printf(6, 0) struct device * device_create_groups_vargs(const struct class *class, struct device *parent, dev_t devt, void *drvdata, const struct attribute_group **groups, const char *fmt, va_list args) { struct device *dev = NULL; int retval = -ENODEV; if (IS_ERR_OR_NULL(class)) goto error; dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) { retval = -ENOMEM; goto error; } device_initialize(dev); dev->devt = devt; dev->class = class; dev->parent = parent; dev->groups = groups; dev->release = device_create_release; dev_set_drvdata(dev, drvdata); retval = kobject_set_name_vargs(&dev->kobj, fmt, args); if (retval) goto error; retval = device_add(dev); if (retval) goto error; return dev; error: put_device(dev); return ERR_PTR(retval); } /** * device_create - creates a device and registers it with sysfs * @class: pointer to the struct class that this device should be registered to * @parent: pointer to the parent struct device of this new device, if any * @devt: the dev_t for the char device to be added * @drvdata: the data to be added to the device for callbacks * @fmt: string for the device's name * * This function can be used by char device classes. A struct device * will be created in sysfs, registered to the specified class. * * A "dev" file will be created, showing the dev_t for the device, if * the dev_t is not 0,0. * If a pointer to a parent struct device is passed in, the newly created * struct device will be a child of that device in sysfs. * The pointer to the struct device will be returned from the call. * Any further sysfs files that might be required can be created using this * pointer. * * Returns &struct device pointer on success, or ERR_PTR() on error. */ struct device *device_create(const struct class *class, struct device *parent, dev_t devt, void *drvdata, const char *fmt, ...) { va_list vargs; struct device *dev; va_start(vargs, fmt); dev = device_create_groups_vargs(class, parent, devt, drvdata, NULL, fmt, vargs); va_end(vargs); return dev; } EXPORT_SYMBOL_GPL(device_create); /** * device_create_with_groups - creates a device and registers it with sysfs * @class: pointer to the struct class that this device should be registered to * @parent: pointer to the parent struct device of this new device, if any * @devt: the dev_t for the char device to be added * @drvdata: the data to be added to the device for callbacks * @groups: NULL-terminated list of attribute groups to be created * @fmt: string for the device's name * * This function can be used by char device classes. A struct device * will be created in sysfs, registered to the specified class. * Additional attributes specified in the groups parameter will also * be created automatically. * * A "dev" file will be created, showing the dev_t for the device, if * the dev_t is not 0,0. * If a pointer to a parent struct device is passed in, the newly created * struct device will be a child of that device in sysfs. * The pointer to the struct device will be returned from the call. * Any further sysfs files that might be required can be created using this * pointer. * * Returns &struct device pointer on success, or ERR_PTR() on error. 
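 *
 * A minimal sketch (all "foo" identifiers are hypothetical):
 *
 *	dev = device_create_with_groups(foo_class, NULL,
 *					MKDEV(foo_major, 0), foo_drvdata,
 *					foo_groups, "foo%d", 0);
 *	if (IS_ERR(dev))
 *		return PTR_ERR(dev);
 *	...
 *	device_destroy(foo_class, MKDEV(foo_major, 0));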
*/ struct device *device_create_with_groups(const struct class *class, struct device *parent, dev_t devt, void *drvdata, const struct attribute_group **groups, const char *fmt, ...) { va_list vargs; struct device *dev; va_start(vargs, fmt); dev = device_create_groups_vargs(class, parent, devt, drvdata, groups, fmt, vargs); va_end(vargs); return dev; } EXPORT_SYMBOL_GPL(device_create_with_groups); /** * device_destroy - removes a device that was created with device_create() * @class: pointer to the struct class that this device was registered with * @devt: the dev_t of the device that was previously registered * * This call unregisters and cleans up a device that was created with a * call to device_create(). */ void device_destroy(const struct class *class, dev_t devt) { struct device *dev; dev = class_find_device_by_devt(class, devt); if (dev) { put_device(dev); device_unregister(dev); } } EXPORT_SYMBOL_GPL(device_destroy); /** * device_rename - renames a device * @dev: the pointer to the struct device to be renamed * @new_name: the new name of the device * * It is the responsibility of the caller to provide mutual * exclusion between two different calls of device_rename * on the same device to ensure that new_name is valid and * won't conflict with other devices. * * Note: given that some subsystems (networking and infiniband) use this * function, with no immediate plans for this to change, we cannot assume or * require that this function not be called at all. * * However, if you're writing new code, do not call this function. The following * text from Kay Sievers offers some insight: * * Renaming devices is racy at many levels, symlinks and other stuff are not * replaced atomically, and you get a "move" uevent, but it's not easy to * connect the event to the old and new device. Device nodes are not renamed at * all, there isn't even support for that in the kernel now. * * In the meantime, during renaming, your target name might be taken by another * driver, creating conflicts. Or the old name is taken directly after you * renamed it -- then you get events for the same DEVPATH, before you even see * the "move" event. It's just a mess, and nothing new should ever rely on * kernel device renaming. Besides that, it's not even implemented now for * other things than (driver-core wise very simple) network devices. * * Make up a "real" name in the driver before you register anything, or add * some other attributes for userspace to find the device, or use udev to add * symlinks -- but never rename kernel devices later, it's a complete mess. We * don't even want to get into that and try to implement the missing pieces in * the core. We really have other pieces to fix in the driver core mess. 
:) */ int device_rename(struct device *dev, const char *new_name) { struct subsys_private *sp = NULL; struct kobject *kobj = &dev->kobj; char *old_device_name = NULL; int error; bool is_link_renamed = false; dev = get_device(dev); if (!dev) return -EINVAL; dev_dbg(dev, "renaming to %s\n", new_name); old_device_name = kstrdup(dev_name(dev), GFP_KERNEL); if (!old_device_name) { error = -ENOMEM; goto out; } if (dev->class) { sp = class_to_subsys(dev->class); if (!sp) { error = -EINVAL; goto out; } error = sysfs_rename_link_ns(&sp->subsys.kobj, kobj, old_device_name, new_name, kobject_namespace(kobj)); if (error) goto out; is_link_renamed = true; } error = kobject_rename(kobj, new_name); out: if (error && is_link_renamed) sysfs_rename_link_ns(&sp->subsys.kobj, kobj, new_name, old_device_name, kobject_namespace(kobj)); subsys_put(sp); put_device(dev); kfree(old_device_name); return error; } EXPORT_SYMBOL_GPL(device_rename); static int device_move_class_links(struct device *dev, struct device *old_parent, struct device *new_parent) { int error = 0; if (old_parent) sysfs_remove_link(&dev->kobj, "device"); if (new_parent) error = sysfs_create_link(&dev->kobj, &new_parent->kobj, "device"); return error; } /** * device_move - moves a device to a new parent * @dev: the pointer to the struct device to be moved * @new_parent: the new parent of the device (can be NULL) * @dpm_order: how to reorder the dpm_list */ int device_move(struct device *dev, struct device *new_parent, enum dpm_order dpm_order) { int error; struct device *old_parent; struct kobject *new_parent_kobj; dev = get_device(dev); if (!dev) return -EINVAL; device_pm_lock(); new_parent = get_device(new_parent); new_parent_kobj = get_device_parent(dev, new_parent); if (IS_ERR(new_parent_kobj)) { error = PTR_ERR(new_parent_kobj); put_device(new_parent); goto out; } pr_debug("device: '%s': %s: moving to '%s'\n", dev_name(dev), __func__, new_parent ? dev_name(new_parent) : "<NULL>"); error = kobject_move(&dev->kobj, new_parent_kobj); if (error) { cleanup_glue_dir(dev, new_parent_kobj); put_device(new_parent); goto out; } old_parent = dev->parent; dev->parent = new_parent; if (old_parent) klist_remove(&dev->p->knode_parent); if (new_parent) { klist_add_tail(&dev->p->knode_parent, &new_parent->p->klist_children); set_dev_node(dev, dev_to_node(new_parent)); } if (dev->class) { error = device_move_class_links(dev, old_parent, new_parent); if (error) { /* We ignore errors on cleanup since we're hosed anyway... 
*/ device_move_class_links(dev, new_parent, old_parent); if (!kobject_move(&dev->kobj, &old_parent->kobj)) { if (new_parent) klist_remove(&dev->p->knode_parent); dev->parent = old_parent; if (old_parent) { klist_add_tail(&dev->p->knode_parent, &old_parent->p->klist_children); set_dev_node(dev, dev_to_node(old_parent)); } } cleanup_glue_dir(dev, new_parent_kobj); put_device(new_parent); goto out; } } switch (dpm_order) { case DPM_ORDER_NONE: break; case DPM_ORDER_DEV_AFTER_PARENT: device_pm_move_after(dev, new_parent); devices_kset_move_after(dev, new_parent); break; case DPM_ORDER_PARENT_BEFORE_DEV: device_pm_move_before(new_parent, dev); devices_kset_move_before(new_parent, dev); break; case DPM_ORDER_DEV_LAST: device_pm_move_last(dev); devices_kset_move_last(dev); break; } put_device(old_parent); out: device_pm_unlock(); put_device(dev); return error; } EXPORT_SYMBOL_GPL(device_move); static int device_attrs_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid) { struct kobject *kobj = &dev->kobj; const struct class *class = dev->class; const struct device_type *type = dev->type; int error; if (class) { /* * Change the device groups of the device class for @dev to * @kuid/@kgid. */ error = sysfs_groups_change_owner(kobj, class->dev_groups, kuid, kgid); if (error) return error; } if (type) { /* * Change the device groups of the device type for @dev to * @kuid/@kgid. */ error = sysfs_groups_change_owner(kobj, type->groups, kuid, kgid); if (error) return error; } /* Change the device groups of @dev to @kuid/@kgid. */ error = sysfs_groups_change_owner(kobj, dev->groups, kuid, kgid); if (error) return error; if (device_supports_offline(dev) && !dev->offline_disabled) { /* Change online device attributes of @dev to @kuid/@kgid. */ error = sysfs_file_change_owner(kobj, dev_attr_online.attr.name, kuid, kgid); if (error) return error; } return 0; } /** * device_change_owner - change the owner of an existing device. * @dev: device. * @kuid: new owner's kuid * @kgid: new owner's kgid * * This changes the owner of @dev and its corresponding sysfs entries to * @kuid/@kgid. This function closely mirrors how @dev was added via driver * core. * * Returns 0 on success or error code on failure. */ int device_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid) { int error; struct kobject *kobj = &dev->kobj; struct subsys_private *sp; dev = get_device(dev); if (!dev) return -EINVAL; /* * Change the kobject and the default attributes and groups of the * ktype associated with it to @kuid/@kgid. */ error = sysfs_change_owner(kobj, kuid, kgid); if (error) goto out; /* * Change the uevent file for @dev to the new owner. The uevent file * was created in a separate step when @dev got added and we mirror * that step here. */ error = sysfs_file_change_owner(kobj, dev_attr_uevent.attr.name, kuid, kgid); if (error) goto out; /* * Change the device groups, the device groups associated with the * device class, and the groups associated with the device type of @dev * to @kuid/@kgid. */ error = device_attrs_change_owner(dev, kuid, kgid); if (error) goto out; error = dpm_sysfs_change_owner(dev, kuid, kgid); if (error) goto out; /* * Change the owner of the symlink located in the class directory of * the device class associated with @dev which points to the actual * directory entry for @dev to @kuid/@kgid. This ensures that the * symlink shows the same permissions as its target. 
*/ sp = class_to_subsys(dev->class); if (!sp) { error = -EINVAL; goto out; } error = sysfs_link_change_owner(&sp->subsys.kobj, &dev->kobj, dev_name(dev), kuid, kgid); subsys_put(sp); out: put_device(dev); return error; } EXPORT_SYMBOL_GPL(device_change_owner); /** * device_shutdown - call ->shutdown() on each device to shutdown. */ void device_shutdown(void) { struct device *dev, *parent; wait_for_device_probe(); device_block_probing(); cpufreq_suspend(); spin_lock(&devices_kset->list_lock); /* * Walk the devices list backward, shutting down each in turn. * Beware that device unplug events may also start pulling * devices offline, even as the system is shutting down. */ while (!list_empty(&devices_kset->list)) { dev = list_entry(devices_kset->list.prev, struct device, kobj.entry); /* * hold reference count of device's parent to * prevent it from being freed because parent's * lock is to be held */ parent = get_device(dev->parent); get_device(dev); /* * Make sure the device is off the kset list, in the * event that dev->*->shutdown() doesn't remove it. */ list_del_init(&dev->kobj.entry); spin_unlock(&devices_kset->list_lock); /* hold lock to avoid race with probe/release */ if (parent) device_lock(parent); device_lock(dev); /* Don't allow any more runtime suspends */ pm_runtime_get_noresume(dev); pm_runtime_barrier(dev); if (dev->class && dev->class->shutdown_pre) { if (initcall_debug) dev_info(dev, "shutdown_pre\n"); dev->class->shutdown_pre(dev); } if (dev->bus && dev->bus->shutdown) { if (initcall_debug) dev_info(dev, "shutdown\n"); dev->bus->shutdown(dev); } else if (dev->driver && dev->driver->shutdown) { if (initcall_debug) dev_info(dev, "shutdown\n"); dev->driver->shutdown(dev); } device_unlock(dev); if (parent) device_unlock(parent); put_device(dev); put_device(parent); spin_lock(&devices_kset->list_lock); } spin_unlock(&devices_kset->list_lock); } /* * Device logging functions */ #ifdef CONFIG_PRINTK static void set_dev_info(const struct device *dev, struct dev_printk_info *dev_info) { const char *subsys; memset(dev_info, 0, sizeof(*dev_info)); if (dev->class) subsys = dev->class->name; else if (dev->bus) subsys = dev->bus->name; else return; strscpy(dev_info->subsystem, subsys); /* * Add device identifier DEVICE=: * b12:8 block dev_t * c127:3 char dev_t * n8 netdev ifindex * +sound:card0 subsystem:devname */ if (MAJOR(dev->devt)) { char c; if (strcmp(subsys, "block") == 0) c = 'b'; else c = 'c'; snprintf(dev_info->device, sizeof(dev_info->device), "%c%u:%u", c, MAJOR(dev->devt), MINOR(dev->devt)); } else if (strcmp(subsys, "net") == 0) { struct net_device *net = to_net_dev(dev); snprintf(dev_info->device, sizeof(dev_info->device), "n%u", net->ifindex); } else { snprintf(dev_info->device, sizeof(dev_info->device), "+%s:%s", subsys, dev_name(dev)); } } int dev_vprintk_emit(int level, const struct device *dev, const char *fmt, va_list args) { struct dev_printk_info dev_info; set_dev_info(dev, &dev_info); return vprintk_emit(0, level, &dev_info, fmt, args); } EXPORT_SYMBOL(dev_vprintk_emit); int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...) 
{ va_list args; int r; va_start(args, fmt); r = dev_vprintk_emit(level, dev, fmt, args); va_end(args); return r; } EXPORT_SYMBOL(dev_printk_emit); static void __dev_printk(const char *level, const struct device *dev, struct va_format *vaf) { if (dev) dev_printk_emit(level[1] - '0', dev, "%s %s: %pV", dev_driver_string(dev), dev_name(dev), vaf); else printk("%s(NULL device *): %pV", level, vaf); } void _dev_printk(const char *level, const struct device *dev, const char *fmt, ...) { struct va_format vaf; va_list args; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; __dev_printk(level, dev, &vaf); va_end(args); } EXPORT_SYMBOL(_dev_printk); #define define_dev_printk_level(func, kern_level) \ void func(const struct device *dev, const char *fmt, ...) \ { \ struct va_format vaf; \ va_list args; \ \ va_start(args, fmt); \ \ vaf.fmt = fmt; \ vaf.va = &args; \ \ __dev_printk(kern_level, dev, &vaf); \ \ va_end(args); \ } \ EXPORT_SYMBOL(func); define_dev_printk_level(_dev_emerg, KERN_EMERG); define_dev_printk_level(_dev_alert, KERN_ALERT); define_dev_printk_level(_dev_crit, KERN_CRIT); define_dev_printk_level(_dev_err, KERN_ERR); define_dev_printk_level(_dev_warn, KERN_WARNING); define_dev_printk_level(_dev_notice, KERN_NOTICE); define_dev_printk_level(_dev_info, KERN_INFO); #endif static void __dev_probe_failed(const struct device *dev, int err, bool fatal, const char *fmt, va_list vargsp) { struct va_format vaf; va_list vargs; /* * On x86_64 and possibly on other architectures, va_list is actually a * size-1 array containing a structure. As a result, function parameter * vargsp decays from T[1] to T*, and &vargsp has type T** rather than * T(*)[1], which is expected by its assignment to vaf.va below. * * One standard way to solve this mess is by creating a copy in a local * variable of type va_list and then using a pointer to that local copy * instead, which is the approach employed here. */ va_copy(vargs, vargsp); vaf.fmt = fmt; vaf.va = &vargs; switch (err) { case -EPROBE_DEFER: device_set_deferred_probe_reason(dev, &vaf); dev_dbg(dev, "error %pe: %pV", ERR_PTR(err), &vaf); break; case -ENOMEM: /* Don't print anything on -ENOMEM, there's already enough output */ break; default: /* Log fatal final failures as errors, otherwise produce warnings */ if (fatal) dev_err(dev, "error %pe: %pV", ERR_PTR(err), &vaf); else dev_warn(dev, "error %pe: %pV", ERR_PTR(err), &vaf); break; } va_end(vargs); } /** * dev_err_probe - probe error check and log helper * @dev: the pointer to the struct device * @err: error value to test * @fmt: printf-style format string * @...: arguments as specified in the format string * * This helper implements common pattern present in probe functions for error * checking: print debug or error message depending if the error value is * -EPROBE_DEFER and propagate error upwards. * In case of -EPROBE_DEFER it sets also defer probe reason, which can be * checked later by reading devices_deferred debugfs attribute. * It replaces the following code sequence:: * * if (err != -EPROBE_DEFER) * dev_err(dev, ...); * else * dev_dbg(dev, ...); * return err; * * with:: * * return dev_err_probe(dev, err, ...); * * Using this helper in your probe function is totally fine even if @err * is known to never be -EPROBE_DEFER. * The benefit compared to a normal dev_err() is the standardized format * of the error code, which is emitted symbolically (i.e. you get "EAGAIN" * instead of "-35"), and having the error code returned allows more * compact error paths. * * Returns @err. 
*/ int dev_err_probe(const struct device *dev, int err, const char *fmt, ...) { va_list vargs; va_start(vargs, fmt); /* Use dev_err() for logging when err doesn't equal -EPROBE_DEFER */ __dev_probe_failed(dev, err, true, fmt, vargs); va_end(vargs); return err; } EXPORT_SYMBOL_GPL(dev_err_probe); /** * dev_warn_probe - probe error check and log helper * @dev: the pointer to the struct device * @err: error value to test * @fmt: printf-style format string * @...: arguments as specified in the format string * * This helper implements common pattern present in probe functions for error * checking: print debug or warning message depending if the error value is * -EPROBE_DEFER and propagate error upwards. * In case of -EPROBE_DEFER it sets also defer probe reason, which can be * checked later by reading devices_deferred debugfs attribute. * It replaces the following code sequence:: * * if (err != -EPROBE_DEFER) * dev_warn(dev, ...); * else * dev_dbg(dev, ...); * return err; * * with:: * * return dev_warn_probe(dev, err, ...); * * Using this helper in your probe function is totally fine even if @err * is known to never be -EPROBE_DEFER. * The benefit compared to a normal dev_warn() is the standardized format * of the error code, which is emitted symbolically (i.e. you get "EAGAIN" * instead of "-35"), and having the error code returned allows more * compact error paths. * * Returns @err. */ int dev_warn_probe(const struct device *dev, int err, const char *fmt, ...) { va_list vargs; va_start(vargs, fmt); /* Use dev_warn() for logging when err doesn't equal -EPROBE_DEFER */ __dev_probe_failed(dev, err, false, fmt, vargs); va_end(vargs); return err; } EXPORT_SYMBOL_GPL(dev_warn_probe); static inline bool fwnode_is_primary(struct fwnode_handle *fwnode) { return fwnode && !IS_ERR(fwnode->secondary); } /** * set_primary_fwnode - Change the primary firmware node of a given device. * @dev: Device to handle. * @fwnode: New primary firmware node of the device. * * Set the device's firmware node pointer to @fwnode, but if a secondary * firmware node of the device is present, preserve it. * * Valid fwnode cases are: * - primary --> secondary --> -ENODEV * - primary --> NULL * - secondary --> -ENODEV * - NULL */ void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode) { struct device *parent = dev->parent; struct fwnode_handle *fn = dev->fwnode; if (fwnode) { if (fwnode_is_primary(fn)) fn = fn->secondary; if (fn) { WARN_ON(fwnode->secondary); fwnode->secondary = fn; } dev->fwnode = fwnode; } else { if (fwnode_is_primary(fn)) { dev->fwnode = fn->secondary; /* Skip nullifying fn->secondary if the primary is shared */ if (parent && fn == parent->fwnode) return; /* Set fn->secondary = NULL, so fn remains the primary fwnode */ fn->secondary = NULL; } else { dev->fwnode = NULL; } } } EXPORT_SYMBOL_GPL(set_primary_fwnode); /** * set_secondary_fwnode - Change the secondary firmware node of a given device. * @dev: Device to handle. * @fwnode: New secondary firmware node of the device. * * If a primary firmware node of the device is present, set its secondary * pointer to @fwnode. Otherwise, set the device's firmware node pointer to * @fwnode. 
*/ void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode) { if (fwnode) fwnode->secondary = ERR_PTR(-ENODEV); if (fwnode_is_primary(dev->fwnode)) dev->fwnode->secondary = fwnode; else dev->fwnode = fwnode; } EXPORT_SYMBOL_GPL(set_secondary_fwnode); /** * device_remove_of_node - Remove an of_node from a device * @dev: device whose device tree node is being removed */ void device_remove_of_node(struct device *dev) { dev = get_device(dev); if (!dev) return; if (!dev->of_node) goto end; if (dev->fwnode == of_fwnode_handle(dev->of_node)) dev->fwnode = NULL; of_node_put(dev->of_node); dev->of_node = NULL; end: put_device(dev); } EXPORT_SYMBOL_GPL(device_remove_of_node); /** * device_add_of_node - Add an of_node to an existing device * @dev: device whose device tree node is being added * @of_node: of_node to add * * Return: 0 on success or error code on failure. */ int device_add_of_node(struct device *dev, struct device_node *of_node) { int ret; if (!of_node) return -EINVAL; dev = get_device(dev); if (!dev) return -EINVAL; if (dev->of_node) { dev_err(dev, "Cannot replace node %pOF with %pOF\n", dev->of_node, of_node); ret = -EBUSY; goto end; } dev->of_node = of_node_get(of_node); if (!dev->fwnode) dev->fwnode = of_fwnode_handle(of_node); ret = 0; end: put_device(dev); return ret; } EXPORT_SYMBOL_GPL(device_add_of_node); /** * device_set_of_node_from_dev - reuse device-tree node of another device * @dev: device whose device-tree node is being set * @dev2: device whose device-tree node is being reused * * Takes another reference to the new device-tree node after first dropping * any reference held to the old node. */ void device_set_of_node_from_dev(struct device *dev, const struct device *dev2) { of_node_put(dev->of_node); dev->of_node = of_node_get(dev2->of_node); dev->of_node_reused = true; } EXPORT_SYMBOL_GPL(device_set_of_node_from_dev); void device_set_node(struct device *dev, struct fwnode_handle *fwnode) { dev->fwnode = fwnode; dev->of_node = to_of_node(fwnode); } EXPORT_SYMBOL_GPL(device_set_node); int device_match_name(struct device *dev, const void *name) { return sysfs_streq(dev_name(dev), name); } EXPORT_SYMBOL_GPL(device_match_name); int device_match_type(struct device *dev, const void *type) { return dev->type == type; } EXPORT_SYMBOL_GPL(device_match_type); int device_match_of_node(struct device *dev, const void *np) { return np && dev->of_node == np; } EXPORT_SYMBOL_GPL(device_match_of_node); int device_match_fwnode(struct device *dev, const void *fwnode) { return fwnode && dev_fwnode(dev) == fwnode; } EXPORT_SYMBOL_GPL(device_match_fwnode); int device_match_devt(struct device *dev, const void *pdevt) { return dev->devt == *(dev_t *)pdevt; } EXPORT_SYMBOL_GPL(device_match_devt); int device_match_acpi_dev(struct device *dev, const void *adev) { return adev && ACPI_COMPANION(dev) == adev; } EXPORT_SYMBOL(device_match_acpi_dev); int device_match_acpi_handle(struct device *dev, const void *handle) { return handle && ACPI_HANDLE(dev) == handle; } EXPORT_SYMBOL(device_match_acpi_handle); int device_match_any(struct device *dev, const void *unused) { return 1; } EXPORT_SYMBOL_GPL(device_match_any); |
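/*
 * Illustrative sketch (not part of the kernel sources above): the
 * dev_err_probe() kerneldoc earlier in this file describes replacing the
 * usual dev_err()/dev_dbg() error-path pattern in probe functions. A
 * minimal, hypothetical platform driver probe using it could look like the
 * following; the "foo" names and the "bus" clock id are invented for the
 * example.
 */
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct clk *clk;

	/*
	 * devm_clk_get() may return -EPROBE_DEFER if the clock provider is
	 * not ready yet; dev_err_probe() then records the deferral reason
	 * and logs at debug level, otherwise it logs an error with the
	 * symbolic error code, and in both cases returns the error
	 * unchanged so it can be propagated directly.
	 */
	clk = devm_clk_get(&pdev->dev, "bus");
	if (IS_ERR(clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(clk),
				     "failed to get bus clock\n");

	return 0;
}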
// SPDX-License-Identifier: GPL-2.0-or-later /* * HID support for Linux * * Copyright (c) 1999 Andreas Gal * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz> * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc * Copyright (c) 2007-2008 Oliver Neukum * Copyright (c) 2006-2012 Jiri Kosina * Copyright (c) 2012 Henrik Rydberg */ /* */ #include <linux/module.h> #include <linux/slab.h> #include <linux/kernel.h> #include <linux/unaligned.h> #include <asm/byteorder.h> #include <linux/hid.h> static struct hid_driver hid_generic; static int __check_hid_generic(struct device_driver *drv, void *data) { struct hid_driver *hdrv = to_hid_driver(drv); struct hid_device *hdev = data; if (hdrv == &hid_generic) return 0; return hid_match_device(hdev, hdrv) != NULL; } static bool hid_generic_match(struct hid_device *hdev, bool ignore_special_driver) { if (ignore_special_driver) return true; if (hdev->quirks & HID_QUIRK_IGNORE_SPECIAL_DRIVER) return true; if (hdev->quirks & HID_QUIRK_HAVE_SPECIAL_DRIVER) return false; /* * If any other driver wants the device, leave the device to this other * driver. */ if (bus_for_each_drv(&hid_bus_type, NULL, hdev, __check_hid_generic)) return false; return true; } static int hid_generic_probe(struct hid_device *hdev, const struct hid_device_id *id) { int ret; hdev->quirks |= HID_QUIRK_INPUT_PER_APP; ret = hid_parse(hdev); if (ret) return ret; return hid_hw_start(hdev, HID_CONNECT_DEFAULT); } static const struct hid_device_id hid_table[] = { { HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, HID_ANY_ID, HID_ANY_ID) }, { } }; MODULE_DEVICE_TABLE(hid, hid_table); static struct hid_driver hid_generic = { .name = "hid-generic", .id_table = hid_table, .match = hid_generic_match, .probe = hid_generic_probe, }; module_hid_driver(hid_generic); MODULE_AUTHOR("Henrik Rydberg"); MODULE_DESCRIPTION("HID generic driver"); MODULE_LICENSE("GPL");
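/*
 * Illustrative sketch (not part of hid-generic.c): hid_generic_match()
 * above steps aside whenever another HID driver's id_table matches the
 * device. A hypothetical vendor-specific driver that would take such
 * precedence might be registered like this; the vendor/product IDs and
 * the "foo" names are placeholders.
 */
#include <linux/hid.h>
#include <linux/module.h>

static int foo_hid_probe(struct hid_device *hdev,
			 const struct hid_device_id *id)
{
	int ret;

	ret = hid_parse(hdev);
	if (ret)
		return ret;

	/* device-specific setup would go here */
	return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
}

static const struct hid_device_id foo_hid_table[] = {
	/* placeholder IDs; a real driver lists its supported devices */
	{ HID_USB_DEVICE(0x1234, 0x5678) },
	{ }
};
MODULE_DEVICE_TABLE(hid, foo_hid_table);

static struct hid_driver foo_hid_driver = {
	.name		= "foo-hid",
	.id_table	= foo_hid_table,
	.probe		= foo_hid_probe,
};
module_hid_driver(foo_hid_driver);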
// SPDX-License-Identifier: GPL-2.0-or-later /* * printk_safe.c - Safe printk for printk-deadlock-prone contexts */ #include <linux/preempt.h> #include <linux/kdb.h> #include <linux/smp.h> #include <linux/cpumask.h> #include <linux/printk.h> #include <linux/kprobes.h> #include "internal.h" /* Context where printk messages are never suppressed */ static atomic_t force_con; void printk_force_console_enter(void) { atomic_inc(&force_con); } void printk_force_console_exit(void) { atomic_dec(&force_con); } bool is_printk_force_console(void) { return atomic_read(&force_con); } static DEFINE_PER_CPU(int, printk_context); /* Can be preempted by NMI. */ void __printk_safe_enter(void) { this_cpu_inc(printk_context); } /* Can be preempted by NMI. */ void __printk_safe_exit(void) { this_cpu_dec(printk_context); } void __printk_deferred_enter(void) { cant_migrate(); __printk_safe_enter(); } void __printk_deferred_exit(void) { cant_migrate(); __printk_safe_exit(); } bool is_printk_legacy_deferred(void) { /* * The per-CPU variable @printk_context can be read safely in any * context. CPU migration is always disabled when set. * * A context holding the printk_cpu_sync must not spin waiting for * another CPU. For legacy printing, it could be the console_lock * or the port lock. */ return (force_legacy_kthread() || this_cpu_read(printk_context) || in_nmi() || is_printk_cpu_sync_owner()); } asmlinkage int vprintk(const char *fmt, va_list args) { #ifdef CONFIG_KGDB_KDB /* Allow to pass printk() to kdb but avoid a recursion. */ if (unlikely(kdb_trap_printk && kdb_printf_cpu < 0)) return vkdb_printf(KDB_MSGSRC_PRINTK, fmt, args); #endif return vprintk_default(fmt, args); } EXPORT_SYMBOL(vprintk);
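/*
 * Illustrative sketch (not part of printk_safe.c): the per-CPU
 * printk_context counter above is normally manipulated through the
 * printk_deferred_enter()/printk_deferred_exit() wrappers declared in
 * <linux/printk.h>, around a region that must not call synchronously into
 * the console drivers (for instance while holding a lock the console path
 * could also take). The lock and function names below are hypothetical.
 */
#include <linux/printk.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(foo_lock);

static void foo_report(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&foo_lock, flags);
	/* interrupts are off here, so migration cannot happen */
	printk_deferred_enter();

	/*
	 * With printk_context elevated, legacy console output triggered in
	 * this region is handed off (via irq_work) instead of being
	 * flushed synchronously, avoiding recursion on foo_lock.
	 */
	pr_warn("foo: state changed\n");

	printk_deferred_exit();
	raw_spin_unlock_irqrestore(&foo_lock, flags);
}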
/* SPDX-License-Identifier: GPL-2.0 */ /* * Operations on the network namespace */ #ifndef __NET_NET_NAMESPACE_H #define __NET_NET_NAMESPACE_H #include <linux/atomic.h> #include <linux/refcount.h> #include <linux/workqueue.h> #include <linux/list.h> #include <linux/sysctl.h> #include <linux/uidgid.h> #include <net/flow.h> #include <net/netns/core.h> #include <net/netns/mib.h> #include <net/netns/unix.h> #include <net/netns/packet.h> #include <net/netns/ipv4.h> #include <net/netns/ipv6.h> #include <net/netns/nexthop.h> #include <net/netns/ieee802154_6lowpan.h> #include <net/netns/sctp.h> #include <net/netns/netfilter.h> #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) #include <net/netns/conntrack.h> #endif #if IS_ENABLED(CONFIG_NF_FLOW_TABLE) #include <net/netns/flow_table.h> #endif #include <net/netns/nftables.h> #include <net/netns/xfrm.h> #include <net/netns/mpls.h> #include <net/netns/can.h> #include <net/netns/xdp.h> #include <net/netns/smc.h> #include <net/netns/bpf.h> #include <net/netns/mctp.h> #include <net/net_trackers.h> #include <linux/ns_common.h> #include <linux/idr.h> #include <linux/skbuff.h> #include <linux/notifier.h> #include <linux/xarray.h> struct user_namespace;
struct proc_dir_entry; struct net_device; struct sock; struct ctl_table_header; struct net_generic; struct uevent_sock; struct netns_ipvs; struct bpf_prog; #define NETDEV_HASHBITS 8 #define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS) struct net { /* First cache line can be often dirtied. * Do not place here read-mostly fields. */ refcount_t passive; /* To decide when the network * namespace should be freed. */ spinlock_t rules_mod_lock; unsigned int dev_base_seq; /* protected by rtnl_mutex */ u32 ifindex; spinlock_t nsid_lock; atomic_t fnhe_genid; struct list_head list; /* list of network namespaces */ struct list_head exit_list; /* To linked to call pernet exit * methods on dead net ( * pernet_ops_rwsem read locked), * or to unregister pernet ops * (pernet_ops_rwsem write locked). */ struct llist_node defer_free_list; struct llist_node cleanup_list; /* namespaces on death row */ struct list_head ptype_all; struct list_head ptype_specific; #ifdef CONFIG_KEYS struct key_tag *key_domain; /* Key domain of operation tag */ #endif struct user_namespace *user_ns; /* Owning user namespace */ struct ucounts *ucounts; struct idr netns_ids; struct ns_common ns; struct ref_tracker_dir refcnt_tracker; struct ref_tracker_dir notrefcnt_tracker; /* tracker for objects not * refcounted against netns */ struct list_head dev_base_head; struct proc_dir_entry *proc_net; struct proc_dir_entry *proc_net_stat; #ifdef CONFIG_SYSCTL struct ctl_table_set sysctls; #endif struct sock *rtnl; /* rtnetlink socket */ struct sock *genl_sock; struct uevent_sock *uevent_sock; /* uevent socket */ struct hlist_head *dev_name_head; struct hlist_head *dev_index_head; struct xarray dev_by_index; struct raw_notifier_head netdev_chain; /* Note that @hash_mix can be read millions times per second, * it is critical that it is on a read_mostly cache line. */ u32 hash_mix; struct net_device *loopback_dev; /* The loopback */ /* core fib_rules */ struct list_head rules_ops; struct netns_core core; struct netns_mib mib; struct netns_packet packet; #if IS_ENABLED(CONFIG_UNIX) struct netns_unix unx; #endif struct netns_nexthop nexthop; struct netns_ipv4 ipv4; #if IS_ENABLED(CONFIG_IPV6) struct netns_ipv6 ipv6; #endif #if IS_ENABLED(CONFIG_IEEE802154_6LOWPAN) struct netns_ieee802154_lowpan ieee802154_lowpan; #endif #if defined(CONFIG_IP_SCTP) || defined(CONFIG_IP_SCTP_MODULE) struct netns_sctp sctp; #endif #ifdef CONFIG_NETFILTER struct netns_nf nf; #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) struct netns_ct ct; #endif #if defined(CONFIG_NF_TABLES) || defined(CONFIG_NF_TABLES_MODULE) struct netns_nftables nft; #endif #if IS_ENABLED(CONFIG_NF_FLOW_TABLE) struct netns_ft ft; #endif #endif #ifdef CONFIG_WEXT_CORE struct sk_buff_head wext_nlevents; #endif struct net_generic __rcu *gen; /* Used to store attached BPF programs */ struct netns_bpf bpf; /* Note : following structs are cache line aligned */ #ifdef CONFIG_XFRM struct netns_xfrm xfrm; #endif u64 net_cookie; /* written once */ #if IS_ENABLED(CONFIG_IP_VS) struct netns_ipvs *ipvs; #endif #if IS_ENABLED(CONFIG_MPLS) struct netns_mpls mpls; #endif #if IS_ENABLED(CONFIG_CAN) struct netns_can can; #endif #ifdef CONFIG_XDP_SOCKETS struct netns_xdp xdp; #endif #if IS_ENABLED(CONFIG_MCTP) struct netns_mctp mctp; #endif #if IS_ENABLED(CONFIG_CRYPTO_USER) struct sock *crypto_nlsk; #endif struct sock *diag_nlsk; #if IS_ENABLED(CONFIG_SMC) struct netns_smc smc; #endif #ifdef CONFIG_DEBUG_NET_SMALL_RTNL /* Move to a better place when the config guard is removed. 
*/ struct mutex rtnl_mutex; #endif } __randomize_layout; #include <linux/seq_file_net.h> /* Init's network namespace */ extern struct net init_net; #ifdef CONFIG_NET_NS struct net *copy_net_ns(unsigned long flags, struct user_namespace *user_ns, struct net *old_net); void net_ns_get_ownership(const struct net *net, kuid_t *uid, kgid_t *gid); void net_ns_barrier(void); struct ns_common *get_net_ns(struct ns_common *ns); struct net *get_net_ns_by_fd(int fd); extern struct task_struct *cleanup_net_task; #else /* CONFIG_NET_NS */ #include <linux/sched.h> #include <linux/nsproxy.h> static inline struct net *copy_net_ns(unsigned long flags, struct user_namespace *user_ns, struct net *old_net) { if (flags & CLONE_NEWNET) return ERR_PTR(-EINVAL); return old_net; } static inline void net_ns_get_ownership(const struct net *net, kuid_t *uid, kgid_t *gid) { *uid = GLOBAL_ROOT_UID; *gid = GLOBAL_ROOT_GID; } static inline void net_ns_barrier(void) {} static inline struct ns_common *get_net_ns(struct ns_common *ns) { return ERR_PTR(-EINVAL); } static inline struct net *get_net_ns_by_fd(int fd) { return ERR_PTR(-EINVAL); } #endif /* CONFIG_NET_NS */ extern struct list_head net_namespace_list; struct net *get_net_ns_by_pid(pid_t pid); #ifdef CONFIG_SYSCTL void ipx_register_sysctl(void); void ipx_unregister_sysctl(void); #else #define ipx_register_sysctl() #define ipx_unregister_sysctl() #endif #ifdef CONFIG_NET_NS void __put_net(struct net *net); /* Try using get_net_track() instead */ static inline struct net *get_net(struct net *net) { refcount_inc(&net->ns.count); return net; } static inline struct net *maybe_get_net(struct net *net) { /* Used when we know struct net exists but we * aren't guaranteed a previous reference count * exists. If the reference count is zero this * function fails and returns NULL. */ if (!refcount_inc_not_zero(&net->ns.count)) net = NULL; return net; } /* Try using put_net_track() instead */ static inline void put_net(struct net *net) { if (refcount_dec_and_test(&net->ns.count)) __put_net(net); } static inline int net_eq(const struct net *net1, const struct net *net2) { return net1 == net2; } static inline int check_net(const struct net *net) { return refcount_read(&net->ns.count) != 0; } void net_drop_ns(void *); void net_passive_dec(struct net *net); #else static inline struct net *get_net(struct net *net) { return net; } static inline void put_net(struct net *net) { } static inline struct net *maybe_get_net(struct net *net) { return net; } static inline int net_eq(const struct net *net1, const struct net *net2) { return 1; } static inline int check_net(const struct net *net) { return 1; } #define net_drop_ns NULL static inline void net_passive_dec(struct net *net) { refcount_dec(&net->passive); } #endif static inline void net_passive_inc(struct net *net) { refcount_inc(&net->passive); } /* Returns true if the netns initialization is completed successfully */ static inline bool net_initialized(const struct net *net) { return READ_ONCE(net->list.next); } static inline void __netns_tracker_alloc(struct net *net, netns_tracker *tracker, bool refcounted, gfp_t gfp) { #ifdef CONFIG_NET_NS_REFCNT_TRACKER ref_tracker_alloc(refcounted ? 
&net->refcnt_tracker : &net->notrefcnt_tracker, tracker, gfp); #endif } static inline void netns_tracker_alloc(struct net *net, netns_tracker *tracker, gfp_t gfp) { __netns_tracker_alloc(net, tracker, true, gfp); } static inline void __netns_tracker_free(struct net *net, netns_tracker *tracker, bool refcounted) { #ifdef CONFIG_NET_NS_REFCNT_TRACKER ref_tracker_free(refcounted ? &net->refcnt_tracker : &net->notrefcnt_tracker, tracker); #endif } static inline struct net *get_net_track(struct net *net, netns_tracker *tracker, gfp_t gfp) { get_net(net); netns_tracker_alloc(net, tracker, gfp); return net; } static inline void put_net_track(struct net *net, netns_tracker *tracker) { __netns_tracker_free(net, tracker, true); put_net(net); } typedef struct { #ifdef CONFIG_NET_NS struct net __rcu *net; #endif } possible_net_t; static inline void write_pnet(possible_net_t *pnet, struct net *net) { #ifdef CONFIG_NET_NS rcu_assign_pointer(pnet->net, net); #endif } static inline struct net *read_pnet(const possible_net_t *pnet) { #ifdef CONFIG_NET_NS return rcu_dereference_protected(pnet->net, true); #else return &init_net; #endif } static inline struct net *read_pnet_rcu(const possible_net_t *pnet) { #ifdef CONFIG_NET_NS return rcu_dereference(pnet->net); #else return &init_net; #endif } /* Protected by net_rwsem */ #define for_each_net(VAR) \ list_for_each_entry(VAR, &net_namespace_list, list) #define for_each_net_continue_reverse(VAR) \ list_for_each_entry_continue_reverse(VAR, &net_namespace_list, list) #define for_each_net_rcu(VAR) \ list_for_each_entry_rcu(VAR, &net_namespace_list, list) #ifdef CONFIG_NET_NS #define __net_init #define __net_exit #define __net_initdata #define __net_initconst #else #define __net_init __init #define __net_exit __ref #define __net_initdata __initdata #define __net_initconst __initconst #endif int peernet2id_alloc(struct net *net, struct net *peer, gfp_t gfp); int peernet2id(const struct net *net, struct net *peer); bool peernet_has_id(const struct net *net, struct net *peer); struct net *get_net_ns_by_id(const struct net *net, int id); struct pernet_operations { struct list_head list; /* * Below methods are called without any exclusive locks. * More than one net may be constructed and destructed * in parallel on several cpus. Every pernet_operations * have to keep in mind all other pernet_operations and * to introduce a locking, if they share common resources. * * The only time they are called with exclusive lock is * from register_pernet_subsys(), unregister_pernet_subsys() * register_pernet_device() and unregister_pernet_device(). * * Exit methods using blocking RCU primitives, such as * synchronize_rcu(), should be implemented via exit_batch. * Then, destruction of a group of net requires single * synchronize_rcu() related to these pernet_operations, * instead of separate synchronize_rcu() for every net. * Please, avoid synchronize_rcu() at all, where it's possible. * * Note that a combination of pre_exit() and exit() can * be used, since a synchronize_rcu() is guaranteed between * the calls. */ int (*init)(struct net *net); void (*pre_exit)(struct net *net); void (*exit)(struct net *net); void (*exit_batch)(struct list_head *net_exit_list); /* Following method is called with RTNL held. */ void (*exit_rtnl)(struct net *net, struct list_head *dev_kill_list); unsigned int * const id; const size_t size; }; /* * Use these carefully. 
If you implement a network device and it * needs per network namespace operations use device pernet operations, * otherwise use pernet subsys operations. * * Network interfaces need to be removed from a dying netns _before_ * subsys notifiers can be called, as most of the network code cleanup * (which is done from subsys notifiers) runs with the assumption that * dev_remove_pack has been called so no new packets will arrive during * and after the cleanup functions have been called. dev_remove_pack * is not per namespace so instead the guarantee of no more packets * arriving in a network namespace is provided by ensuring that all * network devices and all sockets have left the network namespace * before the cleanup methods are called. * * For the longest time the ipv4 icmp code was registered as a pernet * device which caused kernel oops, and panics during network * namespace cleanup. So please don't get this wrong. */ int register_pernet_subsys(struct pernet_operations *); void unregister_pernet_subsys(struct pernet_operations *); int register_pernet_device(struct pernet_operations *); void unregister_pernet_device(struct pernet_operations *); struct ctl_table; #define register_net_sysctl(net, path, table) \ register_net_sysctl_sz(net, path, table, ARRAY_SIZE(table)) #ifdef CONFIG_SYSCTL int net_sysctl_init(void); struct ctl_table_header *register_net_sysctl_sz(struct net *net, const char *path, struct ctl_table *table, size_t table_size); void unregister_net_sysctl_table(struct ctl_table_header *header); #else static inline int net_sysctl_init(void) { return 0; } static inline struct ctl_table_header *register_net_sysctl_sz(struct net *net, const char *path, struct ctl_table *table, size_t table_size) { return NULL; } static inline void unregister_net_sysctl_table(struct ctl_table_header *header) { } #endif static inline int rt_genid_ipv4(const struct net *net) { return atomic_read(&net->ipv4.rt_genid); } #if IS_ENABLED(CONFIG_IPV6) static inline int rt_genid_ipv6(const struct net *net) { return atomic_read(&net->ipv6.fib6_sernum); } #endif static inline void rt_genid_bump_ipv4(struct net *net) { atomic_inc(&net->ipv4.rt_genid); } extern void (*__fib6_flush_trees)(struct net *net); static inline void rt_genid_bump_ipv6(struct net *net) { if (__fib6_flush_trees) __fib6_flush_trees(net); } #if IS_ENABLED(CONFIG_IEEE802154_6LOWPAN) static inline struct netns_ieee802154_lowpan * net_ieee802154_lowpan(struct net *net) { return &net->ieee802154_lowpan; } #endif /* For callers who don't really care about whether it's IPv4 or IPv6 */ static inline void rt_genid_bump_all(struct net *net) { rt_genid_bump_ipv4(net); rt_genid_bump_ipv6(net); } static inline int fnhe_genid(const struct net *net) { return atomic_read(&net->fnhe_genid); } static inline void fnhe_genid_bump(struct net *net) { atomic_inc(&net->fnhe_genid); } #ifdef CONFIG_NET void net_ns_init(void); #else static inline void net_ns_init(void) {} #endif #endif /* __NET_NET_NAMESPACE_H */ |
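/*
 * Illustrative sketch (not part of this header): a minimal use of the
 * pernet_operations interface documented above, storing a small per-netns
 * state blob via the id/size mechanism. The "foo" names are invented;
 * net_generic() comes from <net/netns/generic.h>.
 */
#include <linux/init.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

static unsigned int foo_net_id;

struct foo_net {
	int counter;	/* example per-namespace state */
};

static int __net_init foo_net_init(struct net *net)
{
	struct foo_net *fn = net_generic(net, foo_net_id);

	fn->counter = 0;	/* the blob is allocated (zeroed) by the core */
	return 0;
}

static void __net_exit foo_net_exit(struct net *net)
{
	/* release anything foo_net_init() set up; the blob itself is freed by the core */
}

static struct pernet_operations foo_net_ops = {
	.init	= foo_net_init,
	.exit	= foo_net_exit,
	.id	= &foo_net_id,
	.size	= sizeof(struct foo_net),
};

/* typically called from the module's init path */
static int __init foo_init(void)
{
	return register_pernet_subsys(&foo_net_ops);
}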
// SPDX-License-Identifier: GPL-2.0-only /* * * Copyright (C) 2011 Novell Inc. */ #include <uapi/linux/magic.h> #include <linux/fs.h> #include <linux/namei.h> #include <linux/xattr.h> #include <linux/mount.h> #include <linux/parser.h> #include <linux/module.h> #include <linux/statfs.h> #include <linux/seq_file.h> #include <linux/posix_acl_xattr.h> #include <linux/exportfs.h> #include <linux/file.h> #include <linux/fs_context.h> #include <linux/fs_parser.h> #include "overlayfs.h" #include "params.h" MODULE_AUTHOR("Miklos Szeredi <miklos@szeredi.hu>"); MODULE_DESCRIPTION("Overlay filesystem"); MODULE_LICENSE("GPL"); struct ovl_dir_cache; static struct dentry *ovl_d_real(struct dentry *dentry, enum d_real_type type) { struct dentry *upper, *lower; int err; switch (type) { case D_REAL_DATA: case D_REAL_METADATA: break; default: goto bug; } if (!d_is_reg(dentry)) { /* d_real_inode() is only relevant for regular files */ return dentry; } upper = ovl_dentry_upper(dentry); if (upper && (type == D_REAL_METADATA || ovl_has_upperdata(d_inode(dentry)))) return upper; if (type == D_REAL_METADATA) { lower = ovl_dentry_lower(dentry); goto real_lower; } /* * Best effort lazy lookup of lowerdata for D_REAL_DATA case to return * the real lowerdata dentry. The only current caller of d_real() with * D_REAL_DATA is d_real_inode() from trace_uprobe and this caller is * likely going to be followed reading from the file, before placing * uprobes on offset within the file, so lowerdata should be available * when setting the uprobe.
*/ err = ovl_verify_lowerdata(dentry); if (err) goto bug; lower = ovl_dentry_lowerdata(dentry); if (!lower) goto bug; real_lower: /* Handle recursion into stacked lower fs */ return d_real(lower, type); bug: WARN(1, "%s(%pd4, %d): real dentry not found\n", __func__, dentry, type); return dentry; } static int ovl_revalidate_real(struct dentry *d, unsigned int flags, bool weak) { int ret = 1; if (!d) return 1; if (weak) { if (d->d_flags & DCACHE_OP_WEAK_REVALIDATE) ret = d->d_op->d_weak_revalidate(d, flags); } else if (d->d_flags & DCACHE_OP_REVALIDATE) { struct dentry *parent; struct inode *dir; struct name_snapshot n; if (flags & LOOKUP_RCU) { parent = READ_ONCE(d->d_parent); dir = d_inode_rcu(parent); if (!dir) return -ECHILD; } else { parent = dget_parent(d); dir = d_inode(parent); } take_dentry_name_snapshot(&n, d); ret = d->d_op->d_revalidate(dir, &n.name, d, flags); release_dentry_name_snapshot(&n); if (!(flags & LOOKUP_RCU)) dput(parent); if (!ret) { if (!(flags & LOOKUP_RCU)) d_invalidate(d); ret = -ESTALE; } } return ret; } static int ovl_dentry_revalidate_common(struct dentry *dentry, unsigned int flags, bool weak) { struct ovl_entry *oe; struct ovl_path *lowerstack; struct inode *inode = d_inode_rcu(dentry); struct dentry *upper; unsigned int i; int ret = 1; /* Careful in RCU mode */ if (!inode) return -ECHILD; oe = OVL_I_E(inode); lowerstack = ovl_lowerstack(oe); upper = ovl_i_dentry_upper(inode); if (upper) ret = ovl_revalidate_real(upper, flags, weak); for (i = 0; ret > 0 && i < ovl_numlower(oe); i++) ret = ovl_revalidate_real(lowerstack[i].dentry, flags, weak); return ret; } static int ovl_dentry_revalidate(struct inode *dir, const struct qstr *name, struct dentry *dentry, unsigned int flags) { return ovl_dentry_revalidate_common(dentry, flags, false); } static int ovl_dentry_weak_revalidate(struct dentry *dentry, unsigned int flags) { return ovl_dentry_revalidate_common(dentry, flags, true); } static const struct dentry_operations ovl_dentry_operations = { .d_real = ovl_d_real, .d_revalidate = ovl_dentry_revalidate, .d_weak_revalidate = ovl_dentry_weak_revalidate, }; static struct kmem_cache *ovl_inode_cachep; static struct inode *ovl_alloc_inode(struct super_block *sb) { struct ovl_inode *oi = alloc_inode_sb(sb, ovl_inode_cachep, GFP_KERNEL); if (!oi) return NULL; oi->cache = NULL; oi->redirect = NULL; oi->version = 0; oi->flags = 0; oi->__upperdentry = NULL; oi->lowerdata_redirect = NULL; oi->oe = NULL; mutex_init(&oi->lock); return &oi->vfs_inode; } static void ovl_free_inode(struct inode *inode) { struct ovl_inode *oi = OVL_I(inode); kfree(oi->redirect); kfree(oi->oe); mutex_destroy(&oi->lock); kmem_cache_free(ovl_inode_cachep, oi); } static void ovl_destroy_inode(struct inode *inode) { struct ovl_inode *oi = OVL_I(inode); dput(oi->__upperdentry); ovl_stack_put(ovl_lowerstack(oi->oe), ovl_numlower(oi->oe)); if (S_ISDIR(inode->i_mode)) ovl_dir_cache_free(inode); else kfree(oi->lowerdata_redirect); } static void ovl_put_super(struct super_block *sb) { struct ovl_fs *ofs = OVL_FS(sb); if (ofs) ovl_free_fs(ofs); } /* Sync real dirty inodes in upper filesystem (if it exists) */ static int ovl_sync_fs(struct super_block *sb, int wait) { struct ovl_fs *ofs = OVL_FS(sb); struct super_block *upper_sb; int ret; ret = ovl_sync_status(ofs); if (ret < 0) return -EIO; if (!ret) return ret; /* * Not called for sync(2) call or an emergency sync (SB_I_SKIP_SYNC). * All the super blocks will be iterated, including upper_sb. 
* * If this is a syncfs(2) call, then we do need to call * sync_filesystem() on upper_sb, but enough if we do it when being * called with wait == 1. */ if (!wait) return 0; upper_sb = ovl_upper_mnt(ofs)->mnt_sb; down_read(&upper_sb->s_umount); ret = sync_filesystem(upper_sb); up_read(&upper_sb->s_umount); return ret; } /** * ovl_statfs * @dentry: The dentry to query * @buf: The struct kstatfs to fill in with stats * * Get the filesystem statistics. As writes always target the upper layer * filesystem pass the statfs to the upper filesystem (if it exists) */ static int ovl_statfs(struct dentry *dentry, struct kstatfs *buf) { struct super_block *sb = dentry->d_sb; struct ovl_fs *ofs = OVL_FS(sb); struct dentry *root_dentry = sb->s_root; struct path path; int err; ovl_path_real(root_dentry, &path); err = vfs_statfs(&path, buf); if (!err) { buf->f_namelen = ofs->namelen; buf->f_type = OVERLAYFS_SUPER_MAGIC; if (ovl_has_fsid(ofs)) buf->f_fsid = uuid_to_fsid(sb->s_uuid.b); } return err; } static const struct super_operations ovl_super_operations = { .alloc_inode = ovl_alloc_inode, .free_inode = ovl_free_inode, .destroy_inode = ovl_destroy_inode, .drop_inode = generic_delete_inode, .put_super = ovl_put_super, .sync_fs = ovl_sync_fs, .statfs = ovl_statfs, .show_options = ovl_show_options, }; #define OVL_WORKDIR_NAME "work" #define OVL_INDEXDIR_NAME "index" static struct dentry *ovl_workdir_create(struct ovl_fs *ofs, const char *name, bool persist) { struct inode *dir = ofs->workbasedir->d_inode; struct vfsmount *mnt = ovl_upper_mnt(ofs); struct dentry *work; int err; bool retried = false; inode_lock_nested(dir, I_MUTEX_PARENT); retry: work = ovl_lookup_upper(ofs, name, ofs->workbasedir, strlen(name)); if (!IS_ERR(work)) { struct iattr attr = { .ia_valid = ATTR_MODE, .ia_mode = S_IFDIR | 0, }; if (work->d_inode) { err = -EEXIST; if (retried) goto out_dput; if (persist) goto out_unlock; retried = true; err = ovl_workdir_cleanup(ofs, dir, mnt, work, 0); dput(work); if (err == -EINVAL) { work = ERR_PTR(err); goto out_unlock; } goto retry; } work = ovl_do_mkdir(ofs, dir, work, attr.ia_mode); err = PTR_ERR(work); if (IS_ERR(work)) goto out_err; /* Weird filesystem returning with hashed negative (kernfs)? */ err = -EINVAL; if (d_really_is_negative(work)) goto out_dput; /* * Try to remove POSIX ACL xattrs from workdir. We are good if: * * a) success (there was a POSIX ACL xattr and was removed) * b) -ENODATA (there was no POSIX ACL xattr) * c) -EOPNOTSUPP (POSIX ACL xattrs are not supported) * * There are various other error values that could effectively * mean that the xattr doesn't exist (e.g. -ERANGE is returned * if the xattr name is too long), but the set of filesystems * allowed as upper are limited to "normal" ones, where checking * for the above two errors is sufficient. 
*/ err = ovl_do_remove_acl(ofs, work, XATTR_NAME_POSIX_ACL_DEFAULT); if (err && err != -ENODATA && err != -EOPNOTSUPP) goto out_dput; err = ovl_do_remove_acl(ofs, work, XATTR_NAME_POSIX_ACL_ACCESS); if (err && err != -ENODATA && err != -EOPNOTSUPP) goto out_dput; /* Clear any inherited mode bits */ inode_lock(work->d_inode); err = ovl_do_notify_change(ofs, work, &attr); inode_unlock(work->d_inode); if (err) goto out_dput; } else { err = PTR_ERR(work); goto out_err; } out_unlock: inode_unlock(dir); return work; out_dput: dput(work); out_err: pr_warn("failed to create directory %s/%s (errno: %i); mounting read-only\n", ofs->config.workdir, name, -err); work = NULL; goto out_unlock; } static int ovl_check_namelen(const struct path *path, struct ovl_fs *ofs, const char *name) { struct kstatfs statfs; int err = vfs_statfs(path, &statfs); if (err) pr_err("statfs failed on '%s'\n", name); else ofs->namelen = max(ofs->namelen, statfs.f_namelen); return err; } static int ovl_lower_dir(const char *name, struct path *path, struct ovl_fs *ofs, int *stack_depth) { int fh_type; int err; err = ovl_check_namelen(path, ofs, name); if (err) return err; *stack_depth = max(*stack_depth, path->mnt->mnt_sb->s_stack_depth); /* * The inodes index feature and NFS export need to encode and decode * file handles, so they require that all layers support them. */ fh_type = ovl_can_decode_fh(path->dentry->d_sb); if ((ofs->config.nfs_export || (ofs->config.index && ofs->config.upperdir)) && !fh_type) { ofs->config.index = false; ofs->config.nfs_export = false; pr_warn("fs on '%s' does not support file handles, falling back to index=off,nfs_export=off.\n", name); } ofs->nofh |= !fh_type; /* * Decoding origin file handle is required for persistent st_ino. * Without persistent st_ino, xino=auto falls back to xino=off. */ if (ofs->config.xino == OVL_XINO_AUTO && ofs->config.upperdir && !fh_type) { ofs->config.xino = OVL_XINO_OFF; pr_warn("fs on '%s' does not support file handles, falling back to xino=off.\n", name); } /* Check if lower fs has 32bit inode numbers */ if (fh_type != FILEID_INO32_GEN) ofs->xino_mode = -1; return 0; } /* Workdir should not be subdir of upperdir and vice versa */ static bool ovl_workdir_ok(struct dentry *workdir, struct dentry *upperdir) { bool ok = false; if (workdir != upperdir) { struct dentry *trap = lock_rename(workdir, upperdir); if (!IS_ERR(trap)) unlock_rename(workdir, upperdir); ok = (trap == NULL); } return ok; } static int ovl_setup_trap(struct super_block *sb, struct dentry *dir, struct inode **ptrap, const char *name) { struct inode *trap; int err; trap = ovl_get_trap_inode(sb, dir); err = PTR_ERR_OR_ZERO(trap); if (err) { if (err == -ELOOP) pr_err("conflicting %s path\n", name); return err; } *ptrap = trap; return 0; } /* * Determine how we treat concurrent use of upperdir/workdir based on the * index feature. This is papering over mount leaks of container runtimes, * for example, an old overlay mount is leaked and now its upperdir is * attempted to be used as a lower layer in a new overlay mount. 
*/ static int ovl_report_in_use(struct ovl_fs *ofs, const char *name) { if (ofs->config.index) { pr_err("%s is in-use as upperdir/workdir of another mount, mount with '-o index=off' to override exclusive upperdir protection.\n", name); return -EBUSY; } else { pr_warn("%s is in-use as upperdir/workdir of another mount, accessing files from both mounts will result in undefined behavior.\n", name); return 0; } } static int ovl_get_upper(struct super_block *sb, struct ovl_fs *ofs, struct ovl_layer *upper_layer, const struct path *upperpath) { struct vfsmount *upper_mnt; int err; /* Upperdir path should not be r/o */ if (__mnt_is_readonly(upperpath->mnt)) { pr_err("upper fs is r/o, try multi-lower layers mount\n"); err = -EINVAL; goto out; } err = ovl_check_namelen(upperpath, ofs, ofs->config.upperdir); if (err) goto out; err = ovl_setup_trap(sb, upperpath->dentry, &upper_layer->trap, "upperdir"); if (err) goto out; upper_mnt = clone_private_mount(upperpath); err = PTR_ERR(upper_mnt); if (IS_ERR(upper_mnt)) { pr_err("failed to clone upperpath\n"); goto out; } /* Don't inherit atime flags */ upper_mnt->mnt_flags &= ~(MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME); upper_layer->mnt = upper_mnt; upper_layer->idx = 0; upper_layer->fsid = 0; /* * Inherit SB_NOSEC flag from upperdir. * * This optimization changes behavior when a security related attribute * (suid/sgid/security.*) is changed on an underlying layer. This is * okay because we don't yet have guarantees in that case, but it will * need careful treatment once we want to honour changes to underlying * filesystems. */ if (upper_mnt->mnt_sb->s_flags & SB_NOSEC) sb->s_flags |= SB_NOSEC; if (ovl_inuse_trylock(ovl_upper_mnt(ofs)->mnt_root)) { ofs->upperdir_locked = true; } else { err = ovl_report_in_use(ofs, "upperdir"); if (err) goto out; } err = 0; out: return err; } /* * Returns 1 if RENAME_WHITEOUT is supported, 0 if not supported and * negative values if error is encountered. 
*/ static int ovl_check_rename_whiteout(struct ovl_fs *ofs) { struct dentry *workdir = ofs->workdir; struct inode *dir = d_inode(workdir); struct dentry *temp; struct dentry *dest; struct dentry *whiteout; struct name_snapshot name; int err; inode_lock_nested(dir, I_MUTEX_PARENT); temp = ovl_create_temp(ofs, workdir, OVL_CATTR(S_IFREG | 0)); err = PTR_ERR(temp); if (IS_ERR(temp)) goto out_unlock; dest = ovl_lookup_temp(ofs, workdir); err = PTR_ERR(dest); if (IS_ERR(dest)) { dput(temp); goto out_unlock; } /* Name is inline and stable - using snapshot as a copy helper */ take_dentry_name_snapshot(&name, temp); err = ovl_do_rename(ofs, dir, temp, dir, dest, RENAME_WHITEOUT); if (err) { if (err == -EINVAL) err = 0; goto cleanup_temp; } whiteout = ovl_lookup_upper(ofs, name.name.name, workdir, name.name.len); err = PTR_ERR(whiteout); if (IS_ERR(whiteout)) goto cleanup_temp; err = ovl_upper_is_whiteout(ofs, whiteout); /* Best effort cleanup of whiteout and temp file */ if (err) ovl_cleanup(ofs, dir, whiteout); dput(whiteout); cleanup_temp: ovl_cleanup(ofs, dir, temp); release_dentry_name_snapshot(&name); dput(temp); dput(dest); out_unlock: inode_unlock(dir); return err; } static struct dentry *ovl_lookup_or_create(struct ovl_fs *ofs, struct dentry *parent, const char *name, umode_t mode) { size_t len = strlen(name); struct dentry *child; inode_lock_nested(parent->d_inode, I_MUTEX_PARENT); child = ovl_lookup_upper(ofs, name, parent, len); if (!IS_ERR(child) && !child->d_inode) child = ovl_create_real(ofs, parent->d_inode, child, OVL_CATTR(mode)); inode_unlock(parent->d_inode); dput(parent); return child; } /* * Creates $workdir/work/incompat/volatile/dirty file if it is not already * present. */ static int ovl_create_volatile_dirty(struct ovl_fs *ofs) { unsigned int ctr; struct dentry *d = dget(ofs->workbasedir); static const char *const volatile_path[] = { OVL_WORKDIR_NAME, "incompat", "volatile", "dirty" }; const char *const *name = volatile_path; for (ctr = ARRAY_SIZE(volatile_path); ctr; ctr--, name++) { d = ovl_lookup_or_create(ofs, d, *name, ctr > 1 ? S_IFDIR : S_IFREG); if (IS_ERR(d)) return PTR_ERR(d); } dput(d); return 0; } static int ovl_make_workdir(struct super_block *sb, struct ovl_fs *ofs, const struct path *workpath) { struct vfsmount *mnt = ovl_upper_mnt(ofs); struct dentry *workdir; struct file *tmpfile; bool rename_whiteout; bool d_type; int fh_type; int err; err = mnt_want_write(mnt); if (err) return err; workdir = ovl_workdir_create(ofs, OVL_WORKDIR_NAME, false); err = PTR_ERR(workdir); if (IS_ERR_OR_NULL(workdir)) goto out; ofs->workdir = workdir; err = ovl_setup_trap(sb, ofs->workdir, &ofs->workdir_trap, "workdir"); if (err) goto out; /* * Upper should support d_type, else whiteouts are visible. Given * workdir and upper are on same fs, we can do iterate_dir() on * workdir. This check requires successful creation of workdir in * previous step. 
*/ err = ovl_check_d_type_supported(workpath); if (err < 0) goto out; d_type = err; if (!d_type) pr_warn("upper fs needs to support d_type.\n"); /* Check if upper/work fs supports O_TMPFILE */ tmpfile = ovl_do_tmpfile(ofs, ofs->workdir, S_IFREG | 0); ofs->tmpfile = !IS_ERR(tmpfile); if (ofs->tmpfile) fput(tmpfile); else pr_warn("upper fs does not support tmpfile.\n"); /* Check if upper/work fs supports RENAME_WHITEOUT */ err = ovl_check_rename_whiteout(ofs); if (err < 0) goto out; rename_whiteout = err; if (!rename_whiteout) pr_warn("upper fs does not support RENAME_WHITEOUT.\n"); /* * Check if upper/work fs supports (trusted|user).overlay.* xattr */ err = ovl_setxattr(ofs, ofs->workdir, OVL_XATTR_OPAQUE, "0", 1); if (err) { pr_warn("failed to set xattr on upper\n"); ofs->noxattr = true; if (ovl_redirect_follow(ofs)) { ofs->config.redirect_mode = OVL_REDIRECT_NOFOLLOW; pr_warn("...falling back to redirect_dir=nofollow.\n"); } if (ofs->config.metacopy) { ofs->config.metacopy = false; pr_warn("...falling back to metacopy=off.\n"); } if (ofs->config.index) { ofs->config.index = false; pr_warn("...falling back to index=off.\n"); } if (ovl_has_fsid(ofs)) { ofs->config.uuid = OVL_UUID_NULL; pr_warn("...falling back to uuid=null.\n"); } /* * xattr support is required for persistent st_ino. * Without persistent st_ino, xino=auto falls back to xino=off. */ if (ofs->config.xino == OVL_XINO_AUTO) { ofs->config.xino = OVL_XINO_OFF; pr_warn("...falling back to xino=off.\n"); } if (err == -EPERM && !ofs->config.userxattr) pr_info("try mounting with 'userxattr' option\n"); err = 0; } else { ovl_removexattr(ofs, ofs->workdir, OVL_XATTR_OPAQUE); } /* * We allowed sub-optimal upper fs configuration and don't want to break * users over kernel upgrade, but we never allowed remote upper fs, so * we can enforce strict requirements for remote upper fs. */ if (ovl_dentry_remote(ofs->workdir) && (!d_type || !rename_whiteout || ofs->noxattr)) { pr_err("upper fs missing required features.\n"); err = -EINVAL; goto out; } /* * For volatile mount, create a incompat/volatile/dirty file to keep * track of it. 
*/ if (ofs->config.ovl_volatile) { err = ovl_create_volatile_dirty(ofs); if (err < 0) { pr_err("Failed to create volatile/dirty file.\n"); goto out; } } /* Check if upper/work fs supports file handles */ fh_type = ovl_can_decode_fh(ofs->workdir->d_sb); if (ofs->config.index && !fh_type) { ofs->config.index = false; pr_warn("upper fs does not support file handles, falling back to index=off.\n"); } ofs->nofh |= !fh_type; /* Check if upper fs has 32bit inode numbers */ if (fh_type != FILEID_INO32_GEN) ofs->xino_mode = -1; /* NFS export of r/w mount depends on index */ if (ofs->config.nfs_export && !ofs->config.index) { pr_warn("NFS export requires \"index=on\", falling back to nfs_export=off.\n"); ofs->config.nfs_export = false; } out: mnt_drop_write(mnt); return err; } static int ovl_get_workdir(struct super_block *sb, struct ovl_fs *ofs, const struct path *upperpath, const struct path *workpath) { int err; err = -EINVAL; if (upperpath->mnt != workpath->mnt) { pr_err("workdir and upperdir must reside under the same mount\n"); return err; } if (!ovl_workdir_ok(workpath->dentry, upperpath->dentry)) { pr_err("workdir and upperdir must be separate subtrees\n"); return err; } ofs->workbasedir = dget(workpath->dentry); if (ovl_inuse_trylock(ofs->workbasedir)) { ofs->workdir_locked = true; } else { err = ovl_report_in_use(ofs, "workdir"); if (err) return err; } err = ovl_setup_trap(sb, ofs->workbasedir, &ofs->workbasedir_trap, "workdir"); if (err) return err; return ovl_make_workdir(sb, ofs, workpath); } static int ovl_get_indexdir(struct super_block *sb, struct ovl_fs *ofs, struct ovl_entry *oe, const struct path *upperpath) { struct vfsmount *mnt = ovl_upper_mnt(ofs); struct dentry *indexdir; struct dentry *origin = ovl_lowerstack(oe)->dentry; const struct ovl_fh *fh; int err; fh = ovl_get_origin_fh(ofs, origin); if (IS_ERR(fh)) return PTR_ERR(fh); err = mnt_want_write(mnt); if (err) goto out_free_fh; /* Verify lower root is upper root origin */ err = ovl_verify_origin_fh(ofs, upperpath->dentry, fh, true); if (err) { pr_err("failed to verify upper root origin\n"); goto out; } /* index dir will act also as workdir */ iput(ofs->workdir_trap); ofs->workdir_trap = NULL; dput(ofs->workdir); ofs->workdir = NULL; indexdir = ovl_workdir_create(ofs, OVL_INDEXDIR_NAME, true); if (IS_ERR(indexdir)) { err = PTR_ERR(indexdir); } else if (indexdir) { ofs->workdir = indexdir; err = ovl_setup_trap(sb, indexdir, &ofs->workdir_trap, "indexdir"); if (err) goto out; /* * Verify upper root is exclusively associated with index dir. * Older kernels stored upper fh in ".overlay.origin" * xattr. If that xattr exists, verify that it is a match to * upper dir file handle. In any case, verify or set xattr * ".overlay.upper" to indicate that index may have * directory entries. 
*/ if (ovl_check_origin_xattr(ofs, indexdir)) { err = ovl_verify_origin_xattr(ofs, indexdir, OVL_XATTR_ORIGIN, upperpath->dentry, true, false); if (err) pr_err("failed to verify index dir 'origin' xattr\n"); } err = ovl_verify_upper(ofs, indexdir, upperpath->dentry, true); if (err) pr_err("failed to verify index dir 'upper' xattr\n"); /* Cleanup bad/stale/orphan index entries */ if (!err) err = ovl_indexdir_cleanup(ofs); } if (err || !indexdir) pr_warn("try deleting index dir or mounting with '-o index=off' to disable inodes index.\n"); out: mnt_drop_write(mnt); out_free_fh: kfree(fh); return err; } static bool ovl_lower_uuid_ok(struct ovl_fs *ofs, const uuid_t *uuid) { unsigned int i; if (!ofs->config.nfs_export && !ovl_upper_mnt(ofs)) return true; /* * We allow using single lower with null uuid for index and nfs_export * for example to support those features with single lower squashfs. * To avoid regressions in setups of overlay with re-formatted lower * squashfs, do not allow decoding origin with lower null uuid unless * user opted-in to one of the new features that require following the * lower inode of non-dir upper. */ if (ovl_allow_offline_changes(ofs) && uuid_is_null(uuid)) return false; for (i = 0; i < ofs->numfs; i++) { /* * We use uuid to associate an overlay lower file handle with a * lower layer, so we can accept lower fs with null uuid as long * as all lower layers with null uuid are on the same fs. * if we detect multiple lower fs with the same uuid, we * disable lower file handle decoding on all of them. */ if (ofs->fs[i].is_lower && uuid_equal(&ofs->fs[i].sb->s_uuid, uuid)) { ofs->fs[i].bad_uuid = true; return false; } } return true; } /* Get a unique fsid for the layer */ static int ovl_get_fsid(struct ovl_fs *ofs, const struct path *path) { struct super_block *sb = path->mnt->mnt_sb; unsigned int i; dev_t dev; int err; bool bad_uuid = false; bool warn = false; for (i = 0; i < ofs->numfs; i++) { if (ofs->fs[i].sb == sb) return i; } if (!ovl_lower_uuid_ok(ofs, &sb->s_uuid)) { bad_uuid = true; if (ofs->config.xino == OVL_XINO_AUTO) { ofs->config.xino = OVL_XINO_OFF; warn = true; } if (ofs->config.index || ofs->config.nfs_export) { ofs->config.index = false; ofs->config.nfs_export = false; warn = true; } if (warn) { pr_warn("%s uuid detected in lower fs '%pd2', falling back to xino=%s,index=off,nfs_export=off.\n", uuid_is_null(&sb->s_uuid) ? "null" : "conflicting", path->dentry, ovl_xino_mode(&ofs->config)); } } err = get_anon_bdev(&dev); if (err) { pr_err("failed to get anonymous bdev for lowerpath\n"); return err; } ofs->fs[ofs->numfs].sb = sb; ofs->fs[ofs->numfs].pseudo_dev = dev; ofs->fs[ofs->numfs].bad_uuid = bad_uuid; return ofs->numfs++; } /* * The fsid after the last lower fsid is used for the data layers. * It is a "null fs" with a null sb, null uuid, and no pseudo dev. */ static int ovl_get_data_fsid(struct ovl_fs *ofs) { return ofs->numfs; } static int ovl_get_layers(struct super_block *sb, struct ovl_fs *ofs, struct ovl_fs_context *ctx, struct ovl_layer *layers) { int err; unsigned int i; size_t nr_merged_lower; ofs->fs = kcalloc(ctx->nr + 2, sizeof(struct ovl_sb), GFP_KERNEL); if (ofs->fs == NULL) return -ENOMEM; /* * idx/fsid 0 are reserved for upper fs even with lower only overlay * and the last fsid is reserved for "null fs" of the data layers. */ ofs->numfs++; /* * All lower layers that share the same fs as upper layer, use the same * pseudo_dev as upper layer. Allocate fs[0].pseudo_dev even for lower * only overlay to simplify ovl_fs_free(). 
* is_lower will be set if upper fs is shared with a lower layer. */ err = get_anon_bdev(&ofs->fs[0].pseudo_dev); if (err) { pr_err("failed to get anonymous bdev for upper fs\n"); return err; } if (ovl_upper_mnt(ofs)) { ofs->fs[0].sb = ovl_upper_mnt(ofs)->mnt_sb; ofs->fs[0].is_lower = false; } nr_merged_lower = ctx->nr - ctx->nr_data; for (i = 0; i < ctx->nr; i++) { struct ovl_fs_context_layer *l = &ctx->lower[i]; struct vfsmount *mnt; struct inode *trap; int fsid; if (i < nr_merged_lower) fsid = ovl_get_fsid(ofs, &l->path); else fsid = ovl_get_data_fsid(ofs); if (fsid < 0) return fsid; /* * Check if lower root conflicts with this overlay layers before * checking if it is in-use as upperdir/workdir of "another" * mount, because we do not bother to check in ovl_is_inuse() if * the upperdir/workdir is in fact in-use by our * upperdir/workdir. */ err = ovl_setup_trap(sb, l->path.dentry, &trap, "lowerdir"); if (err) return err; if (ovl_is_inuse(l->path.dentry)) { err = ovl_report_in_use(ofs, "lowerdir"); if (err) { iput(trap); return err; } } mnt = clone_private_mount(&l->path); err = PTR_ERR(mnt); if (IS_ERR(mnt)) { pr_err("failed to clone lowerpath\n"); iput(trap); return err; } /* * Make lower layers R/O. That way fchmod/fchown on lower file * will fail instead of modifying lower fs. */ mnt->mnt_flags |= MNT_READONLY | MNT_NOATIME; layers[ofs->numlayer].trap = trap; layers[ofs->numlayer].mnt = mnt; layers[ofs->numlayer].idx = ofs->numlayer; layers[ofs->numlayer].fsid = fsid; layers[ofs->numlayer].fs = &ofs->fs[fsid]; /* Store for printing lowerdir=... in ovl_show_options() */ ofs->config.lowerdirs[ofs->numlayer] = l->name; l->name = NULL; ofs->numlayer++; ofs->fs[fsid].is_lower = true; } /* * When all layers on same fs, overlay can use real inode numbers. * With mount option "xino=<on|auto>", mounter declares that there are * enough free high bits in underlying fs to hold the unique fsid. * If overlayfs does encounter underlying inodes using the high xino * bits reserved for fsid, it emits a warning and uses the original * inode number or a non persistent inode number allocated from a * dedicated range. */ if (ofs->numfs - !ovl_upper_mnt(ofs) == 1) { if (ofs->config.xino == OVL_XINO_ON) pr_info("\"xino=on\" is useless with all layers on same fs, ignore.\n"); ofs->xino_mode = 0; } else if (ofs->config.xino == OVL_XINO_OFF) { ofs->xino_mode = -1; } else if (ofs->xino_mode < 0) { /* * This is a roundup of number of bits needed for encoding * fsid, where fsid 0 is reserved for upper fs (even with * lower only overlay) +1 extra bit is reserved for the non * persistent inode number range that is used for resolving * xino lower bits overflow. 
*/ BUILD_BUG_ON(ilog2(OVL_MAX_STACK) > 30); ofs->xino_mode = ilog2(ofs->numfs - 1) + 2; } if (ofs->xino_mode > 0) { pr_info("\"xino\" feature enabled using %d upper inode bits.\n", ofs->xino_mode); } return 0; } static struct ovl_entry *ovl_get_lowerstack(struct super_block *sb, struct ovl_fs_context *ctx, struct ovl_fs *ofs, struct ovl_layer *layers) { int err; unsigned int i; size_t nr_merged_lower; struct ovl_entry *oe; struct ovl_path *lowerstack; struct ovl_fs_context_layer *l; if (!ofs->config.upperdir && ctx->nr == 1) { pr_err("at least 2 lowerdir are needed while upperdir nonexistent\n"); return ERR_PTR(-EINVAL); } if (ctx->nr == ctx->nr_data) { pr_err("at least one non-data lowerdir is required\n"); return ERR_PTR(-EINVAL); } err = -EINVAL; for (i = 0; i < ctx->nr; i++) { l = &ctx->lower[i]; err = ovl_lower_dir(l->name, &l->path, ofs, &sb->s_stack_depth); if (err) return ERR_PTR(err); } err = -EINVAL; sb->s_stack_depth++; if (sb->s_stack_depth > FILESYSTEM_MAX_STACK_DEPTH) { pr_err("maximum fs stacking depth exceeded\n"); return ERR_PTR(err); } err = ovl_get_layers(sb, ofs, ctx, layers); if (err) return ERR_PTR(err); err = -ENOMEM; /* Data-only layers are not merged in root directory */ nr_merged_lower = ctx->nr - ctx->nr_data; oe = ovl_alloc_entry(nr_merged_lower); if (!oe) return ERR_PTR(err); lowerstack = ovl_lowerstack(oe); for (i = 0; i < nr_merged_lower; i++) { l = &ctx->lower[i]; lowerstack[i].dentry = dget(l->path.dentry); lowerstack[i].layer = &ofs->layers[i + 1]; } ofs->numdatalayer = ctx->nr_data; return oe; } /* * Check if this layer root is a descendant of: * - another layer of this overlayfs instance * - upper/work dir of any overlayfs instance */ static int ovl_check_layer(struct super_block *sb, struct ovl_fs *ofs, struct dentry *dentry, const char *name, bool is_lower) { struct dentry *next = dentry, *parent; int err = 0; if (!dentry) return 0; parent = dget_parent(next); /* Walk back ancestors to root (inclusive) looking for traps */ while (!err && parent != next) { if (is_lower && ovl_lookup_trap_inode(sb, parent)) { err = -ELOOP; pr_err("overlapping %s path\n", name); } else if (ovl_is_inuse(parent)) { err = ovl_report_in_use(ofs, name); } next = parent; parent = dget_parent(next); dput(next); } dput(parent); return err; } /* * Check if any of the layers or work dirs overlap. */ static int ovl_check_overlapping_layers(struct super_block *sb, struct ovl_fs *ofs) { int i, err; if (ovl_upper_mnt(ofs)) { err = ovl_check_layer(sb, ofs, ovl_upper_mnt(ofs)->mnt_root, "upperdir", false); if (err) return err; /* * Checking workbasedir avoids hitting ovl_is_inuse(parent) of * this instance and covers overlapping work and index dirs, * unless work or index dir have been moved since created inside * workbasedir. In that case, we already have their traps in * inode cache and we will catch that case on lookup. 
*/ err = ovl_check_layer(sb, ofs, ofs->workbasedir, "workdir", false); if (err) return err; } for (i = 1; i < ofs->numlayer; i++) { err = ovl_check_layer(sb, ofs, ofs->layers[i].mnt->mnt_root, "lowerdir", true); if (err) return err; } return 0; } static struct dentry *ovl_get_root(struct super_block *sb, struct dentry *upperdentry, struct ovl_entry *oe) { struct dentry *root; struct ovl_fs *ofs = OVL_FS(sb); struct ovl_path *lowerpath = ovl_lowerstack(oe); unsigned long ino = d_inode(lowerpath->dentry)->i_ino; int fsid = lowerpath->layer->fsid; struct ovl_inode_params oip = { .upperdentry = upperdentry, .oe = oe, }; root = d_make_root(ovl_new_inode(sb, S_IFDIR, 0)); if (!root) return NULL; if (upperdentry) { /* Root inode uses upper st_ino/i_ino */ ino = d_inode(upperdentry)->i_ino; fsid = 0; ovl_dentry_set_upper_alias(root); if (ovl_is_impuredir(sb, upperdentry)) ovl_set_flag(OVL_IMPURE, d_inode(root)); } /* Look for xwhiteouts marker except in the lowermost layer */ for (int i = 0; i < ovl_numlower(oe) - 1; i++, lowerpath++) { struct path path = { .mnt = lowerpath->layer->mnt, .dentry = lowerpath->dentry, }; /* overlay.opaque=x means xwhiteouts directory */ if (ovl_get_opaquedir_val(ofs, &path) == 'x') { ovl_layer_set_xwhiteouts(ofs, lowerpath->layer); ovl_dentry_set_xwhiteouts(root); } } /* Root is always merge -> can have whiteouts */ ovl_set_flag(OVL_WHITEOUTS, d_inode(root)); ovl_dentry_set_flag(OVL_E_CONNECTED, root); ovl_set_upperdata(d_inode(root)); ovl_inode_init(d_inode(root), &oip, ino, fsid); ovl_dentry_init_flags(root, upperdentry, oe, DCACHE_OP_WEAK_REVALIDATE); /* root keeps a reference of upperdentry */ dget(upperdentry); return root; } int ovl_fill_super(struct super_block *sb, struct fs_context *fc) { struct ovl_fs *ofs = sb->s_fs_info; struct ovl_fs_context *ctx = fc->fs_private; const struct cred *old_cred = NULL; struct dentry *root_dentry; struct ovl_entry *oe; struct ovl_layer *layers; struct cred *cred; int err; err = -EIO; if (WARN_ON(fc->user_ns != current_user_ns())) goto out_err; sb->s_d_op = &ovl_dentry_operations; err = -ENOMEM; if (!ofs->creator_cred) ofs->creator_cred = cred = prepare_creds(); else cred = (struct cred *)ofs->creator_cred; if (!cred) goto out_err; old_cred = ovl_override_creds(sb); err = ovl_fs_params_verify(ctx, &ofs->config); if (err) goto out_err; err = -EINVAL; if (ctx->nr == 0) { if (!(fc->sb_flags & SB_SILENT)) pr_err("missing 'lowerdir'\n"); goto out_err; } err = -ENOMEM; layers = kcalloc(ctx->nr + 1, sizeof(struct ovl_layer), GFP_KERNEL); if (!layers) goto out_err; ofs->config.lowerdirs = kcalloc(ctx->nr + 1, sizeof(char *), GFP_KERNEL); if (!ofs->config.lowerdirs) { kfree(layers); goto out_err; } ofs->layers = layers; /* * Layer 0 is reserved for upper even if there's no upper. * config.lowerdirs[0] is used for storing the user provided colon * separated lowerdir string. 
*/ ofs->config.lowerdirs[0] = ctx->lowerdir_all; ctx->lowerdir_all = NULL; ofs->numlayer = 1; sb->s_stack_depth = 0; sb->s_maxbytes = MAX_LFS_FILESIZE; atomic_long_set(&ofs->last_ino, 1); /* Assume underlying fs uses 32bit inodes unless proven otherwise */ if (ofs->config.xino != OVL_XINO_OFF) { ofs->xino_mode = BITS_PER_LONG - 32; if (!ofs->xino_mode) { pr_warn("xino not supported on 32bit kernel, falling back to xino=off.\n"); ofs->config.xino = OVL_XINO_OFF; } } /* alloc/destroy_inode needed for setting up traps in inode cache */ sb->s_op = &ovl_super_operations; if (ofs->config.upperdir) { struct super_block *upper_sb; err = -EINVAL; if (!ofs->config.workdir) { pr_err("missing 'workdir'\n"); goto out_err; } err = ovl_get_upper(sb, ofs, &layers[0], &ctx->upper); if (err) goto out_err; upper_sb = ovl_upper_mnt(ofs)->mnt_sb; if (!ovl_should_sync(ofs)) { ofs->errseq = errseq_sample(&upper_sb->s_wb_err); if (errseq_check(&upper_sb->s_wb_err, ofs->errseq)) { err = -EIO; pr_err("Cannot mount volatile when upperdir has an unseen error. Sync upperdir fs to clear state.\n"); goto out_err; } } err = ovl_get_workdir(sb, ofs, &ctx->upper, &ctx->work); if (err) goto out_err; if (!ofs->workdir) sb->s_flags |= SB_RDONLY; sb->s_stack_depth = upper_sb->s_stack_depth; sb->s_time_gran = upper_sb->s_time_gran; } oe = ovl_get_lowerstack(sb, ctx, ofs, layers); err = PTR_ERR(oe); if (IS_ERR(oe)) goto out_err; /* If the upper fs is nonexistent, we mark overlayfs r/o too */ if (!ovl_upper_mnt(ofs)) sb->s_flags |= SB_RDONLY; if (!ovl_origin_uuid(ofs) && ofs->numfs > 1) { pr_warn("The uuid=off requires a single fs for lower and upper, falling back to uuid=null.\n"); ofs->config.uuid = OVL_UUID_NULL; } else if (ovl_has_fsid(ofs) && ovl_upper_mnt(ofs)) { /* Use per instance persistent uuid/fsid */ ovl_init_uuid_xattr(sb, ofs, &ctx->upper); } if (!ovl_force_readonly(ofs) && ofs->config.index) { err = ovl_get_indexdir(sb, ofs, oe, &ctx->upper); if (err) goto out_free_oe; /* Force r/o mount with no index dir */ if (!ofs->workdir) sb->s_flags |= SB_RDONLY; } err = ovl_check_overlapping_layers(sb, ofs); if (err) goto out_free_oe; /* Show index=off in /proc/mounts for forced r/o mount */ if (!ofs->workdir) { ofs->config.index = false; if (ovl_upper_mnt(ofs) && ofs->config.nfs_export) { pr_warn("NFS export requires an index dir, falling back to nfs_export=off.\n"); ofs->config.nfs_export = false; } } if (ofs->config.metacopy && ofs->config.nfs_export) { pr_warn("NFS export is not supported with metadata only copy up, falling back to nfs_export=off.\n"); ofs->config.nfs_export = false; } /* * Support encoding decodable file handles with nfs_export=on * and encoding non-decodable file handles with nfs_export=off * if all layers support file handles. */ if (ofs->config.nfs_export) sb->s_export_op = &ovl_export_operations; else if (!ofs->nofh) sb->s_export_op = &ovl_export_fid_operations; /* Never override disk quota limits or use reserved space */ cap_lower(cred->cap_effective, CAP_SYS_RESOURCE); sb->s_magic = OVERLAYFS_SUPER_MAGIC; sb->s_xattr = ovl_xattr_handlers(ofs); sb->s_fs_info = ofs; #ifdef CONFIG_FS_POSIX_ACL sb->s_flags |= SB_POSIXACL; #endif sb->s_iflags |= SB_I_SKIP_SYNC; /* * Ensure that umask handling is done by the filesystems used * for the the upper layer instead of overlayfs as that would * lead to unexpected results. 
*/ sb->s_iflags |= SB_I_NOUMASK; sb->s_iflags |= SB_I_EVM_HMAC_UNSUPPORTED; err = -ENOMEM; root_dentry = ovl_get_root(sb, ctx->upper.dentry, oe); if (!root_dentry) goto out_free_oe; sb->s_root = root_dentry; ovl_revert_creds(old_cred); return 0; out_free_oe: ovl_free_entry(oe); out_err: /* * Revert creds before calling ovl_free_fs() which will call * put_cred() and put_cred() requires that the cred's that are * put are not the caller's creds, i.e., current->cred. */ if (old_cred) ovl_revert_creds(old_cred); ovl_free_fs(ofs); sb->s_fs_info = NULL; return err; } struct file_system_type ovl_fs_type = { .owner = THIS_MODULE, .name = "overlay", .init_fs_context = ovl_init_fs_context, .parameters = ovl_parameter_spec, .fs_flags = FS_USERNS_MOUNT, .kill_sb = kill_anon_super, }; MODULE_ALIAS_FS("overlay"); static void ovl_inode_init_once(void *foo) { struct ovl_inode *oi = foo; inode_init_once(&oi->vfs_inode); } static int __init ovl_init(void) { int err; ovl_inode_cachep = kmem_cache_create("ovl_inode", sizeof(struct ovl_inode), 0, (SLAB_RECLAIM_ACCOUNT| SLAB_ACCOUNT), ovl_inode_init_once); if (ovl_inode_cachep == NULL) return -ENOMEM; err = register_filesystem(&ovl_fs_type); if (!err) return 0; kmem_cache_destroy(ovl_inode_cachep); return err; } static void __exit ovl_exit(void) { unregister_filesystem(&ovl_fs_type); /* * Make sure all delayed rcu free inodes are flushed before we * destroy cache. */ rcu_barrier(); kmem_cache_destroy(ovl_inode_cachep); } module_init(ovl_init); module_exit(ovl_exit); |
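The mount-time logic above is easiest to see from userspace. Below is a minimal, hypothetical sketch of mounting an overlay that exercises ovl_fill_super(); the /lower, /upper, /work and /merged paths and the option string are illustrative assumptions, not part of the kernel sources.

/* Hypothetical userspace sketch: exercising ovl_fill_super() via mount(2).
 * All paths and the option string are assumptions for illustration only. */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	const char *opts = "lowerdir=/lower,upperdir=/upper,workdir=/work,"
			   "index=on,xino=auto";

	/* "overlay" matches ovl_fs_type.name registered above */
	if (mount("overlay", "/merged", "overlay", 0, opts)) {
		perror("mount overlay");
		return 1;
	}
	return 0;
}

If workdir creation fails, ovl_fill_super() above forces a read-only mount and reports index=off in /proc/mounts.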
// SPDX-License-Identifier: GPL-2.0 #include <crypto/internal/hash.h> #include <linux/init.h> #include <linux/module.h> #include <linux/xxhash.h> #include <linux/unaligned.h> #define XXHASH64_BLOCK_SIZE 32 #define XXHASH64_DIGEST_SIZE 8 struct xxhash64_tfm_ctx { u64 seed; }; struct xxhash64_desc_ctx { struct xxh64_state xxhstate; }; static int xxhash64_setkey(struct crypto_shash *tfm, const u8 *key, unsigned int keylen) { struct xxhash64_tfm_ctx *tctx = crypto_shash_ctx(tfm); if (keylen != sizeof(tctx->seed)) return -EINVAL; tctx->seed = get_unaligned_le64(key); return 0; } static int xxhash64_init(struct shash_desc *desc) { struct xxhash64_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); struct xxhash64_desc_ctx *dctx = shash_desc_ctx(desc); xxh64_reset(&dctx->xxhstate, tctx->seed); return 0; } static int xxhash64_update(struct shash_desc *desc, const u8 *data, unsigned int length) { struct xxhash64_desc_ctx *dctx = shash_desc_ctx(desc); xxh64_update(&dctx->xxhstate, data, length); return 0; } static int xxhash64_final(struct shash_desc *desc, u8 *out) { struct xxhash64_desc_ctx *dctx = shash_desc_ctx(desc); put_unaligned_le64(xxh64_digest(&dctx->xxhstate), out); return 0; } static int xxhash64_digest(struct shash_desc *desc, const u8 *data, unsigned int length, u8 *out) { struct xxhash64_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); put_unaligned_le64(xxh64(data, length, tctx->seed), out); return 0; } static struct shash_alg alg = { .digestsize = XXHASH64_DIGEST_SIZE, .setkey = xxhash64_setkey, .init = xxhash64_init, .update = xxhash64_update, .final = xxhash64_final, .digest = xxhash64_digest, .descsize = sizeof(struct xxhash64_desc_ctx), .base = { .cra_name = "xxhash64", .cra_driver_name = "xxhash64-generic", .cra_priority = 100, .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, .cra_blocksize = XXHASH64_BLOCK_SIZE, .cra_ctxsize = sizeof(struct xxhash64_tfm_ctx), .cra_module = THIS_MODULE, } }; static int __init xxhash_mod_init(void) { return crypto_register_shash(&alg); } static void __exit xxhash_mod_fini(void) { crypto_unregister_shash(&alg); } module_init(xxhash_mod_init); module_exit(xxhash_mod_fini); MODULE_AUTHOR("Nikolay Borisov <nborisov@suse.com>"); MODULE_DESCRIPTION("xxhash calculations wrapper for lib/xxhash.c"); MODULE_LICENSE("GPL"); MODULE_ALIAS_CRYPTO("xxhash64"); MODULE_ALIAS_CRYPTO("xxhash64-generic");
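For context, here is a minimal, hypothetical in-kernel sketch of using the "xxhash64" shash registered above through the crypto API; example_xxhash64() is an assumed helper name and not part of this module.

#include <crypto/hash.h>
#include <linux/unaligned.h>
#include <linux/err.h>

/* Hypothetical helper: one-shot seeded xxhash64 digest via the shash API. */
static int example_xxhash64(const void *data, unsigned int len, u64 seed, u8 out[8])
{
	struct crypto_shash *tfm;
	u8 key[8];
	int err;

	tfm = crypto_alloc_shash("xxhash64", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* The optional key is the 8-byte little-endian seed (see xxhash64_setkey) */
	put_unaligned_le64(seed, key);
	err = crypto_shash_setkey(tfm, key, sizeof(key));
	if (!err) {
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		err = crypto_shash_digest(desc, data, len, out);
	}
	crypto_free_shash(tfm);
	return err;
}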
// SPDX-License-Identifier: GPL-2.0-only #include <linux/fs.h> #include <linux/xattr.h> #include "overlayfs.h" static bool ovl_is_escaped_xattr(struct super_block *sb, const char *name) { struct ovl_fs *ofs = sb->s_fs_info; if (ofs->config.userxattr) return strncmp(name, OVL_XATTR_ESCAPE_USER_PREFIX, OVL_XATTR_ESCAPE_USER_PREFIX_LEN) == 0; else return strncmp(name, OVL_XATTR_ESCAPE_TRUSTED_PREFIX, OVL_XATTR_ESCAPE_TRUSTED_PREFIX_LEN - 1) == 0; } static bool ovl_is_own_xattr(struct super_block *sb, const char *name) { struct ovl_fs *ofs = OVL_FS(sb); if (ofs->config.userxattr) return strncmp(name, OVL_XATTR_USER_PREFIX, OVL_XATTR_USER_PREFIX_LEN) == 0; else return strncmp(name, OVL_XATTR_TRUSTED_PREFIX, OVL_XATTR_TRUSTED_PREFIX_LEN) == 0; } bool ovl_is_private_xattr(struct super_block *sb, const char *name) { return ovl_is_own_xattr(sb, name) && !ovl_is_escaped_xattr(sb, name); } static int ovl_xattr_set(struct dentry *dentry, struct inode *inode, const char *name, const void *value, size_t size, int flags) { int err; struct ovl_fs *ofs = OVL_FS(dentry->d_sb); struct dentry *upperdentry = ovl_i_dentry_upper(inode); struct dentry *realdentry = upperdentry ?: ovl_dentry_lower(dentry); struct path realpath; const struct cred *old_cred; if (!value && !upperdentry) { ovl_path_lower(dentry, &realpath); old_cred = ovl_override_creds(dentry->d_sb); err = vfs_getxattr(mnt_idmap(realpath.mnt), realdentry, name, NULL, 0); ovl_revert_creds(old_cred); if (err < 0) goto out; } if (!upperdentry) { err = ovl_copy_up(dentry); if (err) goto out; realdentry = ovl_dentry_upper(dentry); } err = ovl_want_write(dentry); if (err) goto out; old_cred = ovl_override_creds(dentry->d_sb); if (value) { err = ovl_do_setxattr(ofs, realdentry, name, value, size, flags); } else { WARN_ON(flags != XATTR_REPLACE); err = ovl_do_removexattr(ofs, realdentry, name); } ovl_revert_creds(old_cred); ovl_drop_write(dentry); /* copy c/mtime */ ovl_copyattr(inode); out: return err; } static int ovl_xattr_get(struct dentry *dentry, struct inode *inode, const char *name, void *value, size_t size) { ssize_t res; const struct cred *old_cred; struct path realpath; ovl_i_path_real(inode, &realpath); old_cred = ovl_override_creds(dentry->d_sb); res = vfs_getxattr(mnt_idmap(realpath.mnt), realpath.dentry, name, value, size); ovl_revert_creds(old_cred); return res; } static bool ovl_can_list(struct super_block *sb, const char *s) { /* Never list private (.overlay) */
if (ovl_is_private_xattr(sb, s)) return false; /* List all non-trusted xattrs */ if (strncmp(s, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) != 0) return true; /* list other trusted for superuser only */ return ns_capable_noaudit(&init_user_ns, CAP_SYS_ADMIN); } ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size) { struct dentry *realdentry = ovl_dentry_real(dentry); struct ovl_fs *ofs = OVL_FS(dentry->d_sb); ssize_t res; size_t len; char *s; const struct cred *old_cred; size_t prefix_len, name_len; old_cred = ovl_override_creds(dentry->d_sb); res = vfs_listxattr(realdentry, list, size); ovl_revert_creds(old_cred); if (res <= 0 || size == 0) return res; prefix_len = ofs->config.userxattr ? OVL_XATTR_USER_PREFIX_LEN : OVL_XATTR_TRUSTED_PREFIX_LEN; /* filter out private xattrs */ for (s = list, len = res; len;) { size_t slen = strnlen(s, len) + 1; /* underlying fs providing us with an broken xattr list? */ if (WARN_ON(slen > len)) return -EIO; len -= slen; if (!ovl_can_list(dentry->d_sb, s)) { res -= slen; memmove(s, s + slen, len); } else if (ovl_is_escaped_xattr(dentry->d_sb, s)) { res -= OVL_XATTR_ESCAPE_PREFIX_LEN; name_len = slen - prefix_len - OVL_XATTR_ESCAPE_PREFIX_LEN; s += prefix_len; memmove(s, s + OVL_XATTR_ESCAPE_PREFIX_LEN, name_len + len); s += name_len; } else { s += slen; } } return res; } static char *ovl_xattr_escape_name(const char *prefix, const char *name) { size_t prefix_len = strlen(prefix); size_t name_len = strlen(name); size_t escaped_len; char *escaped, *s; escaped_len = prefix_len + OVL_XATTR_ESCAPE_PREFIX_LEN + name_len; if (escaped_len > XATTR_NAME_MAX) return ERR_PTR(-EOPNOTSUPP); escaped = kmalloc(escaped_len + 1, GFP_KERNEL); if (escaped == NULL) return ERR_PTR(-ENOMEM); s = escaped; memcpy(s, prefix, prefix_len); s += prefix_len; memcpy(s, OVL_XATTR_ESCAPE_PREFIX, OVL_XATTR_ESCAPE_PREFIX_LEN); s += OVL_XATTR_ESCAPE_PREFIX_LEN; memcpy(s, name, name_len + 1); return escaped; } static int ovl_own_xattr_get(const struct xattr_handler *handler, struct dentry *dentry, struct inode *inode, const char *name, void *buffer, size_t size) { char *escaped; int r; escaped = ovl_xattr_escape_name(handler->prefix, name); if (IS_ERR(escaped)) return PTR_ERR(escaped); r = ovl_xattr_get(dentry, inode, escaped, buffer, size); kfree(escaped); return r; } static int ovl_own_xattr_set(const struct xattr_handler *handler, struct mnt_idmap *idmap, struct dentry *dentry, struct inode *inode, const char *name, const void *value, size_t size, int flags) { char *escaped; int r; escaped = ovl_xattr_escape_name(handler->prefix, name); if (IS_ERR(escaped)) return PTR_ERR(escaped); r = ovl_xattr_set(dentry, inode, escaped, value, size, flags); kfree(escaped); return r; } static int ovl_other_xattr_get(const struct xattr_handler *handler, struct dentry *dentry, struct inode *inode, const char *name, void *buffer, size_t size) { return ovl_xattr_get(dentry, inode, name, buffer, size); } static int ovl_other_xattr_set(const struct xattr_handler *handler, struct mnt_idmap *idmap, struct dentry *dentry, struct inode *inode, const char *name, const void *value, size_t size, int flags) { return ovl_xattr_set(dentry, inode, name, value, size, flags); } static const struct xattr_handler ovl_own_trusted_xattr_handler = { .prefix = OVL_XATTR_TRUSTED_PREFIX, .get = ovl_own_xattr_get, .set = ovl_own_xattr_set, }; static const struct xattr_handler ovl_own_user_xattr_handler = { .prefix = OVL_XATTR_USER_PREFIX, .get = ovl_own_xattr_get, .set = ovl_own_xattr_set, }; static const 
struct xattr_handler ovl_other_xattr_handler = { .prefix = "", /* catch all */ .get = ovl_other_xattr_get, .set = ovl_other_xattr_set, }; static const struct xattr_handler * const ovl_trusted_xattr_handlers[] = { &ovl_own_trusted_xattr_handler, &ovl_other_xattr_handler, NULL }; static const struct xattr_handler * const ovl_user_xattr_handlers[] = { &ovl_own_user_xattr_handler, &ovl_other_xattr_handler, NULL }; const struct xattr_handler * const *ovl_xattr_handlers(struct ovl_fs *ofs) { return ofs->config.userxattr ? ovl_user_xattr_handlers : ovl_trusted_xattr_handlers; } |
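To make the escaping done by the ovl_own_xattr_*() handlers above concrete, a hypothetical userspace sketch follows (the /merged and /upper paths and the "foo" name are assumptions): an xattr set through the overlay as "trusted.overlay.foo" is stored on the real upper inode under the escaped name "trusted.overlay.overlay.foo", and ovl_listxattr() strips the escape prefix again when listing.

#include <stdio.h>
#include <sys/xattr.h>

int main(void)
{
	char buf[16];
	ssize_t n;

	/* /merged is an overlay mount, /upper its upperdir (assumed paths,
	 * trusted.* xattrs need CAP_SYS_ADMIN) */
	if (setxattr("/merged/file", "trusted.overlay.foo", "bar", 3, 0))
		perror("setxattr");

	/* The escaped copy lives on the upper layer file */
	n = getxattr("/upper/file", "trusted.overlay.overlay.foo", buf, sizeof(buf));
	if (n > 0)
		printf("escaped xattr holds %.*s\n", (int)n, buf);
	return 0;
}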
// SPDX-License-Identifier: GPL-2.0-only /* * Driver for the VoIP USB phones with CM109 chipsets. * * Copyright (C) 2007 - 2008 Alfred E. Heggestad <aeh@db.org> */ /* * Tested devices: * - Komunikate KIP1000 * - Genius G-talk * - Allied-Telesis Corega USBPH01 * - ... * * This driver is based on the yealink.c driver * * Thanks to: * - Authors of yealink.c * - Thomas Reitmayr * - Oliver Neukum for good review comments and code * - Shaun Jackman <sjackman@gmail.com> for Genius G-talk keymap * - Dmitry Torokhov for valuable input and review * * Todo: * - Read/write EEPROM */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/rwsem.h> #include <linux/usb/input.h> #define DRIVER_VERSION "20080805" #define DRIVER_AUTHOR "Alfred E. Heggestad" #define DRIVER_DESC "CM109 phone driver" static char *phone = "kip1000"; module_param(phone, charp, S_IRUSR); MODULE_PARM_DESC(phone, "Phone name {kip1000, gtalk, usbph01, atcom}"); enum { /* HID Registers */ HID_IR0 = 0x00, /* Record/Playback-mute button, Volume up/down */ HID_IR1 = 0x01, /* GPI, generic registers or EEPROM_DATA0 */ HID_IR2 = 0x02, /* Generic registers or EEPROM_DATA1 */ HID_IR3 = 0x03, /* Generic registers or EEPROM_CTRL */ HID_OR0 = 0x00, /* Mapping control, buzzer, SPDIF (offset 0x04) */ HID_OR1 = 0x01, /* GPO - General Purpose Output */ HID_OR2 = 0x02, /* Set GPIO to input/output mode */ HID_OR3 = 0x03, /* SPDIF status channel or EEPROM_CTRL */ /* HID_IR0 */ RECORD_MUTE = 1 << 3, PLAYBACK_MUTE = 1 << 2, VOLUME_DOWN = 1 << 1, VOLUME_UP = 1 << 0, /* HID_OR0 */ /* bits 7-6 0: HID_OR1-2 are used for GPO; HID_OR0, 3 are used for buzzer and SPDIF 1: HID_OR0-3 are used as generic HID registers 2: Values written to HID_OR0-3 are also mapped to MCU_CTRL, EEPROM_DATA0-1, EEPROM_CTRL (see Note) 3: Reserved */ HID_OR_GPO_BUZ_SPDIF = 0 << 6, HID_OR_GENERIC_HID_REG = 1 << 6, HID_OR_MAP_MCU_EEPROM = 2 << 6, BUZZER_ON = 1 << 5, /* up to 256 normal keys, up to 15 special key combinations */ KEYMAP_SIZE = 256 + 15, }; /* CM109 protocol packet */ struct cm109_ctl_packet { u8 byte[4]; } __attribute__ ((packed)); enum { USB_PKT_LEN = sizeof(struct cm109_ctl_packet) }; /* CM109 device structure */ struct cm109_dev { struct input_dev *idev; /* input device */ struct usb_device *udev; /* usb device */ struct usb_interface *intf; /* irq input channel */ struct cm109_ctl_packet *irq_data; dma_addr_t irq_dma; struct urb *urb_irq; /* control output channel */ struct cm109_ctl_packet *ctl_data; dma_addr_t ctl_dma; struct usb_ctrlrequest *ctl_req; struct urb *urb_ctl; /* * The 3 bitfields below are protected by ctl_submit_lock. * They have to be separate since they are accessed from IRQ * context.
*/ unsigned irq_urb_pending:1; /* irq_urb is in flight */ unsigned ctl_urb_pending:1; /* ctl_urb is in flight */ unsigned buzzer_pending:1; /* need to issue buzz command */ spinlock_t ctl_submit_lock; unsigned char buzzer_state; /* on/off */ /* flags */ unsigned open:1; unsigned resetting:1; unsigned shutdown:1; /* This mutex protects writes to the above flags */ struct mutex pm_mutex; unsigned short keymap[KEYMAP_SIZE]; char phys[64]; /* physical device path */ int key_code; /* last reported key */ int keybit; /* 0=new scan 1,2,4,8=scan columns */ u8 gpi; /* Cached value of GPI (high nibble) */ }; /****************************************************************************** * CM109 key interface *****************************************************************************/ static unsigned short special_keymap(int code) { if (code > 0xff) { switch (code - 0xff) { case RECORD_MUTE: return KEY_MICMUTE; case PLAYBACK_MUTE: return KEY_MUTE; case VOLUME_DOWN: return KEY_VOLUMEDOWN; case VOLUME_UP: return KEY_VOLUMEUP; } } return KEY_RESERVED; } /* Map device buttons to internal key events. * * The "up" and "down" keys, are symbolised by arrows on the button. * The "pickup" and "hangup" keys are symbolised by a green and red phone * on the button. Komunikate KIP1000 Keyboard Matrix -> -- 1 -- 2 -- 3 --> GPI pin 4 (0x10) | | | | <- -- 4 -- 5 -- 6 --> GPI pin 5 (0x20) | | | | END - 7 -- 8 -- 9 --> GPI pin 6 (0x40) | | | | OK -- * -- 0 -- # --> GPI pin 7 (0x80) | | | | /|\ /|\ /|\ /|\ | | | | GPO pin: 3 2 1 0 0x8 0x4 0x2 0x1 */ static unsigned short keymap_kip1000(int scancode) { switch (scancode) { /* phone key: */ case 0x82: return KEY_NUMERIC_0; /* 0 */ case 0x14: return KEY_NUMERIC_1; /* 1 */ case 0x12: return KEY_NUMERIC_2; /* 2 */ case 0x11: return KEY_NUMERIC_3; /* 3 */ case 0x24: return KEY_NUMERIC_4; /* 4 */ case 0x22: return KEY_NUMERIC_5; /* 5 */ case 0x21: return KEY_NUMERIC_6; /* 6 */ case 0x44: return KEY_NUMERIC_7; /* 7 */ case 0x42: return KEY_NUMERIC_8; /* 8 */ case 0x41: return KEY_NUMERIC_9; /* 9 */ case 0x81: return KEY_NUMERIC_POUND; /* # */ case 0x84: return KEY_NUMERIC_STAR; /* * */ case 0x88: return KEY_ENTER; /* pickup */ case 0x48: return KEY_ESC; /* hangup */ case 0x28: return KEY_LEFT; /* IN */ case 0x18: return KEY_RIGHT; /* OUT */ default: return special_keymap(scancode); } } /* Contributed by Shaun Jackman <sjackman@gmail.com> Genius G-Talk keyboard matrix 0 1 2 3 4: 0 4 8 Talk 5: 1 5 9 End 6: 2 6 # Up 7: 3 7 * Down */ static unsigned short keymap_gtalk(int scancode) { switch (scancode) { case 0x11: return KEY_NUMERIC_0; case 0x21: return KEY_NUMERIC_1; case 0x41: return KEY_NUMERIC_2; case 0x81: return KEY_NUMERIC_3; case 0x12: return KEY_NUMERIC_4; case 0x22: return KEY_NUMERIC_5; case 0x42: return KEY_NUMERIC_6; case 0x82: return KEY_NUMERIC_7; case 0x14: return KEY_NUMERIC_8; case 0x24: return KEY_NUMERIC_9; case 0x44: return KEY_NUMERIC_POUND; /* # */ case 0x84: return KEY_NUMERIC_STAR; /* * */ case 0x18: return KEY_ENTER; /* Talk (green handset) */ case 0x28: return KEY_ESC; /* End (red handset) */ case 0x48: return KEY_UP; /* Menu up (rocker switch) */ case 0x88: return KEY_DOWN; /* Menu down (rocker switch) */ default: return special_keymap(scancode); } } /* * Keymap for Allied-Telesis Corega USBPH01 * http://www.alliedtelesis-corega.com/2/1344/1437/1360/chprd.html * * Contributed by july@nat.bg */ static unsigned short keymap_usbph01(int scancode) { switch (scancode) { case 0x11: return KEY_NUMERIC_0; /* 0 */ case 0x21: return KEY_NUMERIC_1; /* 1 */ case 
0x41: return KEY_NUMERIC_2; /* 2 */ case 0x81: return KEY_NUMERIC_3; /* 3 */ case 0x12: return KEY_NUMERIC_4; /* 4 */ case 0x22: return KEY_NUMERIC_5; /* 5 */ case 0x42: return KEY_NUMERIC_6; /* 6 */ case 0x82: return KEY_NUMERIC_7; /* 7 */ case 0x14: return KEY_NUMERIC_8; /* 8 */ case 0x24: return KEY_NUMERIC_9; /* 9 */ case 0x44: return KEY_NUMERIC_POUND; /* # */ case 0x84: return KEY_NUMERIC_STAR; /* * */ case 0x18: return KEY_ENTER; /* pickup */ case 0x28: return KEY_ESC; /* hangup */ case 0x48: return KEY_LEFT; /* IN */ case 0x88: return KEY_RIGHT; /* OUT */ default: return special_keymap(scancode); } } /* * Keymap for ATCom AU-100 * http://www.atcom.cn/products.html * http://www.packetizer.com/products/au100/ * http://www.voip-info.org/wiki/view/AU-100 * * Contributed by daniel@gimpelevich.san-francisco.ca.us */ static unsigned short keymap_atcom(int scancode) { switch (scancode) { /* phone key: */ case 0x82: return KEY_NUMERIC_0; /* 0 */ case 0x11: return KEY_NUMERIC_1; /* 1 */ case 0x12: return KEY_NUMERIC_2; /* 2 */ case 0x14: return KEY_NUMERIC_3; /* 3 */ case 0x21: return KEY_NUMERIC_4; /* 4 */ case 0x22: return KEY_NUMERIC_5; /* 5 */ case 0x24: return KEY_NUMERIC_6; /* 6 */ case 0x41: return KEY_NUMERIC_7; /* 7 */ case 0x42: return KEY_NUMERIC_8; /* 8 */ case 0x44: return KEY_NUMERIC_9; /* 9 */ case 0x84: return KEY_NUMERIC_POUND; /* # */ case 0x81: return KEY_NUMERIC_STAR; /* * */ case 0x18: return KEY_ENTER; /* pickup */ case 0x28: return KEY_ESC; /* hangup */ case 0x48: return KEY_LEFT; /* left arrow */ case 0x88: return KEY_RIGHT; /* right arrow */ default: return special_keymap(scancode); } } static unsigned short (*keymap)(int) = keymap_kip1000; /* * Completes a request by converting the data into events for the * input subsystem. */ static void report_key(struct cm109_dev *dev, int key) { struct input_dev *idev = dev->idev; if (dev->key_code >= 0) { /* old key up */ input_report_key(idev, dev->key_code, 0); } dev->key_code = key; if (key >= 0) { /* new valid key */ input_report_key(idev, key, 1); } input_sync(idev); } /* * Converts data of special key presses (volume, mute) into events * for the input subsystem, sends press-n-release for mute keys. 
*/ static void cm109_report_special(struct cm109_dev *dev) { static const u8 autorelease = RECORD_MUTE | PLAYBACK_MUTE; struct input_dev *idev = dev->idev; u8 data = dev->irq_data->byte[HID_IR0]; unsigned short keycode; int i; for (i = 0; i < 4; i++) { keycode = dev->keymap[0xff + BIT(i)]; if (keycode == KEY_RESERVED) continue; input_report_key(idev, keycode, data & BIT(i)); if (data & autorelease & BIT(i)) { input_sync(idev); input_report_key(idev, keycode, 0); } } input_sync(idev); } /****************************************************************************** * CM109 usb communication interface *****************************************************************************/ static void cm109_submit_buzz_toggle(struct cm109_dev *dev) { int error; if (dev->buzzer_state) dev->ctl_data->byte[HID_OR0] |= BUZZER_ON; else dev->ctl_data->byte[HID_OR0] &= ~BUZZER_ON; error = usb_submit_urb(dev->urb_ctl, GFP_ATOMIC); if (error) dev_err(&dev->intf->dev, "%s: usb_submit_urb (urb_ctl) failed %d\n", __func__, error); } static void cm109_submit_ctl(struct cm109_dev *dev) { int error; guard(spinlock_irqsave)(&dev->ctl_submit_lock); dev->irq_urb_pending = 0; if (unlikely(dev->shutdown)) return; if (dev->buzzer_state) dev->ctl_data->byte[HID_OR0] |= BUZZER_ON; else dev->ctl_data->byte[HID_OR0] &= ~BUZZER_ON; dev->ctl_data->byte[HID_OR1] = dev->keybit; dev->ctl_data->byte[HID_OR2] = dev->keybit; dev->buzzer_pending = 0; dev->ctl_urb_pending = 1; error = usb_submit_urb(dev->urb_ctl, GFP_ATOMIC); if (error) dev_err(&dev->intf->dev, "%s: usb_submit_urb (urb_ctl) failed %d\n", __func__, error); } /* * IRQ handler */ static void cm109_urb_irq_callback(struct urb *urb) { struct cm109_dev *dev = urb->context; const int status = urb->status; dev_dbg(&dev->intf->dev, "### URB IRQ: [0x%02x 0x%02x 0x%02x 0x%02x] keybit=0x%02x\n", dev->irq_data->byte[0], dev->irq_data->byte[1], dev->irq_data->byte[2], dev->irq_data->byte[3], dev->keybit); if (status) { if (status == -ESHUTDOWN) return; dev_err_ratelimited(&dev->intf->dev, "%s: urb status %d\n", __func__, status); goto out; } /* Special keys */ cm109_report_special(dev); /* Scan key column */ if (dev->keybit == 0xf) { /* Any changes ? 
*/ if ((dev->gpi & 0xf0) == (dev->irq_data->byte[HID_IR1] & 0xf0)) goto out; dev->gpi = dev->irq_data->byte[HID_IR1] & 0xf0; dev->keybit = 0x1; } else { report_key(dev, dev->keymap[dev->irq_data->byte[HID_IR1]]); dev->keybit <<= 1; if (dev->keybit > 0x8) dev->keybit = 0xf; } out: cm109_submit_ctl(dev); } static void cm109_urb_ctl_callback(struct urb *urb) { struct cm109_dev *dev = urb->context; const int status = urb->status; int error; dev_dbg(&dev->intf->dev, "### URB CTL: [0x%02x 0x%02x 0x%02x 0x%02x]\n", dev->ctl_data->byte[0], dev->ctl_data->byte[1], dev->ctl_data->byte[2], dev->ctl_data->byte[3]); if (status) { if (status == -ESHUTDOWN) return; dev_err_ratelimited(&dev->intf->dev, "%s: urb status %d\n", __func__, status); } guard(spinlock_irqsave)(&dev->ctl_submit_lock); dev->ctl_urb_pending = 0; if (unlikely(dev->shutdown)) return; if (dev->buzzer_pending || status) { dev->buzzer_pending = 0; dev->ctl_urb_pending = 1; cm109_submit_buzz_toggle(dev); } else if (likely(!dev->irq_urb_pending)) { /* ask for key data */ dev->irq_urb_pending = 1; error = usb_submit_urb(dev->urb_irq, GFP_ATOMIC); if (error) dev_err(&dev->intf->dev, "%s: usb_submit_urb (urb_irq) failed %d\n", __func__, error); } } static void cm109_toggle_buzzer_async(struct cm109_dev *dev) { guard(spinlock_irqsave)(&dev->ctl_submit_lock); if (dev->ctl_urb_pending) { /* URB completion will resubmit */ dev->buzzer_pending = 1; } else { dev->ctl_urb_pending = 1; cm109_submit_buzz_toggle(dev); } } static void cm109_toggle_buzzer_sync(struct cm109_dev *dev, int on) { int error; if (on) dev->ctl_data->byte[HID_OR0] |= BUZZER_ON; else dev->ctl_data->byte[HID_OR0] &= ~BUZZER_ON; error = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), dev->ctl_req->bRequest, dev->ctl_req->bRequestType, le16_to_cpu(dev->ctl_req->wValue), le16_to_cpu(dev->ctl_req->wIndex), dev->ctl_data, USB_PKT_LEN, USB_CTRL_SET_TIMEOUT); if (error < 0 && error != -EINTR) dev_err(&dev->intf->dev, "%s: usb_control_msg() failed %d\n", __func__, error); } static void cm109_stop_traffic(struct cm109_dev *dev) { dev->shutdown = 1; /* * Make sure other CPUs see this */ smp_wmb(); usb_kill_urb(dev->urb_ctl); usb_kill_urb(dev->urb_irq); cm109_toggle_buzzer_sync(dev, 0); dev->shutdown = 0; smp_wmb(); } static void cm109_restore_state(struct cm109_dev *dev) { if (dev->open) { /* * Restore buzzer state. 
* This will also kick regular URB submission */ cm109_toggle_buzzer_async(dev); } } /****************************************************************************** * input event interface *****************************************************************************/ static int cm109_input_open(struct input_dev *idev) { struct cm109_dev *dev = input_get_drvdata(idev); int error; error = usb_autopm_get_interface(dev->intf); if (error < 0) { dev_err(&idev->dev, "%s - cannot autoresume, result %d\n", __func__, error); return error; } scoped_guard(mutex, &dev->pm_mutex) { dev->buzzer_state = 0; dev->key_code = -1; /* no keys pressed */ dev->keybit = 0xf; /* issue INIT */ dev->ctl_data->byte[HID_OR0] = HID_OR_GPO_BUZ_SPDIF; dev->ctl_data->byte[HID_OR1] = dev->keybit; dev->ctl_data->byte[HID_OR2] = dev->keybit; dev->ctl_data->byte[HID_OR3] = 0x00; dev->ctl_urb_pending = 1; error = usb_submit_urb(dev->urb_ctl, GFP_KERNEL); if (!error) { dev->open = 1; return 0; } } dev->ctl_urb_pending = 0; usb_autopm_put_interface(dev->intf); dev_err(&dev->intf->dev, "%s: usb_submit_urb (urb_ctl) failed %d\n", __func__, error); return error; } static void cm109_input_close(struct input_dev *idev) { struct cm109_dev *dev = input_get_drvdata(idev); scoped_guard(mutex, &dev->pm_mutex) { /* * Once we are here event delivery is stopped so we * don't need to worry about someone starting buzzer * again */ cm109_stop_traffic(dev); dev->open = 0; } usb_autopm_put_interface(dev->intf); } static int cm109_input_ev(struct input_dev *idev, unsigned int type, unsigned int code, int value) { struct cm109_dev *dev = input_get_drvdata(idev); dev_dbg(&dev->intf->dev, "input_ev: type=%u code=%u value=%d\n", type, code, value); if (type != EV_SND) return -EINVAL; switch (code) { case SND_TONE: case SND_BELL: dev->buzzer_state = !!value; if (!dev->resetting) cm109_toggle_buzzer_async(dev); return 0; default: return -EINVAL; } } /****************************************************************************** * Linux interface and usb initialisation *****************************************************************************/ struct driver_info { char *name; }; static const struct driver_info info_cm109 = { .name = "CM109 USB driver", }; enum { VENDOR_ID = 0x0d8c, /* C-Media Electronics */ PRODUCT_ID_CM109 = 0x000e, /* CM109 defines range 0x0008 - 0x000f */ }; /* table of devices that work with this driver */ static const struct usb_device_id cm109_usb_table[] = { { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = VENDOR_ID, .idProduct = PRODUCT_ID_CM109, .bInterfaceClass = USB_CLASS_HID, .bInterfaceSubClass = 0, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t) &info_cm109 }, /* you can add more devices here with product ID 0x0008 - 0x000f */ { } }; static void cm109_usb_cleanup(struct cm109_dev *dev) { kfree(dev->ctl_req); usb_free_coherent(dev->udev, USB_PKT_LEN, dev->ctl_data, dev->ctl_dma); usb_free_coherent(dev->udev, USB_PKT_LEN, dev->irq_data, dev->irq_dma); usb_free_urb(dev->urb_irq); /* parameter validation in core/urb */ usb_free_urb(dev->urb_ctl); /* parameter validation in core/urb */ kfree(dev); } static void cm109_usb_disconnect(struct usb_interface *interface) { struct cm109_dev *dev = usb_get_intfdata(interface); usb_set_intfdata(interface, NULL); input_unregister_device(dev->idev); cm109_usb_cleanup(dev); } static int cm109_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *udev = interface_to_usbdev(intf); struct driver_info *nfo = 
(struct driver_info *)id->driver_info; struct usb_host_interface *interface; struct usb_endpoint_descriptor *endpoint; struct cm109_dev *dev; struct input_dev *input_dev = NULL; int ret, pipe, i; int error = -ENOMEM; interface = intf->cur_altsetting; if (interface->desc.bNumEndpoints < 1) return -ENODEV; endpoint = &interface->endpoint[0].desc; if (!usb_endpoint_is_int_in(endpoint)) return -ENODEV; dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) return -ENOMEM; spin_lock_init(&dev->ctl_submit_lock); mutex_init(&dev->pm_mutex); dev->udev = udev; dev->intf = intf; dev->idev = input_dev = input_allocate_device(); if (!input_dev) goto err_out; /* allocate usb buffers */ dev->irq_data = usb_alloc_coherent(udev, USB_PKT_LEN, GFP_KERNEL, &dev->irq_dma); if (!dev->irq_data) goto err_out; dev->ctl_data = usb_alloc_coherent(udev, USB_PKT_LEN, GFP_KERNEL, &dev->ctl_dma); if (!dev->ctl_data) goto err_out; dev->ctl_req = kmalloc(sizeof(*(dev->ctl_req)), GFP_KERNEL); if (!dev->ctl_req) goto err_out; /* allocate urb structures */ dev->urb_irq = usb_alloc_urb(0, GFP_KERNEL); if (!dev->urb_irq) goto err_out; dev->urb_ctl = usb_alloc_urb(0, GFP_KERNEL); if (!dev->urb_ctl) goto err_out; /* get a handle to the interrupt data pipe */ pipe = usb_rcvintpipe(udev, endpoint->bEndpointAddress); ret = usb_maxpacket(udev, pipe); if (ret != USB_PKT_LEN) dev_err(&intf->dev, "invalid payload size %d, expected %d\n", ret, USB_PKT_LEN); /* initialise irq urb */ usb_fill_int_urb(dev->urb_irq, udev, pipe, dev->irq_data, USB_PKT_LEN, cm109_urb_irq_callback, dev, endpoint->bInterval); dev->urb_irq->transfer_dma = dev->irq_dma; dev->urb_irq->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; dev->urb_irq->dev = udev; /* initialise ctl urb */ dev->ctl_req->bRequestType = USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT; dev->ctl_req->bRequest = USB_REQ_SET_CONFIGURATION; dev->ctl_req->wValue = cpu_to_le16(0x200); dev->ctl_req->wIndex = cpu_to_le16(interface->desc.bInterfaceNumber); dev->ctl_req->wLength = cpu_to_le16(USB_PKT_LEN); usb_fill_control_urb(dev->urb_ctl, udev, usb_sndctrlpipe(udev, 0), (void *)dev->ctl_req, dev->ctl_data, USB_PKT_LEN, cm109_urb_ctl_callback, dev); dev->urb_ctl->transfer_dma = dev->ctl_dma; dev->urb_ctl->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; dev->urb_ctl->dev = udev; /* find out the physical bus location */ usb_make_path(udev, dev->phys, sizeof(dev->phys)); strlcat(dev->phys, "/input0", sizeof(dev->phys)); /* register settings for the input device */ input_dev->name = nfo->name; input_dev->phys = dev->phys; usb_to_input_id(udev, &input_dev->id); input_dev->dev.parent = &intf->dev; input_set_drvdata(input_dev, dev); input_dev->open = cm109_input_open; input_dev->close = cm109_input_close; input_dev->event = cm109_input_ev; input_dev->keycode = dev->keymap; input_dev->keycodesize = sizeof(unsigned char); input_dev->keycodemax = ARRAY_SIZE(dev->keymap); input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_SND); input_dev->sndbit[0] = BIT_MASK(SND_BELL) | BIT_MASK(SND_TONE); /* register available key events */ for (i = 0; i < KEYMAP_SIZE; i++) { unsigned short k = keymap(i); dev->keymap[i] = k; __set_bit(k, input_dev->keybit); } __clear_bit(KEY_RESERVED, input_dev->keybit); error = input_register_device(dev->idev); if (error) goto err_out; usb_set_intfdata(intf, dev); return 0; err_out: input_free_device(input_dev); cm109_usb_cleanup(dev); return error; } static int cm109_usb_suspend(struct usb_interface *intf, pm_message_t message) { struct cm109_dev *dev = usb_get_intfdata(intf); dev_info(&intf->dev, 
"cm109: usb_suspend (event=%d)\n", message.event); guard(mutex)(&dev->pm_mutex); cm109_stop_traffic(dev); return 0; } static int cm109_usb_resume(struct usb_interface *intf) { struct cm109_dev *dev = usb_get_intfdata(intf); dev_info(&intf->dev, "cm109: usb_resume\n"); guard(mutex)(&dev->pm_mutex); cm109_restore_state(dev); return 0; } static int cm109_usb_pre_reset(struct usb_interface *intf) { struct cm109_dev *dev = usb_get_intfdata(intf); mutex_lock(&dev->pm_mutex); /* * Make sure input events don't try to toggle buzzer * while we are resetting */ dev->resetting = 1; smp_wmb(); cm109_stop_traffic(dev); return 0; } static int cm109_usb_post_reset(struct usb_interface *intf) { struct cm109_dev *dev = usb_get_intfdata(intf); dev->resetting = 0; smp_wmb(); cm109_restore_state(dev); mutex_unlock(&dev->pm_mutex); return 0; } static struct usb_driver cm109_driver = { .name = "cm109", .probe = cm109_usb_probe, .disconnect = cm109_usb_disconnect, .suspend = cm109_usb_suspend, .resume = cm109_usb_resume, .reset_resume = cm109_usb_resume, .pre_reset = cm109_usb_pre_reset, .post_reset = cm109_usb_post_reset, .id_table = cm109_usb_table, .supports_autosuspend = 1, }; static int __init cm109_select_keymap(void) { /* Load the phone keymap */ if (!strcasecmp(phone, "kip1000")) { keymap = keymap_kip1000; printk(KERN_INFO KBUILD_MODNAME ": " "Keymap for Komunikate KIP1000 phone loaded\n"); } else if (!strcasecmp(phone, "gtalk")) { keymap = keymap_gtalk; printk(KERN_INFO KBUILD_MODNAME ": " "Keymap for Genius G-talk phone loaded\n"); } else if (!strcasecmp(phone, "usbph01")) { keymap = keymap_usbph01; printk(KERN_INFO KBUILD_MODNAME ": " "Keymap for Allied-Telesis Corega USBPH01 phone loaded\n"); } else if (!strcasecmp(phone, "atcom")) { keymap = keymap_atcom; printk(KERN_INFO KBUILD_MODNAME ": " "Keymap for ATCom AU-100 phone loaded\n"); } else { printk(KERN_ERR KBUILD_MODNAME ": " "Unsupported phone: %s\n", phone); return -EINVAL; } return 0; } static int __init cm109_init(void) { int err; err = cm109_select_keymap(); if (err) return err; err = usb_register(&cm109_driver); if (err) return err; printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_DESC ": " DRIVER_VERSION " (C) " DRIVER_AUTHOR "\n"); return 0; } static void __exit cm109_exit(void) { usb_deregister(&cm109_driver); } module_init(cm109_init); module_exit(cm109_exit); MODULE_DEVICE_TABLE(usb, cm109_usb_table); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); |
// SPDX-License-Identifier: GPL-2.0 #include <linux/syscalls.h> #include <linux/export.h> #include <linux/fs.h> #include <linux/file.h> #include <linux/mount.h> #include <linux/namei.h> #include <linux/statfs.h> #include <linux/security.h> #include <linux/uaccess.h> #include <linux/compat.h> #include "internal.h" static int flags_by_mnt(int mnt_flags) { int flags = 0; if (mnt_flags & MNT_READONLY) flags |= ST_RDONLY; if (mnt_flags & MNT_NOSUID) flags |= ST_NOSUID; if (mnt_flags & MNT_NODEV) flags |= ST_NODEV; if (mnt_flags & MNT_NOEXEC) flags |= ST_NOEXEC; if (mnt_flags & MNT_NOATIME) flags |= ST_NOATIME; if (mnt_flags & MNT_NODIRATIME) flags |= ST_NODIRATIME; if (mnt_flags & MNT_RELATIME) flags |= ST_RELATIME; if (mnt_flags & MNT_NOSYMFOLLOW) flags |= ST_NOSYMFOLLOW; return flags; } static int flags_by_sb(int s_flags) { int flags = 0; if (s_flags & SB_SYNCHRONOUS) flags |= ST_SYNCHRONOUS; if (s_flags & SB_MANDLOCK) flags |= ST_MANDLOCK; if (s_flags & SB_RDONLY) flags |= ST_RDONLY; return flags; } static int calculate_f_flags(struct vfsmount *mnt) { return ST_VALID | flags_by_mnt(mnt->mnt_flags) | flags_by_sb(mnt->mnt_sb->s_flags); } static int statfs_by_dentry(struct dentry *dentry, struct kstatfs *buf) { int retval; if (!dentry->d_sb->s_op->statfs) return -ENOSYS; memset(buf, 0, sizeof(*buf)); retval = security_sb_statfs(dentry); if (retval) return retval; retval = dentry->d_sb->s_op->statfs(dentry, buf); if (retval == 0 && buf->f_frsize == 0) buf->f_frsize = buf->f_bsize; return retval; } int vfs_get_fsid(struct dentry *dentry, __kernel_fsid_t *fsid) { struct kstatfs st; int error; error = statfs_by_dentry(dentry, &st); if (error) return error; *fsid = st.f_fsid; return 0; } EXPORT_SYMBOL(vfs_get_fsid); int vfs_statfs(const struct path *path, struct kstatfs *buf) { int error; error = statfs_by_dentry(path->dentry, buf); if (!error)
buf->f_flags = calculate_f_flags(path->mnt); return error; } EXPORT_SYMBOL(vfs_statfs); int user_statfs(const char __user *pathname, struct kstatfs *st) { struct path path; int error; unsigned int lookup_flags = LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT; retry: error = user_path_at(AT_FDCWD, pathname, lookup_flags, &path); if (!error) { error = vfs_statfs(&path, st); path_put(&path); if (retry_estale(error, lookup_flags)) { lookup_flags |= LOOKUP_REVAL; goto retry; } } return error; } int fd_statfs(int fd, struct kstatfs *st) { CLASS(fd_raw, f)(fd); if (fd_empty(f)) return -EBADF; return vfs_statfs(&fd_file(f)->f_path, st); } static int do_statfs_native(struct kstatfs *st, struct statfs __user *p) { struct statfs buf; if (sizeof(buf) == sizeof(*st)) memcpy(&buf, st, sizeof(*st)); else { memset(&buf, 0, sizeof(buf)); if (sizeof buf.f_blocks == 4) { if ((st->f_blocks | st->f_bfree | st->f_bavail | st->f_bsize | st->f_frsize) & 0xffffffff00000000ULL) return -EOVERFLOW; /* * f_files and f_ffree may be -1; it's okay to stuff * that into 32 bits */ if (st->f_files != -1 && (st->f_files & 0xffffffff00000000ULL)) return -EOVERFLOW; if (st->f_ffree != -1 && (st->f_ffree & 0xffffffff00000000ULL)) return -EOVERFLOW; } buf.f_type = st->f_type; buf.f_bsize = st->f_bsize; buf.f_blocks = st->f_blocks; buf.f_bfree = st->f_bfree; buf.f_bavail = st->f_bavail; buf.f_files = st->f_files; buf.f_ffree = st->f_ffree; buf.f_fsid = st->f_fsid; buf.f_namelen = st->f_namelen; buf.f_frsize = st->f_frsize; buf.f_flags = st->f_flags; } if (copy_to_user(p, &buf, sizeof(buf))) return -EFAULT; return 0; } static int do_statfs64(struct kstatfs *st, struct statfs64 __user *p) { struct statfs64 buf; if (sizeof(buf) == sizeof(*st)) memcpy(&buf, st, sizeof(*st)); else { memset(&buf, 0, sizeof(buf)); buf.f_type = st->f_type; buf.f_bsize = st->f_bsize; buf.f_blocks = st->f_blocks; buf.f_bfree = st->f_bfree; buf.f_bavail = st->f_bavail; buf.f_files = st->f_files; buf.f_ffree = st->f_ffree; buf.f_fsid = st->f_fsid; buf.f_namelen = st->f_namelen; buf.f_frsize = st->f_frsize; buf.f_flags = st->f_flags; } if (copy_to_user(p, &buf, sizeof(buf))) return -EFAULT; return 0; } SYSCALL_DEFINE2(statfs, const char __user *, pathname, struct statfs __user *, buf) { struct kstatfs st; int error = user_statfs(pathname, &st); if (!error) error = do_statfs_native(&st, buf); return error; } SYSCALL_DEFINE3(statfs64, const char __user *, pathname, size_t, sz, struct statfs64 __user *, buf) { struct kstatfs st; int error; if (sz != sizeof(*buf)) return -EINVAL; error = user_statfs(pathname, &st); if (!error) error = do_statfs64(&st, buf); return error; } SYSCALL_DEFINE2(fstatfs, unsigned int, fd, struct statfs __user *, buf) { struct kstatfs st; int error = fd_statfs(fd, &st); if (!error) error = do_statfs_native(&st, buf); return error; } SYSCALL_DEFINE3(fstatfs64, unsigned int, fd, size_t, sz, struct statfs64 __user *, buf) { struct kstatfs st; int error; if (sz != sizeof(*buf)) return -EINVAL; error = fd_statfs(fd, &st); if (!error) error = do_statfs64(&st, buf); return error; } static int vfs_ustat(dev_t dev, struct kstatfs *sbuf) { struct super_block *s = user_get_super(dev, false); int err; if (!s) return -EINVAL; err = statfs_by_dentry(s->s_root, sbuf); drop_super(s); return err; } SYSCALL_DEFINE2(ustat, unsigned, dev, struct ustat __user *, ubuf) { struct ustat tmp; struct kstatfs sbuf; int err = vfs_ustat(new_decode_dev(dev), &sbuf); if (err) return err; memset(&tmp,0,sizeof(struct ustat)); tmp.f_tfree = sbuf.f_bfree; if 
(IS_ENABLED(CONFIG_ARCH_32BIT_USTAT_F_TINODE)) tmp.f_tinode = min_t(u64, sbuf.f_ffree, UINT_MAX); else tmp.f_tinode = sbuf.f_ffree; return copy_to_user(ubuf, &tmp, sizeof(struct ustat)) ? -EFAULT : 0; } #ifdef CONFIG_COMPAT static int put_compat_statfs(struct compat_statfs __user *ubuf, struct kstatfs *kbuf) { struct compat_statfs buf; if (sizeof ubuf->f_blocks == 4) { if ((kbuf->f_blocks | kbuf->f_bfree | kbuf->f_bavail | kbuf->f_bsize | kbuf->f_frsize) & 0xffffffff00000000ULL) return -EOVERFLOW; /* f_files and f_ffree may be -1; it's okay * to stuff that into 32 bits */ if (kbuf->f_files != 0xffffffffffffffffULL && (kbuf->f_files & 0xffffffff00000000ULL)) return -EOVERFLOW; if (kbuf->f_ffree != 0xffffffffffffffffULL && (kbuf->f_ffree & 0xffffffff00000000ULL)) return -EOVERFLOW; } memset(&buf, 0, sizeof(struct compat_statfs)); buf.f_type = kbuf->f_type; buf.f_bsize = kbuf->f_bsize; buf.f_blocks = kbuf->f_blocks; buf.f_bfree = kbuf->f_bfree; buf.f_bavail = kbuf->f_bavail; buf.f_files = kbuf->f_files; buf.f_ffree = kbuf->f_ffree; buf.f_namelen = kbuf->f_namelen; buf.f_fsid.val[0] = kbuf->f_fsid.val[0]; buf.f_fsid.val[1] = kbuf->f_fsid.val[1]; buf.f_frsize = kbuf->f_frsize; buf.f_flags = kbuf->f_flags; if (copy_to_user(ubuf, &buf, sizeof(struct compat_statfs))) return -EFAULT; return 0; } /* * The following statfs calls are copies of code from fs/statfs.c and * should be checked against those from time to time */ COMPAT_SYSCALL_DEFINE2(statfs, const char __user *, pathname, struct compat_statfs __user *, buf) { struct kstatfs tmp; int error = user_statfs(pathname, &tmp); if (!error) error = put_compat_statfs(buf, &tmp); return error; } COMPAT_SYSCALL_DEFINE2(fstatfs, unsigned int, fd, struct compat_statfs __user *, buf) { struct kstatfs tmp; int error = fd_statfs(fd, &tmp); if (!error) error = put_compat_statfs(buf, &tmp); return error; } static int put_compat_statfs64(struct compat_statfs64 __user *ubuf, struct kstatfs *kbuf) { struct compat_statfs64 buf; if ((kbuf->f_bsize | kbuf->f_frsize) & 0xffffffff00000000ULL) return -EOVERFLOW; memset(&buf, 0, sizeof(struct compat_statfs64)); buf.f_type = kbuf->f_type; buf.f_bsize = kbuf->f_bsize; buf.f_blocks = kbuf->f_blocks; buf.f_bfree = kbuf->f_bfree; buf.f_bavail = kbuf->f_bavail; buf.f_files = kbuf->f_files; buf.f_ffree = kbuf->f_ffree; buf.f_namelen = kbuf->f_namelen; buf.f_fsid.val[0] = kbuf->f_fsid.val[0]; buf.f_fsid.val[1] = kbuf->f_fsid.val[1]; buf.f_frsize = kbuf->f_frsize; buf.f_flags = kbuf->f_flags; if (copy_to_user(ubuf, &buf, sizeof(struct compat_statfs64))) return -EFAULT; return 0; } int kcompat_sys_statfs64(const char __user * pathname, compat_size_t sz, struct compat_statfs64 __user * buf) { struct kstatfs tmp; int error; if (sz != sizeof(*buf)) return -EINVAL; error = user_statfs(pathname, &tmp); if (!error) error = put_compat_statfs64(buf, &tmp); return error; } COMPAT_SYSCALL_DEFINE3(statfs64, const char __user *, pathname, compat_size_t, sz, struct compat_statfs64 __user *, buf) { return kcompat_sys_statfs64(pathname, sz, buf); } int kcompat_sys_fstatfs64(unsigned int fd, compat_size_t sz, struct compat_statfs64 __user * buf) { struct kstatfs tmp; int error; if (sz != sizeof(*buf)) return -EINVAL; error = fd_statfs(fd, &tmp); if (!error) error = put_compat_statfs64(buf, &tmp); return error; } COMPAT_SYSCALL_DEFINE3(fstatfs64, unsigned int, fd, compat_size_t, sz, struct compat_statfs64 __user *, buf) { return kcompat_sys_fstatfs64(fd, sz, buf); } /* * This is a copy of sys_ustat, just dealing with a structure layout. 
* Given how simple this syscall is, that approach is more maintainable * than the various conversion hacks. */ COMPAT_SYSCALL_DEFINE2(ustat, unsigned, dev, struct compat_ustat __user *, u) { struct compat_ustat tmp; struct kstatfs sbuf; int err = vfs_ustat(new_decode_dev(dev), &sbuf); if (err) return err; memset(&tmp, 0, sizeof(struct compat_ustat)); tmp.f_tfree = sbuf.f_bfree; tmp.f_tinode = sbuf.f_ffree; if (copy_to_user(u, &tmp, sizeof(struct compat_ustat))) return -EFAULT; return 0; } #endif |
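From user space, the statfs/fstatfs syscalls implemented above are normally reached through the libc wrappers declared in <sys/statfs.h>. A minimal sketch, assuming glibc and an existing mount point at /tmp (both assumptions, not taken from this file), would be:

#include <stdio.h>
#include <sys/statfs.h>

int main(void)
{
	struct statfs st;

	/* Ends up in the statfs/statfs64 syscall paths shown above. */
	if (statfs("/tmp", &st) != 0) {
		perror("statfs");
		return 1;
	}
	printf("block size: %ld\n", (long)st.f_bsize);
	printf("blocks: total %llu, available %llu\n",
	       (unsigned long long)st.f_blocks,
	       (unsigned long long)st.f_bavail);
	printf("mount flags: 0x%lx\n", (unsigned long)st.f_flags);
	return 0;
}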
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_IRQ_H #define _LINUX_IRQ_H /* * Please do not include this file in generic code. There is currently * no requirement for any architecture to implement anything held * within this file. * * Thanks. --rmk */ #include <linux/cache.h> #include <linux/spinlock.h> #include <linux/cpumask.h> #include <linux/irqhandler.h> #include <linux/irqreturn.h> #include <linux/irqnr.h> #include <linux/topology.h> #include <linux/io.h> #include <linux/slab.h> #include <asm/irq.h> #include <asm/ptrace.h> #include <asm/irq_regs.h> struct seq_file; struct module; struct msi_msg; struct irq_affinity_desc; enum irqchip_irq_state; /* * IRQ line status. * * Bits 0-7 are the same as the IRQF_* bits in linux/interrupt.h * * IRQ_TYPE_NONE - default, unspecified type * IRQ_TYPE_EDGE_RISING - rising edge triggered * IRQ_TYPE_EDGE_FALLING - falling edge triggered * IRQ_TYPE_EDGE_BOTH - rising and falling edge triggered * IRQ_TYPE_LEVEL_HIGH - high level triggered * IRQ_TYPE_LEVEL_LOW - low level triggered * IRQ_TYPE_LEVEL_MASK - Mask to filter out the level bits * IRQ_TYPE_SENSE_MASK - Mask for all the above bits * IRQ_TYPE_DEFAULT - For use by some PICs to ask irq_set_type * to setup the HW to a sane default (used * by irqdomain map() callbacks to synchronize * the HW state and SW flags for a newly * allocated descriptor). * * IRQ_TYPE_PROBE - Special flag for probing in progress * * Bits which can be modified via irq_set/clear/modify_status_flags() * IRQ_LEVEL - Interrupt is level type.
Will be also * updated in the code when the above trigger * bits are modified via irq_set_irq_type() * IRQ_PER_CPU - Mark an interrupt PER_CPU. Will protect * it from affinity setting * IRQ_NOPROBE - Interrupt cannot be probed by autoprobing * IRQ_NOREQUEST - Interrupt cannot be requested via * request_irq() * IRQ_NOTHREAD - Interrupt cannot be threaded * IRQ_NOAUTOEN - Interrupt is not automatically enabled in * request/setup_irq() * IRQ_NO_BALANCING - Interrupt cannot be balanced (affinity set) * IRQ_NESTED_THREAD - Interrupt nests into another thread * IRQ_PER_CPU_DEVID - Dev_id is a per-cpu variable * IRQ_IS_POLLED - Always polled by another interrupt. Exclude * it from the spurious interrupt detection * mechanism and from core side polling. * IRQ_DISABLE_UNLAZY - Disable lazy irq disable * IRQ_HIDDEN - Don't show up in /proc/interrupts * IRQ_NO_DEBUG - Exclude from note_interrupt() debugging */ enum { IRQ_TYPE_NONE = 0x00000000, IRQ_TYPE_EDGE_RISING = 0x00000001, IRQ_TYPE_EDGE_FALLING = 0x00000002, IRQ_TYPE_EDGE_BOTH = (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING), IRQ_TYPE_LEVEL_HIGH = 0x00000004, IRQ_TYPE_LEVEL_LOW = 0x00000008, IRQ_TYPE_LEVEL_MASK = (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH), IRQ_TYPE_SENSE_MASK = 0x0000000f, IRQ_TYPE_DEFAULT = IRQ_TYPE_SENSE_MASK, IRQ_TYPE_PROBE = 0x00000010, IRQ_LEVEL = (1 << 8), IRQ_PER_CPU = (1 << 9), IRQ_NOPROBE = (1 << 10), IRQ_NOREQUEST = (1 << 11), IRQ_NOAUTOEN = (1 << 12), IRQ_NO_BALANCING = (1 << 13), IRQ_NESTED_THREAD = (1 << 15), IRQ_NOTHREAD = (1 << 16), IRQ_PER_CPU_DEVID = (1 << 17), IRQ_IS_POLLED = (1 << 18), IRQ_DISABLE_UNLAZY = (1 << 19), IRQ_HIDDEN = (1 << 20), IRQ_NO_DEBUG = (1 << 21), }; #define IRQF_MODIFY_MASK \ (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \ IRQ_NOAUTOEN | IRQ_LEVEL | IRQ_NO_BALANCING | \ IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \ IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY | IRQ_HIDDEN) #define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING) /* * Return value for chip->irq_set_affinity() * * IRQ_SET_MASK_OK - OK, core updates irq_common_data.affinity * IRQ_SET_MASK_NOCOPY - OK, chip did update irq_common_data.affinity * IRQ_SET_MASK_OK_DONE - Same as IRQ_SET_MASK_OK for core. Special code to * support stacked irqchips, which indicates skipping * all descendant irqchips. */ enum { IRQ_SET_MASK_OK = 0, IRQ_SET_MASK_OK_NOCOPY, IRQ_SET_MASK_OK_DONE, }; struct msi_desc; struct irq_domain; /** * struct irq_common_data - per irq data shared by all irqchips * @state_use_accessors: status information for irq chip functions. * Use accessor functions to deal with it * @node: node index useful for balancing * @handler_data: per-IRQ data for the irq_chip methods * @affinity: IRQ affinity on SMP. If this is an IPI * related irq, then this is the mask of the * CPUs to which an IPI can be sent. * @effective_affinity: The effective IRQ affinity on SMP as some irq * chips do not allow multi CPU destinations. * A subset of @affinity. * @msi_desc: MSI descriptor * @ipi_offset: Offset of first IPI target cpu in @affinity. Optional. 
*/ struct irq_common_data { unsigned int __private state_use_accessors; #ifdef CONFIG_NUMA unsigned int node; #endif void *handler_data; struct msi_desc *msi_desc; #ifdef CONFIG_SMP cpumask_var_t affinity; #endif #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK cpumask_var_t effective_affinity; #endif #ifdef CONFIG_GENERIC_IRQ_IPI unsigned int ipi_offset; #endif }; /** * struct irq_data - per irq chip data passed down to chip functions * @mask: precomputed bitmask for accessing the chip registers * @irq: interrupt number * @hwirq: hardware interrupt number, local to the interrupt domain * @common: point to data shared by all irqchips * @chip: low level interrupt hardware access * @domain: Interrupt translation domain; responsible for mapping * between hwirq number and linux irq number. * @parent_data: pointer to parent struct irq_data to support hierarchy * irq_domain * @chip_data: platform-specific per-chip private data for the chip * methods, to allow shared chip implementations */ struct irq_data { u32 mask; unsigned int irq; irq_hw_number_t hwirq; struct irq_common_data *common; struct irq_chip *chip; struct irq_domain *domain; #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY struct irq_data *parent_data; #endif void *chip_data; }; /* * Bit masks for irq_common_data.state_use_accessors * * IRQD_TRIGGER_MASK - Mask for the trigger type bits * IRQD_SETAFFINITY_PENDING - Affinity setting is pending * IRQD_ACTIVATED - Interrupt has already been activated * IRQD_NO_BALANCING - Balancing disabled for this IRQ * IRQD_PER_CPU - Interrupt is per cpu * IRQD_AFFINITY_SET - Interrupt affinity was set * IRQD_LEVEL - Interrupt is level triggered * IRQD_WAKEUP_STATE - Interrupt is configured for wakeup * from suspend * IRQD_IRQ_DISABLED - Disabled state of the interrupt * IRQD_IRQ_MASKED - Masked state of the interrupt * IRQD_IRQ_INPROGRESS - In progress state of the interrupt * IRQD_WAKEUP_ARMED - Wakeup mode armed * IRQD_FORWARDED_TO_VCPU - The interrupt is forwarded to a VCPU * IRQD_AFFINITY_MANAGED - Affinity is auto-managed by the kernel * IRQD_IRQ_STARTED - Startup state of the interrupt * IRQD_MANAGED_SHUTDOWN - Interrupt was shutdown due to empty affinity * mask. Applies only to affinity managed irqs. * IRQD_SINGLE_TARGET - IRQ allows only a single affinity target * IRQD_DEFAULT_TRIGGER_SET - Expected trigger already been set * IRQD_CAN_RESERVE - Can use reservation mode * IRQD_HANDLE_ENFORCE_IRQCTX - Enforce that handle_irq_*() is only invoked * from actual interrupt context. * IRQD_AFFINITY_ON_ACTIVATE - Affinity is set on activation. Don't call * irq_chip::irq_set_affinity() when deactivated. * IRQD_IRQ_ENABLED_ON_SUSPEND - Interrupt is enabled on suspend by irq pm if * irqchip have flag IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND set. * IRQD_RESEND_WHEN_IN_PROGRESS - Interrupt may fire when already in progress in which * case it must be resent at the next available opportunity. 
*/ enum { IRQD_TRIGGER_MASK = 0xf, IRQD_SETAFFINITY_PENDING = BIT(8), IRQD_ACTIVATED = BIT(9), IRQD_NO_BALANCING = BIT(10), IRQD_PER_CPU = BIT(11), IRQD_AFFINITY_SET = BIT(12), IRQD_LEVEL = BIT(13), IRQD_WAKEUP_STATE = BIT(14), IRQD_IRQ_DISABLED = BIT(16), IRQD_IRQ_MASKED = BIT(17), IRQD_IRQ_INPROGRESS = BIT(18), IRQD_WAKEUP_ARMED = BIT(19), IRQD_FORWARDED_TO_VCPU = BIT(20), IRQD_AFFINITY_MANAGED = BIT(21), IRQD_IRQ_STARTED = BIT(22), IRQD_MANAGED_SHUTDOWN = BIT(23), IRQD_SINGLE_TARGET = BIT(24), IRQD_DEFAULT_TRIGGER_SET = BIT(25), IRQD_CAN_RESERVE = BIT(26), IRQD_HANDLE_ENFORCE_IRQCTX = BIT(27), IRQD_AFFINITY_ON_ACTIVATE = BIT(28), IRQD_IRQ_ENABLED_ON_SUSPEND = BIT(29), IRQD_RESEND_WHEN_IN_PROGRESS = BIT(30), }; #define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors) static inline bool irqd_is_setaffinity_pending(struct irq_data *d) { return __irqd_to_state(d) & IRQD_SETAFFINITY_PENDING; } static inline bool irqd_is_per_cpu(struct irq_data *d) { return __irqd_to_state(d) & IRQD_PER_CPU; } static inline bool irqd_can_balance(struct irq_data *d) { return !(__irqd_to_state(d) & (IRQD_PER_CPU | IRQD_NO_BALANCING)); } static inline bool irqd_affinity_was_set(struct irq_data *d) { return __irqd_to_state(d) & IRQD_AFFINITY_SET; } static inline void irqd_mark_affinity_was_set(struct irq_data *d) { __irqd_to_state(d) |= IRQD_AFFINITY_SET; } static inline bool irqd_trigger_type_was_set(struct irq_data *d) { return __irqd_to_state(d) & IRQD_DEFAULT_TRIGGER_SET; } static inline u32 irqd_get_trigger_type(struct irq_data *d) { return __irqd_to_state(d) & IRQD_TRIGGER_MASK; } /* * Must only be called inside irq_chip.irq_set_type() functions or * from the DT/ACPI setup code. */ static inline void irqd_set_trigger_type(struct irq_data *d, u32 type) { __irqd_to_state(d) &= ~IRQD_TRIGGER_MASK; __irqd_to_state(d) |= type & IRQD_TRIGGER_MASK; __irqd_to_state(d) |= IRQD_DEFAULT_TRIGGER_SET; } static inline bool irqd_is_level_type(struct irq_data *d) { return __irqd_to_state(d) & IRQD_LEVEL; } /* * Must only be called of irqchip.irq_set_affinity() or low level * hierarchy domain allocation functions. 
*/ static inline void irqd_set_single_target(struct irq_data *d) { __irqd_to_state(d) |= IRQD_SINGLE_TARGET; } static inline bool irqd_is_single_target(struct irq_data *d) { return __irqd_to_state(d) & IRQD_SINGLE_TARGET; } static inline void irqd_set_handle_enforce_irqctx(struct irq_data *d) { __irqd_to_state(d) |= IRQD_HANDLE_ENFORCE_IRQCTX; } static inline bool irqd_is_handle_enforce_irqctx(struct irq_data *d) { return __irqd_to_state(d) & IRQD_HANDLE_ENFORCE_IRQCTX; } static inline bool irqd_is_enabled_on_suspend(struct irq_data *d) { return __irqd_to_state(d) & IRQD_IRQ_ENABLED_ON_SUSPEND; } static inline bool irqd_is_wakeup_set(struct irq_data *d) { return __irqd_to_state(d) & IRQD_WAKEUP_STATE; } static inline bool irqd_irq_disabled(struct irq_data *d) { return __irqd_to_state(d) & IRQD_IRQ_DISABLED; } static inline bool irqd_irq_masked(struct irq_data *d) { return __irqd_to_state(d) & IRQD_IRQ_MASKED; } static inline bool irqd_irq_inprogress(struct irq_data *d) { return __irqd_to_state(d) & IRQD_IRQ_INPROGRESS; } static inline bool irqd_is_wakeup_armed(struct irq_data *d) { return __irqd_to_state(d) & IRQD_WAKEUP_ARMED; } static inline bool irqd_is_forwarded_to_vcpu(struct irq_data *d) { return __irqd_to_state(d) & IRQD_FORWARDED_TO_VCPU; } static inline void irqd_set_forwarded_to_vcpu(struct irq_data *d) { __irqd_to_state(d) |= IRQD_FORWARDED_TO_VCPU; } static inline void irqd_clr_forwarded_to_vcpu(struct irq_data *d) { __irqd_to_state(d) &= ~IRQD_FORWARDED_TO_VCPU; } static inline bool irqd_affinity_is_managed(struct irq_data *d) { return __irqd_to_state(d) & IRQD_AFFINITY_MANAGED; } static inline bool irqd_is_activated(struct irq_data *d) { return __irqd_to_state(d) & IRQD_ACTIVATED; } static inline void irqd_set_activated(struct irq_data *d) { __irqd_to_state(d) |= IRQD_ACTIVATED; } static inline void irqd_clr_activated(struct irq_data *d) { __irqd_to_state(d) &= ~IRQD_ACTIVATED; } static inline bool irqd_is_started(struct irq_data *d) { return __irqd_to_state(d) & IRQD_IRQ_STARTED; } static inline bool irqd_is_managed_and_shutdown(struct irq_data *d) { return __irqd_to_state(d) & IRQD_MANAGED_SHUTDOWN; } static inline void irqd_set_can_reserve(struct irq_data *d) { __irqd_to_state(d) |= IRQD_CAN_RESERVE; } static inline void irqd_clr_can_reserve(struct irq_data *d) { __irqd_to_state(d) &= ~IRQD_CAN_RESERVE; } static inline bool irqd_can_reserve(struct irq_data *d) { return __irqd_to_state(d) & IRQD_CAN_RESERVE; } static inline void irqd_set_affinity_on_activate(struct irq_data *d) { __irqd_to_state(d) |= IRQD_AFFINITY_ON_ACTIVATE; } static inline bool irqd_affinity_on_activate(struct irq_data *d) { return __irqd_to_state(d) & IRQD_AFFINITY_ON_ACTIVATE; } static inline void irqd_set_resend_when_in_progress(struct irq_data *d) { __irqd_to_state(d) |= IRQD_RESEND_WHEN_IN_PROGRESS; } static inline bool irqd_needs_resend_when_in_progress(struct irq_data *d) { return __irqd_to_state(d) & IRQD_RESEND_WHEN_IN_PROGRESS; } #undef __irqd_to_state static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d) { return d->hwirq; } /** * struct irq_chip - hardware interrupt chip descriptor * * @name: name for /proc/interrupts * @irq_startup: start up the interrupt (defaults to ->enable if NULL) * @irq_shutdown: shut down the interrupt (defaults to ->disable if NULL) * @irq_enable: enable the interrupt (defaults to chip->unmask if NULL) * @irq_disable: disable the interrupt * @irq_ack: start of a new interrupt * @irq_mask: mask an interrupt source * @irq_mask_ack: ack and mask an interrupt 
source * @irq_unmask: unmask an interrupt source * @irq_eoi: end of interrupt * @irq_set_affinity: Set the CPU affinity on SMP machines. If the force * argument is true, it tells the driver to * unconditionally apply the affinity setting. Sanity * checks against the supplied affinity mask are not * required. This is used for CPU hotplug where the * target CPU is not yet set in the cpu_online_mask. * @irq_retrigger: resend an IRQ to the CPU * @irq_set_type: set the flow type (IRQ_TYPE_LEVEL/etc.) of an IRQ * @irq_set_wake: enable/disable power-management wake-on of an IRQ * @irq_bus_lock: function to lock access to slow bus (i2c) chips * @irq_bus_sync_unlock:function to sync and unlock slow bus (i2c) chips * @irq_cpu_online: configure an interrupt source for a secondary CPU * @irq_cpu_offline: un-configure an interrupt source for a secondary CPU * @irq_suspend: function called from core code on suspend once per * chip, when one or more interrupts are installed * @irq_resume: function called from core code on resume once per chip, * when one or more interrupts are installed * @irq_pm_shutdown: function called from core code on shutdown once per chip * @irq_calc_mask: Optional function to set irq_data.mask for special cases * @irq_print_chip: optional to print special chip info in show_interrupts * @irq_request_resources: optional to request resources before calling * any other callback related to this irq * @irq_release_resources: optional to release resources acquired with * irq_request_resources * @irq_compose_msi_msg: optional to compose message content for MSI * @irq_write_msi_msg: optional to write message content for MSI * @irq_get_irqchip_state: return the internal state of an interrupt * @irq_set_irqchip_state: set the internal state of an interrupt * @irq_set_vcpu_affinity: optional to target a vCPU in a virtual machine * @ipi_send_single: send a single IPI to destination cpus * @ipi_send_mask: send an IPI to destination cpus in cpumask * @irq_nmi_setup: function called from core code before enabling an NMI * @irq_nmi_teardown: function called from core code after disabling an NMI * @irq_force_complete_move: optional function to force complete pending irq move * @flags: chip specific flags */ struct irq_chip { const char *name; unsigned int (*irq_startup)(struct irq_data *data); void (*irq_shutdown)(struct irq_data *data); void (*irq_enable)(struct irq_data *data); void (*irq_disable)(struct irq_data *data); void (*irq_ack)(struct irq_data *data); void (*irq_mask)(struct irq_data *data); void (*irq_mask_ack)(struct irq_data *data); void (*irq_unmask)(struct irq_data *data); void (*irq_eoi)(struct irq_data *data); int (*irq_set_affinity)(struct irq_data *data, const struct cpumask *dest, bool force); int (*irq_retrigger)(struct irq_data *data); int (*irq_set_type)(struct irq_data *data, unsigned int flow_type); int (*irq_set_wake)(struct irq_data *data, unsigned int on); void (*irq_bus_lock)(struct irq_data *data); void (*irq_bus_sync_unlock)(struct irq_data *data); #ifdef CONFIG_DEPRECATED_IRQ_CPU_ONOFFLINE void (*irq_cpu_online)(struct irq_data *data); void (*irq_cpu_offline)(struct irq_data *data); #endif void (*irq_suspend)(struct irq_data *data); void (*irq_resume)(struct irq_data *data); void (*irq_pm_shutdown)(struct irq_data *data); void (*irq_calc_mask)(struct irq_data *data); void (*irq_print_chip)(struct irq_data *data, struct seq_file *p); int (*irq_request_resources)(struct irq_data *data); void (*irq_release_resources)(struct irq_data *data); void
(*irq_compose_msi_msg)(struct irq_data *data, struct msi_msg *msg); void (*irq_write_msi_msg)(struct irq_data *data, struct msi_msg *msg); int (*irq_get_irqchip_state)(struct irq_data *data, enum irqchip_irq_state which, bool *state); int (*irq_set_irqchip_state)(struct irq_data *data, enum irqchip_irq_state which, bool state); int (*irq_set_vcpu_affinity)(struct irq_data *data, void *vcpu_info); void (*ipi_send_single)(struct irq_data *data, unsigned int cpu); void (*ipi_send_mask)(struct irq_data *data, const struct cpumask *dest); int (*irq_nmi_setup)(struct irq_data *data); void (*irq_nmi_teardown)(struct irq_data *data); void (*irq_force_complete_move)(struct irq_data *data); unsigned long flags; }; /* * irq_chip specific flags * * IRQCHIP_SET_TYPE_MASKED: Mask before calling chip.irq_set_type() * IRQCHIP_EOI_IF_HANDLED: Only issue irq_eoi() when irq was handled * IRQCHIP_MASK_ON_SUSPEND: Mask non wake irqs in the suspend path * IRQCHIP_ONOFFLINE_ENABLED: Only call irq_on/off_line callbacks * when irq enabled * IRQCHIP_SKIP_SET_WAKE: Skip chip.irq_set_wake(), for this irq chip * IRQCHIP_ONESHOT_SAFE: One shot does not require mask/unmask * IRQCHIP_EOI_THREADED: Chip requires eoi() on unmask in threaded mode * IRQCHIP_SUPPORTS_LEVEL_MSI: Chip can provide two doorbells for Level MSIs * IRQCHIP_SUPPORTS_NMI: Chip can deliver NMIs, only for root irqchips * IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND: Invokes __enable_irq()/__disable_irq() for wake irqs * in the suspend path if they are in disabled state * IRQCHIP_AFFINITY_PRE_STARTUP: Default affinity update before startup * IRQCHIP_IMMUTABLE: Don't ever change anything in this chip * IRQCHIP_MOVE_DEFERRED: Move the interrupt in actual interrupt context */ enum { IRQCHIP_SET_TYPE_MASKED = (1 << 0), IRQCHIP_EOI_IF_HANDLED = (1 << 1), IRQCHIP_MASK_ON_SUSPEND = (1 << 2), IRQCHIP_ONOFFLINE_ENABLED = (1 << 3), IRQCHIP_SKIP_SET_WAKE = (1 << 4), IRQCHIP_ONESHOT_SAFE = (1 << 5), IRQCHIP_EOI_THREADED = (1 << 6), IRQCHIP_SUPPORTS_LEVEL_MSI = (1 << 7), IRQCHIP_SUPPORTS_NMI = (1 << 8), IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND = (1 << 9), IRQCHIP_AFFINITY_PRE_STARTUP = (1 << 10), IRQCHIP_IMMUTABLE = (1 << 11), IRQCHIP_MOVE_DEFERRED = (1 << 12), }; #include <linux/irqdesc.h> /* * Pick up the arch-dependent methods: */ #include <asm/hw_irq.h> #ifndef NR_IRQS_LEGACY # define NR_IRQS_LEGACY 0 #endif #ifndef ARCH_IRQ_INIT_FLAGS # define ARCH_IRQ_INIT_FLAGS 0 #endif #define IRQ_DEFAULT_INIT_FLAGS ARCH_IRQ_INIT_FLAGS struct irqaction; extern int setup_percpu_irq(unsigned int irq, struct irqaction *new); #ifdef CONFIG_DEPRECATED_IRQ_CPU_ONOFFLINE extern void irq_cpu_online(void); extern void irq_cpu_offline(void); #endif extern int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *cpumask, bool force); extern int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info); #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_IRQ_MIGRATION) extern void irq_migrate_all_off_this_cpu(void); extern int irq_affinity_online_cpu(unsigned int cpu); #else # define irq_affinity_online_cpu NULL #endif #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ) bool irq_can_move_in_process_context(struct irq_data *data); void __irq_move_irq(struct irq_data *data); static inline void irq_move_irq(struct irq_data *data) { if (unlikely(irqd_is_setaffinity_pending(data))) __irq_move_irq(data); } void irq_move_masked_irq(struct irq_data *data); #else static inline bool irq_can_move_in_process_context(struct irq_data *data) { return true; } static inline void irq_move_irq(struct 
irq_data *data) { } static inline void irq_move_masked_irq(struct irq_data *data) { } #endif extern int no_irq_affinity; #ifdef CONFIG_HARDIRQS_SW_RESEND int irq_set_parent(int irq, int parent_irq); #else static inline int irq_set_parent(int irq, int parent_irq) { return 0; } #endif /* * Built-in IRQ handlers for various IRQ types, * callable via desc->handle_irq() */ extern void handle_level_irq(struct irq_desc *desc); extern void handle_fasteoi_irq(struct irq_desc *desc); extern void handle_edge_irq(struct irq_desc *desc); extern void handle_edge_eoi_irq(struct irq_desc *desc); extern void handle_simple_irq(struct irq_desc *desc); extern void handle_untracked_irq(struct irq_desc *desc); extern void handle_percpu_irq(struct irq_desc *desc); extern void handle_percpu_devid_irq(struct irq_desc *desc); extern void handle_bad_irq(struct irq_desc *desc); extern void handle_nested_irq(unsigned int irq); extern void handle_fasteoi_nmi(struct irq_desc *desc); extern void handle_percpu_devid_fasteoi_nmi(struct irq_desc *desc); extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg); extern int irq_chip_pm_get(struct irq_data *data); extern int irq_chip_pm_put(struct irq_data *data); #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY extern void handle_fasteoi_ack_irq(struct irq_desc *desc); extern void handle_fasteoi_mask_irq(struct irq_desc *desc); extern int irq_chip_set_parent_state(struct irq_data *data, enum irqchip_irq_state which, bool val); extern int irq_chip_get_parent_state(struct irq_data *data, enum irqchip_irq_state which, bool *state); extern void irq_chip_enable_parent(struct irq_data *data); extern void irq_chip_disable_parent(struct irq_data *data); extern void irq_chip_ack_parent(struct irq_data *data); extern int irq_chip_retrigger_hierarchy(struct irq_data *data); extern void irq_chip_mask_parent(struct irq_data *data); extern void irq_chip_mask_ack_parent(struct irq_data *data); extern void irq_chip_unmask_parent(struct irq_data *data); extern void irq_chip_eoi_parent(struct irq_data *data); extern int irq_chip_set_affinity_parent(struct irq_data *data, const struct cpumask *dest, bool force); extern int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on); extern int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, void *vcpu_info); extern int irq_chip_set_type_parent(struct irq_data *data, unsigned int type); extern int irq_chip_request_resources_parent(struct irq_data *data); extern void irq_chip_release_resources_parent(struct irq_data *data); #endif /* Disable or mask interrupts during a kernel kexec */ extern void machine_kexec_mask_interrupts(void); /* Handling of unhandled and spurious interrupts: */ extern void note_interrupt(struct irq_desc *desc, irqreturn_t action_ret); /* Enable/disable irq debugging output: */ extern int noirqdebug_setup(char *str); /* Checks whether the interrupt can be requested by request_irq(): */ extern bool can_request_irq(unsigned int irq, unsigned long irqflags); /* Dummy irq-chip implementations: */ extern struct irq_chip no_irq_chip; extern struct irq_chip dummy_irq_chip; extern void irq_set_chip_and_handler_name(unsigned int irq, const struct irq_chip *chip, irq_flow_handler_t handle, const char *name); static inline void irq_set_chip_and_handler(unsigned int irq, const struct irq_chip *chip, irq_flow_handler_t handle) { irq_set_chip_and_handler_name(irq, chip, handle, NULL); } extern int irq_set_percpu_devid(unsigned int irq); extern int irq_set_percpu_devid_partition(unsigned int irq, const struct cpumask 
*affinity); extern int irq_get_percpu_devid_partition(unsigned int irq, struct cpumask *affinity); extern void __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, const char *name); static inline void irq_set_handler(unsigned int irq, irq_flow_handler_t handle) { __irq_set_handler(irq, handle, 0, NULL); } /* * Set a highlevel chained flow handler for a given IRQ. * (a chained handler is automatically enabled and set to * IRQ_NOREQUEST, IRQ_NOPROBE, and IRQ_NOTHREAD) */ static inline void irq_set_chained_handler(unsigned int irq, irq_flow_handler_t handle) { __irq_set_handler(irq, handle, 1, NULL); } /* * Set a highlevel chained flow handler and its data for a given IRQ. * (a chained handler is automatically enabled and set to * IRQ_NOREQUEST, IRQ_NOPROBE, and IRQ_NOTHREAD) */ void irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle, void *data); void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set); static inline void irq_set_status_flags(unsigned int irq, unsigned long set) { irq_modify_status(irq, 0, set); } static inline void irq_clear_status_flags(unsigned int irq, unsigned long clr) { irq_modify_status(irq, clr, 0); } static inline void irq_set_noprobe(unsigned int irq) { irq_modify_status(irq, 0, IRQ_NOPROBE); } static inline void irq_set_probe(unsigned int irq) { irq_modify_status(irq, IRQ_NOPROBE, 0); } static inline void irq_set_nothread(unsigned int irq) { irq_modify_status(irq, 0, IRQ_NOTHREAD); } static inline void irq_set_thread(unsigned int irq) { irq_modify_status(irq, IRQ_NOTHREAD, 0); } static inline void irq_set_nested_thread(unsigned int irq, bool nest) { if (nest) irq_set_status_flags(irq, IRQ_NESTED_THREAD); else irq_clear_status_flags(irq, IRQ_NESTED_THREAD); } static inline void irq_set_percpu_devid_flags(unsigned int irq) { irq_set_status_flags(irq, IRQ_NOAUTOEN | IRQ_PER_CPU | IRQ_NOTHREAD | IRQ_NOPROBE | IRQ_PER_CPU_DEVID); } /* Set/get chip/data for an IRQ: */ extern int irq_set_chip(unsigned int irq, const struct irq_chip *chip); extern int irq_set_handler_data(unsigned int irq, void *data); extern int irq_set_chip_data(unsigned int irq, void *data); extern int irq_set_irq_type(unsigned int irq, unsigned int type); extern int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry); extern int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset, struct msi_desc *entry); extern struct irq_data *irq_get_irq_data(unsigned int irq); static inline struct irq_chip *irq_get_chip(unsigned int irq) { struct irq_data *d = irq_get_irq_data(irq); return d ? d->chip : NULL; } static inline struct irq_chip *irq_data_get_irq_chip(struct irq_data *d) { return d->chip; } static inline void *irq_get_chip_data(unsigned int irq) { struct irq_data *d = irq_get_irq_data(irq); return d ? d->chip_data : NULL; } static inline void *irq_data_get_irq_chip_data(struct irq_data *d) { return d->chip_data; } static inline void *irq_get_handler_data(unsigned int irq) { struct irq_data *d = irq_get_irq_data(irq); return d ? d->common->handler_data : NULL; } static inline void *irq_data_get_irq_handler_data(struct irq_data *d) { return d->common->handler_data; } static inline struct msi_desc *irq_get_msi_desc(unsigned int irq) { struct irq_data *d = irq_get_irq_data(irq); return d ? 
d->common->msi_desc : NULL; } static inline struct msi_desc *irq_data_get_msi_desc(struct irq_data *d) { return d->common->msi_desc; } static inline u32 irq_get_trigger_type(unsigned int irq) { struct irq_data *d = irq_get_irq_data(irq); return d ? irqd_get_trigger_type(d) : 0; } static inline int irq_common_data_get_node(struct irq_common_data *d) { #ifdef CONFIG_NUMA return d->node; #else return 0; #endif } static inline int irq_data_get_node(struct irq_data *d) { return irq_common_data_get_node(d->common); } static inline const struct cpumask *irq_data_get_affinity_mask(struct irq_data *d) { #ifdef CONFIG_SMP return d->common->affinity; #else return cpumask_of(0); #endif } static inline void irq_data_update_affinity(struct irq_data *d, const struct cpumask *m) { #ifdef CONFIG_SMP cpumask_copy(d->common->affinity, m); #endif } static inline const struct cpumask *irq_get_affinity_mask(int irq) { struct irq_data *d = irq_get_irq_data(irq); return d ? irq_data_get_affinity_mask(d) : NULL; } #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK static inline const struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d) { return d->common->effective_affinity; } static inline void irq_data_update_effective_affinity(struct irq_data *d, const struct cpumask *m) { cpumask_copy(d->common->effective_affinity, m); } #else static inline void irq_data_update_effective_affinity(struct irq_data *d, const struct cpumask *m) { } static inline const struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d) { return irq_data_get_affinity_mask(d); } #endif static inline const struct cpumask *irq_get_effective_affinity_mask(unsigned int irq) { struct irq_data *d = irq_get_irq_data(irq); return d ? irq_data_get_effective_affinity_mask(d) : NULL; } unsigned int arch_dynirq_lower_bound(unsigned int from); int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node, struct module *owner, const struct irq_affinity_desc *affinity); int __devm_irq_alloc_descs(struct device *dev, int irq, unsigned int from, unsigned int cnt, int node, struct module *owner, const struct irq_affinity_desc *affinity); /* use macros to avoid needing export.h for THIS_MODULE */ #define irq_alloc_descs(irq, from, cnt, node) \ __irq_alloc_descs(irq, from, cnt, node, THIS_MODULE, NULL) #define irq_alloc_desc(node) \ irq_alloc_descs(-1, 1, 1, node) #define irq_alloc_desc_at(at, node) \ irq_alloc_descs(at, at, 1, node) #define irq_alloc_desc_from(from, node) \ irq_alloc_descs(-1, from, 1, node) #define irq_alloc_descs_from(from, cnt, node) \ irq_alloc_descs(-1, from, cnt, node) #define devm_irq_alloc_descs(dev, irq, from, cnt, node) \ __devm_irq_alloc_descs(dev, irq, from, cnt, node, THIS_MODULE, NULL) #define devm_irq_alloc_desc(dev, node) \ devm_irq_alloc_descs(dev, -1, 1, 1, node) #define devm_irq_alloc_desc_at(dev, at, node) \ devm_irq_alloc_descs(dev, at, at, 1, node) #define devm_irq_alloc_desc_from(dev, from, node) \ devm_irq_alloc_descs(dev, -1, from, 1, node) #define devm_irq_alloc_descs_from(dev, from, cnt, node) \ devm_irq_alloc_descs(dev, -1, from, cnt, node) void irq_free_descs(unsigned int irq, unsigned int cnt); static inline void irq_free_desc(unsigned int irq) { irq_free_descs(irq, 1); } #ifdef CONFIG_GENERIC_IRQ_LEGACY void irq_init_desc(unsigned int irq); #endif /** * struct irq_chip_regs - register offsets for struct irq_gci * @enable: Enable register offset to reg_base * @disable: Disable register offset to reg_base * @mask: Mask register offset to reg_base * @ack: Ack register 
offset to reg_base * @eoi: Eoi register offset to reg_base * @type: Type configuration register offset to reg_base */ struct irq_chip_regs { unsigned long enable; unsigned long disable; unsigned long mask; unsigned long ack; unsigned long eoi; unsigned long type; }; /** * struct irq_chip_type - Generic interrupt chip instance for a flow type * @chip: The real interrupt chip which provides the callbacks * @regs: Register offsets for this chip * @handler: Flow handler associated with this chip * @type: Chip can handle these flow types * @mask_cache_priv: Cached mask register private to the chip type * @mask_cache: Pointer to cached mask register * * A irq_generic_chip can have several instances of irq_chip_type when * it requires different functions and register offsets for different * flow types. */ struct irq_chip_type { struct irq_chip chip; struct irq_chip_regs regs; irq_flow_handler_t handler; u32 type; u32 mask_cache_priv; u32 *mask_cache; }; /** * struct irq_chip_generic - Generic irq chip data structure * @lock: Lock to protect register and cache data access * @reg_base: Register base address (virtual) * @reg_readl: Alternate I/O accessor (defaults to readl if NULL) * @reg_writel: Alternate I/O accessor (defaults to writel if NULL) * @suspend: Function called from core code on suspend once per * chip; can be useful instead of irq_chip::suspend to * handle chip details even when no interrupts are in use * @resume: Function called from core code on resume once per chip; * can be useful instead of irq_chip::suspend to handle * chip details even when no interrupts are in use * @irq_base: Interrupt base nr for this chip * @irq_cnt: Number of interrupts handled by this chip * @mask_cache: Cached mask register shared between all chip types * @wake_enabled: Interrupt can wakeup from suspend * @wake_active: Interrupt is marked as an wakeup from suspend source * @num_ct: Number of available irq_chip_type instances (usually 1) * @private: Private data for non generic chip callbacks * @installed: bitfield to denote installed interrupts * @unused: bitfield to denote unused interrupts * @domain: irq domain pointer * @list: List head for keeping track of instances * @chip_types: Array of interrupt irq_chip_types * * Note, that irq_chip_generic can have multiple irq_chip_type * implementations which can be associated to a particular irq line of * an irq_chip_generic instance. That allows to share and protect * state in an irq_chip_generic instance when we need to implement * different flow mechanisms (level/edge) for it. */ struct irq_chip_generic { raw_spinlock_t lock; void __iomem *reg_base; u32 (*reg_readl)(void __iomem *addr); void (*reg_writel)(u32 val, void __iomem *addr); void (*suspend)(struct irq_chip_generic *gc); void (*resume)(struct irq_chip_generic *gc); unsigned int irq_base; unsigned int irq_cnt; u32 mask_cache; u32 wake_enabled; u32 wake_active; unsigned int num_ct; void *private; unsigned long installed; unsigned long unused; struct irq_domain *domain; struct list_head list; struct irq_chip_type chip_types[]; }; /** * enum irq_gc_flags - Initialization flags for generic irq chips * @IRQ_GC_INIT_MASK_CACHE: Initialize the mask_cache by reading mask reg * @IRQ_GC_INIT_NESTED_LOCK: Set the lock class of the irqs to nested for * irq chips which need to call irq_set_wake() on * the parent irq. 
Usually GPIO implementations * @IRQ_GC_MASK_CACHE_PER_TYPE: Mask cache is chip type private * @IRQ_GC_NO_MASK: Do not calculate irq_data->mask * @IRQ_GC_BE_IO: Use big-endian register accesses (default: LE) */ enum irq_gc_flags { IRQ_GC_INIT_MASK_CACHE = 1 << 0, IRQ_GC_INIT_NESTED_LOCK = 1 << 1, IRQ_GC_MASK_CACHE_PER_TYPE = 1 << 2, IRQ_GC_NO_MASK = 1 << 3, IRQ_GC_BE_IO = 1 << 4, }; /* * struct irq_domain_chip_generic - Generic irq chip data structure for irq domains * @irqs_per_chip: Number of interrupts per chip * @num_chips: Number of chips * @irq_flags_to_set: IRQ* flags to set on irq setup * @irq_flags_to_clear: IRQ* flags to clear on irq setup * @gc_flags: Generic chip specific setup flags * @exit: Function called on each chip when they are destroyed. * @gc: Array of pointers to generic interrupt chips */ struct irq_domain_chip_generic { unsigned int irqs_per_chip; unsigned int num_chips; unsigned int irq_flags_to_clear; unsigned int irq_flags_to_set; enum irq_gc_flags gc_flags; void (*exit)(struct irq_chip_generic *gc); struct irq_chip_generic *gc[]; }; /** * struct irq_domain_chip_generic_info - Generic chip information structure * @name: Name of the generic interrupt chip * @handler: Interrupt handler used by the generic interrupt chip * @irqs_per_chip: Number of interrupts each chip handles (max 32) * @num_ct: Number of irq_chip_type instances associated with each * chip * @irq_flags_to_clear: IRQ_* bits to clear in the mapping function * @irq_flags_to_set: IRQ_* bits to set in the mapping function * @gc_flags: Generic chip specific setup flags * @init: Function called on each chip when they are created. * Allow to do some additional chip initialisation. * @exit: Function called on each chip when they are destroyed. * Allow to do some chip cleanup operation. 
*/ struct irq_domain_chip_generic_info { const char *name; irq_flow_handler_t handler; unsigned int irqs_per_chip; unsigned int num_ct; unsigned int irq_flags_to_clear; unsigned int irq_flags_to_set; enum irq_gc_flags gc_flags; int (*init)(struct irq_chip_generic *gc); void (*exit)(struct irq_chip_generic *gc); }; /* Generic chip callback functions */ void irq_gc_noop(struct irq_data *d); void irq_gc_mask_disable_reg(struct irq_data *d); void irq_gc_mask_set_bit(struct irq_data *d); void irq_gc_mask_clr_bit(struct irq_data *d); void irq_gc_unmask_enable_reg(struct irq_data *d); void irq_gc_ack_set_bit(struct irq_data *d); void irq_gc_ack_clr_bit(struct irq_data *d); void irq_gc_mask_disable_and_ack_set(struct irq_data *d); void irq_gc_eoi(struct irq_data *d); int irq_gc_set_wake(struct irq_data *d, unsigned int on); /* Setup functions for irq_chip_generic */ int irq_map_generic_chip(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw_irq); void irq_unmap_generic_chip(struct irq_domain *d, unsigned int virq); struct irq_chip_generic * irq_alloc_generic_chip(const char *name, int nr_ct, unsigned int irq_base, void __iomem *reg_base, irq_flow_handler_t handler); void irq_setup_generic_chip(struct irq_chip_generic *gc, u32 msk, enum irq_gc_flags flags, unsigned int clr, unsigned int set); int irq_setup_alt_chip(struct irq_data *d, unsigned int type); void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk, unsigned int clr, unsigned int set); struct irq_chip_generic * devm_irq_alloc_generic_chip(struct device *dev, const char *name, int num_ct, unsigned int irq_base, void __iomem *reg_base, irq_flow_handler_t handler); int devm_irq_setup_generic_chip(struct device *dev, struct irq_chip_generic *gc, u32 msk, enum irq_gc_flags flags, unsigned int clr, unsigned int set); struct irq_chip_generic *irq_get_domain_generic_chip(struct irq_domain *d, unsigned int hw_irq); #ifdef CONFIG_GENERIC_IRQ_CHIP int irq_domain_alloc_generic_chips(struct irq_domain *d, const struct irq_domain_chip_generic_info *info); void irq_domain_remove_generic_chips(struct irq_domain *d); #else static inline int irq_domain_alloc_generic_chips(struct irq_domain *d, const struct irq_domain_chip_generic_info *info) { return -EINVAL; } static inline void irq_domain_remove_generic_chips(struct irq_domain *d) { } #endif /* CONFIG_GENERIC_IRQ_CHIP */ int __irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip, int num_ct, const char *name, irq_flow_handler_t handler, unsigned int clr, unsigned int set, enum irq_gc_flags flags); #define irq_alloc_domain_generic_chips(d, irqs_per_chip, num_ct, name, \ handler, clr, set, flags) \ ({ \ MAYBE_BUILD_BUG_ON(irqs_per_chip > 32); \ __irq_alloc_domain_generic_chips(d, irqs_per_chip, num_ct, name,\ handler, clr, set, flags); \ }) static inline void irq_free_generic_chip(struct irq_chip_generic *gc) { kfree(gc); } static inline void irq_destroy_generic_chip(struct irq_chip_generic *gc, u32 msk, unsigned int clr, unsigned int set) { irq_remove_generic_chip(gc, msk, clr, set); irq_free_generic_chip(gc); } static inline struct irq_chip_type *irq_data_get_chip_type(struct irq_data *d) { return container_of(d->chip, struct irq_chip_type, chip); } #define IRQ_MSK(n) (u32)((n) < 32 ? 
((1 << (n)) - 1) : UINT_MAX) static inline void irq_reg_writel(struct irq_chip_generic *gc, u32 val, int reg_offset) { if (gc->reg_writel) gc->reg_writel(val, gc->reg_base + reg_offset); else writel(val, gc->reg_base + reg_offset); } static inline u32 irq_reg_readl(struct irq_chip_generic *gc, int reg_offset) { if (gc->reg_readl) return gc->reg_readl(gc->reg_base + reg_offset); else return readl(gc->reg_base + reg_offset); } struct irq_matrix; struct irq_matrix *irq_alloc_matrix(unsigned int matrix_bits, unsigned int alloc_start, unsigned int alloc_end); void irq_matrix_online(struct irq_matrix *m); void irq_matrix_offline(struct irq_matrix *m); void irq_matrix_assign_system(struct irq_matrix *m, unsigned int bit, bool replace); int irq_matrix_reserve_managed(struct irq_matrix *m, const struct cpumask *msk); void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk); int irq_matrix_alloc_managed(struct irq_matrix *m, const struct cpumask *msk, unsigned int *mapped_cpu); void irq_matrix_reserve(struct irq_matrix *m); void irq_matrix_remove_reserved(struct irq_matrix *m); int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk, bool reserved, unsigned int *mapped_cpu); void irq_matrix_free(struct irq_matrix *m, unsigned int cpu, unsigned int bit, bool managed); void irq_matrix_assign(struct irq_matrix *m, unsigned int bit); unsigned int irq_matrix_available(struct irq_matrix *m, bool cpudown); unsigned int irq_matrix_allocated(struct irq_matrix *m); unsigned int irq_matrix_reserved(struct irq_matrix *m); void irq_matrix_debug_show(struct seq_file *sf, struct irq_matrix *m, int ind); /* Contrary to Linux irqs, for hardware irqs the irq number 0 is valid */ #define INVALID_HWIRQ (~0UL) irq_hw_number_t ipi_get_hwirq(unsigned int irq, unsigned int cpu); int __ipi_send_single(struct irq_desc *desc, unsigned int cpu); int __ipi_send_mask(struct irq_desc *desc, const struct cpumask *dest); int ipi_send_single(unsigned int virq, unsigned int cpu); int ipi_send_mask(unsigned int virq, const struct cpumask *dest); void ipi_mux_process(void); int ipi_mux_create(unsigned int nr_ipi, void (*mux_send)(unsigned int cpu)); #ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER /* * Registers a generic IRQ handling function as the top-level IRQ handler in * the system, which is generally the first C code called from an assembly * architecture-specific interrupt handler. * * Returns 0 on success, or -EBUSY if an IRQ handler has already been * registered. */ int __init set_handle_irq(void (*handle_irq)(struct pt_regs *)); /* * Allows interrupt handlers to find the irqchip that's been registered as the * top-level IRQ handler. */ extern void (*handle_arch_irq)(struct pt_regs *) __ro_after_init; asmlinkage void generic_handle_arch_irq(struct pt_regs *regs); #else #ifndef set_handle_irq #define set_handle_irq(handle_irq) \ do { \ (void)handle_irq; \ WARN_ON(1); \ } while (0) #endif #endif #endif /* _LINUX_IRQ_H */ |
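/*
 * Illustrative sketch (editor's addition, not part of irq.h): how an irqchip
 * driver might consume the generic-chip API declared above.  The domain and
 * MMIO base come from the caller; the register offsets, the "demo-intc" name
 * and the use of handle_level_irq are made-up assumptions for illustration.
 * The hypothetical mask register is assumed to mask a line when its bit is
 * set, so irq_gc_mask_set_bit()/irq_gc_mask_clr_bit() are paired accordingly.
 */
#define DEMO_REG_ACK	0x00	/* hypothetical ack register offset */
#define DEMO_REG_MASK	0x04	/* hypothetical mask register offset */

static int demo_init_generic_chip(struct irq_domain *domain, void __iomem *base)
{
	struct irq_chip_generic *gc;
	int ret;

	/* One generic chip of 32 interrupts with a single chip type;
	 * IRQ_GC_INIT_MASK_CACHE preloads the mask cache from the register.
	 */
	ret = irq_alloc_domain_generic_chips(domain, 32, 1, "demo-intc",
					     handle_level_irq, 0, 0,
					     IRQ_GC_INIT_MASK_CACHE);
	if (ret)
		return ret;

	gc = irq_get_domain_generic_chip(domain, 0);
	gc->reg_base = base;
	gc->chip_types[0].regs.ack = DEMO_REG_ACK;
	gc->chip_types[0].regs.mask = DEMO_REG_MASK;
	gc->chip_types[0].chip.irq_ack = irq_gc_ack_set_bit;
	gc->chip_types[0].chip.irq_mask = irq_gc_mask_set_bit;
	gc->chip_types[0].chip.irq_unmask = irq_gc_mask_clr_bit;

	return 0;
}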
// SPDX-License-Identifier: GPL-2.0-or-later
/* mpihelp-mul_3.c - MPI helper functions
 * Copyright (C) 1994, 1996, 1997, 1998, 2001 Free Software Foundation, Inc.
 *
 * This file is part of GnuPG.
 *
 * Note: This code is heavily based on the GNU MP Library.
 *	 Actually it's the same code with only minor changes in the
 *	 way the data is stored; this is to support the abstraction
 *	 of an optional secure memory allocation which may be used
 *	 to avoid revealing of sensitive data due to paging etc.
 *	 The GNU MP Library itself is published under the LGPL;
 *	 however I decided to publish this code under the plain GPL.
 */

#include "mpi-internal.h"
#include "longlong.h"

mpi_limb_t
mpihelp_submul_1(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
		 mpi_size_t s1_size, mpi_limb_t s2_limb)
{
	mpi_limb_t cy_limb;
	mpi_size_t j;
	mpi_limb_t prod_high, prod_low;
	mpi_limb_t x;

	/* The loop counter and index J goes from -SIZE to -1.  This way
	 * the loop becomes faster.
	 */
	j = -s1_size;
	res_ptr -= j;
	s1_ptr -= j;

	cy_limb = 0;
	do {
		umul_ppmm(prod_high, prod_low, s1_ptr[j], s2_limb);

		prod_low += cy_limb;
		cy_limb = (prod_low < cy_limb ? 1 : 0) + prod_high;

		x = res_ptr[j];
		prod_low = x - prod_low;
		cy_limb += prod_low > x ? 1 : 0;
		res_ptr[j] = prod_low;
	} while (++j);

	return cy_limb;
}
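/*
 * Illustrative analogue (editor's addition, not kernel code): what
 * mpihelp_submul_1() above computes.  It subtracts s1 * s2_limb from res,
 * limb by limb, and returns the borrow that would propagate out of the top
 * limb.  This standalone sketch assumes 64-bit limbs and uses GCC's
 * unsigned __int128 in place of umul_ppmm(); the function name is made up.
 */
#include <stdint.h>
#include <stddef.h>

static uint64_t submul_1_demo(uint64_t *res, const uint64_t *s1,
			      size_t n, uint64_t v)
{
	uint64_t borrow = 0;
	size_t i;

	for (i = 0; i < n; i++) {
		/* Double-width product, split into high and low limbs */
		unsigned __int128 prod = (unsigned __int128)s1[i] * v;
		uint64_t lo = (uint64_t)prod + borrow;
		/* Carry out of adding the running borrow to the low limb */
		uint64_t hi = (uint64_t)(prod >> 64) + (lo < borrow ? 1 : 0);
		uint64_t x = res[i];

		res[i] = x - lo;
		/* Borrow out of the subtraction feeds the next iteration */
		borrow = hi + (res[i] > x ? 1 : 0);
	}
	return borrow;
}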
// SPDX-License-Identifier: GPL-1.0+ /* * originally based on the dummy device. * * Copyright 1999, Thomas Davis, tadavis@lbl.gov. * Based on dummy.c, and eql.c devices. * * bonding.c: an Ethernet Bonding driver * * This is useful to talk to a Cisco EtherChannel compatible equipment: * Cisco 5500 * Sun Trunking (Solaris) * Alteon AceDirector Trunks * Linux Bonding * and probably many L2 switches ... * * How it works: * ifconfig bond0 ipaddress netmask up * will setup a network device, with an ip address. No mac address * will be assigned at this time. The hw mac address will come from * the first slave bonded to the channel. All slaves will then use * this hw mac address. * * ifconfig bond0 down * will release all slaves, marking them as down. * * ifenslave bond0 eth0 * will attach eth0 to bond0 as a slave. eth0 hw mac address will either * a: be used as initial mac address * b: if a hw mac address already is there, eth0's hw mac address * will then be set from bond0.
* */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/types.h> #include <linux/fcntl.h> #include <linux/filter.h> #include <linux/interrupt.h> #include <linux/ptrace.h> #include <linux/ioport.h> #include <linux/in.h> #include <net/ip.h> #include <linux/ip.h> #include <linux/icmp.h> #include <linux/icmpv6.h> #include <linux/tcp.h> #include <linux/udp.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/init.h> #include <linux/timer.h> #include <linux/socket.h> #include <linux/ctype.h> #include <linux/inet.h> #include <linux/bitops.h> #include <linux/io.h> #include <asm/dma.h> #include <linux/uaccess.h> #include <linux/errno.h> #include <linux/netdevice.h> #include <linux/inetdevice.h> #include <linux/igmp.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <net/sock.h> #include <linux/rtnetlink.h> #include <linux/smp.h> #include <linux/if_ether.h> #include <net/arp.h> #include <linux/mii.h> #include <linux/ethtool.h> #include <linux/if_vlan.h> #include <linux/if_bonding.h> #include <linux/phy.h> #include <linux/jiffies.h> #include <linux/preempt.h> #include <net/route.h> #include <net/net_namespace.h> #include <net/netns/generic.h> #include <net/pkt_sched.h> #include <linux/rculist.h> #include <net/flow_dissector.h> #include <net/xfrm.h> #include <net/bonding.h> #include <net/bond_3ad.h> #include <net/bond_alb.h> #if IS_ENABLED(CONFIG_TLS_DEVICE) #include <net/tls.h> #endif #include <net/ip6_route.h> #include <net/netdev_lock.h> #include <net/xdp.h> #include "bonding_priv.h" /*---------------------------- Module parameters ----------------------------*/ /* monitor all links that often (in milliseconds). <=0 disables monitoring */ static int max_bonds = BOND_DEFAULT_MAX_BONDS; static int tx_queues = BOND_DEFAULT_TX_QUEUES; static int num_peer_notif = 1; static int miimon; static int updelay; static int downdelay; static int use_carrier = 1; static char *mode; static char *primary; static char *primary_reselect; static char *lacp_rate; static int min_links; static char *ad_select; static char *xmit_hash_policy; static int arp_interval; static char *arp_ip_target[BOND_MAX_ARP_TARGETS]; static char *arp_validate; static char *arp_all_targets; static char *fail_over_mac; static int all_slaves_active; static struct bond_params bonding_defaults; static int resend_igmp = BOND_DEFAULT_RESEND_IGMP; static int packets_per_slave = 1; static int lp_interval = BOND_ALB_DEFAULT_LP_INTERVAL; module_param(max_bonds, int, 0); MODULE_PARM_DESC(max_bonds, "Max number of bonded devices"); module_param(tx_queues, int, 0); MODULE_PARM_DESC(tx_queues, "Max number of transmit queues (default = 16)"); module_param_named(num_grat_arp, num_peer_notif, int, 0644); MODULE_PARM_DESC(num_grat_arp, "Number of peer notifications to send on " "failover event (alias of num_unsol_na)"); module_param_named(num_unsol_na, num_peer_notif, int, 0644); MODULE_PARM_DESC(num_unsol_na, "Number of peer notifications to send on " "failover event (alias of num_grat_arp)"); module_param(miimon, int, 0); MODULE_PARM_DESC(miimon, "Link check interval in milliseconds"); module_param(updelay, int, 0); MODULE_PARM_DESC(updelay, "Delay before considering link up, in milliseconds"); module_param(downdelay, int, 0); MODULE_PARM_DESC(downdelay, "Delay before considering link down, " "in milliseconds"); module_param(use_carrier, int, 0); MODULE_PARM_DESC(use_carrier, "Use netif_carrier_ok (vs MII ioctls) in miimon; " "0 for off, 1 for on (default)"); module_param(mode, charp, 0); 
MODULE_PARM_DESC(mode, "Mode of operation; 0 for balance-rr, " "1 for active-backup, 2 for balance-xor, " "3 for broadcast, 4 for 802.3ad, 5 for balance-tlb, " "6 for balance-alb"); module_param(primary, charp, 0); MODULE_PARM_DESC(primary, "Primary network device to use"); module_param(primary_reselect, charp, 0); MODULE_PARM_DESC(primary_reselect, "Reselect primary slave " "once it comes up; " "0 for always (default), " "1 for only if speed of primary is " "better, " "2 for only on active slave " "failure"); module_param(lacp_rate, charp, 0); MODULE_PARM_DESC(lacp_rate, "LACPDU tx rate to request from 802.3ad partner; " "0 for slow, 1 for fast"); module_param(ad_select, charp, 0); MODULE_PARM_DESC(ad_select, "802.3ad aggregation selection logic; " "0 for stable (default), 1 for bandwidth, " "2 for count"); module_param(min_links, int, 0); MODULE_PARM_DESC(min_links, "Minimum number of available links before turning on carrier"); module_param(xmit_hash_policy, charp, 0); MODULE_PARM_DESC(xmit_hash_policy, "balance-alb, balance-tlb, balance-xor, 802.3ad hashing method; " "0 for layer 2 (default), 1 for layer 3+4, " "2 for layer 2+3, 3 for encap layer 2+3, " "4 for encap layer 3+4, 5 for vlan+srcmac"); module_param(arp_interval, int, 0); MODULE_PARM_DESC(arp_interval, "arp interval in milliseconds"); module_param_array(arp_ip_target, charp, NULL, 0); MODULE_PARM_DESC(arp_ip_target, "arp targets in n.n.n.n form"); module_param(arp_validate, charp, 0); MODULE_PARM_DESC(arp_validate, "validate src/dst of ARP probes; " "0 for none (default), 1 for active, " "2 for backup, 3 for all"); module_param(arp_all_targets, charp, 0); MODULE_PARM_DESC(arp_all_targets, "fail on any/all arp targets timeout; 0 for any (default), 1 for all"); module_param(fail_over_mac, charp, 0); MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to " "the same MAC; 0 for none (default), " "1 for active, 2 for follow"); module_param(all_slaves_active, int, 0); MODULE_PARM_DESC(all_slaves_active, "Keep all frames received on an interface " "by setting active flag for all slaves; " "0 for never (default), 1 for always."); module_param(resend_igmp, int, 0); MODULE_PARM_DESC(resend_igmp, "Number of IGMP membership reports to send on " "link failure"); module_param(packets_per_slave, int, 0); MODULE_PARM_DESC(packets_per_slave, "Packets to send per slave in balance-rr " "mode; 0 for a random slave, 1 packet per " "slave (default), >1 packets per slave."); module_param(lp_interval, uint, 0); MODULE_PARM_DESC(lp_interval, "The number of seconds between instances where " "the bonding driver sends learning packets to " "each slaves peer switch. 
The default is 1."); /*----------------------------- Global variables ----------------------------*/ #ifdef CONFIG_NET_POLL_CONTROLLER atomic_t netpoll_block_tx = ATOMIC_INIT(0); #endif unsigned int bond_net_id __read_mostly; static const struct flow_dissector_key flow_keys_bonding_keys[] = { { .key_id = FLOW_DISSECTOR_KEY_CONTROL, .offset = offsetof(struct flow_keys, control), }, { .key_id = FLOW_DISSECTOR_KEY_BASIC, .offset = offsetof(struct flow_keys, basic), }, { .key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS, .offset = offsetof(struct flow_keys, addrs.v4addrs), }, { .key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS, .offset = offsetof(struct flow_keys, addrs.v6addrs), }, { .key_id = FLOW_DISSECTOR_KEY_TIPC, .offset = offsetof(struct flow_keys, addrs.tipckey), }, { .key_id = FLOW_DISSECTOR_KEY_PORTS, .offset = offsetof(struct flow_keys, ports), }, { .key_id = FLOW_DISSECTOR_KEY_ICMP, .offset = offsetof(struct flow_keys, icmp), }, { .key_id = FLOW_DISSECTOR_KEY_VLAN, .offset = offsetof(struct flow_keys, vlan), }, { .key_id = FLOW_DISSECTOR_KEY_FLOW_LABEL, .offset = offsetof(struct flow_keys, tags), }, { .key_id = FLOW_DISSECTOR_KEY_GRE_KEYID, .offset = offsetof(struct flow_keys, keyid), }, }; static struct flow_dissector flow_keys_bonding __read_mostly; /*-------------------------- Forward declarations ---------------------------*/ static int bond_init(struct net_device *bond_dev); static void bond_uninit(struct net_device *bond_dev); static void bond_get_stats(struct net_device *bond_dev, struct rtnl_link_stats64 *stats); static void bond_slave_arr_handler(struct work_struct *work); static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act, int mod); static void bond_netdev_notify_work(struct work_struct *work); /*---------------------------- General routines -----------------------------*/ const char *bond_mode_name(int mode) { static const char *names[] = { [BOND_MODE_ROUNDROBIN] = "load balancing (round-robin)", [BOND_MODE_ACTIVEBACKUP] = "fault-tolerance (active-backup)", [BOND_MODE_XOR] = "load balancing (xor)", [BOND_MODE_BROADCAST] = "fault-tolerance (broadcast)", [BOND_MODE_8023AD] = "IEEE 802.3ad Dynamic link aggregation", [BOND_MODE_TLB] = "transmit load balancing", [BOND_MODE_ALB] = "adaptive load balancing", }; if (mode < BOND_MODE_ROUNDROBIN || mode > BOND_MODE_ALB) return "unknown"; return names[mode]; } /** * bond_dev_queue_xmit - Prepare skb for xmit. * * @bond: bond device that got this skb for tx. 
* @skb: hw accel VLAN tagged skb to transmit * @slave_dev: slave that is supposed to xmit this skbuff */ netdev_tx_t bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev) { skb->dev = slave_dev; BUILD_BUG_ON(sizeof(skb->queue_mapping) != sizeof(qdisc_skb_cb(skb)->slave_dev_queue_mapping)); skb_set_queue_mapping(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping); if (unlikely(netpoll_tx_running(bond->dev))) return bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb); return dev_queue_xmit(skb); } static bool bond_sk_check(struct bonding *bond) { switch (BOND_MODE(bond)) { case BOND_MODE_8023AD: case BOND_MODE_XOR: if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34) return true; fallthrough; default: return false; } } bool bond_xdp_check(struct bonding *bond, int mode) { switch (mode) { case BOND_MODE_ROUNDROBIN: case BOND_MODE_ACTIVEBACKUP: return true; case BOND_MODE_8023AD: case BOND_MODE_XOR: /* vlan+srcmac is not supported with XDP as in most cases the 802.1q * payload is not in the packet due to hardware offload. */ if (bond->params.xmit_policy != BOND_XMIT_POLICY_VLAN_SRCMAC) return true; fallthrough; default: return false; } } /*---------------------------------- VLAN -----------------------------------*/ /* In the following 2 functions, bond_vlan_rx_add_vid and bond_vlan_rx_kill_vid, * We don't protect the slave list iteration with a lock because: * a. This operation is performed in IOCTL context, * b. The operation is protected by the RTNL semaphore in the 8021q code, * c. Holding a lock with BH disabled while directly calling a base driver * entry point is generally a BAD idea. * * The design of synchronization/protection for this operation in the 8021q * module is good for one or more VLAN devices over a single physical device * and cannot be extended for a teaming solution like bonding, so there is a * potential race condition here where a net device from the vlan group might * be referenced (either by a base driver or the 8021q code) while it is being * removed from the system. However, it turns out we're not making matters * worse, and if it works for regular VLAN usage it will work here too. 
*/ /** * bond_vlan_rx_add_vid - Propagates adding an id to slaves * @bond_dev: bonding net device that got called * @proto: network protocol ID * @vid: vlan id being added */ static int bond_vlan_rx_add_vid(struct net_device *bond_dev, __be16 proto, u16 vid) { struct bonding *bond = netdev_priv(bond_dev); struct slave *slave, *rollback_slave; struct list_head *iter; int res; bond_for_each_slave(bond, slave, iter) { res = vlan_vid_add(slave->dev, proto, vid); if (res) goto unwind; } return 0; unwind: /* unwind to the slave that failed */ bond_for_each_slave(bond, rollback_slave, iter) { if (rollback_slave == slave) break; vlan_vid_del(rollback_slave->dev, proto, vid); } return res; } /** * bond_vlan_rx_kill_vid - Propagates deleting an id to slaves * @bond_dev: bonding net device that got called * @proto: network protocol ID * @vid: vlan id being removed */ static int bond_vlan_rx_kill_vid(struct net_device *bond_dev, __be16 proto, u16 vid) { struct bonding *bond = netdev_priv(bond_dev); struct list_head *iter; struct slave *slave; bond_for_each_slave(bond, slave, iter) vlan_vid_del(slave->dev, proto, vid); if (bond_is_lb(bond)) bond_alb_clear_vlan(bond, vid); return 0; } /*---------------------------------- XFRM -----------------------------------*/ #ifdef CONFIG_XFRM_OFFLOAD /** * bond_ipsec_dev - Get active device for IPsec offload * @xs: pointer to transformer state struct * * Context: caller must hold rcu_read_lock. * * Return: the device for ipsec offload, or NULL if not exist. **/ static struct net_device *bond_ipsec_dev(struct xfrm_state *xs) { struct net_device *bond_dev = xs->xso.dev; struct bonding *bond; struct slave *slave; bond = netdev_priv(bond_dev); if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) return NULL; slave = rcu_dereference(bond->curr_active_slave); if (!slave) return NULL; if (!xs->xso.real_dev) return NULL; if (xs->xso.real_dev != slave->dev) pr_warn_ratelimited("%s: (slave %s): not same with IPsec offload real dev %s\n", bond_dev->name, slave->dev->name, xs->xso.real_dev->name); return slave->dev; } /** * bond_ipsec_add_sa - program device with a security association * @bond_dev: pointer to the bond net device * @xs: pointer to transformer state struct * @extack: extack point to fill failure reason **/ static int bond_ipsec_add_sa(struct net_device *bond_dev, struct xfrm_state *xs, struct netlink_ext_ack *extack) { struct net_device *real_dev; netdevice_tracker tracker; struct bond_ipsec *ipsec; struct bonding *bond; struct slave *slave; int err; if (!bond_dev) return -EINVAL; rcu_read_lock(); bond = netdev_priv(bond_dev); slave = rcu_dereference(bond->curr_active_slave); real_dev = slave ? 
slave->dev : NULL; netdev_hold(real_dev, &tracker, GFP_ATOMIC); rcu_read_unlock(); if (!real_dev) { err = -ENODEV; goto out; } if (!real_dev->xfrmdev_ops || !real_dev->xfrmdev_ops->xdo_dev_state_add || netif_is_bond_master(real_dev)) { NL_SET_ERR_MSG_MOD(extack, "Slave does not support ipsec offload"); err = -EINVAL; goto out; } ipsec = kmalloc(sizeof(*ipsec), GFP_KERNEL); if (!ipsec) { err = -ENOMEM; goto out; } err = real_dev->xfrmdev_ops->xdo_dev_state_add(real_dev, xs, extack); if (!err) { xs->xso.real_dev = real_dev; ipsec->xs = xs; INIT_LIST_HEAD(&ipsec->list); mutex_lock(&bond->ipsec_lock); list_add(&ipsec->list, &bond->ipsec_list); mutex_unlock(&bond->ipsec_lock); } else { kfree(ipsec); } out: netdev_put(real_dev, &tracker); return err; } static void bond_ipsec_add_sa_all(struct bonding *bond) { struct net_device *bond_dev = bond->dev; struct net_device *real_dev; struct bond_ipsec *ipsec; struct slave *slave; slave = rtnl_dereference(bond->curr_active_slave); real_dev = slave ? slave->dev : NULL; if (!real_dev) return; mutex_lock(&bond->ipsec_lock); if (!real_dev->xfrmdev_ops || !real_dev->xfrmdev_ops->xdo_dev_state_add || netif_is_bond_master(real_dev)) { if (!list_empty(&bond->ipsec_list)) slave_warn(bond_dev, real_dev, "%s: no slave xdo_dev_state_add\n", __func__); goto out; } list_for_each_entry(ipsec, &bond->ipsec_list, list) { /* If new state is added before ipsec_lock acquired */ if (ipsec->xs->xso.real_dev == real_dev) continue; if (real_dev->xfrmdev_ops->xdo_dev_state_add(real_dev, ipsec->xs, NULL)) { slave_warn(bond_dev, real_dev, "%s: failed to add SA\n", __func__); continue; } spin_lock_bh(&ipsec->xs->lock); /* xs might have been killed by the user during the migration * to the new dev, but bond_ipsec_del_sa() should have done * nothing, as xso.real_dev is NULL. * Delete it from the device we just added it to. The pending * bond_ipsec_free_sa() call will do the rest of the cleanup. */ if (ipsec->xs->km.state == XFRM_STATE_DEAD && real_dev->xfrmdev_ops->xdo_dev_state_delete) real_dev->xfrmdev_ops->xdo_dev_state_delete(real_dev, ipsec->xs); ipsec->xs->xso.real_dev = real_dev; spin_unlock_bh(&ipsec->xs->lock); } out: mutex_unlock(&bond->ipsec_lock); } /** * bond_ipsec_del_sa - clear out this specific SA * @bond_dev: pointer to the bond net device * @xs: pointer to transformer state struct **/ static void bond_ipsec_del_sa(struct net_device *bond_dev, struct xfrm_state *xs) { struct net_device *real_dev; if (!bond_dev || !xs->xso.real_dev) return; real_dev = xs->xso.real_dev; if (!real_dev->xfrmdev_ops || !real_dev->xfrmdev_ops->xdo_dev_state_delete || netif_is_bond_master(real_dev)) { slave_warn(bond_dev, real_dev, "%s: no slave xdo_dev_state_delete\n", __func__); return; } real_dev->xfrmdev_ops->xdo_dev_state_delete(real_dev, xs); } static void bond_ipsec_del_sa_all(struct bonding *bond) { struct net_device *bond_dev = bond->dev; struct net_device *real_dev; struct bond_ipsec *ipsec; struct slave *slave; slave = rtnl_dereference(bond->curr_active_slave); real_dev = slave ? slave->dev : NULL; if (!real_dev) return; mutex_lock(&bond->ipsec_lock); list_for_each_entry(ipsec, &bond->ipsec_list, list) { if (!ipsec->xs->xso.real_dev) continue; if (!real_dev->xfrmdev_ops || !real_dev->xfrmdev_ops->xdo_dev_state_delete || netif_is_bond_master(real_dev)) { slave_warn(bond_dev, real_dev, "%s: no slave xdo_dev_state_delete\n", __func__); continue; } spin_lock_bh(&ipsec->xs->lock); ipsec->xs->xso.real_dev = NULL; /* Don't double delete states killed by the user. 
*/ if (ipsec->xs->km.state != XFRM_STATE_DEAD) real_dev->xfrmdev_ops->xdo_dev_state_delete(real_dev, ipsec->xs); spin_unlock_bh(&ipsec->xs->lock); if (real_dev->xfrmdev_ops->xdo_dev_state_free) real_dev->xfrmdev_ops->xdo_dev_state_free(real_dev, ipsec->xs); } mutex_unlock(&bond->ipsec_lock); } static void bond_ipsec_free_sa(struct net_device *bond_dev, struct xfrm_state *xs) { struct net_device *real_dev; struct bond_ipsec *ipsec; struct bonding *bond; if (!bond_dev) return; bond = netdev_priv(bond_dev); mutex_lock(&bond->ipsec_lock); if (!xs->xso.real_dev) goto out; real_dev = xs->xso.real_dev; xs->xso.real_dev = NULL; if (real_dev->xfrmdev_ops && real_dev->xfrmdev_ops->xdo_dev_state_free) real_dev->xfrmdev_ops->xdo_dev_state_free(real_dev, xs); out: list_for_each_entry(ipsec, &bond->ipsec_list, list) { if (ipsec->xs == xs) { list_del(&ipsec->list); kfree(ipsec); break; } } mutex_unlock(&bond->ipsec_lock); } /** * bond_ipsec_offload_ok - can this packet use the xfrm hw offload * @skb: current data packet * @xs: pointer to transformer state struct **/ static bool bond_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs) { struct net_device *real_dev; rcu_read_lock(); real_dev = bond_ipsec_dev(xs); if (!real_dev || netif_is_bond_master(real_dev)) { rcu_read_unlock(); return false; } rcu_read_unlock(); return true; } /** * bond_advance_esn_state - ESN support for IPSec HW offload * @xs: pointer to transformer state struct **/ static void bond_advance_esn_state(struct xfrm_state *xs) { struct net_device *real_dev; rcu_read_lock(); real_dev = bond_ipsec_dev(xs); if (!real_dev) goto out; if (!real_dev->xfrmdev_ops || !real_dev->xfrmdev_ops->xdo_dev_state_advance_esn) { pr_warn_ratelimited("%s: %s doesn't support xdo_dev_state_advance_esn\n", __func__, real_dev->name); goto out; } real_dev->xfrmdev_ops->xdo_dev_state_advance_esn(xs); out: rcu_read_unlock(); } /** * bond_xfrm_update_stats - Update xfrm state * @xs: pointer to transformer state struct **/ static void bond_xfrm_update_stats(struct xfrm_state *xs) { struct net_device *real_dev; rcu_read_lock(); real_dev = bond_ipsec_dev(xs); if (!real_dev) goto out; if (!real_dev->xfrmdev_ops || !real_dev->xfrmdev_ops->xdo_dev_state_update_stats) { pr_warn_ratelimited("%s: %s doesn't support xdo_dev_state_update_stats\n", __func__, real_dev->name); goto out; } real_dev->xfrmdev_ops->xdo_dev_state_update_stats(xs); out: rcu_read_unlock(); } static const struct xfrmdev_ops bond_xfrmdev_ops = { .xdo_dev_state_add = bond_ipsec_add_sa, .xdo_dev_state_delete = bond_ipsec_del_sa, .xdo_dev_state_free = bond_ipsec_free_sa, .xdo_dev_offload_ok = bond_ipsec_offload_ok, .xdo_dev_state_advance_esn = bond_advance_esn_state, .xdo_dev_state_update_stats = bond_xfrm_update_stats, }; #endif /* CONFIG_XFRM_OFFLOAD */ /*------------------------------- Link status -------------------------------*/ /* Set the carrier state for the master according to the state of its * slaves. If any slaves are up, the master is up. In 802.3ad mode, * do special 802.3ad magic. * * Returns zero if carrier state does not change, nonzero if it does. 
*/ int bond_set_carrier(struct bonding *bond) { struct list_head *iter; struct slave *slave; if (!bond_has_slaves(bond)) goto down; if (BOND_MODE(bond) == BOND_MODE_8023AD) return bond_3ad_set_carrier(bond); bond_for_each_slave(bond, slave, iter) { if (slave->link == BOND_LINK_UP) { if (!netif_carrier_ok(bond->dev)) { netif_carrier_on(bond->dev); return 1; } return 0; } } down: if (netif_carrier_ok(bond->dev)) { netif_carrier_off(bond->dev); return 1; } return 0; } /* Get link speed and duplex from the slave's base driver * using ethtool. If for some reason the call fails or the * values are invalid, set speed and duplex to -1, * and return. Return 1 if speed or duplex settings are * UNKNOWN; 0 otherwise. */ static int bond_update_speed_duplex(struct slave *slave) { struct net_device *slave_dev = slave->dev; struct ethtool_link_ksettings ecmd; int res; slave->speed = SPEED_UNKNOWN; slave->duplex = DUPLEX_UNKNOWN; res = __ethtool_get_link_ksettings(slave_dev, &ecmd); if (res < 0) return 1; if (ecmd.base.speed == 0 || ecmd.base.speed == ((__u32)-1)) return 1; switch (ecmd.base.duplex) { case DUPLEX_FULL: case DUPLEX_HALF: break; default: return 1; } slave->speed = ecmd.base.speed; slave->duplex = ecmd.base.duplex; return 0; } const char *bond_slave_link_status(s8 link) { switch (link) { case BOND_LINK_UP: return "up"; case BOND_LINK_FAIL: return "going down"; case BOND_LINK_DOWN: return "down"; case BOND_LINK_BACK: return "going back"; default: return "unknown"; } } /* if <dev> supports MII link status reporting, check its link status. * * We either do MII/ETHTOOL ioctls, or check netif_carrier_ok(), * depending upon the setting of the use_carrier parameter. * * Return either BMSR_LSTATUS, meaning that the link is up (or we * can't tell and just pretend it is), or 0, meaning that the link is * down. * * If reporting is non-zero, instead of faking link up, return -1 if * both ETHTOOL and MII ioctls fail (meaning the device does not * support them). If use_carrier is set, return whatever it says. * It'd be nice if there was a good way to tell if a driver supports * netif_carrier, but there really isn't. */ static int bond_check_dev_link(struct bonding *bond, struct net_device *slave_dev, int reporting) { const struct net_device_ops *slave_ops = slave_dev->netdev_ops; struct mii_ioctl_data *mii; struct ifreq ifr; int ret; if (!reporting && !netif_running(slave_dev)) return 0; if (bond->params.use_carrier) return netif_carrier_ok(slave_dev) ? BMSR_LSTATUS : 0; /* Try to get link status using Ethtool first. */ if (slave_dev->ethtool_ops->get_link) { netdev_lock_ops(slave_dev); ret = slave_dev->ethtool_ops->get_link(slave_dev); netdev_unlock_ops(slave_dev); return ret ? BMSR_LSTATUS : 0; } /* Ethtool can't be used, fallback to MII ioctls. */ if (slave_ops->ndo_eth_ioctl) { /* TODO: set pointer to correct ioctl on a per team member * bases to make this more efficient. that is, once * we determine the correct ioctl, we will always * call it and not the others for that team * member. */ /* We cannot assume that SIOCGMIIPHY will also read a * register; not all network drivers (e.g., e100) * support that. */ /* Yes, the mii is overlaid on the ifreq.ifr_ifru */ strscpy_pad(ifr.ifr_name, slave_dev->name, IFNAMSIZ); mii = if_mii(&ifr); if (dev_eth_ioctl(slave_dev, &ifr, SIOCGMIIPHY) == 0) { |